From ec241de0feb2c888a59b7c3a935f0562d8084716 Mon Sep 17 00:00:00 2001 From: Ashwin Rajeev Date: Thu, 4 Jun 2015 12:52:38 +0530 Subject: [PATCH 0001/2005] fix unicode encoding issue for query.bind_params --- cassandra/cluster.py | 2 -- cassandra/query.py | 2 ++ 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index cb2502e7d2..2708736089 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1653,8 +1653,6 @@ def _create_response_future(self, query, parameters, trace, custom_payload): if isinstance(query, SimpleStatement): query_string = query.query_string - if six.PY2 and isinstance(query_string, six.text_type): - query_string = query_string.encode('utf-8') if parameters: query_string = bind_params(query_string, parameters, self.encoder) message = QueryMessage( diff --git a/cassandra/query.py b/cassandra/query.py index 6709cbeff6..19d227beb8 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -793,6 +793,8 @@ def __str__(self): def bind_params(query, params, encoder): + if six.PY2 and isinstance(query, six.text_type): + query = query.encode('utf-8') if isinstance(params, dict): return query % dict((k, encoder.cql_encode_all_types(v)) for k, v in six.iteritems(params)) else: From 0a1e472ec45b66bb6309ca753643dd192911a42f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 29 Jun 2015 15:31:28 -0500 Subject: [PATCH 0002/2005] Connection server_version property Lazily queried, set preemptively whenever a topo query is performed. --- cassandra/cluster.py | 5 +++-- cassandra/connection.py | 13 +++++++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 828fa8266b..5996a70d69 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1981,7 +1981,6 @@ def __init__(self, control_connection, *args, **kwargs): self.control_connection = weakref.proxy(control_connection) def try_reconnect(self): - # we'll either get back a new Connection or a NoHostAvailable return self.control_connection._reconnect_internal() def on_reconnection(self, connection): @@ -2032,7 +2031,7 @@ class ControlConnection(object): _SELECT_TRIGGERS = "SELECT * FROM system.schema_triggers" _SELECT_PEERS = "SELECT peer, data_center, rack, tokens, rpc_address, schema_version FROM system.peers" - _SELECT_LOCAL = "SELECT cluster_name, data_center, rack, tokens, partitioner, schema_version FROM system.local WHERE key='local'" + _SELECT_LOCAL = "SELECT cluster_name, data_center, rack, tokens, partitioner, release_version, schema_version FROM system.local WHERE key='local'" _SELECT_SCHEMA_PEERS = "SELECT peer, rpc_address, schema_version FROM system.peers" _SELECT_SCHEMA_LOCAL = "SELECT schema_version FROM system.local WHERE key='local'" @@ -2448,6 +2447,8 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, if partitioner and tokens: token_map[host] = tokens + connection.server_version = local_row['release_version'] + # Check metadata.partitioner to see if we haven't built anything yet. If # every node in the cluster was in the contact points, we won't discover # any new nodes, so we need this additional check. (See PYTHON-90) diff --git a/cassandra/connection.py b/cassandra/connection.py index 464ec792ba..05fc68fa54 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -196,6 +196,7 @@ class Connection(object): is_unsupported_proto_version = False is_control_connection = False + _server_version = None _iobuf = None _current_frame = None @@ -737,6 +738,18 @@ def is_idle(self): def reset_idle(self): self.msg_received = False + @property + def server_version(self): + if self._server_version is None: + query_message = QueryMessage(query="SELECT release_version FROM system.local", consistency_level=ConsistencyLevel.ONE) + message = self.wait_for_response(query_message) + self._server_version = message.results[1][0][0] # (col names, rows)[rows][first row][only item] + return self._server_version + + @server_version.setter + def server_version(self, version): + self._server_version = version + def __str__(self): status = "" if self.is_defunct: From 28f8887b992183f347bea76b3167cb8a485cca47 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 1 Jul 2015 10:58:22 -0500 Subject: [PATCH 0003/2005] Refactor schema query/build into cassandra.metadata Changes in anticipation of supporting modernized schema tables (CASSANDRA-6717) - schema change response/events normalized according to protocol spec keywords -- events propagate with intrinsic keywords - Parsing is defined by connection server version - option to specialize query/parsing by earlier server versions (not implemented -- currently just refactored current conditional logic) --- cassandra/__init__.py | 14 + cassandra/cluster.py | 243 +---- cassandra/metadata.py | 1076 ++++++++++++------- cassandra/protocol.py | 16 +- tests/integration/standard/test_metadata.py | 2 +- tests/unit/test_control_connection.py | 30 +- tests/unit/test_metadata.py | 10 +- tests/unit/test_response_future.py | 8 +- 8 files changed, 764 insertions(+), 635 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 23d4623d24..ad073f7ccc 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -126,6 +126,20 @@ def consistency_value_to_name(value): return ConsistencyLevel.value_to_name[value] if value is not None else "Not Set" +class SchemaChangeType(object): + DROPPED = 'DROPPED' + CREATED = 'CREATED' + UPDATED = 'UPDATED' + + +class SchemaTargetType(object): + KEYSPACE = 'KEYSPACE' + TABLE = 'TABLE' + TYPE = 'TYPE' + FUNCTION = 'FUNCTION' + AGGREGATE = 'AGGREGATE' + + class SignatureDescriptor(object): def __init__(self, name, type_signature): diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 5996a70d69..28d808eeca 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -44,8 +44,8 @@ from itertools import groupby from cassandra import (ConsistencyLevel, AuthenticationFailed, - InvalidRequest, OperationTimedOut, - UnsupportedOperation, Unauthorized) + OperationTimedOut, UnsupportedOperation, + SchemaTargetType) from cassandra.connection import (ConnectionException, ConnectionShutdown, ConnectionHeartbeat, ProtocolVersionUnsupported) from cassandra.cqltypes import UserType @@ -79,6 +79,7 @@ def _is_eventlet_monkey_patched(): import eventlet.patcher return eventlet.patcher.is_monkey_patched('socket') + def _is_gevent_monkey_patched(): if 'gevent.monkey' not in sys.modules: return False @@ -1201,13 +1202,28 @@ def _ensure_core_connections(self): for pool in session._pools.values(): pool.ensure_core_connections() - def _validate_refresh_schema(self, keyspace, table, usertype, function, aggregate): + @staticmethod + def _validate_refresh_schema(keyspace, table, usertype, function, aggregate): if any((table, usertype, function, aggregate)): if not keyspace: raise ValueError("keyspace is required to refresh specific sub-entity {table, usertype, function, aggregate}") if sum(1 for e in (table, usertype, function) if e) > 1: raise ValueError("{table, usertype, function, aggregate} are mutually exclusive") + @staticmethod + def _target_type_from_refresh_args(keyspace, table, usertype, function, aggregate): + if aggregate: + return SchemaTargetType.AGGREGATE + elif function: + return SchemaTargetType.FUNCTION + elif usertype: + return SchemaTargetType.TYPE + elif table: + return SchemaTargetType.TABLE + elif keyspace: + return SchemaTargetType.KEYSPACE + return None + def refresh_schema(self, keyspace=None, table=None, usertype=None, function=None, aggregate=None, max_schema_agreement_wait=None): """ .. deprecated:: 2.6.0 @@ -1242,8 +1258,10 @@ def refresh_schema(self, keyspace=None, table=None, usertype=None, function=None log.warning(msg) self._validate_refresh_schema(keyspace, table, usertype, function, aggregate) - if not self.control_connection.refresh_schema(keyspace, table, usertype, function, - aggregate, max_schema_agreement_wait): + target_type = self._target_type_from_refresh_args(keyspace, table, usertype, function, aggregate) + if not self.control_connection.refresh_schema(target_type=target_type, keyspace=keyspace, table=table, + type=usertype, function=function, aggregate=aggregate, + schema_agreement_wait=max_schema_agreement_wait): raise Exception("Schema was not refreshed. See log for details.") def submit_schema_refresh(self, keyspace=None, table=None, usertype=None, function=None, aggregate=None): @@ -1259,8 +1277,10 @@ def submit_schema_refresh(self, keyspace=None, table=None, usertype=None, functi log.warning(msg) self._validate_refresh_schema(keyspace, table, usertype, function, aggregate) + target_type = self._target_type_from_refresh_args(keyspace, table, usertype, function, aggregate) return self.executor.submit( - self.control_connection.refresh_schema, keyspace, table, usertype, function, aggregate) + self.control_connection.refresh_schema, target_type=target_type, keyspace=keyspace, table=table, + type=usertype, function=function, aggregate=aggregate) def refresh_schema_metadata(self, max_schema_agreement_wait=None): """ @@ -1285,7 +1305,8 @@ def refresh_keyspace_metadata(self, keyspace, max_schema_agreement_wait=None): See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior """ - if not self.control_connection.refresh_schema(keyspace, schema_agreement_wait=max_schema_agreement_wait): + if not self.control_connection.refresh_schema(target_type=SchemaTargetType.KEYSPACE, keyspace=keyspace, + schema_agreement_wait=max_schema_agreement_wait): raise Exception("Keyspace metadata was not refreshed. See log for details.") def refresh_table_metadata(self, keyspace, table, max_schema_agreement_wait=None): @@ -1295,7 +1316,7 @@ def refresh_table_metadata(self, keyspace, table, max_schema_agreement_wait=None See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior """ - if not self.control_connection.refresh_schema(keyspace, table, schema_agreement_wait=max_schema_agreement_wait): + if not self.control_connection.refresh_schema(target_type=SchemaTargetType.TABLE, keyspace=keyspace, table=table, schema_agreement_wait=max_schema_agreement_wait): raise Exception("Table metadata was not refreshed. See log for details.") def refresh_user_type_metadata(self, keyspace, user_type, max_schema_agreement_wait=None): @@ -1304,7 +1325,7 @@ def refresh_user_type_metadata(self, keyspace, user_type, max_schema_agreement_w See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior """ - if not self.control_connection.refresh_schema(keyspace, usertype=user_type, schema_agreement_wait=max_schema_agreement_wait): + if not self.control_connection.refresh_schema(target_type=SchemaTargetType.TYPE, keyspace=keyspace, type=user_type, schema_agreement_wait=max_schema_agreement_wait): raise Exception("User Type metadata was not refreshed. See log for details.") def refresh_user_function_metadata(self, keyspace, function, max_schema_agreement_wait=None): @@ -1315,7 +1336,7 @@ def refresh_user_function_metadata(self, keyspace, function, max_schema_agreemen See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior """ - if not self.control_connection.refresh_schema(keyspace, function=function, schema_agreement_wait=max_schema_agreement_wait): + if not self.control_connection.refresh_schema(target_type=SchemaTargetType.FUNCTION, keyspace=keyspace, function=function, schema_agreement_wait=max_schema_agreement_wait): raise Exception("User Function metadata was not refreshed. See log for details.") def refresh_user_aggregate_metadata(self, keyspace, aggregate, max_schema_agreement_wait=None): @@ -1326,7 +1347,7 @@ def refresh_user_aggregate_metadata(self, keyspace, aggregate, max_schema_agreem See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior """ - if not self.control_connection.refresh_schema(keyspace, aggregate=aggregate, schema_agreement_wait=max_schema_agreement_wait): + if not self.control_connection.refresh_schema(target_type=SchemaTargetType.AGGREGATE, keyspace=keyspace, aggregate=aggregate, schema_agreement_wait=max_schema_agreement_wait): raise Exception("User Aggregate metadata was not refreshed. See log for details.") def refresh_nodes(self): @@ -2022,14 +2043,6 @@ class ControlConnection(object): Internal """ - _SELECT_KEYSPACES = "SELECT * FROM system.schema_keyspaces" - _SELECT_COLUMN_FAMILIES = "SELECT * FROM system.schema_columnfamilies" - _SELECT_COLUMNS = "SELECT * FROM system.schema_columns" - _SELECT_USERTYPES = "SELECT * FROM system.schema_usertypes" - _SELECT_FUNCTIONS = "SELECT * FROM system.schema_functions" - _SELECT_AGGREGATES = "SELECT * FROM system.schema_aggregates" - _SELECT_TRIGGERS = "SELECT * FROM system.schema_triggers" - _SELECT_PEERS = "SELECT peer, data_center, rack, tokens, rpc_address, schema_version FROM system.peers" _SELECT_LOCAL = "SELECT cluster_name, data_center, rack, tokens, partitioner, release_version, schema_version FROM system.local WHERE key='local'" @@ -2219,16 +2232,14 @@ def shutdown(self): self._connection.close() del self._connection - def refresh_schema(self, keyspace=None, table=None, usertype=None, function=None, - aggregate=None, schema_agreement_wait=None): + def refresh_schema(self, **kwargs): if not self._meta_refresh_enabled: log.debug("[control connection] Skipping schema refresh because meta refresh is disabled") return False try: if self._connection: - return self._refresh_schema(self._connection, keyspace, table, usertype, function, - aggregate, schema_agreement_wait=schema_agreement_wait) + return self._refresh_schema(self._connection, **kwargs) except ReferenceError: pass # our weak reference to the Cluster is no good except Exception: @@ -2236,13 +2247,10 @@ def refresh_schema(self, keyspace=None, table=None, usertype=None, function=None self._signal_error() return False - def _refresh_schema(self, connection, keyspace=None, table=None, usertype=None, function=None, - aggregate=None, preloaded_results=None, schema_agreement_wait=None): + def _refresh_schema(self, connection, preloaded_results=None, schema_agreement_wait=None, **kwargs): if self._cluster.is_shutdown: return False - assert sum(1 for arg in (table, usertype, function, aggregate) if arg) <= 1 - agreed = self.wait_for_schema_agreement(connection, preloaded_results=preloaded_results, wait_time=schema_agreement_wait) @@ -2250,148 +2258,8 @@ def _refresh_schema(self, connection, keyspace=None, table=None, usertype=None, log.debug("Skipping schema refresh due to lack of schema agreement") return False - cl = ConsistencyLevel.ONE - if table: - def _handle_results(success, result): - if success: - return dict_factory(*result.results) if result else {} - else: - raise result - - # a particular table changed - where_clause = " WHERE keyspace_name = '%s' AND columnfamily_name = '%s'" % (keyspace, table) - cf_query = QueryMessage(query=self._SELECT_COLUMN_FAMILIES + where_clause, consistency_level=cl) - col_query = QueryMessage(query=self._SELECT_COLUMNS + where_clause, consistency_level=cl) - triggers_query = QueryMessage(query=self._SELECT_TRIGGERS + where_clause, consistency_level=cl) - (cf_success, cf_result), (col_success, col_result), (triggers_success, triggers_result) \ - = connection.wait_for_responses(cf_query, col_query, triggers_query, timeout=self._timeout, fail_on_error=False) - - log.debug("[control connection] Fetched table info for %s.%s, rebuilding metadata", keyspace, table) - cf_result = _handle_results(cf_success, cf_result) - col_result = _handle_results(col_success, col_result) - - # handle the triggers table not existing in Cassandra 1.2 - if not triggers_success and isinstance(triggers_result, InvalidRequest): - triggers_result = {} - else: - triggers_result = _handle_results(triggers_success, triggers_result) - - self._cluster.metadata.table_changed(keyspace, table, cf_result, col_result, triggers_result) - elif usertype: - # user defined types within this keyspace changed - where_clause = " WHERE keyspace_name = '%s' AND type_name = '%s'" % (keyspace, usertype) - types_query = QueryMessage(query=self._SELECT_USERTYPES + where_clause, consistency_level=cl) - types_result = connection.wait_for_response(types_query) - log.debug("[control connection] Fetched user type info for %s.%s, rebuilding metadata", keyspace, usertype) - types_result = dict_factory(*types_result.results) if types_result.results else {} - self._cluster.metadata.usertype_changed(keyspace, usertype, types_result) - elif function: - # user defined function within this keyspace changed - where_clause = " WHERE keyspace_name = '%s' AND function_name = '%s' AND signature = [%s]" \ - % (keyspace, function.name, ','.join("'%s'" % t for t in function.type_signature)) - functions_query = QueryMessage(query=self._SELECT_FUNCTIONS + where_clause, consistency_level=cl) - functions_result = connection.wait_for_response(functions_query) - log.debug("[control connection] Fetched user function info for %s.%s, rebuilding metadata", keyspace, function.signature) - functions_result = dict_factory(*functions_result.results) if functions_result.results else {} - self._cluster.metadata.function_changed(keyspace, function, functions_result) - elif aggregate: - # user defined aggregate within this keyspace changed - where_clause = " WHERE keyspace_name = '%s' AND aggregate_name = '%s' AND signature = [%s]" \ - % (keyspace, aggregate.name, ','.join("'%s'" % t for t in aggregate.type_signature)) - aggregates_query = QueryMessage(query=self._SELECT_AGGREGATES + where_clause, consistency_level=cl) - aggregates_result = connection.wait_for_response(aggregates_query) - log.debug("[control connection] Fetched user aggregate info for %s.%s, rebuilding metadata", keyspace, aggregate.signature) - aggregates_result = dict_factory(*aggregates_result.results) if aggregates_result.results else {} - self._cluster.metadata.aggregate_changed(keyspace, aggregate, aggregates_result) - elif keyspace: - # only the keyspace itself changed (such as replication settings) - where_clause = " WHERE keyspace_name = '%s'" % (keyspace,) - ks_query = QueryMessage(query=self._SELECT_KEYSPACES + where_clause, consistency_level=cl) - ks_result = connection.wait_for_response(ks_query) - log.debug("[control connection] Fetched keyspace info for %s, rebuilding metadata", keyspace) - ks_result = dict_factory(*ks_result.results) if ks_result.results else {} - self._cluster.metadata.keyspace_changed(keyspace, ks_result) - else: - # build everything from scratch - queries = [ - QueryMessage(query=self._SELECT_KEYSPACES, consistency_level=cl), - QueryMessage(query=self._SELECT_COLUMN_FAMILIES, consistency_level=cl), - QueryMessage(query=self._SELECT_COLUMNS, consistency_level=cl), - QueryMessage(query=self._SELECT_USERTYPES, consistency_level=cl), - QueryMessage(query=self._SELECT_FUNCTIONS, consistency_level=cl), - QueryMessage(query=self._SELECT_AGGREGATES, consistency_level=cl), - QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl) - ] - - responses = connection.wait_for_responses(*queries, timeout=self._timeout, fail_on_error=False) - (ks_success, ks_result), (cf_success, cf_result), \ - (col_success, col_result), (types_success, types_result), \ - (functions_success, functions_result), \ - (aggregates_success, aggregates_result), \ - (trigger_success, triggers_result) = responses - - if ks_success: - ks_result = dict_factory(*ks_result.results) - else: - raise ks_result - - if cf_success: - cf_result = dict_factory(*cf_result.results) - else: - raise cf_result - - if col_success: - col_result = dict_factory(*col_result.results) - else: - raise col_result - - # if we're connected to Cassandra < 2.0, the trigges table will not exist - if trigger_success: - triggers_result = dict_factory(*triggers_result.results) - else: - if isinstance(triggers_result, InvalidRequest): - log.debug("[control connection] triggers table not found") - triggers_result = {} - elif isinstance(triggers_result, Unauthorized): - log.warning("[control connection] this version of Cassandra does not allow access to schema_triggers metadata with authorization enabled (CASSANDRA-7967); " - "The driver will operate normally, but will not reflect triggers in the local metadata model, or schema strings.") - triggers_result = {} - else: - raise triggers_result - - # if we're connected to Cassandra < 2.1, the usertypes table will not exist - if types_success: - types_result = dict_factory(*types_result.results) if types_result.results else {} - else: - if isinstance(types_result, InvalidRequest): - log.debug("[control connection] user types table not found") - types_result = {} - else: - raise types_result - - # functions were introduced in Cassandra 2.2 - if functions_success: - functions_result = dict_factory(*functions_result.results) if functions_result.results else {} - else: - if isinstance(functions_result, InvalidRequest): - log.debug("[control connection] user functions table not found") - functions_result = {} - else: - raise functions_result - - # aggregates were introduced in Cassandra 2.2 - if aggregates_success: - aggregates_result = dict_factory(*aggregates_result.results) if aggregates_result.results else {} - else: - if isinstance(aggregates_result, InvalidRequest): - log.debug("[control connection] user aggregates table not found") - aggregates_result = {} - else: - raise aggregates_result + self._cluster.metadata.refresh(connection, self._timeout, **kwargs) - log.debug("[control connection] Fetched schema, rebuilding metadata") - self._cluster.metadata.rebuild_schema(ks_result, types_result, functions_result, - aggregates_result, cf_result, col_result, triggers_result) return True def refresh_node_list_and_token_map(self, force_token_rebuild=False): @@ -2538,13 +2406,8 @@ def _handle_status_change(self, event): def _handle_schema_change(self, event): if self._schema_event_refresh_window < 0: return - keyspace = event.get('keyspace') - table = event.get('table') - usertype = event.get('type') - function = event.get('function') - aggregate = event.get('aggregate') delay = random() * self._schema_event_refresh_window - self._cluster.scheduler.schedule_unique(delay, self.refresh_schema, keyspace, table, usertype, function, aggregate) + self._cluster.scheduler.schedule_unique(delay, self.refresh_schema, **event) def wait_for_schema_agreement(self, connection=None, preloaded_results=None, wait_time=None): @@ -2727,11 +2590,11 @@ def shutdown(self): self.is_shutdown = True self._queue.put_nowait((0, None)) - def schedule(self, delay, fn, *args): - self._insert_task(delay, (fn, args)) + def schedule(self, delay, fn, *args, **kwargs): + self._insert_task(delay, (fn, args, tuple(kwargs.items()))) - def schedule_unique(self, delay, fn, *args): - task = (fn, args) + def schedule_unique(self, delay, fn, *args, **kwargs): + task = (fn, args, tuple(kwargs.items())) if task not in self._scheduled_tasks: self._insert_task(delay, task) else: @@ -2758,8 +2621,9 @@ def run(self): return if run_at <= time.time(): self._scheduled_tasks.remove(task) - fn, args = task - future = self._executor.submit(fn, *args) + fn, args, kwargs = task + kwargs = dict(kwargs) + future = self._executor.submit(fn, *args, **kwargs) future.add_done_callback(self._log_if_failed) else: self._queue.put_nowait((run_at, task)) @@ -2777,20 +2641,18 @@ def _log_if_failed(self, future): exc_info=exc) -def refresh_schema_and_set_result(keyspace, table, usertype, function, aggregate, control_conn, response_future): +def refresh_schema_and_set_result(control_conn, response_future, **kwargs): try: if control_conn._meta_refresh_enabled: log.debug("Refreshing schema in response to schema change. " - "Keyspace: %s; Table: %s, Type: %s, Function: %s, Aggregate: %s", - keyspace, table, usertype, function, aggregate) - control_conn._refresh_schema(response_future._connection, keyspace, table, usertype, function, aggregate) + "%s", kwargs) + control_conn._refresh_schema(response_future._connection, **kwargs) else: log.debug("Skipping schema refresh in response to schema change because meta refresh is disabled; " - "Keyspace: %s; Table: %s, Type: %s, Function: %s", keyspace, table, usertype, function, aggregate) + "%s", kwargs) except Exception: log.exception("Exception refreshing schema in response to schema change:") - response_future.session.submit( - control_conn.refresh_schema, keyspace, table, usertype, function, aggregate) + response_future.session.submit(control_conn.refresh_schema, **kwargs) finally: response_future._set_final_result(None) @@ -3015,13 +2877,8 @@ def _set_result(self, response): # thread instead of the event loop thread self.session.submit( refresh_schema_and_set_result, - response.results['keyspace'], - response.results.get('table'), - response.results.get('type'), - response.results.get('function'), - response.results.get('aggregate'), self.session.cluster.control_connection, - self) + self, **response.results) else: results = getattr(response, 'results', None) if results is not None and response.kind == RESULT_KIND_ROWS: diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 9668a8c2c4..73c7687c6c 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -29,10 +29,12 @@ except ImportError as e: pass -from cassandra import SignatureDescriptor +from cassandra import SignatureDescriptor, ConsistencyLevel, InvalidRequest, SchemaChangeType, Unauthorized import cassandra.cqltypes as types from cassandra.encoder import Encoder from cassandra.marshal import varint_unpack +from cassandra.protocol import QueryMessage +from cassandra.query import dict_factory from cassandra.util import OrderedDict log = logging.getLogger(__name__) @@ -110,64 +112,38 @@ def export_schema_as_string(self): """ return "\n".join(ks.export_as_string() for ks in self.keyspaces.values()) - def rebuild_schema(self, ks_results, type_results, function_results, - aggregate_results, cf_results, col_results, triggers_result): - """ - Rebuild the view of the current schema from a fresh set of rows from - the system schema tables. - - For internal use only. - """ - cf_def_rows = defaultdict(list) - col_def_rows = defaultdict(lambda: defaultdict(list)) - usertype_rows = defaultdict(list) - fn_rows = defaultdict(list) - agg_rows = defaultdict(list) - trigger_rows = defaultdict(lambda: defaultdict(list)) - - for row in cf_results: - cf_def_rows[row["keyspace_name"]].append(row) - - for row in col_results: - ksname = row["keyspace_name"] - cfname = row["columnfamily_name"] - col_def_rows[ksname][cfname].append(row) + def refresh(self, connection, timeout, target_type=None, change_type=None, **kwargs): - for row in type_results: - usertype_rows[row["keyspace_name"]].append(row) - - for row in function_results: - fn_rows[row["keyspace_name"]].append(row) + if not target_type: + self._rebuild_all(connection, timeout) + return - for row in aggregate_results: - agg_rows[row["keyspace_name"]].append(row) + tt_lower = target_type.lower() + try: + if change_type == SchemaChangeType.DROPPED: + drop_method = getattr(self, '_drop_' + tt_lower) + drop_method(**kwargs) + else: + parser = get_schema_parser(connection, timeout) + parse_method = getattr(parser, 'get_' + tt_lower) + meta = parse_method(**kwargs) + if meta: + update_method = getattr(self, '_update_' + tt_lower) + update_method(meta) + else: + drop_method = getattr(self, '_drop_' + tt_lower) + drop_method(**kwargs) + except AttributeError: + raise ValueError("Unknown schema target_type: '%s'" % target_type) - for row in triggers_result: - ksname = row["keyspace_name"] - cfname = row["columnfamily_name"] - trigger_rows[ksname][cfname].append(row) + def _rebuild_all(self, connection, timeout): + """ + For internal use only. + """ + parser = get_schema_parser(connection, timeout) current_keyspaces = set() - for row in ks_results: - keyspace_meta = self._build_keyspace_metadata(row) - keyspace_col_rows = col_def_rows.get(keyspace_meta.name, {}) - keyspace_trigger_rows = trigger_rows.get(keyspace_meta.name, {}) - for table_row in cf_def_rows.get(keyspace_meta.name, []): - table_meta = self._build_table_metadata(keyspace_meta, table_row, keyspace_col_rows, keyspace_trigger_rows) - keyspace_meta._add_table_metadata(table_meta) - - for usertype_row in usertype_rows.get(keyspace_meta.name, []): - usertype = self._build_usertype(keyspace_meta.name, usertype_row) - keyspace_meta.user_types[usertype.name] = usertype - - for fn_row in fn_rows.get(keyspace_meta.name, []): - fn = self._build_function(keyspace_meta.name, fn_row) - keyspace_meta.functions[fn.signature] = fn - - for agg_row in agg_rows.get(keyspace_meta.name, []): - agg = self._build_aggregate(keyspace_meta.name, agg_row) - keyspace_meta.aggregates[agg.signature] = agg - + for keyspace_meta in parser.get_all_keyspaces(): current_keyspaces.add(keyspace_meta.name) old_keyspace_meta = self.keyspaces.get(keyspace_meta.name, None) self.keyspaces[keyspace_meta.name] = keyspace_meta @@ -184,16 +160,10 @@ def rebuild_schema(self, ks_results, type_results, function_results, for ksname in removed_keyspaces: self._keyspace_removed(ksname) - def keyspace_changed(self, keyspace, ks_results): - if not ks_results: - if keyspace in self.keyspaces: - del self.keyspaces[keyspace] - self._keyspace_removed(keyspace) - return - - keyspace_meta = self._build_keyspace_metadata(ks_results[0]) - old_keyspace_meta = self.keyspaces.get(keyspace, None) - self.keyspaces[keyspace] = keyspace_meta + def _update_keyspace(self, keyspace_meta): + ks_name = keyspace_meta.name + old_keyspace_meta = self.keyspaces.get(ks_name, None) + self.keyspaces[ks_name] = keyspace_meta if old_keyspace_meta: keyspace_meta.tables = old_keyspace_meta.tables keyspace_meta.user_types = old_keyspace_meta.user_types @@ -201,50 +171,69 @@ def keyspace_changed(self, keyspace, ks_results): keyspace_meta.functions = old_keyspace_meta.functions keyspace_meta.aggregates = old_keyspace_meta.aggregates if (keyspace_meta.replication_strategy != old_keyspace_meta.replication_strategy): - self._keyspace_updated(keyspace) - else: - self._keyspace_added(keyspace) - - def usertype_changed(self, keyspace, name, type_results): - if type_results: - new_usertype = self._build_usertype(keyspace, type_results[0]) - self.keyspaces[keyspace].user_types[name] = new_usertype + self._keyspace_updated(ks_name) else: - # the type was deleted - self.keyspaces[keyspace].user_types.pop(name, None) + self._keyspace_added(ks_name) - def function_changed(self, keyspace, function, function_results): - if function_results: - new_function = self._build_function(keyspace, function_results[0]) - self.keyspaces[keyspace].functions[function.signature] = new_function - else: - # the function was deleted - self.keyspaces[keyspace].functions.pop(function.signature, None) + def _drop_keyspace(self, keyspace): + if self.keyspaces.pop(keyspace, None): + self._keyspace_removed(keyspace) - def aggregate_changed(self, keyspace, aggregate, aggregate_results): - if aggregate_results: - new_aggregate = self._build_aggregate(keyspace, aggregate_results[0]) - self.keyspaces[keyspace].aggregates[aggregate.signature] = new_aggregate - else: - # the aggregate was deleted - self.keyspaces[keyspace].aggregates.pop(aggregate.signature, None) + def _update_table(self, table_meta): + try: + keyspace_meta = self.keyspaces[table_meta.keyspace_name] + table_meta.keyspace = keyspace_meta # temporary while TableMetadata.keyspace is deprecated + keyspace_meta._add_table_metadata(table_meta) + except KeyError: + # can happen if keyspace disappears while processing async event + pass - def table_changed(self, keyspace, table, cf_results, col_results, triggers_result): + def _drop_table(self, keyspace, table): try: keyspace_meta = self.keyspaces[keyspace] + keyspace_meta._drop_table_metadata(table) except KeyError: - # we're trying to update a table in a keyspace we don't know about - log.error("Tried to update schema for table '%s' in unknown keyspace '%s'", - table, keyspace) - return + # can happen if keyspace disappears while processing async event + pass - if not cf_results: - # the table was removed - keyspace_meta._drop_table_metadata(table) - else: - assert len(cf_results) == 1 - table_meta = self._build_table_metadata(keyspace_meta, cf_results[0], {table: col_results}, {table: triggers_result}) - keyspace_meta._add_table_metadata(table_meta) + def _update_type(self, type_meta): + try: + self.keyspaces[type_meta.keyspace].user_types[type_meta.name] = type_meta + except KeyError: + # can happen if keyspace disappears while processing async event + pass + + def _drop_type(self, keyspace, type): + try: + self.keyspaces[keyspace].user_types.pop(type, None) + except KeyError: + # can happen if keyspace disappears while processing async event + pass + + def _update_function(self, function_meta): + try: + self.keyspaces[function_meta.keyspace].functions[function_meta.signature] = function_meta + except KeyError: + # can happen if keyspace disappears while processing async event + pass + + def _drop_function(self, keyspace, function): + try: + self.keyspaces[keyspace].functions.pop(function.signature, None) + except KeyError: + pass + + def _update_aggregate(self, aggregate_meta): + try: + self.keyspaces[aggregate_meta.keyspace].aggregates[aggregate_meta.signature] = aggregate_meta + except KeyError: + pass + + def _drop_aggregate(self, keyspace, aggregate): + try: + self.keyspaces[keyspace].aggregates.pop(aggregate.signature, None) + except KeyError: + pass def _keyspace_added(self, ksname): if self.token_map: @@ -258,337 +247,118 @@ def _keyspace_removed(self, ksname): if self.token_map: self.token_map.remove_keyspace(ksname) - def _build_keyspace_metadata(self, row): - name = row["keyspace_name"] - durable_writes = row["durable_writes"] - strategy_class = row["strategy_class"] - strategy_options = json.loads(row["strategy_options"]) - return KeyspaceMetadata(name, durable_writes, strategy_class, strategy_options) + def rebuild_token_map(self, partitioner, token_map): + """ + Rebuild our view of the topology from fresh rows from the + system topology tables. + For internal use only. + """ + self.partitioner = partitioner + if partitioner.endswith('RandomPartitioner'): + token_class = MD5Token + elif partitioner.endswith('Murmur3Partitioner'): + token_class = Murmur3Token + elif partitioner.endswith('ByteOrderedPartitioner'): + token_class = BytesToken + else: + self.token_map = None + return - def _build_usertype(self, keyspace, usertype_row): - type_classes = list(map(types.lookup_casstype, usertype_row['field_types'])) - return UserType(usertype_row['keyspace_name'], usertype_row['type_name'], - usertype_row['field_names'], type_classes) + token_to_host_owner = {} + ring = [] + for host, token_strings in six.iteritems(token_map): + for token_string in token_strings: + token = token_class(token_string) + ring.append(token) + token_to_host_owner[token] = host - def _build_function(self, keyspace, function_row): - return_type = types.lookup_casstype(function_row['return_type']) - return Function(function_row['keyspace_name'], function_row['function_name'], - function_row['signature'], function_row['argument_names'], - return_type, function_row['language'], function_row['body'], - function_row['called_on_null_input']) + all_tokens = sorted(ring) + self.token_map = TokenMap( + token_class, token_to_host_owner, all_tokens, self) - def _build_aggregate(self, keyspace, aggregate_row): - state_type = types.lookup_casstype(aggregate_row['state_type']) - initial_condition = aggregate_row['initcond'] - if initial_condition is not None: - initial_condition = state_type.deserialize(initial_condition, 3) - return_type = types.lookup_casstype(aggregate_row['return_type']) - return Aggregate(aggregate_row['keyspace_name'], aggregate_row['aggregate_name'], - aggregate_row['signature'], aggregate_row['state_func'], state_type, - aggregate_row['final_func'], initial_condition, return_type) + def get_replicas(self, keyspace, key): + """ + Returns a list of :class:`.Host` instances that are replicas for a given + partition key. + """ + t = self.token_map + if not t: + return [] + try: + return t.get_replicas(keyspace, t.token_class.from_key(key)) + except NoMurmur3: + return [] - def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): - cfname = row["columnfamily_name"] - cf_col_rows = col_rows.get(cfname, []) + def can_support_partitioner(self): + if self.partitioner.endswith('Murmur3Partitioner') and murmur3 is None: + return False + else: + return True - if not cf_col_rows: # CASSANDRA-8487 - log.warning("Building table metadata with no column meta for %s.%s", - keyspace_metadata.name, cfname) + def add_or_return_host(self, host): + """ + Returns a tuple (host, new), where ``host`` is a Host + instance, and ``new`` is a bool indicating whether + the host was newly added. + """ + with self._hosts_lock: + try: + return self._hosts[host.address], False + except KeyError: + self._hosts[host.address] = host + return host, True - comparator = types.lookup_casstype(row["comparator"]) + def remove_host(self, host): + with self._hosts_lock: + return bool(self._hosts.pop(host.address, False)) - if issubclass(comparator, types.CompositeType): - column_name_types = comparator.subtypes - is_composite_comparator = True - else: - column_name_types = (comparator,) - is_composite_comparator = False + def get_host(self, address): + return self._hosts.get(address) - num_column_name_components = len(column_name_types) - last_col = column_name_types[-1] + def all_hosts(self): + """ + Returns a list of all known :class:`.Host` instances in the cluster. + """ + with self._hosts_lock: + return self._hosts.values() - column_aliases = row.get("column_aliases", None) - clustering_rows = [r for r in cf_col_rows - if r.get('type', None) == "clustering_key"] - if len(clustering_rows) > 1: - clustering_rows = sorted(clustering_rows, key=lambda row: row.get('component_index')) +REPLICATION_STRATEGY_CLASS_PREFIX = "org.apache.cassandra.locator." - if column_aliases is not None: - column_aliases = json.loads(column_aliases) - else: - column_aliases = [r.get('column_name') for r in clustering_rows] - if is_composite_comparator: - if issubclass(last_col, types.ColumnToCollectionType): - # collections - is_compact = False - has_value = False - clustering_size = num_column_name_components - 2 - elif (len(column_aliases) == num_column_name_components - 1 - and issubclass(last_col, types.UTF8Type)): - # aliases? - is_compact = False - has_value = False - clustering_size = num_column_name_components - 1 - else: - # compact table - is_compact = True - has_value = column_aliases or not cf_col_rows - clustering_size = num_column_name_components +def trim_if_startswith(s, prefix): + if s.startswith(prefix): + return s[len(prefix):] + return s - # Some thrift tables define names in composite types (see PYTHON-192) - if not column_aliases and hasattr(comparator, 'fieldnames'): - column_aliases = comparator.fieldnames - else: - is_compact = True - if column_aliases or not cf_col_rows: - has_value = True - clustering_size = num_column_name_components - else: - has_value = False - clustering_size = 0 - table_meta = TableMetadata(keyspace_metadata, cfname) - table_meta.comparator = comparator +_replication_strategies = {} - # partition key - partition_rows = [r for r in cf_col_rows - if r.get('type', None) == "partition_key"] - if len(partition_rows) > 1: - partition_rows = sorted(partition_rows, key=lambda row: row.get('component_index')) +class ReplicationStrategyTypeType(type): + def __new__(metacls, name, bases, dct): + dct.setdefault('name', name) + cls = type.__new__(metacls, name, bases, dct) + if not name.startswith('_'): + _replication_strategies[name] = cls + return cls - key_aliases = row.get("key_aliases") - if key_aliases is not None: - key_aliases = json.loads(key_aliases) if key_aliases else [] - else: - # In 2.0+, we can use the 'type' column. In 3.0+, we have to use it. - key_aliases = [r.get('column_name') for r in partition_rows] - key_validator = row.get("key_validator") - if key_validator is not None: - key_type = types.lookup_casstype(key_validator) - key_types = key_type.subtypes if issubclass(key_type, types.CompositeType) else [key_type] - else: - key_types = [types.lookup_casstype(r.get('validator')) for r in partition_rows] +@six.add_metaclass(ReplicationStrategyTypeType) +class _ReplicationStrategy(object): + options_map = None - for i, col_type in enumerate(key_types): - if len(key_aliases) > i: - column_name = key_aliases[i] - elif i == 0: - column_name = "key" - else: - column_name = "key%d" % i + @classmethod + def create(cls, strategy_class, options_map): + if not strategy_class: + return None - col = ColumnMetadata(table_meta, column_name, col_type) - table_meta.columns[column_name] = col - table_meta.partition_key.append(col) + strategy_name = trim_if_startswith(strategy_class, REPLICATION_STRATEGY_CLASS_PREFIX) - # clustering key - for i in range(clustering_size): - if len(column_aliases) > i: - column_name = column_aliases[i] - else: - column_name = "column%d" % i - - col = ColumnMetadata(table_meta, column_name, column_name_types[i]) - table_meta.columns[column_name] = col - table_meta.clustering_key.append(col) - - # value alias (if present) - if has_value: - value_alias_rows = [r for r in cf_col_rows - if r.get('type', None) == "compact_value"] - - if not key_aliases: # TODO are we checking the right thing here? - value_alias = "value" - else: - value_alias = row.get("value_alias", None) - if value_alias is None and value_alias_rows: # CASSANDRA-8487 - # In 2.0+, we can use the 'type' column. In 3.0+, we have to use it. - value_alias = value_alias_rows[0].get('column_name') - - default_validator = row.get("default_validator") - if default_validator: - validator = types.lookup_casstype(default_validator) - else: - if value_alias_rows: # CASSANDRA-8487 - validator = types.lookup_casstype(value_alias_rows[0].get('validator')) - - col = ColumnMetadata(table_meta, value_alias, validator) - if value_alias: # CASSANDRA-8487 - table_meta.columns[value_alias] = col - - # other normal columns - for col_row in cf_col_rows: - column_meta = self._build_column_metadata(table_meta, col_row) - table_meta.columns[column_meta.name] = column_meta - - if trigger_rows: - for trigger_row in trigger_rows[cfname]: - trigger_meta = self._build_trigger_metadata(table_meta, trigger_row) - table_meta.triggers[trigger_meta.name] = trigger_meta - - table_meta.options = self._build_table_options(row) - table_meta.is_compact_storage = is_compact - - return table_meta - - def _build_table_options(self, row): - """ Setup the mostly-non-schema table options, like caching settings """ - options = dict((o, row.get(o)) for o in TableMetadata.recognized_options if o in row) - - # the option name when creating tables is "dclocal_read_repair_chance", - # but the column name in system.schema_columnfamilies is - # "local_read_repair_chance". We'll store this as dclocal_read_repair_chance, - # since that's probably what users are expecting (and we need it for the - # CREATE TABLE statement anyway). - if "local_read_repair_chance" in options: - val = options.pop("local_read_repair_chance") - options["dclocal_read_repair_chance"] = val - - return options - - def _build_column_metadata(self, table_metadata, row): - name = row["column_name"] - data_type = types.lookup_casstype(row["validator"]) - is_static = row.get("type", None) == "static" - column_meta = ColumnMetadata(table_metadata, name, data_type, is_static=is_static) - index_meta = self._build_index_metadata(column_meta, row) - column_meta.index = index_meta - if index_meta: - table_metadata.indexes[index_meta.name] = index_meta - return column_meta - - def _build_index_metadata(self, column_metadata, row): - index_name = row.get("index_name") - index_type = row.get("index_type") - if index_name or index_type: - options = row.get("index_options") - index_options = json.loads(options) if options else {} - return IndexMetadata(column_metadata, index_name, index_type, index_options) - else: - return None - - def _build_trigger_metadata(self, table_metadata, row): - name = row["trigger_name"] - options = row["trigger_options"] - trigger_meta = TriggerMetadata(table_metadata, name, options) - return trigger_meta - - def rebuild_token_map(self, partitioner, token_map): - """ - Rebuild our view of the topology from fresh rows from the - system topology tables. - For internal use only. - """ - self.partitioner = partitioner - if partitioner.endswith('RandomPartitioner'): - token_class = MD5Token - elif partitioner.endswith('Murmur3Partitioner'): - token_class = Murmur3Token - elif partitioner.endswith('ByteOrderedPartitioner'): - token_class = BytesToken - else: - self.token_map = None - return - - token_to_host_owner = {} - ring = [] - for host, token_strings in six.iteritems(token_map): - for token_string in token_strings: - token = token_class(token_string) - ring.append(token) - token_to_host_owner[token] = host - - all_tokens = sorted(ring) - self.token_map = TokenMap( - token_class, token_to_host_owner, all_tokens, self) - - def get_replicas(self, keyspace, key): - """ - Returns a list of :class:`.Host` instances that are replicas for a given - partition key. - """ - t = self.token_map - if not t: - return [] - try: - return t.get_replicas(keyspace, t.token_class.from_key(key)) - except NoMurmur3: - return [] - - def can_support_partitioner(self): - if self.partitioner.endswith('Murmur3Partitioner') and murmur3 is None: - return False - else: - return True - - def add_or_return_host(self, host): - """ - Returns a tuple (host, new), where ``host`` is a Host - instance, and ``new`` is a bool indicating whether - the host was newly added. - """ - with self._hosts_lock: - try: - return self._hosts[host.address], False - except KeyError: - self._hosts[host.address] = host - return host, True - - def remove_host(self, host): - with self._hosts_lock: - return bool(self._hosts.pop(host.address, False)) - - def get_host(self, address): - return self._hosts.get(address) - - def all_hosts(self): - """ - Returns a list of all known :class:`.Host` instances in the cluster. - """ - with self._hosts_lock: - return self._hosts.values() - - -REPLICATION_STRATEGY_CLASS_PREFIX = "org.apache.cassandra.locator." - - -def trim_if_startswith(s, prefix): - if s.startswith(prefix): - return s[len(prefix):] - return s - - -_replication_strategies = {} - - -class ReplicationStrategyTypeType(type): - def __new__(metacls, name, bases, dct): - dct.setdefault('name', name) - cls = type.__new__(metacls, name, bases, dct) - if not name.startswith('_'): - _replication_strategies[name] = cls - return cls - - -@six.add_metaclass(ReplicationStrategyTypeType) -class _ReplicationStrategy(object): - options_map = None - - @classmethod - def create(cls, strategy_class, options_map): - if not strategy_class: - return None - - strategy_name = trim_if_startswith(strategy_class, REPLICATION_STRATEGY_CLASS_PREFIX) - - rs_class = _replication_strategies.get(strategy_name, None) - if rs_class is None: - rs_class = _UnknownStrategyBuilder(strategy_name) - _replication_strategies[strategy_name] = rs_class + rs_class = _replication_strategies.get(strategy_name, None) + if rs_class is None: + rs_class = _UnknownStrategyBuilder(strategy_name) + _replication_strategies[strategy_name] = rs_class try: rs_instance = rs_class(options_map) @@ -1138,7 +908,15 @@ class TableMetadata(object): """ keyspace = None - """ An instance of :class:`~.KeyspaceMetadata`. """ + """ + An instance of :class:`~.KeyspaceMetadata`. + + .. deprecated:: 2.7.0 + + """ + + keyspace_name = None + """ String name of this Table's keyspace """ name = None """ The string name of the table. """ @@ -1236,8 +1014,8 @@ def is_cql_compatible(self): return not incompatible - def __init__(self, keyspace_metadata, name, partition_key=None, clustering_key=None, columns=None, triggers=None, options=None): - self.keyspace = keyspace_metadata + def __init__(self, keyspace_name, name, partition_key=None, clustering_key=None, columns=None, triggers=None, options=None): + self.keyspace_name = keyspace_name self.name = name self.partition_key = [] if partition_key is None else partition_key self.clustering_key = [] if clustering_key is None else clustering_key @@ -1258,7 +1036,7 @@ def export_as_string(self): else: # If we can't produce this table with CQL, comment inline ret = "/*\nWarning: Table %s.%s omitted because it has constructs not compatible with CQL (was created via legacy API).\n" % \ - (self.keyspace.name, self.name) + (self.keyspace_name, self.name) ret += "\nApproximate structure, for reference:\n(this should not be used to reproduce this schema)\n\n%s" % self.all_as_cql() ret += "\n*/" @@ -1283,7 +1061,7 @@ def as_cql_query(self, formatted=False): extra whitespace will be added to make the query human readable. """ ret = "CREATE TABLE %s.%s (%s" % ( - protect_name(self.keyspace.name), + protect_name(self.keyspace_name), protect_name(self.name), "\n" if formatted else "") @@ -1738,3 +1516,471 @@ def as_cql_query(self): protect_value(self.options['class']) ) return ret + + +class _SchemaParser(object): + + def __init__(self, connection, timeout): + self.connection = connection + self.timeout = timeout + + def _handle_results(self, success, result): + if success: + return dict_factory(*result.results) if result else [] + else: + raise result + + +class SchemaParserV12(_SchemaParser): + _SELECT_KEYSPACES = "SELECT * FROM system.schema_keyspaces" + _SELECT_COLUMN_FAMILIES = "SELECT * FROM system.schema_columnfamilies" + _SELECT_COLUMNS = "SELECT * FROM system.schema_columns" + + pass + + +class SchemaParserV20(SchemaParserV12): + _SELECT_TRIGGERS = "SELECT * FROM system.schema_triggers" + + +class SchemaParserV21(SchemaParserV20): + _SELECT_USERTYPES = "SELECT * FROM system.schema_usertypes" + + +class SchemaParserV22(SchemaParserV21): + _SELECT_FUNCTIONS = "SELECT * FROM system.schema_functions" + _SELECT_AGGREGATES = "SELECT * FROM system.schema_aggregates" + + def __init__(self, connection, timeout): + super(SchemaParserV22, self).__init__(connection, timeout) + self.keyspaces_result = [] + self.tables_result = [] + self.columns_result = [] + self.triggers_result = [] + self.types_result = [] + self.functions_result = [] + self.aggregates_result = [] + + self.keyspace_table_rows = defaultdict(list) + self.keyspace_table_col_rows = defaultdict(lambda: defaultdict(list)) + self.keyspace_type_rows = defaultdict(list) + self.keyspace_func_rows = defaultdict(list) + self.keyspace_agg_rows = defaultdict(list) + self.keyspace_table_trigger_rows = defaultdict(lambda: defaultdict(list)) + + def get_all_keyspaces(self): + self._query_all() + + for row in self.keyspaces_result: + keyspace_meta = self._build_keyspace_metadata(row) + + keyspace_col_rows = self.keyspace_table_col_rows.get(keyspace_meta.name, {}) + keyspace_trigger_rows = self.keyspace_table_trigger_rows.get(keyspace_meta.name, {}) + for table_row in self.keyspace_table_rows.get(keyspace_meta.name, []): + table_meta = self._build_table_metadata(keyspace_meta.name, table_row, keyspace_col_rows, keyspace_trigger_rows) + table_meta.keyspace = keyspace_meta # temporary while TableMetadata.keyspace is deprecated + keyspace_meta._add_table_metadata(table_meta) + + for usertype_row in self.keyspace_type_rows.get(keyspace_meta.name, []): + usertype = self._build_user_type(keyspace_meta.name, usertype_row) + keyspace_meta.user_types[usertype.name] = usertype + + for fn_row in self.keyspace_func_rows.get(keyspace_meta.name, []): + fn = self._build_function(keyspace_meta.name, fn_row) + keyspace_meta.functions[fn.signature] = fn + + for agg_row in self.keyspace_agg_rows.get(keyspace_meta.name, []): + agg = self._build_aggregate(keyspace_meta.name, agg_row) + keyspace_meta.aggregates[agg.signature] = agg + + yield keyspace_meta + + def get_table(self, keyspace, table): + cl = ConsistencyLevel.ONE + where_clause = " WHERE keyspace_name = '%s' AND columnfamily_name = '%s'" % (keyspace, table) + cf_query = QueryMessage(query=self._SELECT_COLUMN_FAMILIES + where_clause, consistency_level=cl) + col_query = QueryMessage(query=self._SELECT_COLUMNS + where_clause, consistency_level=cl) + triggers_query = QueryMessage(query=self._SELECT_TRIGGERS + where_clause, consistency_level=cl) + (cf_success, cf_result), (col_success, col_result), (triggers_success, triggers_result) \ + = self.connection.wait_for_responses(cf_query, col_query, triggers_query, timeout=self.timeout, fail_on_error=False) + table_result = self._handle_results(cf_success, cf_result) + col_result = self._handle_results(col_success, col_result) + + # handle the triggers table not existing in Cassandra 1.2 + if not triggers_success and isinstance(triggers_result, InvalidRequest): + triggers_result = [] + else: + triggers_result = self._handle_results(triggers_success, triggers_result) + + if table_result: + return self._build_table_metadata(keyspace, table_result[0], {table: col_result}, {table: triggers_result}) + + # TODO: refactor common query/build code + def get_type(self, keyspace, type): + where_clause = " WHERE keyspace_name = '%s' AND type_name = '%s'" % (keyspace, type) + type_query = QueryMessage(query=self._SELECT_USERTYPES + where_clause, consistency_level=ConsistencyLevel.ONE) + type_result = self.connection.wait_for_response(type_query, self.timeout) + if type_result.results: + type_result = dict_factory(*type_result.results) + return self._build_user_type(keyspace, type_result[0]) + + def get_function(self, keyspace, function): + where_clause = " WHERE keyspace_name = '%s' AND function_name = '%s' AND signature = [%s]" \ + % (keyspace, function.name, ','.join("'%s'" % t for t in function.type_signature)) + function_query = QueryMessage(query=self._SELECT_FUNCTIONS + where_clause, consistency_level=ConsistencyLevel.ONE) + function_result = self.connection.wait_for_response(function_query, self.timeout) + if function_result.results: + function_result = dict_factory(*function_result.results) + return self._build_function(keyspace, function_result[0]) + + def get_aggregate(self, keyspace, aggregate): + # user defined aggregate within this keyspace changed + where_clause = " WHERE keyspace_name = '%s' AND aggregate_name = '%s' AND signature = [%s]" \ + % (keyspace, aggregate.name, ','.join("'%s'" % t for t in aggregate.type_signature)) + aggregate_query = QueryMessage(query=self._SELECT_AGGREGATES + where_clause, consistency_level=ConsistencyLevel.ONE) + aggregate_result = self.connection.wait_for_response(aggregate_query, self.timeout) + if aggregate_result.results: + aggregate_result = dict_factory(*aggregate_result.results) + return self._build_aggregate(keyspace, aggregate_result[0]) + + def get_keyspace(self, keyspace): + where_clause = " WHERE keyspace_name = '%s'" % (keyspace,) + ks_query = QueryMessage(query=self._SELECT_KEYSPACES + where_clause, consistency_level=ConsistencyLevel.ONE) + ks_result = self.connection.wait_for_response(ks_query, self.timeout) + if ks_result.results: + ks_result = dict_factory(*ks_result.results) + return self._build_keyspace_metadata(ks_result[0]) + + @staticmethod + def _build_keyspace_metadata(row): + name = row["keyspace_name"] + durable_writes = row["durable_writes"] + strategy_class = row["strategy_class"] + strategy_options = json.loads(row["strategy_options"]) + return KeyspaceMetadata(name, durable_writes, strategy_class, strategy_options) + + @staticmethod + def _build_user_type(keyspace, usertype_row): + type_classes = list(map(types.lookup_casstype, usertype_row['field_types'])) + return UserType(keyspace, usertype_row['type_name'], + usertype_row['field_names'], type_classes) + + @staticmethod + def _build_function(keyspace, function_row): + return_type = types.lookup_casstype(function_row['return_type']) + return Function(keyspace, function_row['function_name'], + function_row['signature'], function_row['argument_names'], + return_type, function_row['language'], function_row['body'], + function_row['called_on_null_input']) + + @staticmethod + def _build_aggregate(keyspace, aggregate_row): + state_type = types.lookup_casstype(aggregate_row['state_type']) + initial_condition = aggregate_row['initcond'] + if initial_condition is not None: + initial_condition = state_type.deserialize(initial_condition, 3) + return_type = types.lookup_casstype(aggregate_row['return_type']) + return Aggregate(keyspace, aggregate_row['aggregate_name'], + aggregate_row['signature'], aggregate_row['state_func'], state_type, + aggregate_row['final_func'], initial_condition, return_type) + + def _build_table_metadata(self, keyspace_name, row, col_rows, trigger_rows): + cfname = row["columnfamily_name"] + cf_col_rows = col_rows.get(cfname, []) + + if not cf_col_rows: # CASSANDRA-8487 + log.warning("Building table metadata with no column meta for %s.%s", + keyspace_name, cfname) + + comparator = types.lookup_casstype(row["comparator"]) + + if issubclass(comparator, types.CompositeType): + column_name_types = comparator.subtypes + is_composite_comparator = True + else: + column_name_types = (comparator,) + is_composite_comparator = False + + num_column_name_components = len(column_name_types) + last_col = column_name_types[-1] + + column_aliases = row.get("column_aliases", None) + + clustering_rows = [r for r in cf_col_rows + if r.get('type', None) == "clustering_key"] + if len(clustering_rows) > 1: + clustering_rows = sorted(clustering_rows, key=lambda row: row.get('component_index')) + + if column_aliases is not None: + column_aliases = json.loads(column_aliases) + else: + column_aliases = [r.get('column_name') for r in clustering_rows] + + if is_composite_comparator: + if issubclass(last_col, types.ColumnToCollectionType): + # collections + is_compact = False + has_value = False + clustering_size = num_column_name_components - 2 + elif (len(column_aliases) == num_column_name_components - 1 + and issubclass(last_col, types.UTF8Type)): + # aliases? + is_compact = False + has_value = False + clustering_size = num_column_name_components - 1 + else: + # compact table + is_compact = True + has_value = column_aliases or not cf_col_rows + clustering_size = num_column_name_components + + # Some thrift tables define names in composite types (see PYTHON-192) + if not column_aliases and hasattr(comparator, 'fieldnames'): + column_aliases = comparator.fieldnames + else: + is_compact = True + if column_aliases or not cf_col_rows: + has_value = True + clustering_size = num_column_name_components + else: + has_value = False + clustering_size = 0 + + table_meta = TableMetadata(keyspace_name, cfname) + table_meta.comparator = comparator + + # partition key + partition_rows = [r for r in cf_col_rows + if r.get('type', None) == "partition_key"] + + if len(partition_rows) > 1: + partition_rows = sorted(partition_rows, key=lambda row: row.get('component_index')) + + key_aliases = row.get("key_aliases") + if key_aliases is not None: + key_aliases = json.loads(key_aliases) if key_aliases else [] + else: + # In 2.0+, we can use the 'type' column. In 3.0+, we have to use it. + key_aliases = [r.get('column_name') for r in partition_rows] + + key_validator = row.get("key_validator") + if key_validator is not None: + key_type = types.lookup_casstype(key_validator) + key_types = key_type.subtypes if issubclass(key_type, types.CompositeType) else [key_type] + else: + key_types = [types.lookup_casstype(r.get('validator')) for r in partition_rows] + + for i, col_type in enumerate(key_types): + if len(key_aliases) > i: + column_name = key_aliases[i] + elif i == 0: + column_name = "key" + else: + column_name = "key%d" % i + + col = ColumnMetadata(table_meta, column_name, col_type) + table_meta.columns[column_name] = col + table_meta.partition_key.append(col) + + # clustering key + for i in range(clustering_size): + if len(column_aliases) > i: + column_name = column_aliases[i] + else: + column_name = "column%d" % i + + col = ColumnMetadata(table_meta, column_name, column_name_types[i]) + table_meta.columns[column_name] = col + table_meta.clustering_key.append(col) + + # value alias (if present) + if has_value: + value_alias_rows = [r for r in cf_col_rows + if r.get('type', None) == "compact_value"] + + if not key_aliases: # TODO are we checking the right thing here? + value_alias = "value" + else: + value_alias = row.get("value_alias", None) + if value_alias is None and value_alias_rows: # CASSANDRA-8487 + # In 2.0+, we can use the 'type' column. In 3.0+, we have to use it. + value_alias = value_alias_rows[0].get('column_name') + + default_validator = row.get("default_validator") + if default_validator: + validator = types.lookup_casstype(default_validator) + else: + if value_alias_rows: # CASSANDRA-8487 + validator = types.lookup_casstype(value_alias_rows[0].get('validator')) + + col = ColumnMetadata(table_meta, value_alias, validator) + if value_alias: # CASSANDRA-8487 + table_meta.columns[value_alias] = col + + # other normal columns + for col_row in cf_col_rows: + column_meta = self._build_column_metadata(table_meta, col_row) + table_meta.columns[column_meta.name] = column_meta + + if trigger_rows: + for trigger_row in trigger_rows[cfname]: + trigger_meta = self._build_trigger_metadata(table_meta, trigger_row) + table_meta.triggers[trigger_meta.name] = trigger_meta + + table_meta.options = self._build_table_options(row) + table_meta.is_compact_storage = is_compact + + return table_meta + + @staticmethod + def _build_table_options(row): + """ Setup the mostly-non-schema table options, like caching settings """ + options = dict((o, row.get(o)) for o in TableMetadata.recognized_options if o in row) + + # the option name when creating tables is "dclocal_read_repair_chance", + # but the column name in system.schema_columnfamilies is + # "local_read_repair_chance". We'll store this as dclocal_read_repair_chance, + # since that's probably what users are expecting (and we need it for the + # CREATE TABLE statement anyway). + if "local_read_repair_chance" in options: + val = options.pop("local_read_repair_chance") + options["dclocal_read_repair_chance"] = val + + return options + + def _build_column_metadata(self, table_metadata, row): + name = row["column_name"] + data_type = types.lookup_casstype(row["validator"]) + is_static = row.get("type", None) == "static" + column_meta = ColumnMetadata(table_metadata, name, data_type, is_static=is_static) + index_meta = self._build_index_metadata(column_meta, row) + column_meta.index = index_meta + if index_meta: + table_metadata.indexes[index_meta.name] = index_meta + return column_meta + + @staticmethod + def _build_index_metadata(column_metadata, row): + index_name = row.get("index_name") + index_type = row.get("index_type") + if index_name or index_type: + options = row.get("index_options") + index_options = json.loads(options) if options else {} + return IndexMetadata(column_metadata, index_name, index_type, index_options) + else: + return None + + @staticmethod + def _build_trigger_metadata(table_metadata, row): + name = row["trigger_name"] + options = row["trigger_options"] + trigger_meta = TriggerMetadata(table_metadata, name, options) + return trigger_meta + + def _query_all(self): + cl = ConsistencyLevel.ONE + queries = [ + QueryMessage(query=self._SELECT_KEYSPACES, consistency_level=cl), + QueryMessage(query=self._SELECT_COLUMN_FAMILIES, consistency_level=cl), + QueryMessage(query=self._SELECT_COLUMNS, consistency_level=cl), + QueryMessage(query=self._SELECT_USERTYPES, consistency_level=cl), + QueryMessage(query=self._SELECT_FUNCTIONS, consistency_level=cl), + QueryMessage(query=self._SELECT_AGGREGATES, consistency_level=cl), + QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl) + ] + + responses = self.connection.wait_for_responses(*queries, timeout=self.timeout, fail_on_error=False) + (ks_success, ks_result), (table_success, table_result), \ + (col_success, col_result), (types_success, types_result), \ + (functions_success, functions_result), \ + (aggregates_success, aggregates_result), \ + (trigger_success, triggers_result) = responses + + self.keyspaces_result = self._handle_results(ks_success, ks_result) + self.tables_result = self._handle_results(table_success, table_result) + self.columns_result = self._handle_results(col_success, col_result) + + # if we're connected to Cassandra < 2.0, the triggers table will not exist + if trigger_success: + self.triggers_result = dict_factory(*triggers_result.results) + else: + if isinstance(triggers_result, InvalidRequest): + log.debug("triggers table not found") + elif isinstance(triggers_result, Unauthorized): + log.warning("this version of Cassandra does not allow access to schema_triggers metadata with authorization enabled (CASSANDRA-7967); " + "The driver will operate normally, but will not reflect triggers in the local metadata model, or schema strings.") + else: + raise triggers_result + + # if we're connected to Cassandra < 2.1, the usertypes table will not exist + if types_success: + self.types_result = dict_factory(*types_result.results) + else: + if isinstance(types_result, InvalidRequest): + log.debug("user types table not found") + self.types_result = {} + else: + raise types_result + + # functions were introduced in Cassandra 2.2 + if functions_success: + self.functions_result = dict_factory(*functions_result.results) + else: + if isinstance(functions_result, InvalidRequest): + log.debug("user functions table not found") + else: + raise functions_result + + # aggregates were introduced in Cassandra 2.2 + if aggregates_success: + self.aggregates_result = dict_factory(aggregates_result) + else: + if isinstance(aggregates_result, InvalidRequest): + log.debug("user aggregates table not found") + else: + raise aggregates_result + + self._aggregate_results() + + def _aggregate_results(self): + m = self.keyspace_table_rows + for row in self.tables_result: + m[row["keyspace_name"]].append(row) + + m = self.keyspace_table_col_rows + for row in self.columns_result: + ksname = row["keyspace_name"] + cfname = row["columnfamily_name"] + m[ksname][cfname].append(row) + + m = self.keyspace_type_rows + for row in self.types_result: + m[row["keyspace_name"]].append(row) + + m = self.keyspace_func_rows + for row in self.functions_result: + m[row["keyspace_name"]].append(row) + + m = self.keyspace_agg_rows + for row in self.aggregates_result: + m[row["keyspace_name"]].append(row) + + m = self.keyspace_table_trigger_rows + for row in self.triggers_result: + ksname = row["keyspace_name"] + cfname = row["columnfamily_name"] + m[ksname][cfname].append(row) + + +class SchemaParserV3(SchemaParserV22): + pass + + +def get_schema_parser(connection, timeout): + server_version = connection.server_version + if server_version.startswith('3'): + return SchemaParserV3(connection, timeout) + else: + # we could further specialize by version. Right now just refactoring the + # multi-version parser we have. + return SchemaParserV22(connection, timeout) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 9b544236ec..d257825d79 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -26,7 +26,7 @@ WriteFailure, ReadFailure, FunctionFailure, AlreadyExists, InvalidRequest, Unauthorized, UnsupportedOperation, UserFunctionDescriptor, - UserAggregateDescriptor) + UserAggregateDescriptor, SchemaTargetType) from cassandra.marshal import (int32_pack, int32_unpack, uint16_pack, uint16_unpack, int8_pack, int8_unpack, uint64_pack, header_pack, v3_header_pack) @@ -69,6 +69,7 @@ class InternalError(Exception): _UNSET_VALUE = object() + class _RegisterMessageType(type): def __init__(cls, name, bases, dct): if not name.startswith('_'): @@ -948,19 +949,22 @@ def recv_schema_change(cls, f, protocol_version): if protocol_version >= 3: target = read_string(f) keyspace = read_string(f) - event = {'change_type': change_type, 'keyspace': keyspace} - if target != "KEYSPACE": + event = {'target_type': target, 'change_type': change_type, 'keyspace': keyspace} + if target != SchemaTargetType.KEYSPACE: target_name = read_string(f) - if target == 'FUNCTION': + if target == SchemaTargetType.FUNCTION: event['function'] = UserFunctionDescriptor(target_name, [read_string(f) for _ in range(read_short(f))]) - elif target == 'AGGREGATE': + elif target == SchemaTargetType.AGGREGATE: event['aggregate'] = UserAggregateDescriptor(target_name, [read_string(f) for _ in range(read_short(f))]) else: event[target.lower()] = target_name else: keyspace = read_string(f) table = read_string(f) - event = {'change_type': change_type, 'keyspace': keyspace, 'table': table} + if table: + event = {'target_type': SchemaTargetType.TABLE, 'change_type': change_type, 'keyspace': keyspace, 'table': table} + else: + event = {'target_type': SchemaTargetType.KEYSPACE, 'change_type': change_type, 'keyspace': keyspace} return event diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 1d7f68df57..582977a95e 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -125,7 +125,7 @@ def test_basic_table_meta_properties(self): self.assertTrue(self.cfname in ksmeta.tables) tablemeta = ksmeta.tables[self.cfname] - self.assertEqual(tablemeta.keyspace, ksmeta) + self.assertEqual(tablemeta.keyspace, ksmeta) # tablemeta.keyspace is deprecated self.assertEqual(tablemeta.name, self.cfname) self.assertEqual([u'a'], [c.name for c in tablemeta.partition_key]) diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index 9c93f5afe4..dc3e8fa1fc 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -20,7 +20,7 @@ from concurrent.futures import ThreadPoolExecutor from mock import Mock, ANY, call -from cassandra import OperationTimedOut +from cassandra import OperationTimedOut, SchemaTargetType, SchemaChangeType from cassandra.protocol import ResultMessage, RESULT_KIND_ROWS from cassandra.cluster import ControlConnection, _Scheduler from cassandra.pool import Host @@ -94,8 +94,8 @@ class MockConnection(object): def __init__(self): self.host = "192.168.1.0" self.local_results = [ - ["schema_version", "cluster_name", "data_center", "rack", "partitioner", "tokens"], - [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", ["0", "100", "200"]]] + ["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens"], + [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"]]] ] self.peer_results = [ @@ -136,8 +136,8 @@ def setUp(self): def _get_matching_schema_preloaded_results(self): local_results = [ - ["schema_version", "cluster_name", "data_center", "rack", "partitioner", "tokens"], - [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", ["0", "100", "200"]]] + ["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens"], + [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"]]] ] local_response = ResultMessage(kind=RESULT_KIND_ROWS, results=local_results) @@ -152,8 +152,8 @@ def _get_matching_schema_preloaded_results(self): def _get_nonmatching_schema_preloaded_results(self): local_results = [ - ["schema_version", "cluster_name", "data_center", "rack", "partitioner", "tokens"], - [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", ["0", "100", "200"]]] + ["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens"], + [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"]]] ] local_response = ResultMessage(kind=RESULT_KIND_ROWS, results=local_results) @@ -402,26 +402,30 @@ def test_handle_status_change(self): def test_handle_schema_change(self): - for change_type in ('CREATED', 'DROPPED', 'UPDATED'): + change_types = [getattr(SchemaChangeType, attr) for attr in vars(SchemaChangeType) if attr[0] != '_'] + for change_type in change_types: event = { + 'target_type': SchemaTargetType.TABLE, 'change_type': change_type, 'keyspace': 'ks1', 'table': 'table1' } self.cluster.scheduler.reset_mock() self.control_connection._handle_schema_change(event) - self.cluster.scheduler.schedule_unique.assert_called_once_with(0.0, self.control_connection.refresh_schema, 'ks1', 'table1', None, None, None) + self.cluster.scheduler.schedule_unique.assert_called_once_with(0.0, self.control_connection.refresh_schema, **event) self.cluster.scheduler.reset_mock() - event['table'] = None + event['target_type'] = SchemaTargetType.KEYSPACE + del event['table'] self.control_connection._handle_schema_change(event) - self.cluster.scheduler.schedule_unique.assert_called_once_with(0.0, self.control_connection.refresh_schema, 'ks1', None, None, None, None) + self.cluster.scheduler.schedule_unique.assert_called_once_with(0.0, self.control_connection.refresh_schema, **event) def test_refresh_disabled(self): cluster = MockCluster() schema_event = { - 'change_type': 'CREATED', + 'target_type': SchemaTargetType.TABLE, + 'change_type': SchemaChangeType.CREATED, 'keyspace': 'ks1', 'table': 'table1' } @@ -463,4 +467,4 @@ def test_refresh_disabled(self): cc_no_topo_refresh._handle_schema_change(schema_event) cluster.scheduler.schedule_unique.assert_has_calls([call(ANY, cc_no_topo_refresh.refresh_node_list_and_token_map), call(0.0, cc_no_topo_refresh.refresh_schema, - schema_event['keyspace'], schema_event['table'], None, None, None)]) + **schema_event)]) diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 8bdd428c65..3da3945f2d 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -26,7 +26,7 @@ NetworkTopologyStrategy, SimpleStrategy, LocalStrategy, NoMurmur3, protect_name, protect_names, protect_value, is_valid_name, - UserType, KeyspaceMetadata, Metadata, + UserType, KeyspaceMetadata, get_schema_parser, _UnknownStrategy) from cassandra.policies import SimpleConvictionPolicy from cassandra.pool import Host @@ -316,15 +316,17 @@ def test_build_index_as_cql(self): column_meta.name = 'column_name_here' column_meta.table.name = 'table_name_here' column_meta.table.keyspace.name = 'keyspace_name_here' - meta_model = Metadata() + connection = Mock() + connection.server_version = '2.1.0' + parser = get_schema_parser(connection, 0.1) row = {'index_name': 'index_name_here', 'index_type': 'index_type_here'} - index_meta = meta_model._build_index_metadata(column_meta, row) + index_meta = parser._build_index_metadata(column_meta, row) self.assertEqual(index_meta.as_cql_query(), 'CREATE INDEX index_name_here ON keyspace_name_here.table_name_here (column_name_here)') row['index_options'] = '{ "class_name": "class_name_here" }' row['index_type'] = 'CUSTOM' - index_meta = meta_model._build_index_metadata(column_meta, row) + index_meta = parser._build_index_metadata(column_meta, row) self.assertEqual(index_meta.as_cql_query(), "CREATE CUSTOM INDEX index_name_here ON keyspace_name_here.table_name_here (column_name_here) USING 'class_name_here'") diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index 92351a9d1d..600f85bcac 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -19,7 +19,7 @@ from mock import Mock, MagicMock, ANY -from cassandra import ConsistencyLevel, Unavailable +from cassandra import ConsistencyLevel, Unavailable, SchemaTargetType, SchemaChangeType from cassandra.cluster import Session, ResponseFuture, NoHostAvailable from cassandra.connection import Connection, ConnectionException from cassandra.protocol import (ReadTimeoutErrorMessage, WriteTimeoutErrorMessage, @@ -101,11 +101,13 @@ def test_schema_change_result(self): rf = self.make_response_future(session) rf.send_request() + event_results={'target_type': SchemaTargetType.TABLE, 'change_type': SchemaChangeType.CREATED, + 'keyspace': "keyspace1", "table": "table1"} result = Mock(spec=ResultMessage, kind=RESULT_KIND_SCHEMA_CHANGE, - results={'keyspace': "keyspace1", "table": "table1"}) + results=event_results) rf._set_result(result) - session.submit.assert_called_once_with(ANY, 'keyspace1', 'table1', None, None, None, ANY, rf) + session.submit.assert_called_once_with(ANY, ANY, rf, **event_results) def test_other_result_message_kind(self): session = self.make_session() From c7690aaefa5e90e71576740f2a008e183fc32513 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 1 Jul 2015 14:14:05 -0500 Subject: [PATCH 0004/2005] Don't use change_type in schema refresh. Was hoping to use change_type to avoid a query on DROPPED. However, this introduces issues when the async drop event coincides with a create or updatee on the same schema element. Always querying makes sure the refresh sees the new element, assuming schema agreement wait is enabled. --- cassandra/__init__.py | 3 +++ cassandra/metadata.py | 36 ++++++++++++++++-------------------- 2 files changed, 19 insertions(+), 20 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index ad073f7ccc..a6beab2032 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -159,6 +159,9 @@ def signature(self): def format_signature(name, type_signature): return "%s(%s)" % (name, ','.join(t for t in type_signature)) + def __repr__(self): + return "%s(%s, %s)" % (self.__class__.__name__, self.name, self.type_signature) + class UserFunctionDescriptor(SignatureDescriptor): """ diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 73c7687c6c..b79ff10a58 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -120,19 +120,15 @@ def refresh(self, connection, timeout, target_type=None, change_type=None, **kwa tt_lower = target_type.lower() try: - if change_type == SchemaChangeType.DROPPED: + parser = get_schema_parser(connection, timeout) + parse_method = getattr(parser, 'get_' + tt_lower) + meta = parse_method(**kwargs) + if meta: + update_method = getattr(self, '_update_' + tt_lower) + update_method(meta) + else: drop_method = getattr(self, '_drop_' + tt_lower) drop_method(**kwargs) - else: - parser = get_schema_parser(connection, timeout) - parse_method = getattr(parser, 'get_' + tt_lower) - meta = parse_method(**kwargs) - if meta: - update_method = getattr(self, '_update_' + tt_lower) - update_method(meta) - else: - drop_method = getattr(self, '_drop_' + tt_lower) - drop_method(**kwargs) except AttributeError: raise ValueError("Unknown schema target_type: '%s'" % target_type) @@ -1620,8 +1616,8 @@ def get_type(self, keyspace, type): where_clause = " WHERE keyspace_name = '%s' AND type_name = '%s'" % (keyspace, type) type_query = QueryMessage(query=self._SELECT_USERTYPES + where_clause, consistency_level=ConsistencyLevel.ONE) type_result = self.connection.wait_for_response(type_query, self.timeout) - if type_result.results: - type_result = dict_factory(*type_result.results) + type_result = dict_factory(*type_result.results) + if type_result: return self._build_user_type(keyspace, type_result[0]) def get_function(self, keyspace, function): @@ -1629,8 +1625,8 @@ def get_function(self, keyspace, function): % (keyspace, function.name, ','.join("'%s'" % t for t in function.type_signature)) function_query = QueryMessage(query=self._SELECT_FUNCTIONS + where_clause, consistency_level=ConsistencyLevel.ONE) function_result = self.connection.wait_for_response(function_query, self.timeout) - if function_result.results: - function_result = dict_factory(*function_result.results) + function_result = dict_factory(*function_result.results) + if function_result: return self._build_function(keyspace, function_result[0]) def get_aggregate(self, keyspace, aggregate): @@ -1639,16 +1635,16 @@ def get_aggregate(self, keyspace, aggregate): % (keyspace, aggregate.name, ','.join("'%s'" % t for t in aggregate.type_signature)) aggregate_query = QueryMessage(query=self._SELECT_AGGREGATES + where_clause, consistency_level=ConsistencyLevel.ONE) aggregate_result = self.connection.wait_for_response(aggregate_query, self.timeout) - if aggregate_result.results: - aggregate_result = dict_factory(*aggregate_result.results) + aggregate_result = dict_factory(*aggregate_result.results) + if aggregate_result: return self._build_aggregate(keyspace, aggregate_result[0]) def get_keyspace(self, keyspace): where_clause = " WHERE keyspace_name = '%s'" % (keyspace,) ks_query = QueryMessage(query=self._SELECT_KEYSPACES + where_clause, consistency_level=ConsistencyLevel.ONE) ks_result = self.connection.wait_for_response(ks_query, self.timeout) - if ks_result.results: - ks_result = dict_factory(*ks_result.results) + ks_result = dict_factory(*ks_result.results) + if ks_result: return self._build_keyspace_metadata(ks_result[0]) @staticmethod @@ -1933,7 +1929,7 @@ def _query_all(self): # aggregates were introduced in Cassandra 2.2 if aggregates_success: - self.aggregates_result = dict_factory(aggregates_result) + self.aggregates_result = dict_factory(*aggregates_result.results) else: if isinstance(aggregates_result, InvalidRequest): log.debug("user aggregates table not found") From 483d7d366d44380c748aec01abe688e07a911f3e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 1 Jul 2015 15:00:04 -0500 Subject: [PATCH 0005/2005] Refactor common query row/build meta functionality --- cassandra/metadata.py | 58 ++++++++++++++++++------------------------- 1 file changed, 24 insertions(+), 34 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index b79ff10a58..763b081ab1 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1526,6 +1526,13 @@ def _handle_results(self, success, result): else: raise result + def _query_build_row(self, query_string, build_func): + query = QueryMessage(query=query_string, consistency_level=ConsistencyLevel.ONE) + response = self.connection.wait_for_response(query, self.timeout) + result = dict_factory(*response.results) + if result: + return build_func(result[0]) + class SchemaParserV12(_SchemaParser): _SELECT_KEYSPACES = "SELECT * FROM system.schema_keyspaces" @@ -1573,20 +1580,20 @@ def get_all_keyspaces(self): keyspace_col_rows = self.keyspace_table_col_rows.get(keyspace_meta.name, {}) keyspace_trigger_rows = self.keyspace_table_trigger_rows.get(keyspace_meta.name, {}) for table_row in self.keyspace_table_rows.get(keyspace_meta.name, []): - table_meta = self._build_table_metadata(keyspace_meta.name, table_row, keyspace_col_rows, keyspace_trigger_rows) + table_meta = self._build_table_metadata(table_row, keyspace_col_rows, keyspace_trigger_rows) table_meta.keyspace = keyspace_meta # temporary while TableMetadata.keyspace is deprecated keyspace_meta._add_table_metadata(table_meta) for usertype_row in self.keyspace_type_rows.get(keyspace_meta.name, []): - usertype = self._build_user_type(keyspace_meta.name, usertype_row) + usertype = self._build_user_type(usertype_row) keyspace_meta.user_types[usertype.name] = usertype for fn_row in self.keyspace_func_rows.get(keyspace_meta.name, []): - fn = self._build_function(keyspace_meta.name, fn_row) + fn = self._build_function(fn_row) keyspace_meta.functions[fn.signature] = fn for agg_row in self.keyspace_agg_rows.get(keyspace_meta.name, []): - agg = self._build_aggregate(keyspace_meta.name, agg_row) + agg = self._build_aggregate(agg_row) keyspace_meta.aggregates[agg.signature] = agg yield keyspace_meta @@ -1609,43 +1616,25 @@ def get_table(self, keyspace, table): triggers_result = self._handle_results(triggers_success, triggers_result) if table_result: - return self._build_table_metadata(keyspace, table_result[0], {table: col_result}, {table: triggers_result}) + return self._build_table_metadata(table_result[0], {table: col_result}, {table: triggers_result}) - # TODO: refactor common query/build code def get_type(self, keyspace, type): where_clause = " WHERE keyspace_name = '%s' AND type_name = '%s'" % (keyspace, type) - type_query = QueryMessage(query=self._SELECT_USERTYPES + where_clause, consistency_level=ConsistencyLevel.ONE) - type_result = self.connection.wait_for_response(type_query, self.timeout) - type_result = dict_factory(*type_result.results) - if type_result: - return self._build_user_type(keyspace, type_result[0]) + return self._query_build_row(self._SELECT_USERTYPES + where_clause, self._build_user_type) def get_function(self, keyspace, function): where_clause = " WHERE keyspace_name = '%s' AND function_name = '%s' AND signature = [%s]" \ % (keyspace, function.name, ','.join("'%s'" % t for t in function.type_signature)) - function_query = QueryMessage(query=self._SELECT_FUNCTIONS + where_clause, consistency_level=ConsistencyLevel.ONE) - function_result = self.connection.wait_for_response(function_query, self.timeout) - function_result = dict_factory(*function_result.results) - if function_result: - return self._build_function(keyspace, function_result[0]) + return self._query_build_row(self._SELECT_FUNCTIONS + where_clause, self._build_function) def get_aggregate(self, keyspace, aggregate): - # user defined aggregate within this keyspace changed where_clause = " WHERE keyspace_name = '%s' AND aggregate_name = '%s' AND signature = [%s]" \ % (keyspace, aggregate.name, ','.join("'%s'" % t for t in aggregate.type_signature)) - aggregate_query = QueryMessage(query=self._SELECT_AGGREGATES + where_clause, consistency_level=ConsistencyLevel.ONE) - aggregate_result = self.connection.wait_for_response(aggregate_query, self.timeout) - aggregate_result = dict_factory(*aggregate_result.results) - if aggregate_result: - return self._build_aggregate(keyspace, aggregate_result[0]) + return self._query_build_row(self._SELECT_AGGREGATES + where_clause, self._build_aggregate) def get_keyspace(self, keyspace): where_clause = " WHERE keyspace_name = '%s'" % (keyspace,) - ks_query = QueryMessage(query=self._SELECT_KEYSPACES + where_clause, consistency_level=ConsistencyLevel.ONE) - ks_result = self.connection.wait_for_response(ks_query, self.timeout) - ks_result = dict_factory(*ks_result.results) - if ks_result: - return self._build_keyspace_metadata(ks_result[0]) + return self._query_build_row(self._SELECT_KEYSPACES + where_clause, self._build_keyspace_metadata) @staticmethod def _build_keyspace_metadata(row): @@ -1656,31 +1645,32 @@ def _build_keyspace_metadata(row): return KeyspaceMetadata(name, durable_writes, strategy_class, strategy_options) @staticmethod - def _build_user_type(keyspace, usertype_row): + def _build_user_type(usertype_row): type_classes = list(map(types.lookup_casstype, usertype_row['field_types'])) - return UserType(keyspace, usertype_row['type_name'], + return UserType(usertype_row['keyspace_name'], usertype_row['type_name'], usertype_row['field_names'], type_classes) @staticmethod - def _build_function(keyspace, function_row): + def _build_function(function_row): return_type = types.lookup_casstype(function_row['return_type']) - return Function(keyspace, function_row['function_name'], + return Function(function_row['keyspace_name'], function_row['function_name'], function_row['signature'], function_row['argument_names'], return_type, function_row['language'], function_row['body'], function_row['called_on_null_input']) @staticmethod - def _build_aggregate(keyspace, aggregate_row): + def _build_aggregate(aggregate_row): state_type = types.lookup_casstype(aggregate_row['state_type']) initial_condition = aggregate_row['initcond'] if initial_condition is not None: initial_condition = state_type.deserialize(initial_condition, 3) return_type = types.lookup_casstype(aggregate_row['return_type']) - return Aggregate(keyspace, aggregate_row['aggregate_name'], + return Aggregate(aggregate_row['keyspace_name'], aggregate_row['aggregate_name'], aggregate_row['signature'], aggregate_row['state_func'], state_type, aggregate_row['final_func'], initial_condition, return_type) - def _build_table_metadata(self, keyspace_name, row, col_rows, trigger_rows): + def _build_table_metadata(self, row, col_rows, trigger_rows): + keyspace_name = row["keyspace_name"] cfname = row["columnfamily_name"] cf_col_rows = col_rows.get(cfname, []) From 179d0b6503028e54af6b4b5da509cce2e04fe21a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 1 Jul 2015 16:44:41 -0500 Subject: [PATCH 0006/2005] integration test setup: use cluster meta instead of selecting directly. Required for version-dependent schema parsing. --- tests/integration/__init__.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 3568b1ffc6..33d3ae93c0 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -280,10 +280,8 @@ def setup_keyspace(ipformat=None): session = cluster.connect() try: - results = execute_until_pass(session, "SELECT keyspace_name FROM system.schema_keyspaces") - existing_keyspaces = [row[0] for row in results] for ksname in ('test1rf', 'test2rf', 'test3rf'): - if ksname in existing_keyspaces: + if ksname in cluster.metadata.keyspaces: execute_until_pass(session, "DROP KEYSPACE %s" % ksname) ddl = ''' From 5c585e2304e9d987528742e88b999176623be1cd Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 1 Jul 2015 16:45:46 -0500 Subject: [PATCH 0007/2005] Initial SchemaParser for C* 3 - select from new 'system_schema' tables - use new keyspaces 'replication' config map --- cassandra/metadata.py | 49 ++++++++++++++++++++++++------------------- 1 file changed, 28 insertions(+), 21 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 763b081ab1..6b177d7049 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1534,26 +1534,17 @@ def _query_build_row(self, query_string, build_func): return build_func(result[0]) -class SchemaParserV12(_SchemaParser): +class SchemaParserV22(_SchemaParser): _SELECT_KEYSPACES = "SELECT * FROM system.schema_keyspaces" _SELECT_COLUMN_FAMILIES = "SELECT * FROM system.schema_columnfamilies" _SELECT_COLUMNS = "SELECT * FROM system.schema_columns" - - pass - - -class SchemaParserV20(SchemaParserV12): _SELECT_TRIGGERS = "SELECT * FROM system.schema_triggers" - - -class SchemaParserV21(SchemaParserV20): - _SELECT_USERTYPES = "SELECT * FROM system.schema_usertypes" - - -class SchemaParserV22(SchemaParserV21): + _SELECT_TYPES = "SELECT * FROM system.schema_usertypes" _SELECT_FUNCTIONS = "SELECT * FROM system.schema_functions" _SELECT_AGGREGATES = "SELECT * FROM system.schema_aggregates" + _table_name_col = 'columnfamily_name' + def __init__(self, connection, timeout): super(SchemaParserV22, self).__init__(connection, timeout) self.keyspaces_result = [] @@ -1600,7 +1591,7 @@ def get_all_keyspaces(self): def get_table(self, keyspace, table): cl = ConsistencyLevel.ONE - where_clause = " WHERE keyspace_name = '%s' AND columnfamily_name = '%s'" % (keyspace, table) + where_clause = " WHERE keyspace_name = '%s' AND %s = '%s'" % (keyspace, self._table_name_col, table) cf_query = QueryMessage(query=self._SELECT_COLUMN_FAMILIES + where_clause, consistency_level=cl) col_query = QueryMessage(query=self._SELECT_COLUMNS + where_clause, consistency_level=cl) triggers_query = QueryMessage(query=self._SELECT_TRIGGERS + where_clause, consistency_level=cl) @@ -1620,7 +1611,7 @@ def get_table(self, keyspace, table): def get_type(self, keyspace, type): where_clause = " WHERE keyspace_name = '%s' AND type_name = '%s'" % (keyspace, type) - return self._query_build_row(self._SELECT_USERTYPES + where_clause, self._build_user_type) + return self._query_build_row(self._SELECT_TYPES + where_clause, self._build_user_type) def get_function(self, keyspace, function): where_clause = " WHERE keyspace_name = '%s' AND function_name = '%s' AND signature = [%s]" \ @@ -1671,7 +1662,7 @@ def _build_aggregate(aggregate_row): def _build_table_metadata(self, row, col_rows, trigger_rows): keyspace_name = row["keyspace_name"] - cfname = row["columnfamily_name"] + cfname = row[self._table_name_col] cf_col_rows = col_rows.get(cfname, []) if not cf_col_rows: # CASSANDRA-8487 @@ -1869,7 +1860,7 @@ def _query_all(self): QueryMessage(query=self._SELECT_KEYSPACES, consistency_level=cl), QueryMessage(query=self._SELECT_COLUMN_FAMILIES, consistency_level=cl), QueryMessage(query=self._SELECT_COLUMNS, consistency_level=cl), - QueryMessage(query=self._SELECT_USERTYPES, consistency_level=cl), + QueryMessage(query=self._SELECT_TYPES, consistency_level=cl), QueryMessage(query=self._SELECT_FUNCTIONS, consistency_level=cl), QueryMessage(query=self._SELECT_AGGREGATES, consistency_level=cl), QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl) @@ -1936,7 +1927,7 @@ def _aggregate_results(self): m = self.keyspace_table_col_rows for row in self.columns_result: ksname = row["keyspace_name"] - cfname = row["columnfamily_name"] + cfname = row[self._table_name_col] m[ksname][cfname].append(row) m = self.keyspace_type_rows @@ -1954,12 +1945,28 @@ def _aggregate_results(self): m = self.keyspace_table_trigger_rows for row in self.triggers_result: ksname = row["keyspace_name"] - cfname = row["columnfamily_name"] + cfname = row[self._table_name_col] m[ksname][cfname].append(row) class SchemaParserV3(SchemaParserV22): - pass + _SELECT_KEYSPACES = "SELECT * FROM system_schema.keyspaces" + _SELECT_COLUMN_FAMILIES = "SELECT * FROM system_schema.tables" + _SELECT_COLUMNS = "SELECT * FROM system_schema.columns" + _SELECT_TRIGGERS = "SELECT * FROM system_schema.triggers" + _SELECT_TYPES = "SELECT * FROM system_schema.types" + _SELECT_FUNCTIONS = "SELECT * FROM system_schema.functions" + _SELECT_AGGREGATES = "SELECT * FROM system_schema.aggregates" + + _table_name_col = 'table_name' + + @staticmethod + def _build_keyspace_metadata(row): + name = row["keyspace_name"] + durable_writes = row["durable_writes"] + strategy_options = dict(row["replication"]) + strategy_class = strategy_options.pop("class") + return KeyspaceMetadata(name, durable_writes, strategy_class, strategy_options) def get_schema_parser(connection, timeout): @@ -1968,5 +1975,5 @@ def get_schema_parser(connection, timeout): return SchemaParserV3(connection, timeout) else: # we could further specialize by version. Right now just refactoring the - # multi-version parser we have. + # multi-version parser we have as of C* 2.2.0rc1. return SchemaParserV22(connection, timeout) From 6eebdebd64de676188f8c158f6b620b8dba9cbad Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 13 Jul 2015 17:40:11 -0500 Subject: [PATCH 0008/2005] Another iteration on updated schema This version parses the new tables schema. API is a little messy -- I would like to revisit that. PYTHON-276 --- cassandra/metadata.py | 176 +++++++++++++++----- tests/integration/standard/test_metadata.py | 6 +- 2 files changed, 134 insertions(+), 48 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 6b177d7049..8bca0beed1 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -960,30 +960,6 @@ def primary_key(self): table. """ - recognized_options = ( - "comment", - "read_repair_chance", - "dclocal_read_repair_chance", # kept to be safe, but see _build_table_options() - "local_read_repair_chance", - "replicate_on_write", - "gc_grace_seconds", - "bloom_filter_fp_chance", - "caching", - "compaction_strategy_class", - "compaction_strategy_options", - "min_compaction_threshold", - "max_compaction_threshold", - "compression_parameters", - "min_index_interval", - "max_index_interval", - "index_interval", - "speculative_retry", - "rows_per_partition_to_cache", - "memtable_flush_period_in_ms", - "populate_io_cache_on_flush", - "compression", - "default_time_to_live") - compaction_options = { "min_compaction_threshold": "min_threshold", "max_compaction_threshold": "max_threshold", @@ -999,16 +975,19 @@ def is_cql_compatible(self): """ A boolean indicating if this table can be represented as CQL in export """ - # no such thing as DCT in CQL - incompatible = issubclass(self.comparator, types.DynamicCompositeType) + comparator = getattr(self, 'comparator', None) + if comparator: + # no such thing as DCT in CQL + incompatible = issubclass(self.comparator, types.DynamicCompositeType) - # no compact storage with more than one column beyond PK if there - # are clustering columns - incompatible |= (self.is_compact_storage and - len(self.columns) > len(self.primary_key) + 1 and - len(self.clustering_key) >= 1) + # no compact storage with more than one column beyond PK if there + # are clustering columns + incompatible |= (self.is_compact_storage and + len(self.columns) > len(self.primary_key) + 1 and + len(self.clustering_key) >= 1) - return not incompatible + return not incompatible + return True def __init__(self, keyspace_name, name, partition_key=None, clustering_key=None, columns=None, triggers=None, options=None): self.keyspace_name = keyspace_name @@ -1101,18 +1080,10 @@ def as_cql_query(self, formatted=False): if self.clustering_key: cluster_str = "CLUSTERING ORDER BY " - clustering_names = protect_names([c.name for c in self.clustering_key]) - - if self.is_compact_storage and \ - not issubclass(self.comparator, types.CompositeType): - subtypes = [self.comparator] - else: - subtypes = self.comparator.subtypes - inner = [] - for colname, coltype in zip(clustering_names, subtypes): - ordering = "DESC" if issubclass(coltype, types.ReversedType) else "ASC" - inner.append("%s %s" % (colname, ordering)) + for col in self.clustering_key: + ordering = "DESC" if issubclass(col.data_type, types.ReversedType) else "ASC" + inner.append("%s %s" % (protect_name(col.name), ordering)) cluster_str += "(%s)" % ", ".join(inner) option_strings.append(cluster_str) @@ -1545,6 +1516,30 @@ class SchemaParserV22(_SchemaParser): _table_name_col = 'columnfamily_name' + recognized_table_options = ( + "comment", + "read_repair_chance", + "dclocal_read_repair_chance", # kept to be safe, but see _build_table_options() + "local_read_repair_chance", + "replicate_on_write", + "gc_grace_seconds", + "bloom_filter_fp_chance", + "caching", + "compaction_strategy_class", + "compaction_strategy_options", + "min_compaction_threshold", + "max_compaction_threshold", + "compression_parameters", + "min_index_interval", + "max_index_interval", + "index_interval", + "speculative_retry", + "rows_per_partition_to_cache", + "memtable_flush_period_in_ms", + "populate_io_cache_on_flush", + "compression", + "default_time_to_live") + def __init__(self, connection, timeout): super(SchemaParserV22, self).__init__(connection, timeout) self.keyspaces_result = [] @@ -1809,10 +1804,9 @@ def _build_table_metadata(self, row, col_rows, trigger_rows): return table_meta - @staticmethod - def _build_table_options(row): + def _build_table_options(self, row): """ Setup the mostly-non-schema table options, like caching settings """ - options = dict((o, row.get(o)) for o in TableMetadata.recognized_options if o in row) + options = dict((o, row.get(o)) for o in self.recognized_table_options if o in row) # the option name when creating tables is "dclocal_read_repair_chance", # but the column name in system.schema_columnfamilies is @@ -1960,6 +1954,21 @@ class SchemaParserV3(SchemaParserV22): _table_name_col = 'table_name' + recognized_table_options = ( + 'bloom_filter_fp_chance', + 'caching', + 'comment', + 'compaction', + 'compression', + 'dclocal_read_repair_chance', + 'default_time_to_live', + 'gc_grace_seconds', + 'max_index_interval', + 'memtable_flush_period_in_ms', + 'min_index_interval', + 'read_repair_chance', + 'speculative_retry') + @staticmethod def _build_keyspace_metadata(row): name = row["keyspace_name"] @@ -1968,6 +1977,81 @@ def _build_keyspace_metadata(row): strategy_class = strategy_options.pop("class") return KeyspaceMetadata(name, durable_writes, strategy_class, strategy_options) + def _build_table_metadata(self, row, col_rows, trigger_rows): + keyspace_name = row["keyspace_name"] + table_name = row[self._table_name_col] + cf_col_rows = col_rows.get(table_name, []) + + if not cf_col_rows: # CASSANDRA-8487 + log.warning("Building table metadata with no column meta for %s.%s", + keyspace_name, table_name) + + table_meta = TableMetadataV3(keyspace_name, table_name) + + for col_row in cf_col_rows: + column_meta = self._build_column_metadata(table_meta, col_row) + table_meta.columns[column_meta.name] = column_meta + + # partition key + partition_rows = [r for r in cf_col_rows + if r.get('type', None) == "partition_key"] + if len(partition_rows) > 1: + partition_rows = sorted(partition_rows, key=lambda row: row.get('component_index')) + for r in partition_rows: + table_meta.partition_key.append(table_meta.columns[r.get('column_name')]) + + # clustering key + clustering_rows = [r for r in cf_col_rows + if r.get('type', None) == "clustering"] + if len(clustering_rows) > 1: + clustering_rows = sorted(clustering_rows, key=lambda row: row.get('component_index')) + for r in clustering_rows: + table_meta.clustering_key.append(table_meta.columns[r.get('column_name')]) + + if trigger_rows: + for trigger_row in trigger_rows[table_name]: + trigger_meta = self._build_trigger_metadata(table_meta, trigger_row) + table_meta.triggers[trigger_meta.name] = trigger_meta + + table_meta.options = self._build_table_options(row) + flags = row.get('flags', set()) + if flags: + table_meta.is_compact_storage = 'dense' in flags or 'super' in flags or 'compound' not in flags + return table_meta + + def _build_table_options(self, row): + """ Setup the mostly-non-schema table options, like caching settings """ + return dict((o, row.get(o)) for o in self.recognized_table_options if o in row) + +class TableMetadataV3(TableMetadata): + """ + For now, until I figure out what to do with this API + """ + compaction_options = {} + + _option_maps = ['caching'] + option_maps = ['compaction', 'compression', 'caching'] + + @property + def is_cql_compatible(self): + return True + + def _make_option_strings(self): + ret = [] + options_copy = dict(self.options.items()) + + for option in self.option_maps: + value = options_copy.pop(option, {}) + params = ("'%s': '%s'" % (k, v) for k, v in value.items()) + ret.append("%s = {%s}" % (option, ', '.join(params))) + + for name, value in options_copy.items(): + if value is not None: + if name == "comment": + value = value or "" + ret.append("%s = %s" % (name, protect_value(value))) + + return list(sorted(ret)) def get_schema_parser(connection, timeout): server_version = connection.server_version diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 582977a95e..3152fdb311 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -28,7 +28,7 @@ from cassandra.cqltypes import DoubleType, Int32Type, ListType, UTF8Type, MapType from cassandra.encoder import Encoder from cassandra.metadata import (Metadata, KeyspaceMetadata, TableMetadata, IndexMetadata, - Token, MD5Token, TokenMap, murmur3, Function, Aggregate) + Token, MD5Token, TokenMap, murmur3, Function, Aggregate, get_schema_parser) from cassandra.policies import SimpleConvictionPolicy from cassandra.pool import Host @@ -132,8 +132,10 @@ def test_basic_table_meta_properties(self): self.assertEqual([], tablemeta.clustering_key) self.assertEqual([u'a', u'b', u'c'], sorted(tablemeta.columns.keys())) + parser = get_schema_parser(self.cluster.control_connection._connection, 1) + for option in tablemeta.options: - self.assertIn(option, TableMetadata.recognized_options) + self.assertIn(option, parser.recognized_table_options) self.check_create_statement(tablemeta, create_statement) From dc24bf1d0ea23c5a6713f3c30988c09446a8fa01 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 14 Jul 2015 11:26:10 -0500 Subject: [PATCH 0009/2005] schema meta: make sure columns added in key order also modify table build based on compact static condition --- cassandra/metadata.py | 45 ++++++++++++++++++++++++++++++------------- 1 file changed, 32 insertions(+), 13 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 8bca0beed1..014390bdea 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1988,9 +1988,15 @@ def _build_table_metadata(self, row, col_rows, trigger_rows): table_meta = TableMetadataV3(keyspace_name, table_name) - for col_row in cf_col_rows: - column_meta = self._build_column_metadata(table_meta, col_row) - table_meta.columns[column_meta.name] = column_meta + table_meta.options = self._build_table_options(row) + flags = row.get('flags', set()) + if flags: + compact_static = False + table_meta.is_compact_storage = 'dense' in flags or 'super' in flags or 'compound' not in flags + else: + # example: create table t (a int, b int, c int, primary key((a,b))) with compact storage + compact_static = True + table_meta.is_compact_storage = True # partition key partition_rows = [r for r in cf_col_rows @@ -1998,25 +2004,38 @@ def _build_table_metadata(self, row, col_rows, trigger_rows): if len(partition_rows) > 1: partition_rows = sorted(partition_rows, key=lambda row: row.get('component_index')) for r in partition_rows: + # we have to add meta here (and not in the later loop) because TableMetadata.columns is an + # OrderedDict, and it assumes keys are inserted first, in order, when exporting CQL + column_meta = self._build_column_metadata(table_meta, r) + table_meta.columns[column_meta.name] = column_meta table_meta.partition_key.append(table_meta.columns[r.get('column_name')]) # clustering key - clustering_rows = [r for r in cf_col_rows - if r.get('type', None) == "clustering"] - if len(clustering_rows) > 1: - clustering_rows = sorted(clustering_rows, key=lambda row: row.get('component_index')) - for r in clustering_rows: - table_meta.clustering_key.append(table_meta.columns[r.get('column_name')]) + if not compact_static: + clustering_rows = [r for r in cf_col_rows + if r.get('type', None) == "clustering"] + if len(clustering_rows) > 1: + clustering_rows = sorted(clustering_rows, key=lambda row: row.get('component_index')) + for r in clustering_rows: + column_meta = self._build_column_metadata(table_meta, r) + table_meta.columns[column_meta.name] = column_meta + table_meta.clustering_key.append(table_meta.columns[r.get('column_name')]) + + for col_row in (r for r in cf_col_rows + if r.get('type', None) not in ('parition_key', 'clustering_key')): + column_meta = self._build_column_metadata(table_meta, col_row) + if not compact_static or column_meta.is_static: + # for compact static tables, we omit the clustering key and value, and only add the logical columns. + # They are marked not static so that it generates appropriate CQL + if compact_static: + column_meta.is_static = False + table_meta.columns[column_meta.name] = column_meta if trigger_rows: for trigger_row in trigger_rows[table_name]: trigger_meta = self._build_trigger_metadata(table_meta, trigger_row) table_meta.triggers[trigger_meta.name] = trigger_meta - table_meta.options = self._build_table_options(row) - flags = row.get('flags', set()) - if flags: - table_meta.is_compact_storage = 'dense' in flags or 'super' in flags or 'compound' not in flags return table_meta def _build_table_options(self, row): From 1a480f196ade42798596f5257d2cbeffcadf154f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 17 Jul 2015 16:14:30 -0500 Subject: [PATCH 0010/2005] Test updates removing refs to schema tables. --- tests/integration/standard/test_cluster.py | 6 +++--- tests/integration/standard/test_types.py | 5 ++--- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index b8439c4de3..f218fd5d26 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -314,13 +314,13 @@ def test_refresh_schema_table(self): original_meta = cluster.metadata.keyspaces original_system_meta = original_meta['system'] - original_system_schema_meta = original_system_meta.tables['schema_columnfamilies'] + original_system_schema_meta = original_system_meta.tables['local'] # only refresh one table - cluster.refresh_table_metadata('system', 'schema_columnfamilies') + cluster.refresh_table_metadata('system', 'local') current_meta = cluster.metadata.keyspaces current_system_meta = current_meta['system'] - current_system_schema_meta = current_system_meta.tables['schema_columnfamilies'] + current_system_schema_meta = current_system_meta.tables['local'] self.assertIs(original_meta, current_meta) self.assertIs(original_system_meta, current_system_meta) self.assertIsNot(original_system_schema_meta, current_system_schema_meta) diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index f5c148ddd4..fa94b88ba4 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -677,9 +677,8 @@ def test_can_insert_unicode_query_string(self): Test to ensure unicode strings can be used in a query """ s = self.session - - query = u"SELECT * FROM system.schema_columnfamilies WHERE keyspace_name = 'ef\u2052ef' AND columnfamily_name = %s" - s.execute(query, (u"fe\u2051fe",)) + s.execute(u"SELECT * FROM system.local WHERE key = 'ef\u2052ef'") + s.execute(u"SELECT * FROM system.local WHERE key = %s", (u"fe\u2051fe",)) def test_can_read_composite_type(self): """ From 17f2bc13c2fda731e9de8217790730b151f962ec Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Wed, 22 Jul 2015 15:10:54 +0200 Subject: [PATCH 0011/2005] Unit-tests CQLengine's previous value tracking. Test previous_value of an already-persisted object as well as one that was just instanciated. Refs PYTHON-348. --- .../cqlengine/model/test_model_io.py | 74 +++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index 3ba2aa9711..b6f76599e7 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -347,6 +347,80 @@ def test_get_changed_columns(self): self.instance.save() assert self.instance.get_changed_columns() == [] + def test_previous_value_tracking_of_persisted_instance(self): + # Check initial internal states. + assert self.instance.get_changed_columns() == [] + assert self.instance._values['count'].previous_value == 0 + + # Change value and check internal states. + self.instance.count = 1 + assert self.instance.get_changed_columns() == ['count'] + assert self.instance._values['count'].previous_value == 0 + + # Internal states should be updated on save. + self.instance.save() + assert self.instance.get_changed_columns() == [] + assert self.instance._values['count'].previous_value == 1 + + # Change value twice. + self.instance.count = 2 + assert self.instance.get_changed_columns() == ['count'] + assert self.instance._values['count'].previous_value == 1 + self.instance.count = 3 + assert self.instance.get_changed_columns() == ['count'] + assert self.instance._values['count'].previous_value == 1 + + # Internal states updated on save. + self.instance.save() + assert self.instance.get_changed_columns() == [] + assert self.instance._values['count'].previous_value == 3 + + # Change value and reset it. + self.instance.count = 2 + assert self.instance.get_changed_columns() == ['count'] + assert self.instance._values['count'].previous_value == 3 + self.instance.count = 3 + assert self.instance.get_changed_columns() == [] + assert self.instance._values['count'].previous_value == 3 + + # Nothing to save: values in initial conditions. + self.instance.save() + assert self.instance.get_changed_columns() == [] + assert self.instance._values['count'].previous_value == 3 + + def test_previous_value_tracking_on_instanciation(self): + self.instance = TestMultiKeyModel( + partition=random.randint(0, 1000), + cluster=random.randint(0, 1000), + count=0, + text='happy') + + # Columns of instances not persisted yet should be marked as changed. + assert set(self.instance.get_changed_columns()) == set([ + 'partition', 'cluster', 'count', 'text']) + assert self.instance._values['partition'].previous_value is None + assert self.instance._values['cluster'].previous_value is None + assert self.instance._values['count'].previous_value is None + assert self.instance._values['text'].previous_value is None + + # Value changes doesn't affect internal states. + self.instance.count = 1 + assert 'count' in self.instance.get_changed_columns() + assert self.instance._values['count'].previous_value is None + self.instance.count = 2 + assert 'count' in self.instance.get_changed_columns() + assert self.instance._values['count'].previous_value is None + + # Value reset is properly tracked. + self.instance.count = None + assert 'count' not in self.instance.get_changed_columns() + assert self.instance._values['count'].previous_value == None + + self.instance.save() + assert self.instance.get_changed_columns() == [] + assert self.instance._values['count'].previous_value == None + assert self.instance.count is None + class TestCanUpdate(BaseCassEngTestCase): From d2f673f78d4930cc7cfab5bf0e5a19befc11daeb Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Mon, 3 Aug 2015 20:38:35 +0200 Subject: [PATCH 0012/2005] First attempt at fixing previous value tracking. I reused the global '_is_persisted' flag on models to tell the value manager about the origin of the previous value. Previous value is kept aligned to the provided value only if the model instance is persisted, else it is reset to None. If I succeed, I propose to rename 'previous_value' to 'persisted_value' then. --- cassandra/cqlengine/columns.py | 7 +++++-- cassandra/cqlengine/models.py | 24 +++++++++++------------- cassandra/cqlengine/usertype.py | 5 ++--- 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 185cccfbbc..0162152bf5 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -31,9 +31,12 @@ class BaseValueManager(object): def __init__(self, instance, column, value): self.instance = instance self.column = column - self.previous_value = deepcopy(value) self.value = value - self.explicit = False + + # Brand new instances do no have previous value yet. + self.previous_value = None + if self.instance._is_persisted: + self.previous_value = deepcopy(value) @property def deleted(self): diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index ddb7945995..8bfbdb9af0 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -357,26 +357,26 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): _table_name = None # used internally to cache a derived table name def __init__(self, **values): - self._values = {} self._ttl = self.__default_ttl__ self._timestamp = None self._transaction = None + self._batch = None + self._timeout = connection.NOT_SET + + # A flag set by the deserializer to indicate that update should be used + # when persisting changes. + self._is_persisted = values.pop('_is_persisted', False) + # Initialize each volumn to its provided or default right at + # instanciation. See PYTHON-348. + self._values = {} for name, column in self._columns.items(): value = values.get(name, None) if value is not None or isinstance(column, columns.BaseContainerColumn): value = column.to_python(value) value_mngr = column.value_manager(self, column, value) - if name in values: - value_mngr.explicit = True self._values[name] = value_mngr - # a flag set by the deserializer to indicate - # that update should be used when persisting changes - self._is_persisted = False - self._batch = None - self._timeout = connection.NOT_SET - def __repr__(self): return '{0}({1})'.format(self.__class__.__name__, ', '.join('{0}={1!r}'.format(k, getattr(self, k)) @@ -447,9 +447,7 @@ def _construct_instance(cls, values): else: klass = cls - instance = klass(**field_dict) - instance._is_persisted = True - return instance + return klass(_is_persisted=True, **field_dict) def _can_update(self): """ @@ -541,7 +539,7 @@ def validate(self): """ for name, col in self._columns.items(): v = getattr(self, name) - if v is None and not self._values[name].explicit and col.has_default: + if v is None and col.has_default: v = col.get_default() val = col.validate(v) setattr(self, name, val) diff --git a/cassandra/cqlengine/usertype.py b/cassandra/cqlengine/usertype.py index 88ec033ba8..e080371f26 100644 --- a/cassandra/cqlengine/usertype.py +++ b/cassandra/cqlengine/usertype.py @@ -27,14 +27,13 @@ class BaseUserType(object): def __init__(self, **values): self._values = {} + self._is_persisted = False for name, field in self._fields.items(): value = values.get(name, None) if value is not None or isinstance(field, columns.BaseContainerColumn): value = field.to_python(value) value_mngr = field.value_manager(self, field, value) - if name in values: - value_mngr.explicit = True self._values[name] = value_mngr def __eq__(self, other): @@ -134,7 +133,7 @@ def validate(self): pass for name, field in self._fields.items(): v = getattr(self, name) - if v is None and not self._values[name].explicit and field.has_default: + if v is None and field.has_default: v = field.get_default() val = field.validate(v) setattr(self, name, val) From 333b3c585cb6580342aef895656268d2a808a4df Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 6 Aug 2015 13:41:19 -0500 Subject: [PATCH 0013/2005] 3.0.0a1 version update --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index a6beab2032..2d30c78aaa 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (2, 6, '0c2', 'post') +__version_info__ = (3, 0, '0a1') __version__ = '.'.join(map(str, __version_info__)) From 3393c23baa2696c12d7e206c72369fcae8c0733c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 6 Aug 2015 13:44:13 -0500 Subject: [PATCH 0014/2005] post release version update --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 2d30c78aaa..dadddeff14 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 0, '0a1') +__version_info__ = (3, 0, '0a1', 'post') __version__ = '.'.join(map(str, __version_info__)) From 07b22a35d575dacdd89d52d83b1ebb3cce41e71f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 10 Jul 2015 09:52:48 -0500 Subject: [PATCH 0015/2005] Limit mock dependency to version 1.0.1 1.1.0 dropped Python 2.6 support, and at least one call assert function currently used by tests. --- setup.py | 2 +- test-requirements.txt | 2 +- tox.ini | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/setup.py b/setup.py index e85d332d37..ea6f4f0514 100644 --- a/setup.py +++ b/setup.py @@ -216,7 +216,7 @@ def run_setup(extensions): keywords='cassandra,cql,orm', include_package_data=True, install_requires=dependencies, - tests_require=['nose', 'mock', 'PyYAML', 'pytz', 'sure'], + tests_require=['nose', 'mock<=1.0.1', 'PyYAML', 'pytz', 'sure'], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', diff --git a/test-requirements.txt b/test-requirements.txt index a90f1ad5c5..cc7b3c2b2e 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -2,7 +2,7 @@ blist scales nose -mock +mock<=1.0.1 ccm>=2.0 unittest2 PyYAML diff --git a/tox.ini b/tox.ini index 23b2aee1f4..b6b0b519dc 100644 --- a/tox.ini +++ b/tox.ini @@ -3,7 +3,7 @@ envlist = py26,py27,pypy,py33,py34 [base] deps = nose - mock + mock<=1.0.1 PyYAML six From 282143d7df90188a78147a38714f4ecda3b1a0ec Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 11 Aug 2015 16:15:28 -0500 Subject: [PATCH 0016/2005] test: Another UDF config option introduced in C* 3.0 --- tests/integration/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 33d3ae93c0..8890acb365 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -209,6 +209,8 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True): CCM_CLUSTER.set_configuration_options({'start_native_transport': True}) if CASSANDRA_VERSION >= '2.2': CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True}) + if CASSANDRA_VERSION >= '3.0': + CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True}) common.switch_cluster(path, cluster_name) CCM_CLUSTER.populate(nodes, ipformat=ipformat) try: From 3e8621f0925964c71dc457e64c720c43682589d6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 11 Aug 2015 16:16:01 -0500 Subject: [PATCH 0017/2005] schema 3.0: 2i moved to distinct schema table --- cassandra/metadata.py | 189 ++++++++++++++++++++++++++++++++++-------- 1 file changed, 154 insertions(+), 35 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 014390bdea..8027df9e32 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1563,10 +1563,8 @@ def get_all_keyspaces(self): for row in self.keyspaces_result: keyspace_meta = self._build_keyspace_metadata(row) - keyspace_col_rows = self.keyspace_table_col_rows.get(keyspace_meta.name, {}) - keyspace_trigger_rows = self.keyspace_table_trigger_rows.get(keyspace_meta.name, {}) for table_row in self.keyspace_table_rows.get(keyspace_meta.name, []): - table_meta = self._build_table_metadata(table_row, keyspace_col_rows, keyspace_trigger_rows) + table_meta = self._build_table_metadata(table_row) table_meta.keyspace = keyspace_meta # temporary while TableMetadata.keyspace is deprecated keyspace_meta._add_table_metadata(table_meta) @@ -1602,7 +1600,7 @@ def get_table(self, keyspace, table): triggers_result = self._handle_results(triggers_success, triggers_result) if table_result: - return self._build_table_metadata(table_result[0], {table: col_result}, {table: triggers_result}) + return self._build_table_metadata(table_result[0], col_result, triggers_result) def get_type(self, keyspace, type): where_clause = " WHERE keyspace_name = '%s' AND type_name = '%s'" % (keyspace, type) @@ -1655,12 +1653,14 @@ def _build_aggregate(aggregate_row): aggregate_row['signature'], aggregate_row['state_func'], state_type, aggregate_row['final_func'], initial_condition, return_type) - def _build_table_metadata(self, row, col_rows, trigger_rows): + def _build_table_metadata(self, row, col_rows=None, trigger_rows=None): keyspace_name = row["keyspace_name"] cfname = row[self._table_name_col] - cf_col_rows = col_rows.get(cfname, []) - if not cf_col_rows: # CASSANDRA-8487 + col_rows = col_rows or self.keyspace_table_col_rows[keyspace_name][cfname] + trigger_rows = trigger_rows or self.keyspace_table_trigger_rows[keyspace_name][cfname] + + if not col_rows: # CASSANDRA-8487 log.warning("Building table metadata with no column meta for %s.%s", keyspace_name, cfname) @@ -1678,7 +1678,7 @@ def _build_table_metadata(self, row, col_rows, trigger_rows): column_aliases = row.get("column_aliases", None) - clustering_rows = [r for r in cf_col_rows + clustering_rows = [r for r in col_rows if r.get('type', None) == "clustering_key"] if len(clustering_rows) > 1: clustering_rows = sorted(clustering_rows, key=lambda row: row.get('component_index')) @@ -1703,7 +1703,7 @@ def _build_table_metadata(self, row, col_rows, trigger_rows): else: # compact table is_compact = True - has_value = column_aliases or not cf_col_rows + has_value = column_aliases or not col_rows clustering_size = num_column_name_components # Some thrift tables define names in composite types (see PYTHON-192) @@ -1711,7 +1711,7 @@ def _build_table_metadata(self, row, col_rows, trigger_rows): column_aliases = comparator.fieldnames else: is_compact = True - if column_aliases or not cf_col_rows: + if column_aliases or not col_rows: has_value = True clustering_size = num_column_name_components else: @@ -1722,7 +1722,7 @@ def _build_table_metadata(self, row, col_rows, trigger_rows): table_meta.comparator = comparator # partition key - partition_rows = [r for r in cf_col_rows + partition_rows = [r for r in col_rows if r.get('type', None) == "partition_key"] if len(partition_rows) > 1: @@ -1767,7 +1767,7 @@ def _build_table_metadata(self, row, col_rows, trigger_rows): # value alias (if present) if has_value: - value_alias_rows = [r for r in cf_col_rows + value_alias_rows = [r for r in col_rows if r.get('type', None) == "compact_value"] if not key_aliases: # TODO are we checking the right thing here? @@ -1790,14 +1790,13 @@ def _build_table_metadata(self, row, col_rows, trigger_rows): table_meta.columns[value_alias] = col # other normal columns - for col_row in cf_col_rows: + for col_row in col_rows: column_meta = self._build_column_metadata(table_meta, col_row) table_meta.columns[column_meta.name] = column_meta - if trigger_rows: - for trigger_row in trigger_rows[cfname]: - trigger_meta = self._build_trigger_metadata(table_meta, trigger_row) - table_meta.triggers[trigger_meta.name] = trigger_meta + for trigger_row in trigger_rows: + trigger_meta = self._build_trigger_metadata(table_meta, trigger_row) + table_meta.triggers[trigger_meta.name] = trigger_meta table_meta.options = self._build_table_options(row) table_meta.is_compact_storage = is_compact @@ -1865,14 +1864,14 @@ def _query_all(self): (col_success, col_result), (types_success, types_result), \ (functions_success, functions_result), \ (aggregates_success, aggregates_result), \ - (trigger_success, triggers_result) = responses + (triggers_success, triggers_result) = responses self.keyspaces_result = self._handle_results(ks_success, ks_result) self.tables_result = self._handle_results(table_success, table_result) self.columns_result = self._handle_results(col_success, col_result) # if we're connected to Cassandra < 2.0, the triggers table will not exist - if trigger_success: + if triggers_success: self.triggers_result = dict_factory(*triggers_result.results) else: if isinstance(triggers_result, InvalidRequest): @@ -1945,8 +1944,9 @@ def _aggregate_results(self): class SchemaParserV3(SchemaParserV22): _SELECT_KEYSPACES = "SELECT * FROM system_schema.keyspaces" - _SELECT_COLUMN_FAMILIES = "SELECT * FROM system_schema.tables" + _SELECT_TABLES = "SELECT * FROM system_schema.tables" _SELECT_COLUMNS = "SELECT * FROM system_schema.columns" + _SELECT_INDEXES = "SELECT * FROM system_schema.indexes" _SELECT_TRIGGERS = "SELECT * FROM system_schema.triggers" _SELECT_TYPES = "SELECT * FROM system_schema.types" _SELECT_FUNCTIONS = "SELECT * FROM system_schema.functions" @@ -1969,6 +1969,29 @@ class SchemaParserV3(SchemaParserV22): 'read_repair_chance', 'speculative_retry') + def __init__(self, connection, timeout): + super(SchemaParserV3, self).__init__(connection, timeout) + self.indexes_result = [] + self.keyspace_table_index_rows = defaultdict(lambda: defaultdict(list)) + + def get_table(self, keyspace, table): + cl = ConsistencyLevel.ONE + where_clause = " WHERE keyspace_name = '%s' AND %s = '%s'" % (keyspace, self._table_name_col, table) + cf_query = QueryMessage(query=self._SELECT_TABLES + where_clause, consistency_level=cl) + col_query = QueryMessage(query=self._SELECT_COLUMNS + where_clause, consistency_level=cl) + indexes_query = QueryMessage(query=self._SELECT_INDEXES + where_clause, consistency_level=cl) + triggers_query = QueryMessage(query=self._SELECT_TRIGGERS + where_clause, consistency_level=cl) + (cf_success, cf_result), (col_success, col_result), \ + (indexes_sucess, indexes_result), (triggers_success, triggers_result) \ + = self.connection.wait_for_responses(cf_query, col_query, indexes_query, triggers_query, timeout=self.timeout, fail_on_error=False) + table_result = self._handle_results(cf_success, cf_result) + + if table_result: + col_result = self._handle_results(col_success, col_result) + indexes_result = self._handle_results(indexes_sucess, indexes_result) + triggers_result = self._handle_results(triggers_success, triggers_result) + return self._build_table_metadata(table_result[0], col_result, triggers_result, indexes_result) + @staticmethod def _build_keyspace_metadata(row): name = row["keyspace_name"] @@ -1977,14 +2000,13 @@ def _build_keyspace_metadata(row): strategy_class = strategy_options.pop("class") return KeyspaceMetadata(name, durable_writes, strategy_class, strategy_options) - def _build_table_metadata(self, row, col_rows, trigger_rows): + def _build_table_metadata(self, row, col_rows=None, trigger_rows=None, index_rows=None): keyspace_name = row["keyspace_name"] table_name = row[self._table_name_col] - cf_col_rows = col_rows.get(table_name, []) - if not cf_col_rows: # CASSANDRA-8487 - log.warning("Building table metadata with no column meta for %s.%s", - keyspace_name, table_name) + col_rows = col_rows or self.keyspace_table_col_rows[keyspace_name][table_name] + trigger_rows = trigger_rows or self.keyspace_table_trigger_rows[keyspace_name][table_name] + index_rows = index_rows or self.keyspace_table_index_rows[keyspace_name][table_name] table_meta = TableMetadataV3(keyspace_name, table_name) @@ -1999,7 +2021,7 @@ def _build_table_metadata(self, row, col_rows, trigger_rows): table_meta.is_compact_storage = True # partition key - partition_rows = [r for r in cf_col_rows + partition_rows = [r for r in col_rows if r.get('type', None) == "partition_key"] if len(partition_rows) > 1: partition_rows = sorted(partition_rows, key=lambda row: row.get('component_index')) @@ -2012,7 +2034,7 @@ def _build_table_metadata(self, row, col_rows, trigger_rows): # clustering key if not compact_static: - clustering_rows = [r for r in cf_col_rows + clustering_rows = [r for r in col_rows if r.get('type', None) == "clustering"] if len(clustering_rows) > 1: clustering_rows = sorted(clustering_rows, key=lambda row: row.get('component_index')) @@ -2021,7 +2043,7 @@ def _build_table_metadata(self, row, col_rows, trigger_rows): table_meta.columns[column_meta.name] = column_meta table_meta.clustering_key.append(table_meta.columns[r.get('column_name')]) - for col_row in (r for r in cf_col_rows + for col_row in (r for r in col_rows if r.get('type', None) not in ('parition_key', 'clustering_key')): column_meta = self._build_column_metadata(table_meta, col_row) if not compact_static or column_meta.is_static: @@ -2031,10 +2053,14 @@ def _build_table_metadata(self, row, col_rows, trigger_rows): column_meta.is_static = False table_meta.columns[column_meta.name] = column_meta - if trigger_rows: - for trigger_row in trigger_rows[table_name]: - trigger_meta = self._build_trigger_metadata(table_meta, trigger_row) - table_meta.triggers[trigger_meta.name] = trigger_meta + for trigger_row in trigger_rows: + trigger_meta = self._build_trigger_metadata(table_meta, trigger_row) + table_meta.triggers[trigger_meta.name] = trigger_meta + + for index_row in index_rows: + index_meta = self._build_index_metadata(table_meta, index_row) + if index_meta: + table_meta.indexes[index_meta.name] = index_meta return table_meta @@ -2042,10 +2068,61 @@ def _build_table_options(self, row): """ Setup the mostly-non-schema table options, like caching settings """ return dict((o, row.get(o)) for o in self.recognized_table_options if o in row) + @staticmethod + def _build_index_metadata(table_metadata, row): + index_name = row.get("index_name") + index_type = row.get("index_type") + if index_name or index_type: + index_options = dict(row.get("index_options") or {}) + target_columns = row.get('target_columns') or set() + target_type = row.get('target_type') + return IndexMetadataV3(table_metadata, index_name, index_type, index_options, target_columns, target_type) + else: + return None + + def _query_all(self): + cl = ConsistencyLevel.ONE + queries = [ + QueryMessage(query=self._SELECT_KEYSPACES, consistency_level=cl), + QueryMessage(query=self._SELECT_TABLES, consistency_level=cl), + QueryMessage(query=self._SELECT_COLUMNS, consistency_level=cl), + QueryMessage(query=self._SELECT_TYPES, consistency_level=cl), + QueryMessage(query=self._SELECT_FUNCTIONS, consistency_level=cl), + QueryMessage(query=self._SELECT_AGGREGATES, consistency_level=cl), + QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl), + QueryMessage(query=self._SELECT_INDEXES, consistency_level=cl) + ] + + responses = self.connection.wait_for_responses(*queries, timeout=self.timeout, fail_on_error=False) + (ks_success, ks_result), (table_success, table_result), \ + (col_success, col_result), (types_success, types_result), \ + (functions_success, functions_result), \ + (aggregates_success, aggregates_result), \ + (triggers_success, triggers_result), \ + (indexes_success, indexes_result) = responses + + self.keyspaces_result = self._handle_results(ks_success, ks_result) + self.tables_result = self._handle_results(table_success, table_result) + self.columns_result = self._handle_results(col_success, col_result) + self.triggers_result = self._handle_results(triggers_success, triggers_result) + self.types_result = self._handle_results(types_success, types_result) + self.functions_result = self._handle_results(functions_success, functions_result) + self.aggregates_result = self._handle_results(aggregates_success, aggregates_result) + self.indexes_result = self._handle_results(indexes_success, indexes_result) + + self._aggregate_results() + + def _aggregate_results(self): + super(SchemaParserV3, self)._aggregate_results() + + m = self.keyspace_table_index_rows + for row in self.indexes_result: + ksname = row["keyspace_name"] + cfname = row[self._table_name_col] + m[ksname][cfname].append(row) + + class TableMetadataV3(TableMetadata): - """ - For now, until I figure out what to do with this API - """ compaction_options = {} _option_maps = ['caching'] @@ -2072,6 +2149,48 @@ def _make_option_strings(self): return list(sorted(ret)) + +class IndexMetadataV3(IndexMetadata): + + table = None + """ + The table (:class:`.TableMetadata`) this index is on. + """ + + columns = None + """ + The set of columns (:class:`.ColumnMetadata`) this index is on + (may be empty for per-row indexes). + """ + + name = None + """ A string name for the index. """ + + index_type = None + """ A string representing the type of index. """ + + index_options = {} + """ A dict of index options. """ + + target_columns = set() + """ A set of target columns """ + + target_type = None + + def __init__(self, table_metadata, index_name, index_type=None, index_options={}, target_columns=(), target_type=''): + self.table = table_metadata + self.name = index_name + self.index_type = index_type + self.index_options = index_options + self.target_columns = set(target_columns) + self.target_type = target_type + + # TODO: temporary until we diverge more with multiple column indexes + # giving the base class what it expects + self.column = table_metadata.columns[target_columns[0]] + self.column.index = self + + def get_schema_parser(connection, timeout): server_version = connection.server_version if server_version.startswith('3'): From 90a797eeb9b1c3592d6aab8352acfc329fd82f0d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 12 Aug 2015 14:07:06 -0500 Subject: [PATCH 0018/2005] schema 3.0: "index_options" --> "options" --- cassandra/metadata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 8027df9e32..b9f7b99663 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2073,7 +2073,7 @@ def _build_index_metadata(table_metadata, row): index_name = row.get("index_name") index_type = row.get("index_type") if index_name or index_type: - index_options = dict(row.get("index_options") or {}) + index_options = dict(row.get("options") or {}) target_columns = row.get('target_columns') or set() target_type = row.get('target_type') return IndexMetadataV3(table_metadata, index_name, index_type, index_options, target_columns, target_type) From f211f191d83e10d270b1638730b68b5a67a78203 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 19 Aug 2015 11:07:19 -0500 Subject: [PATCH 0019/2005] Make tuple before indexing Fixes an issue with indexing when blist sorted set is not in use. Symptom: "TypeError("'sortedset' object does not support indexing",)" --- cassandra/metadata.py | 4 ++-- tests/integration/__init__.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index b9f7b99663..43993e9741 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2177,7 +2177,7 @@ class IndexMetadataV3(IndexMetadata): target_type = None - def __init__(self, table_metadata, index_name, index_type=None, index_options={}, target_columns=(), target_type=''): + def __init__(self, table_metadata, index_name, index_type, index_options, target_columns, target_type): self.table = table_metadata self.name = index_name self.index_type = index_type @@ -2187,7 +2187,7 @@ def __init__(self, table_metadata, index_name, index_type=None, index_options={} # TODO: temporary until we diverge more with multiple column indexes # giving the base class what it expects - self.column = table_metadata.columns[target_columns[0]] + self.column = table_metadata.columns[tuple(target_columns)[0]] self.column.index = self diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 8890acb365..2dd889321d 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -80,7 +80,7 @@ def _tuple_version(version_string): USE_CASS_EXTERNAL = bool(os.getenv('USE_CASS_EXTERNAL', False)) -default_cassandra_version = '2.1.5' +default_cassandra_version = '2.2.0' if USE_CASS_EXTERNAL: if CCMClusterFactory: From 555c2fdf614b5f3190dea7656f5648d9668dce5d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 19 Aug 2015 11:41:09 -0500 Subject: [PATCH 0020/2005] __getitem__ for pure python sortedset --- cassandra/util.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cassandra/util.py b/cassandra/util.py index ba49b5687e..ce462a3805 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -504,6 +504,9 @@ def __init__(self, iterable=()): def __len__(self): return len(self._items) + def __getitem__(self, i): + return self._items[i] + def __iter__(self): return iter(self._items) From 002ff27a1b3a6ee9a0e3a6365d4e51ff3ecd1ce2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 21 Aug 2015 15:13:35 -0500 Subject: [PATCH 0021/2005] Handle multiple secondary indexes per column Also started working toward multiple-column indexes, but I don't think the server-side metadata is there yet. --- cassandra/metadata.py | 157 +++++++++++++++--------------------- tests/unit/test_metadata.py | 1 + 2 files changed, 68 insertions(+), 90 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 43993e9741..63e94670fb 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1021,9 +1021,8 @@ def all_as_cql(self): ret = self.as_cql_query(formatted=True) ret += ";" - for col_meta in self.columns.values(): - if col_meta.index: - ret += "\n%s;" % (col_meta.index.as_cql_query(),) + for index in self.indexes.values(): + ret += "\n%s;" % index.as_cql_query() for trigger_meta in self.triggers.values(): ret += "\n%s;" % (trigger_meta.as_cql_query(),) @@ -1223,64 +1222,84 @@ class IndexMetadata(object): A representation of a secondary index on a column. """ - column = None - """ - The column (:class:`.ColumnMetadata`) this index is on. - """ + table = None + """ The :class:`.TableMetadata` this column belongs to. """ name = None """ A string name for the index. """ + columns = None + """ + The set of columns (:class:`.ColumnMetadata`) this index is on + (may be empty for per-row indexes). + + .. versionchanged:: 3.0.0 + + Was previously a singular column + """ + index_type = None """ A string representing the type of index. """ - index_options = {} + index_options = None """ A dict of index options. """ - def __init__(self, column_metadata, index_name=None, index_type=None, index_options={}): - self.column = column_metadata + target_type = None + """ + String target type (COLUMN, ROW, etc), if available + + .. versionadded:: 3.0.0 + """ + + def __init__(self, table_metadata, index_name=None, index_type=None, index_options=None, target_columns=None, target_type=None): + self.table = table_metadata self.name = index_name + self.columns = set(table_metadata.columns[col_name] for col_name in target_columns) if target_columns else set() self.index_type = index_type - self.index_options = index_options + self.index_options = index_options or {} + self.target_type = target_type def as_cql_query(self): """ Returns a CQL query that can be used to recreate this index. """ - table = self.column.table if self.index_type != "CUSTOM": - index_target = protect_name(self.column.name) - if self.index_options is not None: - option_keys = self.index_options.keys() - if "index_keys" in option_keys: - index_target = 'keys(%s)' % (index_target,) - elif "index_values" in option_keys: - # don't use any "function" for collection values - pass - else: - # it might be a "full" index on a frozen collection, but - # we need to check the data type to verify that, because - # there is no special index option for full-collection - # indexes. - data_type = self.column.data_type - collection_types = ('map', 'set', 'list') - if data_type.typename == "frozen" and data_type.subtypes[0].typename in collection_types: - # no index option for full-collection index - index_target = 'full(%s)' % (index_target,) - - return "CREATE INDEX %s ON %s.%s (%s)" % ( + target_fmt = "CREATE INDEX %s ON %s.%s (%%s)" % ( self.name, # Cassandra doesn't like quoted index names for some reason - protect_name(table.keyspace.name), - protect_name(table.name), - index_target) + protect_name(self.table.keyspace.name), + protect_name(self.table.name)) else: - return "CREATE CUSTOM INDEX %s ON %s.%s (%s) USING '%s'" % ( + target_fmt = "CREATE CUSTOM INDEX %s ON %s.%s (%%s) USING '%s'" % ( self.name, # Cassandra doesn't like quoted index names for some reason - protect_name(table.keyspace.name), - protect_name(table.name), - protect_name(self.column.name), + protect_name(self.table.keyspace.name), + protect_name(self.table.name), self.index_options["class_name"]) + # TODO: when CASSANDRA-10124 lands, we'll need to loop over self.columns + # not updating yet because it seems like index_options will have to change to + # specify different options per column(?) + # Also, grammar will be changing for indexes with no target columns. This is TBD + col = tuple(self.columns)[0] if self.columns else None + index_target = protect_name(col.name) if col else '' + if col and self.index_options is not None: + option_keys = self.index_options.keys() + if "index_keys" in option_keys: + index_target = 'keys(%s)' % (index_target,) + elif "index_values" in option_keys: + # don't use any "function" for collection values + pass + else: + # it might be a "full" index on a frozen collection, but + # we need to check the data type to verify that, because + # there is no special index option for full-collection + # indexes. + data_type = col.data_type + collection_types = ('map', 'set', 'list') + if data_type.typename == "frozen" and data_type.subtypes[0].typename in collection_types: + # no index option for full-collection index + index_target = 'full(%s)' % (index_target,) + return target_fmt % index_target + def export_as_string(self): """ Returns a CQL query string that can be used to recreate this index. @@ -1605,7 +1624,6 @@ def get_table(self, keyspace, table): def get_type(self, keyspace, type): where_clause = " WHERE keyspace_name = '%s' AND type_name = '%s'" % (keyspace, type) return self._query_build_row(self._SELECT_TYPES + where_clause, self._build_user_type) - def get_function(self, keyspace, function): where_clause = " WHERE keyspace_name = '%s' AND function_name = '%s' AND signature = [%s]" \ % (keyspace, function.name, ','.join("'%s'" % t for t in function.type_signature)) @@ -1793,6 +1811,10 @@ def _build_table_metadata(self, row, col_rows=None, trigger_rows=None): for col_row in col_rows: column_meta = self._build_column_metadata(table_meta, col_row) table_meta.columns[column_meta.name] = column_meta + index_meta = self._build_index_metadata(column_meta, col_row) + column_meta.index = index_meta + if index_meta: + table_meta.indexes[index_meta.name] = index_meta for trigger_row in trigger_rows: trigger_meta = self._build_trigger_metadata(table_meta, trigger_row) @@ -1823,10 +1845,6 @@ def _build_column_metadata(self, table_metadata, row): data_type = types.lookup_casstype(row["validator"]) is_static = row.get("type", None) == "static" column_meta = ColumnMetadata(table_metadata, name, data_type, is_static=is_static) - index_meta = self._build_index_metadata(column_meta, row) - column_meta.index = index_meta - if index_meta: - table_metadata.indexes[index_meta.name] = index_meta return column_meta @staticmethod @@ -1835,8 +1853,8 @@ def _build_index_metadata(column_metadata, row): index_type = row.get("index_type") if index_name or index_type: options = row.get("index_options") - index_options = json.loads(options) if options else {} - return IndexMetadata(column_metadata, index_name, index_type, index_options) + index_options = json.loads(options) if options else None + return IndexMetadata(column_metadata.table, index_name, index_type, index_options, (column_metadata.name,)) else: return None @@ -2073,10 +2091,10 @@ def _build_index_metadata(table_metadata, row): index_name = row.get("index_name") index_type = row.get("index_type") if index_name or index_type: - index_options = dict(row.get("options") or {}) - target_columns = row.get('target_columns') or set() + index_options = row.get("options") + target_columns = row.get('target_columns') target_type = row.get('target_type') - return IndexMetadataV3(table_metadata, index_name, index_type, index_options, target_columns, target_type) + return IndexMetadata(table_metadata, index_name, index_type, index_options, target_columns, target_type) else: return None @@ -2150,47 +2168,6 @@ def _make_option_strings(self): return list(sorted(ret)) -class IndexMetadataV3(IndexMetadata): - - table = None - """ - The table (:class:`.TableMetadata`) this index is on. - """ - - columns = None - """ - The set of columns (:class:`.ColumnMetadata`) this index is on - (may be empty for per-row indexes). - """ - - name = None - """ A string name for the index. """ - - index_type = None - """ A string representing the type of index. """ - - index_options = {} - """ A dict of index options. """ - - target_columns = set() - """ A set of target columns """ - - target_type = None - - def __init__(self, table_metadata, index_name, index_type, index_options, target_columns, target_type): - self.table = table_metadata - self.name = index_name - self.index_type = index_type - self.index_options = index_options - self.target_columns = set(target_columns) - self.target_type = target_type - - # TODO: temporary until we diverge more with multiple column indexes - # giving the base class what it expects - self.column = table_metadata.columns[tuple(target_columns)[0]] - self.column.index = self - - def get_schema_parser(connection, timeout): server_version = connection.server_version if server_version.startswith('3'): diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 3da3945f2d..e4b636d0ed 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -316,6 +316,7 @@ def test_build_index_as_cql(self): column_meta.name = 'column_name_here' column_meta.table.name = 'table_name_here' column_meta.table.keyspace.name = 'keyspace_name_here' + column_meta.table.columns = {column_meta.name: column_meta} connection = Mock() connection.server_version = '2.1.0' parser = get_schema_parser(connection, 0.1) From e683b106fb6e6becfd7ed9c77d3c7aa2ecaa09b7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 24 Aug 2015 08:32:52 -0500 Subject: [PATCH 0022/2005] 3.0.0a2 version update --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index dadddeff14..569f6c49eb 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 0, '0a1', 'post') +__version_info__ = (3, 0, '0a2') __version__ = '.'.join(map(str, __version_info__)) From 9d3a3a9fc3b09bfabe1e42151a999d3847511923 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 24 Aug 2015 13:03:56 -0500 Subject: [PATCH 0023/2005] modernize system_schema.columns table PYTHON-276 --- cassandra/metadata.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 63e94670fb..75f1f06aae 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1840,7 +1840,8 @@ def _build_table_options(self, row): return options - def _build_column_metadata(self, table_metadata, row): + @staticmethod + def _build_column_metadata(table_metadata, row): name = row["column_name"] data_type = types.lookup_casstype(row["validator"]) is_static = row.get("type", None) == "static" @@ -2040,9 +2041,9 @@ def _build_table_metadata(self, row, col_rows=None, trigger_rows=None, index_row # partition key partition_rows = [r for r in col_rows - if r.get('type', None) == "partition_key"] + if r.get('kind', None) == "partition_key"] if len(partition_rows) > 1: - partition_rows = sorted(partition_rows, key=lambda row: row.get('component_index')) + partition_rows = sorted(partition_rows, key=lambda row: row.get('position')) for r in partition_rows: # we have to add meta here (and not in the later loop) because TableMetadata.columns is an # OrderedDict, and it assumes keys are inserted first, in order, when exporting CQL @@ -2053,16 +2054,16 @@ def _build_table_metadata(self, row, col_rows=None, trigger_rows=None, index_row # clustering key if not compact_static: clustering_rows = [r for r in col_rows - if r.get('type', None) == "clustering"] + if r.get('kind', None) == "clustering"] if len(clustering_rows) > 1: - clustering_rows = sorted(clustering_rows, key=lambda row: row.get('component_index')) + clustering_rows = sorted(clustering_rows, key=lambda row: row.get('position')) for r in clustering_rows: column_meta = self._build_column_metadata(table_meta, r) table_meta.columns[column_meta.name] = column_meta table_meta.clustering_key.append(table_meta.columns[r.get('column_name')]) for col_row in (r for r in col_rows - if r.get('type', None) not in ('parition_key', 'clustering_key')): + if r.get('kind', None) not in ('parition_key', 'clustering_key')): column_meta = self._build_column_metadata(table_meta, col_row) if not compact_static or column_meta.is_static: # for compact static tables, we omit the clustering key and value, and only add the logical columns. @@ -2086,6 +2087,14 @@ def _build_table_options(self, row): """ Setup the mostly-non-schema table options, like caching settings """ return dict((o, row.get(o)) for o in self.recognized_table_options if o in row) + @staticmethod + def _build_column_metadata(table_metadata, row): + name = row["column_name"] + data_type = types.lookup_casstype(row["type"]) + is_static = row.get("kind", None) == "static" + column_meta = ColumnMetadata(table_metadata, name, data_type, is_static=is_static) + return column_meta + @staticmethod def _build_index_metadata(table_metadata, row): index_name = row.get("index_name") From 334b8633dd0caafd1ffe17646f4f8a06495d597e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 24 Aug 2015 13:05:03 -0500 Subject: [PATCH 0024/2005] schema 3.0: rename trigger_options -> options PYTHON-276 --- cassandra/metadata.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 75f1f06aae..4720a64fc1 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2107,6 +2107,13 @@ def _build_index_metadata(table_metadata, row): else: return None + @staticmethod + def _build_trigger_metadata(table_metadata, row): + name = row["trigger_name"] + options = row["options"] + trigger_meta = TriggerMetadata(table_metadata, name, options) + return trigger_meta + def _query_all(self): cl = ConsistencyLevel.ONE queries = [ From db87202cea00372867dfb17ca7a151e0b07cd54f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 24 Aug 2015 13:05:36 -0500 Subject: [PATCH 0025/2005] test: use Cluster proto downgrade in get_server_versions Can no longer connect to C* 3.0 using protocol < 3 --- tests/integration/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 2dd889321d..54d8977170 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -59,7 +59,7 @@ def get_server_versions(): if cass_version is not None: return (cass_version, cql_version) - c = Cluster(protocol_version=1) + c = Cluster() s = c.connect() row = s.execute('SELECT cql_version, release_version FROM system.local')[0] From 38317dd34283b2f365be3e89e0f0ef686f989381 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 24 Aug 2015 13:22:46 -0500 Subject: [PATCH 0026/2005] no such thing as 'clustering_key' column 'kind' --- cassandra/metadata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 4720a64fc1..8d84c72db7 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2063,7 +2063,7 @@ def _build_table_metadata(self, row, col_rows=None, trigger_rows=None, index_row table_meta.clustering_key.append(table_meta.columns[r.get('column_name')]) for col_row in (r for r in col_rows - if r.get('kind', None) not in ('parition_key', 'clustering_key')): + if r.get('kind', None) not in ('parition_key', 'clustering')): column_meta = self._build_column_metadata(table_meta, col_row) if not compact_static or column_meta.is_static: # for compact static tables, we omit the clustering key and value, and only add the logical columns. From 836eaae07aacce73dc6d4805991027fe61a46037 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 4 Sep 2015 17:09:59 -0500 Subject: [PATCH 0027/2005] version update --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index dadddeff14..7d60b6a292 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 0, '0a1', 'post') +__version_info__ = (3, 0, '0a2', 'post') __version__ = '.'.join(map(str, __version_info__)) From cda3f0bbef8075d06c5fd682ce3cd39a1228beb9 Mon Sep 17 00:00:00 2001 From: vagrant Date: Thu, 10 Sep 2015 19:39:40 +0000 Subject: [PATCH 0028/2005] Remove 'closure' (generator comprehension) from cdef Makes the extension compatible with Cython < 0.21 PYTHON-396 --- cassandra/cython_marshal.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cython_marshal.pyx b/cassandra/cython_marshal.pyx index 1ba1143548..56437e32cd 100644 --- a/cassandra/cython_marshal.pyx +++ b/cassandra/cython_marshal.pyx @@ -107,7 +107,7 @@ cdef varint_unpack(Buffer *term): cdef varint_unpack_py3(bytes term): cdef int64_t one = 1L - val = int(''.join("%02x" % i for i in term), 16) + val = int(''.join(["%02x" % i for i in term]), 16) if (term[0] & 128) != 0: # There is a bug in Cython (0.20 - 0.22), where if we do # '1 << (len(term) * 8)' Cython generates '1' directly into the From ac25d94ecd6654ec748a22b58fdfa282e9832099 Mon Sep 17 00:00:00 2001 From: vagrant Date: Thu, 10 Sep 2015 19:42:24 +0000 Subject: [PATCH 0029/2005] relax Cython req, make build work with old setuptools see diff for explanation of old setuptools issue PYTHON-396 --- setup.py | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index b92063bfac..5b8772fa7e 100644 --- a/setup.py +++ b/setup.py @@ -178,6 +178,25 @@ def __init__(self, ext): build_concurrency = int(os.environ.get('CASS_DRIVER_BUILD_CONCURRENCY', '0')) +class NoPatchExtension(Extension): + + # Older versions of setuptools.extension has a static flag which is set False before our + # setup_requires lands Cython. It causes our *.pyx sources to be renamed to *.c in + # the initializer. + # The other workaround would be to manually generate sources, but that bypasses a lot + # of the niceness cythonize embodies (setup build dir, conditional build, etc). + # Newer setuptools does not have this problem because it checks for cython dynamically. + # https://bitbucket.org/pypa/setuptools/commits/714c3144e08fd01a9f61d1c88411e76d2538b2e4 + + def __init__(self, *args, **kwargs): + # bypass the patched init if possible + if Extension.__bases__: + base, = Extension.__bases__ + base.__init__(self, *args, **kwargs) + else: + Extension.__init__(self, *args, **kwargs) + + class build_extensions(build_ext): error_message = """ @@ -279,7 +298,9 @@ def _setup_extensions(self): for m in cython_candidates], nthreads=build_concurrency, exclude_failures=True)) - self.extensions.extend(cythonize("cassandra/*.pyx", nthreads=build_concurrency)) + + self.extensions.extend(cythonize(NoPatchExtension("*", ["cassandra/*.pyx"], extra_compile_args=compile_args), + nthreads=build_concurrency)) except Exception: sys.stderr.write("Failed to cythonize one or more modules. These will not be compiled as extensions (optional).\n") @@ -297,7 +318,7 @@ def run_setup(extensions): kw['ext_modules'] = [Extension('DUMMY', [])] # dummy extension makes sure build_ext is called for install if try_cython: - kw['setup_requires'] = ['Cython >=0.21'] + kw['setup_requires'] = ['Cython>=0.20'] dependencies = ['six >=1.6'] From cc80d543e65e9f7c701f52722551965ae9fe055e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 11 Sep 2015 09:10:51 -0500 Subject: [PATCH 0030/2005] key typo in metadata parsing --- cassandra/metadata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 8705b94a61..81b4ba7185 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2105,7 +2105,7 @@ def _build_table_metadata(self, row, col_rows=None, trigger_rows=None, index_row table_meta.clustering_key.append(table_meta.columns[r.get('column_name')]) for col_row in (r for r in col_rows - if r.get('kind', None) not in ('parition_key', 'clustering')): + if r.get('kind', None) not in ('partition_key', 'clustering')): column_meta = self._build_column_metadata(table_meta, col_row) if not compact_static or column_meta.is_static: # for compact static tables, we omit the clustering key and value, and only add the logical columns. From c2daeff35d13ab068495252acad4e1af8960a761 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 11 Sep 2015 11:55:29 -0500 Subject: [PATCH 0031/2005] Adding cython to test-requirements.txt --- test-requirements.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index f5eeabcb96..f3cc411c24 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -11,4 +11,5 @@ sure pure-sasl twisted gevent>=1.0 -eventlet \ No newline at end of file +eventlet +cython>=0.21 \ No newline at end of file From 9ac8d09a1b2064b1dec3dd6386b9408098346ab3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 9 Sep 2015 15:31:31 -0500 Subject: [PATCH 0032/2005] add keywords 'is' 'materialized' 'view' --- cassandra/metadata.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 81b4ba7185..7301dd60d0 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -45,13 +45,13 @@ 'bigint', 'blob', 'boolean', 'by', 'called', 'clustering', 'columnfamily', 'compact', 'contains', 'count', 'counter', 'create', 'custom', 'date', 'decimal', 'delete', 'desc', 'describe', 'distinct', 'double', 'drop', 'entries', 'execute', 'exists', 'filtering', 'finalfunc', 'float', 'from', 'frozen', 'full', 'function', - 'functions', 'grant', 'if', 'in', 'index', 'inet', 'infinity', 'initcond', 'input', 'insert', 'int', 'into', 'json', - 'key', 'keys', 'keyspace', 'keyspaces', 'language', 'limit', 'list', 'login', 'map', 'modify', 'nan', 'nologin', + 'functions', 'grant', 'if', 'in', 'index', 'inet', 'infinity', 'initcond', 'input', 'insert', 'int', 'into', 'is', 'json', + 'key', 'keys', 'keyspace', 'keyspaces', 'language', 'limit', 'list', 'login', 'map', 'materialized', 'modify', 'nan', 'nologin', 'norecursive', 'nosuperuser', 'not', 'null', 'of', 'on', 'options', 'or', 'order', 'password', 'permission', 'permissions', 'primary', 'rename', 'replace', 'returns', 'revoke', 'role', 'roles', 'schema', 'select', 'set', 'sfunc', 'smallint', 'static', 'storage', 'stype', 'superuser', 'table', 'text', 'time', 'timestamp', 'timeuuid', 'tinyint', 'to', 'token', 'trigger', 'truncate', 'ttl', 'tuple', 'type', 'unlogged', 'update', 'use', 'user', - 'users', 'using', 'uuid', 'values', 'varchar', 'varint', 'where', 'with', 'writetime' + 'users', 'using', 'uuid', 'values', 'varchar', 'varint', 'view', 'where', 'with', 'writetime' )) """ Set of keywords in CQL. From 3e31867ada40199d0e55ae6e9717b47a872afd30 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 11 Sep 2015 14:18:37 -0500 Subject: [PATCH 0033/2005] Original MV meta impl on cassandra-3.0, before CASSANDRA-9921 --- cassandra/metadata.py | 186 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 160 insertions(+), 26 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 7301dd60d0..b72fde8b52 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -111,7 +111,7 @@ def export_schema_as_string(self): Returns a string that can be executed as a query in order to recreate the entire schema. The string is formatted to be human readable. """ - return "\n".join(ks.export_as_string() for ks in self.keyspaces.values()) + return "\n\n".join(ks.export_as_string() for ks in self.keyspaces.values()) def refresh(self, connection, timeout, target_type=None, change_type=None, **kwargs): @@ -123,7 +123,7 @@ def refresh(self, connection, timeout, target_type=None, change_type=None, **kwa try: parser = get_schema_parser(connection, timeout) parse_method = getattr(parser, 'get_' + tt_lower) - meta = parse_method(**kwargs) + meta = parse_method(self.keyspaces, **kwargs) if meta: update_method = getattr(self, '_update_' + tt_lower) update_method(meta) @@ -167,6 +167,7 @@ def _update_keyspace(self, keyspace_meta): keyspace_meta.indexes = old_keyspace_meta.indexes keyspace_meta.functions = old_keyspace_meta.functions keyspace_meta.aggregates = old_keyspace_meta.aggregates + keyspace_meta.views = old_keyspace_meta.views if (keyspace_meta.replication_strategy != old_keyspace_meta.replication_strategy): self._keyspace_updated(ks_name) else: @@ -601,6 +602,11 @@ class KeyspaceMetadata(object): .. versionadded:: 2.6.0 """ + views = None + """ + A dict mapping view names to :class:`.MaterializedViewMetadata` instances. + """ + _exc_info = None """ set if metadata parsing failed """ @@ -613,6 +619,7 @@ def __init__(self, name, durable_writes, strategy_class, strategy_options): self.user_types = {} self.functions = {} self.aggregates = {} + self.views = {} def export_as_string(self): """ @@ -623,7 +630,8 @@ def export_as_string(self): + self.user_type_strings() + [f.as_cql_query(True) for f in self.functions.values()] + [a.as_cql_query(True) for a in self.aggregates.values()] - + [t.export_as_string() for t in self.tables.values()]) + + [t.export_as_string() for t in self.tables.values()] + + [v.as_cql_query(True) for v in self.views.values()]) if self._exc_info: import traceback ret = "/*\nWarning: Keyspace %s is incomplete because of an error processing metadata.\n" % \ @@ -666,12 +674,18 @@ def _add_table_metadata(self, table_metadata): self.tables[table_metadata.name] = table_metadata for index_name, index_metadata in six.iteritems(table_metadata.indexes): self.indexes[index_name] = index_metadata + if table_metadata.is_view: + view_meta = self.views.get(table_metadata.name) + if view_meta: + view_meta.table = table_metadata def _drop_table_metadata(self, table_name): table_meta = self.tables.pop(table_name, None) if table_meta: for index_name in table_meta.indexes: self.indexes.pop(index_name, None) + if table_meta.is_view: + self.views.pop(table_meta.name, None) class UserType(object): @@ -968,6 +982,8 @@ def primary_key(self): is_compact_storage = False + is_view = False + options = None """ A dict mapping table option names to their specific settings for this @@ -1049,6 +1065,7 @@ def all_as_cql(self): for trigger_meta in self.triggers.values(): ret += "\n%s;" % (trigger_meta.as_cql_query(),) + return ret def as_cql_query(self, formatted=False): @@ -1092,12 +1109,16 @@ def as_cql_query(self, formatted=False): ret += ")" - # options + # properties ret += "%s) WITH " % ("\n" if formatted else "") + ret += self._property_string(formatted) + + return ret - option_strings = [] + def _property_string(self, formatted): + properties = [] if self.is_compact_storage: - option_strings.append("COMPACT STORAGE") + properties.append("COMPACT STORAGE") if self.clustering_key: cluster_str = "CLUSTERING ORDER BY " @@ -1108,14 +1129,12 @@ def as_cql_query(self, formatted=False): inner.append("%s %s" % (protect_name(col.name), ordering)) cluster_str += "(%s)" % ", ".join(inner) - option_strings.append(cluster_str) + properties.append(cluster_str) - option_strings.extend(self._make_option_strings()) + properties.extend(self._make_option_strings()) join_str = "\n AND " if formatted else " AND " - ret += join_str.join(option_strings) - - return ret + return join_str.join(properties) def _make_option_strings(self): ret = [] @@ -1628,7 +1647,7 @@ def get_all_keyspaces(self): yield keyspace_meta - def get_table(self, keyspace, table): + def get_table(self, keyspaces, keyspace, table): cl = ConsistencyLevel.ONE where_clause = " WHERE keyspace_name = '%s' AND %s = '%s'" % (keyspace, self._table_name_col, table) cf_query = QueryMessage(query=self._SELECT_COLUMN_FAMILIES + where_clause, consistency_level=cl) @@ -1648,20 +1667,21 @@ def get_table(self, keyspace, table): if table_result: return self._build_table_metadata(table_result[0], col_result, triggers_result) - def get_type(self, keyspace, type): + def get_type(self, keyspaces, keyspace, type): where_clause = " WHERE keyspace_name = '%s' AND type_name = '%s'" % (keyspace, type) return self._query_build_row(self._SELECT_TYPES + where_clause, self._build_user_type) - def get_function(self, keyspace, function): + + def get_function(self, keyspaces, keyspace, function): where_clause = " WHERE keyspace_name = '%s' AND function_name = '%s' AND signature = [%s]" \ % (keyspace, function.name, ','.join("'%s'" % t for t in function.type_signature)) return self._query_build_row(self._SELECT_FUNCTIONS + where_clause, self._build_function) - def get_aggregate(self, keyspace, aggregate): + def get_aggregate(self, keyspaces, keyspace, aggregate): where_clause = " WHERE keyspace_name = '%s' AND aggregate_name = '%s' AND signature = [%s]" \ % (keyspace, aggregate.name, ','.join("'%s'" % t for t in aggregate.type_signature)) return self._query_build_row(self._SELECT_AGGREGATES + where_clause, self._build_aggregate) - def get_keyspace(self, keyspace): + def get_keyspace(self, keyspaces, keyspace): where_clause = " WHERE keyspace_name = '%s'" % (keyspace,) return self._query_build_row(self._SELECT_KEYSPACES + where_clause, self._build_keyspace_metadata) @@ -2012,6 +2032,7 @@ class SchemaParserV3(SchemaParserV22): _SELECT_TYPES = "SELECT * FROM system_schema.types" _SELECT_FUNCTIONS = "SELECT * FROM system_schema.functions" _SELECT_AGGREGATES = "SELECT * FROM system_schema.aggregates" + _SELECT_VIEWS = "SELECT * FROM system_schema.materialized_views" _table_name_col = 'table_name' @@ -2034,24 +2055,47 @@ def __init__(self, connection, timeout): super(SchemaParserV3, self).__init__(connection, timeout) self.indexes_result = [] self.keyspace_table_index_rows = defaultdict(lambda: defaultdict(list)) + self.keyspace_table_view_rows = defaultdict(lambda: defaultdict(list)) - def get_table(self, keyspace, table): + def get_all_keyspaces(self): + for keyspace_meta in super(SchemaParserV3, self).get_all_keyspaces(): + # views are handled after everything else is built so that both target + # and view tables resolve + keyspace_table_views = self.keyspace_table_view_rows[keyspace_meta.name] + tables = keyspace_meta.tables + for subject_table_name, views in keyspace_table_views.items(): + for row in views: + view_meta = self._build_view_metadata(tables, row) + keyspace_meta.views[view_meta.name] = view_meta + + yield keyspace_meta + + def get_table(self, keyspaces, keyspace, table): cl = ConsistencyLevel.ONE where_clause = " WHERE keyspace_name = '%s' AND %s = '%s'" % (keyspace, self._table_name_col, table) cf_query = QueryMessage(query=self._SELECT_TABLES + where_clause, consistency_level=cl) col_query = QueryMessage(query=self._SELECT_COLUMNS + where_clause, consistency_level=cl) indexes_query = QueryMessage(query=self._SELECT_INDEXES + where_clause, consistency_level=cl) triggers_query = QueryMessage(query=self._SELECT_TRIGGERS + where_clause, consistency_level=cl) - (cf_success, cf_result), (col_success, col_result), \ - (indexes_sucess, indexes_result), (triggers_success, triggers_result) \ - = self.connection.wait_for_responses(cf_query, col_query, indexes_query, triggers_query, timeout=self.timeout, fail_on_error=False) + views_query = QueryMessage(query=self._SELECT_VIEWS + where_clause, consistency_level=cl) + (cf_success, cf_result), (col_success, col_result), (indexes_sucess, indexes_result), \ + (triggers_success, triggers_result), (views_success, views_result) \ + = self.connection.wait_for_responses(cf_query, col_query, indexes_query, triggers_query, views_query, + timeout=self.timeout, fail_on_error=False) table_result = self._handle_results(cf_success, cf_result) if table_result: col_result = self._handle_results(col_success, col_result) indexes_result = self._handle_results(indexes_sucess, indexes_result) triggers_result = self._handle_results(triggers_success, triggers_result) - return self._build_table_metadata(table_result[0], col_result, triggers_result, indexes_result) + views_result = self._handle_results(views_success, views_result) + table_meta = self._build_table_metadata(table_result[0], col_result, triggers_result, indexes_result) + keyspace_meta = keyspaces.get(keyspace) + if keyspace_meta: + for view_row in views_result: + view_meta = self._build_view_metadata(keyspace_meta.tables, view_row) + keyspace_meta.views[view_meta.name] = view_meta + return table_meta @staticmethod def _build_keyspace_metadata_internal(row): @@ -2149,8 +2193,17 @@ def _build_index_metadata(table_metadata, row): target_columns = row.get('target_columns') target_type = row.get('target_type') return IndexMetadata(table_metadata, index_name, index_type, index_options, target_columns, target_type) - else: - return None + + @staticmethod + def _build_view_metadata(tables, row): + table_name = row.get("table_name") + view_name = row.get("view_name") + view_table_metadata = tables.get(view_name) + clustering_columns = row.get("clustering_columns") + included_columns = row.get("included_columns") + target_columns = row.get("target_columns") + return MaterializedViewMetadata(view_table_metadata, table_name, view_name, + clustering_columns, included_columns, target_columns) @staticmethod def _build_trigger_metadata(table_metadata, row): @@ -2169,7 +2222,8 @@ def _query_all(self): QueryMessage(query=self._SELECT_FUNCTIONS, consistency_level=cl), QueryMessage(query=self._SELECT_AGGREGATES, consistency_level=cl), QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl), - QueryMessage(query=self._SELECT_INDEXES, consistency_level=cl) + QueryMessage(query=self._SELECT_INDEXES, consistency_level=cl), + QueryMessage(query=self._SELECT_VIEWS, consistency_level=cl) ] responses = self.connection.wait_for_responses(*queries, timeout=self.timeout, fail_on_error=False) @@ -2178,7 +2232,8 @@ def _query_all(self): (functions_success, functions_result), \ (aggregates_success, aggregates_result), \ (triggers_success, triggers_result), \ - (indexes_success, indexes_result) = responses + (indexes_success, indexes_result), \ + (views_success, views_result) = responses self.keyspaces_result = self._handle_results(ks_success, ks_result) self.tables_result = self._handle_results(table_success, table_result) @@ -2188,6 +2243,7 @@ def _query_all(self): self.functions_result = self._handle_results(functions_success, functions_result) self.aggregates_result = self._handle_results(aggregates_success, aggregates_result) self.indexes_result = self._handle_results(indexes_success, indexes_result) + self.views_result = self._handle_results(views_success, views_result) self._aggregate_results() @@ -2200,11 +2256,16 @@ def _aggregate_results(self): cfname = row[self._table_name_col] m[ksname][cfname].append(row) + m = self.keyspace_table_view_rows + for row in self.views_result: + ksname = row["keyspace_name"] + cfname = row[self._table_name_col] + m[ksname][cfname].append(row) + class TableMetadataV3(TableMetadata): compaction_options = {} - _option_maps = ['caching'] option_maps = ['compaction', 'compression', 'caching'] @property @@ -2229,6 +2290,79 @@ def _make_option_strings(self): return list(sorted(ret)) +class MaterializedViewMetadata(object): + """ + A representation of a materialized view on a table + """ + + table = None + """ The :class:`.TableMetadata` describing this view (not the table the view is on). """ + + target_table_name = None + """ String name of the table this view targets """ + + name = None + """ A string name of the view.""" + + clustering_columns = None + """ List of clustering column names.""" + + included_columns = None + """ List of included column names selected by the view.""" + + target_columns = None + """ List of target column names (essentially the partition key of the view) """ + + def __init__(self, view_table_metadata, target_table_name, view_name, + clustering_columns, included_columns, target_columns): + self.table = view_table_metadata + self.target_table_name = target_table_name + self.name = view_name + self.clustering_columns = clustering_columns or [] + self.included_columns = included_columns or [] + self.target_columns = target_columns or [] + + def as_cql_query(self, formatted=False): + """ + Returns a CQL query that can be used to recreate this function. + If `formatted` is set to :const:`True`, extra whitespace will + be added to make the query more readable. + """ + sep = '\n ' if formatted else ' ' + keyspace = protect_name(self.table.keyspace_name) if self.table else '' + name = protect_name(self.name) + + selected_cols = ', '.join(protect_name(col) for col in self.included_columns) + target_table = protect_name(self.target_table_name) + where_clause = " AND ".join("%s IS NOT NULL" % protect_name(col) for col in self.target_columns + self.clustering_columns) + + part_key = ', '.join(protect_name(n) for n in self.target_columns) + if len(self.target_columns) > 1: + pk = "((%s)" % part_key + else: + pk = "(%s" % part_key + if self.clustering_columns: + pk += ", %s" % ', '.join(protect_name(n) for n in self.clustering_columns) + pk += ")" + + properties = self.table._property_string(formatted) if self.table else '' + + body = "CREATE MATERIALIZED VIEW %(keyspace)s.%(name)s AS%(sep)s" \ + "SELECT %(selected_cols)s FROM %(keyspace)s.%(target_table)s%(sep)s" \ + "WHERE %(where_clause)s%(sep)s" \ + "PRIMARY KEY %(pk)s%(sep)s" \ + "WITH %(properties)s;" % locals() + + if not self.table: + ret = "/*\nWarning: Materialize View omitted because the model is missing a table describing it.\n" \ + "\nApproximate structure, for reference:\n(this should not be used to reproduce this schema)\n\n%s" \ + + body + "\n*/" + return ret + + return body + + + def get_schema_parser(connection, timeout): server_version = connection.server_version if server_version.startswith('3'): From 09d68357db4abdaf0ebb17c25f7d19611d7f515c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 14 Sep 2015 13:13:35 -0500 Subject: [PATCH 0034/2005] Changelog and version update --- CHANGELOG.rst | 11 +++++++++++ cassandra/__init__.py | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 26c6936b2c..f0efa299ec 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,14 @@ +2.7.2 +===== +Setpember 14, 2015 + +Bug Fixes +--------- +* Resolve CQL export error for UDF with zero parameters (PYTHON-392) +* Remove futures dep. for Python 3 (PYTHON-393) +* Avoid Python closure in cdef (supports earlier Cython compiler) (PYTHON-396) +* Unit test runtime issues (PYTHON-397,398) + 2.7.1 ===== August 25, 2015 diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 8cb2f5bf38..02c48dd078 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (2, 7, 1) +__version_info__ = (2, 7, 2) __version__ = '.'.join(map(str, __version_info__)) From 5b6456937be8d0c35ce1f73eda1e0a6ca27ce76c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 14 Sep 2015 13:17:19 -0500 Subject: [PATCH 0035/2005] note about row_factory in protocol handler section --- docs/api/cassandra/protocol.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/api/cassandra/protocol.rst b/docs/api/cassandra/protocol.rst index 21bc40b570..fe506b2c4b 100644 --- a/docs/api/cassandra/protocol.rst +++ b/docs/api/cassandra/protocol.rst @@ -38,7 +38,9 @@ and ``NumpyProtocolHandler``. They can be used as follows: .. code:: python from cassandra.protocol import NumpyProtocolHandler, LazyProtocolHandler + from cassandra.query import tuple_factory s.client_protocol_handler = LazyProtocolHandler # for a result iterator + s.row_factory = tuple_factory #required for Numpy results s.client_protocol_handler = NumpyProtocolHandler # for a dict of NumPy arrays as result These protocol handlers comprise different parsers, and return results as described below: From 3d04d48c27d200ccdc0faf75262587724c6c5a05 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 14 Sep 2015 13:54:13 -0500 Subject: [PATCH 0036/2005] post-release version update --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 02c48dd078..8d87316f84 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (2, 7, 2) +__version_info__ = (2, 7, 2, 'post0') __version__ = '.'.join(map(str, __version_info__)) From 36b884dc1d5c9ceb3c3b192131b8b6f24f04b63c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 14 Sep 2015 16:07:23 -0500 Subject: [PATCH 0037/2005] Split materialized view meta into distinct table --- cassandra/metadata.py | 304 +++++++++++++++++++++++------------------- 1 file changed, 168 insertions(+), 136 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index b72fde8b52..dfd0805d88 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -30,7 +30,7 @@ except ImportError as e: pass -from cassandra import SignatureDescriptor, ConsistencyLevel, InvalidRequest, SchemaChangeType, Unauthorized +from cassandra import SignatureDescriptor, ConsistencyLevel, InvalidRequest, Unauthorized import cassandra.cqltypes as types from cassandra.encoder import Encoder from cassandra.marshal import varint_unpack @@ -177,11 +177,18 @@ def _drop_keyspace(self, keyspace): if self.keyspaces.pop(keyspace, None): self._keyspace_removed(keyspace) - def _update_table(self, table_meta): + def _update_table(self, meta): try: - keyspace_meta = self.keyspaces[table_meta.keyspace_name] - table_meta.keyspace = keyspace_meta # temporary while TableMetadata.keyspace is deprecated - keyspace_meta._add_table_metadata(table_meta) + keyspace_meta = self.keyspaces[meta.keyspace_name] + # this is unfortunate, but protocol v4 does not differentiate + # between events for tables and views. .get_table will + # return one or the other based on the query results. + # Here we deal with that. + if isinstance(meta, TableMetadata): + meta.keyspace = keyspace_meta # temporary while TableMetadata.keyspace is deprecated + keyspace_meta._add_table_metadata(meta) + else: + keyspace_meta._add_view_metadata(meta) except KeyError: # can happen if keyspace disappears while processing async event pass @@ -189,7 +196,7 @@ def _update_table(self, table_meta): def _drop_table(self, keyspace, table): try: keyspace_meta = self.keyspaces[keyspace] - keyspace_meta._drop_table_metadata(table) + keyspace_meta._drop_table_metadata(table) # handles either table or view except KeyError: # can happen if keyspace disappears while processing async event pass @@ -630,8 +637,7 @@ def export_as_string(self): + self.user_type_strings() + [f.as_cql_query(True) for f in self.functions.values()] + [a.as_cql_query(True) for a in self.aggregates.values()] - + [t.export_as_string() for t in self.tables.values()] - + [v.as_cql_query(True) for v in self.views.values()]) + + [t.export_as_string() for t in self.tables.values()]) if self._exc_info: import traceback ret = "/*\nWarning: Keyspace %s is incomplete because of an error processing metadata.\n" % \ @@ -674,18 +680,24 @@ def _add_table_metadata(self, table_metadata): self.tables[table_metadata.name] = table_metadata for index_name, index_metadata in six.iteritems(table_metadata.indexes): self.indexes[index_name] = index_metadata - if table_metadata.is_view: - view_meta = self.views.get(table_metadata.name) - if view_meta: - view_meta.table = table_metadata def _drop_table_metadata(self, table_name): table_meta = self.tables.pop(table_name, None) if table_meta: for index_name in table_meta.indexes: self.indexes.pop(index_name, None) - if table_meta.is_view: - self.views.pop(table_meta.name, None) + for view_name in table_meta.views: + self.views.pop(view_name, None) + # we can't tell table drops from views, so drop both + # (name is unique among them, within a keyspace) + self.views.pop(table_name, None) + + def _add_view_metadata(self, view_metadata): + try: + self.tables[view_metadata.base_table_name].views[view_metadata.name] = view_metadata + self.views[view_metadata.name] = view_metadata + except KeyError: + pass class UserType(object): @@ -982,8 +994,6 @@ def primary_key(self): is_compact_storage = False - is_view = False - options = None """ A dict mapping table option names to their specific settings for this @@ -1000,6 +1010,11 @@ def primary_key(self): A dict mapping trigger names to :class:`.TriggerMetadata` instances. """ + views = None + """ + A dict mapping view names to :class:`.MaterializedViewMetadata` instances. + """ + _exc_info = None """ set if metadata parsing failed """ @@ -1032,6 +1047,7 @@ def __init__(self, keyspace_name, name, partition_key=None, clustering_key=None, self.options = {} if options is None else options self.comparator = None self.triggers = OrderedDict() if triggers is None else triggers + self.views = {} def export_as_string(self): """ @@ -1066,6 +1082,9 @@ def all_as_cql(self): for trigger_meta in self.triggers.values(): ret += "\n%s;" % (trigger_meta.as_cql_query(),) + for view_meta in self.views.values(): + ret += "\n\n%s;" % (view_meta.as_cql_query(formatted=True),) + return ret def as_cql_query(self, formatted=False): @@ -1111,34 +1130,36 @@ def as_cql_query(self, formatted=False): # properties ret += "%s) WITH " % ("\n" if formatted else "") - ret += self._property_string(formatted) + ret += self._property_string(formatted, self.clustering_key, self.options, self.is_compact_storage) return ret - def _property_string(self, formatted): + @classmethod + def _property_string(cls, formatted, clustering_key, options_map, is_compact_storage=False): properties = [] - if self.is_compact_storage: + if is_compact_storage: properties.append("COMPACT STORAGE") - if self.clustering_key: + if clustering_key: cluster_str = "CLUSTERING ORDER BY " inner = [] - for col in self.clustering_key: + for col in clustering_key: ordering = "DESC" if issubclass(col.data_type, types.ReversedType) else "ASC" inner.append("%s %s" % (protect_name(col.name), ordering)) cluster_str += "(%s)" % ", ".join(inner) properties.append(cluster_str) - properties.extend(self._make_option_strings()) + properties.extend(cls._make_option_strings(options_map)) join_str = "\n AND " if formatted else " AND " return join_str.join(properties) - def _make_option_strings(self): + @classmethod + def _make_option_strings(cls, options_map): ret = [] - options_copy = dict(self.options.items()) + options_copy = dict(options_map.items()) actual_options = json.loads(options_copy.pop('compaction_strategy_options', '{}')) value = options_copy.pop("compaction_strategy_class", None) @@ -1147,7 +1168,7 @@ def _make_option_strings(self): compaction_option_strings = ["'%s': '%s'" % (k, v) for k, v in actual_options.items()] ret.append('compaction = {%s}' % ', '.join(compaction_option_strings)) - for system_table_name in self.compaction_options.keys(): + for system_table_name in cls.compaction_options.keys(): options_copy.pop(system_table_name, None) # delete if present options_copy.pop('compaction_strategy_option', None) @@ -2032,7 +2053,7 @@ class SchemaParserV3(SchemaParserV22): _SELECT_TYPES = "SELECT * FROM system_schema.types" _SELECT_FUNCTIONS = "SELECT * FROM system_schema.functions" _SELECT_AGGREGATES = "SELECT * FROM system_schema.aggregates" - _SELECT_VIEWS = "SELECT * FROM system_schema.materialized_views" + _SELECT_VIEWS = "SELECT * FROM system_schema.views" _table_name_col = 'table_name' @@ -2055,19 +2076,13 @@ def __init__(self, connection, timeout): super(SchemaParserV3, self).__init__(connection, timeout) self.indexes_result = [] self.keyspace_table_index_rows = defaultdict(lambda: defaultdict(list)) - self.keyspace_table_view_rows = defaultdict(lambda: defaultdict(list)) + self.keyspace_view_rows = defaultdict(list) def get_all_keyspaces(self): for keyspace_meta in super(SchemaParserV3, self).get_all_keyspaces(): - # views are handled after everything else is built so that both target - # and view tables resolve - keyspace_table_views = self.keyspace_table_view_rows[keyspace_meta.name] - tables = keyspace_meta.tables - for subject_table_name, views in keyspace_table_views.items(): - for row in views: - view_meta = self._build_view_metadata(tables, row) - keyspace_meta.views[view_meta.name] = view_meta - + for row in self.keyspace_view_rows[keyspace_meta.name]: + view_meta = self._build_view_metadata(row) + keyspace_meta._add_view_metadata(view_meta) yield keyspace_meta def get_table(self, keyspaces, keyspace, table): @@ -2077,25 +2092,24 @@ def get_table(self, keyspaces, keyspace, table): col_query = QueryMessage(query=self._SELECT_COLUMNS + where_clause, consistency_level=cl) indexes_query = QueryMessage(query=self._SELECT_INDEXES + where_clause, consistency_level=cl) triggers_query = QueryMessage(query=self._SELECT_TRIGGERS + where_clause, consistency_level=cl) - views_query = QueryMessage(query=self._SELECT_VIEWS + where_clause, consistency_level=cl) + + # in protocol v4 we don't know if this event is a view or a table, so we look for both + view_query = QueryMessage(query=self._SELECT_VIEWS + " WHERE keyspace_name = '%s' AND view_name = '%s'" % (keyspace, table), + consistency_level=cl) (cf_success, cf_result), (col_success, col_result), (indexes_sucess, indexes_result), \ - (triggers_success, triggers_result), (views_success, views_result) \ - = self.connection.wait_for_responses(cf_query, col_query, indexes_query, triggers_query, views_query, + (triggers_success, triggers_result), (view_success, view_result) \ + = self.connection.wait_for_responses(cf_query, col_query, indexes_query, triggers_query, view_query, timeout=self.timeout, fail_on_error=False) table_result = self._handle_results(cf_success, cf_result) - + col_result = self._handle_results(col_success, col_result) if table_result: - col_result = self._handle_results(col_success, col_result) indexes_result = self._handle_results(indexes_sucess, indexes_result) triggers_result = self._handle_results(triggers_success, triggers_result) - views_result = self._handle_results(views_success, views_result) - table_meta = self._build_table_metadata(table_result[0], col_result, triggers_result, indexes_result) - keyspace_meta = keyspaces.get(keyspace) - if keyspace_meta: - for view_row in views_result: - view_meta = self._build_view_metadata(keyspace_meta.tables, view_row) - keyspace_meta.views[view_meta.name] = view_meta - return table_meta + return self._build_table_metadata(table_result[0], col_result, triggers_result, indexes_result) + + view_result = self._handle_results(view_success, view_result) + if view_result: + return self._build_view_metadata(view_result[0], col_result) @staticmethod def _build_keyspace_metadata_internal(row): @@ -2125,38 +2139,7 @@ def _build_table_metadata(self, row, col_rows=None, trigger_rows=None, index_row compact_static = True table_meta.is_compact_storage = True - # partition key - partition_rows = [r for r in col_rows - if r.get('kind', None) == "partition_key"] - if len(partition_rows) > 1: - partition_rows = sorted(partition_rows, key=lambda row: row.get('position')) - for r in partition_rows: - # we have to add meta here (and not in the later loop) because TableMetadata.columns is an - # OrderedDict, and it assumes keys are inserted first, in order, when exporting CQL - column_meta = self._build_column_metadata(table_meta, r) - table_meta.columns[column_meta.name] = column_meta - table_meta.partition_key.append(table_meta.columns[r.get('column_name')]) - - # clustering key - if not compact_static: - clustering_rows = [r for r in col_rows - if r.get('kind', None) == "clustering"] - if len(clustering_rows) > 1: - clustering_rows = sorted(clustering_rows, key=lambda row: row.get('position')) - for r in clustering_rows: - column_meta = self._build_column_metadata(table_meta, r) - table_meta.columns[column_meta.name] = column_meta - table_meta.clustering_key.append(table_meta.columns[r.get('column_name')]) - - for col_row in (r for r in col_rows - if r.get('kind', None) not in ('partition_key', 'clustering')): - column_meta = self._build_column_metadata(table_meta, col_row) - if not compact_static or column_meta.is_static: - # for compact static tables, we omit the clustering key and value, and only add the logical columns. - # They are marked not static so that it generates appropriate CQL - if compact_static: - column_meta.is_static = False - table_meta.columns[column_meta.name] = column_meta + self._build_table_columns(table_meta, col_rows, compact_static) for trigger_row in trigger_rows: trigger_meta = self._build_trigger_metadata(table_meta, trigger_row) @@ -2176,6 +2159,52 @@ def _build_table_options(self, row): """ Setup the mostly-non-schema table options, like caching settings """ return dict((o, row.get(o)) for o in self.recognized_table_options if o in row) + def _build_table_columns(self, meta, col_rows, compact_static=False): + # partition key + partition_rows = [r for r in col_rows + if r.get('kind', None) == "partition_key"] + if len(partition_rows) > 1: + partition_rows = sorted(partition_rows, key=lambda row: row.get('component_index')) + for r in partition_rows: + # we have to add meta here (and not in the later loop) because TableMetadata.columns is an + # OrderedDict, and it assumes keys are inserted first, in order, when exporting CQL + column_meta = self._build_column_metadata(meta, r) + meta.columns[column_meta.name] = column_meta + meta.partition_key.append(meta.columns[r.get('column_name')]) + + # clustering key + if not compact_static: + clustering_rows = [r for r in col_rows + if r.get('kind', None) == "clustering"] + if len(clustering_rows) > 1: + clustering_rows = sorted(clustering_rows, key=lambda row: row.get('component_index')) + for r in clustering_rows: + column_meta = self._build_column_metadata(meta, r) + meta.columns[column_meta.name] = column_meta + meta.clustering_key.append(meta.columns[r.get('column_name')]) + + for col_row in (r for r in col_rows + if r.get('kind', None) not in ('partition_key', 'clustering_key')): + column_meta = self._build_column_metadata(meta, col_row) + if not compact_static or column_meta.is_static: + # for compact static tables, we omit the clustering key and value, and only add the logical columns. + # They are marked not static so that it generates appropriate CQL + if compact_static: + column_meta.is_static = False + meta.columns[column_meta.name] = column_meta + + def _build_view_metadata(self, row, col_rows=None): + keyspace_name = row["keyspace_name"] + view_name = row["view_name"] + base_table_name = row["base_table_name"] + include_all_columns = row["include_all_columns"] + col_rows = col_rows or self.keyspace_table_col_rows[keyspace_name][view_name] + view_meta = MaterializedViewMetadata(keyspace_name, view_name, base_table_name, + include_all_columns, self._build_table_options(row)) + self._build_table_columns(view_meta, col_rows) + + return view_meta + @staticmethod def _build_column_metadata(table_metadata, row): name = row["column_name"] @@ -2194,17 +2223,6 @@ def _build_index_metadata(table_metadata, row): target_type = row.get('target_type') return IndexMetadata(table_metadata, index_name, index_type, index_options, target_columns, target_type) - @staticmethod - def _build_view_metadata(tables, row): - table_name = row.get("table_name") - view_name = row.get("view_name") - view_table_metadata = tables.get(view_name) - clustering_columns = row.get("clustering_columns") - included_columns = row.get("included_columns") - target_columns = row.get("target_columns") - return MaterializedViewMetadata(view_table_metadata, table_name, view_name, - clustering_columns, included_columns, target_columns) - @staticmethod def _build_trigger_metadata(table_metadata, row): name = row["trigger_name"] @@ -2256,11 +2274,9 @@ def _aggregate_results(self): cfname = row[self._table_name_col] m[ksname][cfname].append(row) - m = self.keyspace_table_view_rows + m = self.keyspace_view_rows for row in self.views_result: - ksname = row["keyspace_name"] - cfname = row[self._table_name_col] - m[ksname][cfname].append(row) + m[row["keyspace_name"]].append(row) class TableMetadataV3(TableMetadata): @@ -2272,11 +2288,12 @@ class TableMetadataV3(TableMetadata): def is_cql_compatible(self): return True - def _make_option_strings(self): + @classmethod + def _make_option_strings(cls, options_map): ret = [] - options_copy = dict(self.options.items()) + options_copy = dict(options_map.items()) - for option in self.option_maps: + for option in cls.option_maps: value = options_copy.pop(option, {}) params = ("'%s': '%s'" % (k, v) for k, v in value.items()) ret.append("%s = {%s}" % (option, ', '.join(params))) @@ -2295,32 +2312,55 @@ class MaterializedViewMetadata(object): A representation of a materialized view on a table """ - table = None - """ The :class:`.TableMetadata` describing this view (not the table the view is on). """ + keyspace_name = None - target_table_name = None - """ String name of the table this view targets """ + """ A string name of the view.""" name = None """ A string name of the view.""" - clustering_columns = None - """ List of clustering column names.""" + base_table_name = None + """ A string name of the base table for this view.""" + + partition_key = None + """ + A list of :class:`.ColumnMetadata` instances representing the columns in + the partition key for this view. This will always hold at least one + column. + """ + + clustering_key = None + """ + A list of :class:`.ColumnMetadata` instances representing the columns + in the clustering key for this view. + + Note that a table may have no clustering keys, in which case this will + be an empty list. + """ - included_columns = None - """ List of included column names selected by the view.""" + columns = None + """ + A dict mapping column names to :class:`.ColumnMetadata` instances. + """ - target_columns = None - """ List of target column names (essentially the partition key of the view) """ + include_all_columns = False + """ A flag indicating whether the view was created AS SELECT * """ + + options = None + """ + A dict mapping table option names to their specific settings for this + view. + """ - def __init__(self, view_table_metadata, target_table_name, view_name, - clustering_columns, included_columns, target_columns): - self.table = view_table_metadata - self.target_table_name = target_table_name + def __init__(self, keyspace_name, view_name, base_table_name, include_all_columns, options): + self.keyspace_name = keyspace_name self.name = view_name - self.clustering_columns = clustering_columns or [] - self.included_columns = included_columns or [] - self.target_columns = target_columns or [] + self.base_table_name = base_table_name + self.partition_key = [] + self.clustering_key = [] + self.columns = OrderedDict() + self.include_all_columns = include_all_columns + self.options = options or {} def as_cql_query(self, formatted=False): """ @@ -2329,39 +2369,31 @@ def as_cql_query(self, formatted=False): be added to make the query more readable. """ sep = '\n ' if formatted else ' ' - keyspace = protect_name(self.table.keyspace_name) if self.table else '' + keyspace = protect_name(self.keyspace_name) name = protect_name(self.name) - selected_cols = ', '.join(protect_name(col) for col in self.included_columns) - target_table = protect_name(self.target_table_name) - where_clause = " AND ".join("%s IS NOT NULL" % protect_name(col) for col in self.target_columns + self.clustering_columns) + selected_cols = '*' if self.include_all_columns else ', '.join(protect_name(col.name) for col in self.columns.values()) + base_table = protect_name(self.base_table_name) + where_clause = " AND ".join("%s IS NOT NULL" % protect_name(col.name) for col in self.partition_key + self.clustering_key) - part_key = ', '.join(protect_name(n) for n in self.target_columns) - if len(self.target_columns) > 1: + part_key = ', '.join(protect_name(col.name) for col in self.partition_key) + if len(self.partition_key) > 1: pk = "((%s)" % part_key else: pk = "(%s" % part_key - if self.clustering_columns: - pk += ", %s" % ', '.join(protect_name(n) for n in self.clustering_columns) + if self.clustering_key: + pk += ", %s" % ', '.join(protect_name(col.name) for col in self.clustering_key) pk += ")" - properties = self.table._property_string(formatted) if self.table else '
' + properties = TableMetadataV3._property_string(formatted, self.clustering_key, self.options) - body = "CREATE MATERIALIZED VIEW %(keyspace)s.%(name)s AS%(sep)s" \ - "SELECT %(selected_cols)s FROM %(keyspace)s.%(target_table)s%(sep)s" \ + return "CREATE MATERIALIZED VIEW %(keyspace)s.%(name)s AS%(sep)s" \ + "SELECT %(selected_cols)s%(sep)s" \ + "FROM %(keyspace)s.%(base_table)s%(sep)s" \ "WHERE %(where_clause)s%(sep)s" \ "PRIMARY KEY %(pk)s%(sep)s" \ "WITH %(properties)s;" % locals() - if not self.table: - ret = "/*\nWarning: Materialize View omitted because the model is missing a table describing it.\n" \ - "\nApproximate structure, for reference:\n(this should not be used to reproduce this schema)\n\n%s" \ - + body + "\n*/" - return ret - - return body - - def get_schema_parser(connection, timeout): server_version = connection.server_version From 156a5b2f09e9f629dfde6828f4be51bb6744931f Mon Sep 17 00:00:00 2001 From: Clint Ascencio Date: Tue, 15 Sep 2015 06:29:44 -0500 Subject: [PATCH 0038/2005] Changed doxyfile out/input directories --- doxyfile_python => doxyfile | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) rename doxyfile_python => doxyfile (99%) diff --git a/doxyfile_python b/doxyfile similarity index 99% rename from doxyfile_python rename to doxyfile index 01cf58b6e0..d453557e22 100644 --- a/doxyfile_python +++ b/doxyfile @@ -58,7 +58,7 @@ PROJECT_LOGO = # entered, it will be relative to the location where doxygen was started. If # left blank the current directory will be used. -OUTPUT_DIRECTORY = /home/jenkins/workspace/python_docs +OUTPUT_DIRECTORY = # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub- # directories (in 2 levels) under the output directory of each output format and @@ -228,13 +228,15 @@ TAB_SIZE = 4 # "Side Effects:". You can put \n's in the value part of an alias to insert # newlines. -ALIASES = -ALIASES += expected_errors="\par Expected Errors\n" -ALIASES += jira_ticket="\par JIRA Ticket\n" -ALIASES += expected_result="\par Expected Result\n" -ALIASES += test_assumptions="\par Test Assumptions\n" -ALIASES += note="\par Note\n" -ALIASES += test_category="\par Test Category\n" +ALIASES = "test_assumptions=\par Test Assumptions\n" \ + "note=\par Note\n" \ + "test_category=\par Test Category\n" \ + "jira_ticket=\par JIRA Ticket\n" \ + "expected_result=\par Expected Result\n" \ + "since=\par Since\n" \ + "param=\par Parameters\n" \ + "return=\par Return\n" \ + "expected_errors=\par Expected Errors\n" # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding "class=itcl::class" @@ -759,7 +761,7 @@ WARN_LOGFILE = # spaces. # Note: If this tag is empty the current directory is searched. -INPUT = /home/jenkins/workspace/python_docs/tests/integration/ +INPUT = ./tests # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses From 96883eb8b61753bd704a087717f2d556980bf4d2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 15 Sep 2015 09:23:46 -0500 Subject: [PATCH 0039/2005] export_as_string for MV metadata --- cassandra/metadata.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index dfd0805d88..b7d76c8deb 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2394,6 +2394,9 @@ def as_cql_query(self, formatted=False): "PRIMARY KEY %(pk)s%(sep)s" \ "WITH %(properties)s;" % locals() + def export_as_string(self): + return self.as_cql_query(formatted=True) + def get_schema_parser(connection, timeout): server_version = connection.server_version From 3e75da2f3fa395a5742b86a41d087943608e5387 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 15 Sep 2015 14:10:34 -0500 Subject: [PATCH 0040/2005] Remove target type from index metadata PYTHON-400 (C* 3.0 udpate) --- cassandra/metadata.py | 162 ++++++++++++-------- tests/integration/standard/test_metadata.py | 22 +-- 2 files changed, 111 insertions(+), 73 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 81b4ba7185..bf4a221483 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1245,84 +1245,64 @@ class IndexMetadata(object): A representation of a secondary index on a column. """ - table = None - """ The :class:`.TableMetadata` this column belongs to. """ + column = None + """ + The column (:class:`.ColumnMetadata`) this index is on. + """ name = None """ A string name for the index. """ - columns = None - """ - The set of columns (:class:`.ColumnMetadata`) this index is on - (may be empty for per-row indexes). - - .. versionchanged:: 3.0.0 - - Was previously a singular column - """ - index_type = None """ A string representing the type of index. """ - index_options = None + index_options = {} """ A dict of index options. """ - target_type = None - """ - String target type (COLUMN, ROW, etc), if available - - .. versionadded:: 3.0.0 - """ - - def __init__(self, table_metadata, index_name=None, index_type=None, index_options=None, target_columns=None, target_type=None): - self.table = table_metadata + def __init__(self, column_metadata, index_name=None, index_type=None, index_options={}): + self.column = column_metadata self.name = index_name - self.columns = set(table_metadata.columns[col_name] for col_name in target_columns) if target_columns else set() self.index_type = index_type - self.index_options = index_options or {} - self.target_type = target_type + self.index_options = index_options def as_cql_query(self): """ Returns a CQL query that can be used to recreate this index. """ + table = self.column.table if self.index_type != "CUSTOM": - target_fmt = "CREATE INDEX %s ON %s.%s (%%s)" % ( + index_target = protect_name(self.column.name) + if self.index_options is not None: + option_keys = self.index_options.keys() + if "index_keys" in option_keys: + index_target = 'keys(%s)' % (index_target,) + elif "index_values" in option_keys: + # don't use any "function" for collection values + pass + else: + # it might be a "full" index on a frozen collection, but + # we need to check the data type to verify that, because + # there is no special index option for full-collection + # indexes. + data_type = self.column.data_type + collection_types = ('map', 'set', 'list') + if data_type.typename == "frozen" and data_type.subtypes[0].typename in collection_types: + # no index option for full-collection index + index_target = 'full(%s)' % (index_target,) + + return "CREATE INDEX %s ON %s.%s (%s)" % ( self.name, # Cassandra doesn't like quoted index names for some reason - protect_name(self.table.keyspace.name), - protect_name(self.table.name)) + protect_name(table.keyspace.name), + protect_name(table.name), + index_target) else: - target_fmt = "CREATE CUSTOM INDEX %s ON %s.%s (%%s) USING '%s'" % ( + return "CREATE CUSTOM INDEX %s ON %s.%s (%s) USING '%s'" % ( self.name, # Cassandra doesn't like quoted index names for some reason - protect_name(self.table.keyspace.name), - protect_name(self.table.name), + protect_name(table.keyspace.name), + protect_name(table.name), + protect_name(self.column.name), self.index_options["class_name"]) - # TODO: when CASSANDRA-10124 lands, we'll need to loop over self.columns - # not updating yet because it seems like index_options will have to change to - # specify different options per column(?) - # Also, grammar will be changing for indexes with no target columns. This is TBD - col = tuple(self.columns)[0] if self.columns else None - index_target = protect_name(col.name) if col else '' - if col and self.index_options is not None: - option_keys = self.index_options.keys() - if "index_keys" in option_keys: - index_target = 'keys(%s)' % (index_target,) - elif "index_values" in option_keys: - # don't use any "function" for collection values - pass - else: - # it might be a "full" index on a frozen collection, but - # we need to check the data type to verify that, because - # there is no special index option for full-collection - # indexes. - data_type = col.data_type - collection_types = ('map', 'set', 'list') - if data_type.typename == "frozen" and data_type.subtypes[0].typename in collection_types: - # no index option for full-collection index - index_target = 'full(%s)' % (index_target,) - return target_fmt % index_target - def export_as_string(self): """ Returns a CQL query string that can be used to recreate this index. @@ -1897,7 +1877,7 @@ def _build_index_metadata(column_metadata, row): if index_name or index_type: options = row.get("index_options") index_options = json.loads(options) if options else None - return IndexMetadata(column_metadata.table, index_name, index_type, index_options, (column_metadata.name,)) + return IndexMetadata(column_metadata, index_name, index_type, index_options) else: return None @@ -2143,12 +2123,10 @@ def _build_column_metadata(table_metadata, row): @staticmethod def _build_index_metadata(table_metadata, row): index_name = row.get("index_name") - index_type = row.get("index_type") - if index_name or index_type: + kind = row.get("kind") + if index_name or kind: index_options = row.get("options") - target_columns = row.get('target_columns') - target_type = row.get('target_type') - return IndexMetadata(table_metadata, index_name, index_type, index_options, target_columns, target_type) + return IndexMetadataV3(table_metadata.keyspace_name, table_metadata.name, index_name, kind, index_options) else: return None @@ -2229,6 +2207,66 @@ def _make_option_strings(self): return list(sorted(ret)) +class IndexMetadataV3(object): + """ + A representation of a secondary index on a column. + """ + keyspace_name = None + """ A string name of the keyspace. """ + + table_name = None + """ A string name of the table this index is on. """ + + name = None + """ A string name for the index. """ + + kind = None + """ A string representing the kind of index (COMPOSITE, CUSTOM,...). """ + + index_options = {} + """ A dict of index options. """ + + def __init__(self, keyspace_name, table_name, index_name, kind, index_options): + self.keyspace_name = keyspace_name + self.table_name = table_name + self.name = index_name + self.kind = kind + self.index_options = index_options + + def as_cql_query(self): + """ + Returns a CQL query that can be used to recreate this index. + """ + options = dict(self.index_options) + index_target = options.pop("target") + # "values(...) is implicit in CQL + if index_target.lower().startswith('values('): + index_target = index_target[7:-1] + if self.kind != "CUSTOM": + return "CREATE INDEX %s ON %s.%s (%s)" % ( + self.name, # Cassandra doesn't like quoted index names for some reason + protect_name(self.keyspace_name), + protect_name(self.table_name), + index_target) + else: + class_name = options.pop("class_name") + ret = "CREATE CUSTOM INDEX %s ON %s.%s (%s) USING '%s'" % ( + self.name, # Cassandra doesn't like quoted index names for some reason + protect_name(self.keyspace_name), + protect_name(self.table_name), + index_target, + class_name) + if options: + ret += " WITH OPTIONS = %s" % Encoder().cql_encode_all_types(options) + return ret + + def export_as_string(self): + """ + Returns a CQL query string that can be used to recreate this index. + """ + return self.as_cql_query() + ';' + + def get_schema_parser(connection, timeout): server_version = connection.server_version if server_version.startswith('3'): diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index ed24b736c3..10594ae952 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -27,7 +27,7 @@ from cassandra.cluster import Cluster from cassandra.cqltypes import DoubleType, Int32Type, ListType, UTF8Type, MapType from cassandra.encoder import Encoder -from cassandra.metadata import (Metadata, KeyspaceMetadata, TableMetadata, IndexMetadata, +from cassandra.metadata import (Metadata, KeyspaceMetadata, IndexMetadata, IndexMetadataV3, Token, MD5Token, TokenMap, murmur3, Function, Aggregate, protect_name, protect_names, get_schema_parser) from cassandra.policies import SimpleConvictionPolicy @@ -1276,10 +1276,10 @@ def test_index_updates(self): ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name] table_meta = ks_meta.tables[self.table_name] - self.assertIsInstance(ks_meta.indexes['a_idx'], IndexMetadata) - self.assertIsInstance(ks_meta.indexes['b_idx'], IndexMetadata) - self.assertIsInstance(table_meta.indexes['a_idx'], IndexMetadata) - self.assertIsInstance(table_meta.indexes['b_idx'], IndexMetadata) + self.assertIsInstance(ks_meta.indexes['a_idx'], (IndexMetadata, IndexMetadataV3)) + self.assertIsInstance(ks_meta.indexes['b_idx'], (IndexMetadata, IndexMetadataV3)) + self.assertIsInstance(table_meta.indexes['a_idx'], (IndexMetadata, IndexMetadataV3)) + self.assertIsInstance(table_meta.indexes['b_idx'], (IndexMetadata, IndexMetadataV3)) # both indexes updated when index dropped self.session.execute("DROP INDEX a_idx") @@ -1290,9 +1290,9 @@ def test_index_updates(self): ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name] table_meta = ks_meta.tables[self.table_name] self.assertNotIn('a_idx', ks_meta.indexes) - self.assertIsInstance(ks_meta.indexes['b_idx'], IndexMetadata) + self.assertIsInstance(ks_meta.indexes['b_idx'], (IndexMetadata, IndexMetadataV3)) self.assertNotIn('a_idx', table_meta.indexes) - self.assertIsInstance(table_meta.indexes['b_idx'], IndexMetadata) + self.assertIsInstance(table_meta.indexes['b_idx'], (IndexMetadata, IndexMetadataV3)) # keyspace index updated when table dropped self.drop_basic_table() @@ -1308,15 +1308,15 @@ def test_index_follows_alter(self): self.session.execute("CREATE INDEX %s ON %s (a)" % (idx, self.table_name)) ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name] table_meta = ks_meta.tables[self.table_name] - self.assertIsInstance(ks_meta.indexes[idx], IndexMetadata) - self.assertIsInstance(table_meta.indexes[idx], IndexMetadata) + self.assertIsInstance(ks_meta.indexes[idx], (IndexMetadata, IndexMetadataV3)) + self.assertIsInstance(table_meta.indexes[idx], (IndexMetadata, IndexMetadataV3)) self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' % self.keyspace_name) old_meta = ks_meta ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name] self.assertIsNot(ks_meta, old_meta) table_meta = ks_meta.tables[self.table_name] - self.assertIsInstance(ks_meta.indexes[idx], IndexMetadata) - self.assertIsInstance(table_meta.indexes[idx], IndexMetadata) + self.assertIsInstance(ks_meta.indexes[idx], (IndexMetadata, IndexMetadataV3)) + self.assertIsInstance(table_meta.indexes[idx], (IndexMetadata, IndexMetadataV3)) self.drop_basic_table() From ec572c518cce960034403b62b29edf0e5ce27e9c Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 14 Aug 2015 12:00:20 -0500 Subject: [PATCH 0041/2005] Adding yaml for job builder automation. Fixing up some timing issues in unit tests --- build.yaml | 49 +++++++++++++++++++++ tests/integration/standard/test_metadata.py | 12 ++--- 2 files changed, 55 insertions(+), 6 deletions(-) create mode 100644 build.yaml diff --git a/build.yaml b/build.yaml new file mode 100644 index 0000000000..6a39582863 --- /dev/null +++ b/build.yaml @@ -0,0 +1,49 @@ +python: + - 2.7 + - 3.4 +os: + - ubuntu/trusty64 +cassandra: + - 1.2 + - 2.0 + - 2.1 + - 2.2 + - 3.0 +env: + EVENT_LOOP_MANAGER: + - libev + CYTHON: + - CYTHON + - NO_CYTHON +build: + - script: | + export JAVA_HOME=$CCM_JAVA_HOME + export PATH=$JAVA_HOME/bin:$PATH + + # Install dependencies + if [[ $EVENT_LOOP_MANAGER == 'libev' ]]; then + sudo apt-get install -y libev4 libev-dev + fi + pip install -r test-requirements.txt + + + if [[ $CYTHON == 'CYTHON' ]]; then + pip install cython + pip install numpy + # Install the driver & compile C extensions + python setup.py build_ext --inplace + else + # Install the driver & compile C extensions with no cython + python setup.py build_ext --inplace --no-cython + fi + + echo "==========RUNNING CQLENGINE TESTS==========" + CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION nosetests -s -v --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true + + echo "==========RUNNING INTEGRATION TESTS==========" + CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION nosetests -s -v --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true + + echo "==========RUNNING LONG INTEGRATION TESTS==========" + CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION nosetests -s -v --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true + - xunit: + - "*_results.xml" \ No newline at end of file diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index b7c860716f..621f3b31ad 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1793,12 +1793,12 @@ def setup_class(cls): cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) cls.keyspace_name = cls.__name__.lower() cls.session = cls.cluster.connect() - cls.session.execute("CREATE KEYSPACE IF NOT EXISTS %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}" % cls.keyspace_name) + cls.session.execute("CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}" % cls.keyspace_name) cls.session.set_keyspace(cls.keyspace_name) @classmethod def teardown_class(cls): - cls.session.execute("DROP KEYSPACE IF EXISTS %s" % cls.keyspace_name) + cls.session.execute("DROP KEYSPACE %s" % cls.keyspace_name) cls.cluster.shutdown() def _run_on_all_nodes(self, query, params): @@ -1821,8 +1821,8 @@ def test_keyspace_bad_options(self): self._run_on_all_nodes('UPDATE system.schema_keyspaces SET strategy_options=%s' + where_cls, (strategy_options,)) def test_keyspace_bad_index(self): - self.session.execute('CREATE TABLE %s (k int PRIMARY KEY, v int)' % self.function_name) - self.session.execute('CREATE INDEX ON %s(v)' % self.function_name) + self.session.execute('CREATE TABLE %s (k int PRIMARY KEY, v int)' % self.function_name, timeout=30.0) + self.session.execute('CREATE INDEX ON %s(v)' % self.function_name, timeout=30.0) where_cls = " WHERE keyspace_name='%s' AND columnfamily_name='%s' AND column_name='v'" \ % (self.keyspace_name, self.function_name) index_options = self.session.execute('SELECT index_options FROM system.schema_columns' + where_cls)[0].index_options @@ -1838,7 +1838,7 @@ def test_keyspace_bad_index(self): self._run_on_all_nodes('UPDATE system.schema_columns SET index_options=%s' + where_cls, (index_options,)) def test_table_bad_comparator(self): - self.session.execute('CREATE TABLE %s (k int PRIMARY KEY, v int)' % self.function_name) + self.session.execute('CREATE TABLE %s (k int PRIMARY KEY, v int)' % self.function_name, timeout=30.0) where_cls = " WHERE keyspace_name='%s' AND columnfamily_name='%s'" % (self.keyspace_name, self.function_name) comparator = self.session.execute('SELECT comparator FROM system.schema_columnfamilies' + where_cls)[0].comparator try: @@ -1854,7 +1854,7 @@ def test_table_bad_comparator(self): @unittest.skipUnless(PROTOCOL_VERSION >= 3, "Requires protocol version 3+") def test_user_type_bad_typename(self): - self.session.execute('CREATE TYPE %s (i int, d double)' % self.function_name) + self.session.execute('CREATE TYPE %s (i int, d double)' % self.function_name, timeout=60.0) where_cls = " WHERE keyspace_name='%s' AND type_name='%s'" % (self.keyspace_name, self.function_name) field_types = self.session.execute('SELECT field_types FROM system.schema_usertypes' + where_cls)[0].field_types try: From e36c94bed1f10718d5661a1628e6002aafb8aafe Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 15 Sep 2015 15:22:06 -0500 Subject: [PATCH 0042/2005] Don't parse target; values(collection_col) is supported PYTHON-400 --- cassandra/metadata.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index bf4a221483..dbf04813fb 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2239,9 +2239,6 @@ def as_cql_query(self): """ options = dict(self.index_options) index_target = options.pop("target") - # "values(...) is implicit in CQL - if index_target.lower().startswith('values('): - index_target = index_target[7:-1] if self.kind != "CUSTOM": return "CREATE INDEX %s ON %s.%s (%s)" % ( self.name, # Cassandra doesn't like quoted index names for some reason From 083aaaa409e64656f488d4673625c64f1d373fb0 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 16 Sep 2015 15:15:22 -0500 Subject: [PATCH 0043/2005] Properly sort key components by 'position' --- cassandra/metadata.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 6b5026e6b8..21b004769f 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2144,7 +2144,7 @@ def _build_table_columns(self, meta, col_rows, compact_static=False): partition_rows = [r for r in col_rows if r.get('kind', None) == "partition_key"] if len(partition_rows) > 1: - partition_rows = sorted(partition_rows, key=lambda row: row.get('component_index')) + partition_rows = sorted(partition_rows, key=lambda row: row.get('position')) for r in partition_rows: # we have to add meta here (and not in the later loop) because TableMetadata.columns is an # OrderedDict, and it assumes keys are inserted first, in order, when exporting CQL @@ -2157,7 +2157,7 @@ def _build_table_columns(self, meta, col_rows, compact_static=False): clustering_rows = [r for r in col_rows if r.get('kind', None) == "clustering"] if len(clustering_rows) > 1: - clustering_rows = sorted(clustering_rows, key=lambda row: row.get('component_index')) + clustering_rows = sorted(clustering_rows, key=lambda row: row.get('position')) for r in clustering_rows: column_meta = self._build_column_metadata(meta, r) meta.columns[column_meta.name] = column_meta From 9f91a6da05e67c297e7ed94a136abac7427748c1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 17 Sep 2015 11:29:44 -0500 Subject: [PATCH 0044/2005] get clustering order from ss.tables.clustering_order in 3.0+ PYTHON-402 CASSANDRA-6717 --- cassandra/cqltypes.py | 4 ++++ cassandra/metadata.py | 19 ++++++++++++++----- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index f39d28b94b..fabef397e7 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -184,6 +184,10 @@ def lookup_casstype(casstype): raise ValueError("Don't know how to parse type string %r: %s" % (casstype, e)) +def is_reversed_casstype(data_type): + return issubclass(data_type, ReversedType) + + class EmptyValue(object): """ See _CassandraType.support_empty_values """ diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 21b004769f..f3fa64ded0 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1145,7 +1145,7 @@ def _property_string(cls, formatted, clustering_key, options_map, is_compact_sto inner = [] for col in clustering_key: - ordering = "DESC" if issubclass(col.data_type, types.ReversedType) else "ASC" + ordering = "DESC" if col.is_reversed else "ASC" inner.append("%s %s" % (protect_name(col.name), ordering)) cluster_str += "(%s)" % ", ".join(inner) @@ -1258,12 +1258,18 @@ class ColumnMetadata(object): be :const:`True`, otherwise :const:`False`. """ - def __init__(self, table_metadata, column_name, data_type, index_metadata=None, is_static=False): + is_reversed = False + """ + If this column is reversed (DESC) as in clustering order + """ + + def __init__(self, table_metadata, column_name, data_type, index_metadata=None, is_static=False, is_reversed=False): self.table = table_metadata self.name = column_name self.data_type = data_type self.index = index_metadata self.is_static = is_static + self.is_reversed = is_reversed @property def typestring(self): @@ -1839,7 +1845,8 @@ def _build_table_metadata(self, row, col_rows=None, trigger_rows=None): else: column_name = "column%d" % i - col = ColumnMetadata(table_meta, column_name, column_name_types[i]) + data_type = column_name_types[i] + col = ColumnMetadata(table_meta, column_name, data_type, is_reversed=types.is_reversed_casstype(data_type)) table_meta.columns[column_name] = col table_meta.clustering_key.append(col) @@ -1908,7 +1915,8 @@ def _build_column_metadata(table_metadata, row): name = row["column_name"] data_type = types.lookup_casstype(row["validator"]) is_static = row.get("type", None) == "static" - column_meta = ColumnMetadata(table_metadata, name, data_type, is_static=is_static) + is_reversed = types.is_reversed_casstype(data_type) + column_meta = ColumnMetadata(table_metadata, name, data_type, is_static=is_static, is_reversed=is_reversed) return column_meta @staticmethod @@ -2190,7 +2198,8 @@ def _build_column_metadata(table_metadata, row): name = row["column_name"] data_type = types.lookup_casstype(row["type"]) is_static = row.get("kind", None) == "static" - column_meta = ColumnMetadata(table_metadata, name, data_type, is_static=is_static) + is_reversed = row["clustering_order"].upper() == "DESC" + column_meta = ColumnMetadata(table_metadata, name, data_type, is_static=is_static, is_reversed=is_reversed) return column_meta @staticmethod From b927f091d26a630b04b7651b16c4c0b1e4d4261a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 17 Sep 2015 12:28:33 -0500 Subject: [PATCH 0045/2005] Remove view meta from table views on drop previously only removed from keyspace index --- cassandra/metadata.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 21b004769f..50a22dd0c4 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -688,9 +688,15 @@ def _drop_table_metadata(self, table_name): self.indexes.pop(index_name, None) for view_name in table_meta.views: self.views.pop(view_name, None) + return # we can't tell table drops from views, so drop both # (name is unique among them, within a keyspace) - self.views.pop(table_name, None) + view_meta = self.views.pop(table_name, None) + if view_meta: + try: + self.tables[view_meta.base_table_name].views.pop(table_name, None) + except KeyError: + pass def _add_view_metadata(self, view_metadata): try: From 4a276550900391aad331661d102543ac8a151c3e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 17 Sep 2015 12:29:16 -0500 Subject: [PATCH 0046/2005] Move semi to MaterializedView.export_as_string --- cassandra/metadata.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 50a22dd0c4..206596870a 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2435,10 +2435,10 @@ def as_cql_query(self, formatted=False): "FROM %(keyspace)s.%(base_table)s%(sep)s" \ "WHERE %(where_clause)s%(sep)s" \ "PRIMARY KEY %(pk)s%(sep)s" \ - "WITH %(properties)s;" % locals() + "WITH %(properties)s" % locals() def export_as_string(self): - return self.as_cql_query(formatted=True) + return self.as_cql_query(formatted=True) + ";" def get_schema_parser(connection, timeout): From 42b0dd3a46d16349fb99543236e212a7db327f0a Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 16 Sep 2015 16:50:19 -0500 Subject: [PATCH 0047/2005] fixing client warnings test to use different threshold with batch warnings --- tests/integration/standard/test_client_warnings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/standard/test_client_warnings.py b/tests/integration/standard/test_client_warnings.py index 14405a8df1..4316c9398a 100644 --- a/tests/integration/standard/test_client_warnings.py +++ b/tests/integration/standard/test_client_warnings.py @@ -44,7 +44,7 @@ def setUpClass(cls): cls.warn_batch = BatchStatement() # 213 = 5 * 1024 / (4+4 + 4+4 + 4+4) # thresh_kb/ (min param size) - for x in range(213): + for x in range(214): cls.warn_batch.add(cls.prepared, (x, x, 1)) @classmethod From c2842228ef07bf4ce38499dc1b11e13ec3335d5a Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 16 Sep 2015 16:53:14 -0500 Subject: [PATCH 0048/2005] work around for session issue with setting schema versions --- tests/integration/standard/test_cluster.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index f218fd5d26..5fe978868c 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -365,9 +365,8 @@ def test_refresh_schema_no_wait(self): session = cluster.connect() schema_ver = session.execute("SELECT schema_version FROM system.local WHERE key='local'")[0][0] - - # create a schema disagreement - session.execute("UPDATE system.local SET schema_version=%s WHERE key='local'", (uuid4(),)) + new_schema_ver = uuid4() + session.execute("UPDATE system.local SET schema_version=%s WHERE key='local'", (new_schema_ver,)) try: agreement_timeout = 1 @@ -422,9 +421,10 @@ def test_refresh_schema_no_wait(self): end_time = time.time() self.assertGreaterEqual(end_time - start_time, agreement_timeout) self.assertIs(original_meta, c.metadata.keyspaces) - c.shutdown() finally: + # TODO once fixed this connect call + session = cluster.connect() session.execute("UPDATE system.local SET schema_version=%s WHERE key='local'", (schema_ver,)) cluster.shutdown() From 4af508333f7bcd4de4c8f2bc77a67480b44102de Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 17 Sep 2015 16:38:23 -0500 Subject: [PATCH 0049/2005] Rework BadMetaTest to avoid messing with server metadata --- tests/integration/standard/test_metadata.py | 141 ++++++++++---------- 1 file changed, 72 insertions(+), 69 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 10594ae952..09972833c6 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -20,7 +20,7 @@ import difflib import six import sys -from mock import Mock +from mock import Mock, patch from cassandra import AlreadyExists, SignatureDescriptor, UserFunctionDescriptor, UserAggregateDescriptor @@ -38,6 +38,8 @@ def setup_module(): use_singledc() + global CASS_SERVER_VERSION + CASS_SERVER_VERSION = get_server_versions()[0] class SchemaMetadataTests(unittest.TestCase): @@ -49,8 +51,6 @@ def cfname(self): return self._testMethodName.lower() def setUp(self): - self._cass_version, self._cql_version = get_server_versions() - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) self.session = self.cluster.connect() execute_until_pass(self.session, @@ -314,7 +314,7 @@ def test_indexes(self): self.assertIn('CREATE INDEX e_index', statement) def test_collection_indexes(self): - if get_server_versions()[0] < (2, 1, 0): + if CASS_SERVER_VERSION < (2, 1, 0): raise unittest.SkipTest("Secondary index on collections were introduced in Cassandra 2.1") self.session.execute("CREATE TABLE %s.%s (a int PRIMARY KEY, b map)" @@ -333,7 +333,7 @@ def test_collection_indexes(self): self.assertIn(' (b)', tablemeta.export_as_string()) # test full indexes on frozen collections, if available - if get_server_versions()[0] >= (2, 1, 3): + if CASS_SERVER_VERSION >= (2, 1, 3): self.session.execute("DROP TABLE %s.%s" % (self.ksname, self.cfname)) self.session.execute("CREATE TABLE %s.%s (a int PRIMARY KEY, b frozen>)" % (self.ksname, self.cfname)) @@ -669,7 +669,7 @@ def test_export_keyspace_schema_udts(self): Test udt exports """ - if get_server_versions()[0] < (2, 1, 0): + if CASS_SERVER_VERSION < (2, 1, 0): raise unittest.SkipTest('UDTs were introduced in Cassandra 2.1') if PROTOCOL_VERSION < 3: @@ -871,11 +871,10 @@ def test_token_map(self): def test_legacy_tables(self): - cass_ver = get_server_versions()[0] - if cass_ver < (2, 1, 0): + if CASS_SERVER_VERSION < (2, 1, 0): raise unittest.SkipTest('Test schema output assumes 2.1.0+ options') - if cass_ver >= (2, 2, 0): + if CASS_SERVER_VERSION >= (2, 2, 0): raise unittest.SkipTest('Cannot test cli script on Cassandra 2.2.0+') if sys.version_info[0:2] != (2, 7): @@ -1766,6 +1765,10 @@ class BadMetaTest(unittest.TestCase): PYTHON-370 """ + class BadMetaException(Exception): + pass + + @property def function_name(self): return self._testMethodName.lower() @@ -1777,75 +1780,75 @@ def setup_class(cls): cls.session = cls.cluster.connect() cls.session.execute("CREATE KEYSPACE IF NOT EXISTS %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}" % cls.keyspace_name) cls.session.set_keyspace(cls.keyspace_name) + connection = cls.cluster.control_connection._connection + cls.parser_class = get_schema_parser(connection, timeout=20).__class__ @classmethod def teardown_class(cls): cls.session.execute("DROP KEYSPACE IF EXISTS %s" % cls.keyspace_name) cls.cluster.shutdown() - def _run_on_all_nodes(self, query, params): - # used to update schema data on all nodes in the cluster - for _ in self.cluster.metadata.all_hosts(): - self.session.execute(query, params) + def _skip_if_not_version(self, version): + if CASS_SERVER_VERSION < version: + raise unittest.SkipTest("Requires server version >= %s" % (version,)) - def test_keyspace_bad_options(self): - strategy_options = self.session.execute('SELECT strategy_options FROM system.schema_keyspaces WHERE keyspace_name=%s', (self.keyspace_name,))[0].strategy_options - where_cls = " WHERE keyspace_name='%s'" % (self.keyspace_name,) - try: - self._run_on_all_nodes('UPDATE system.schema_keyspaces SET strategy_options=%s' + where_cls, ('some bad json',)) - c = Cluster(protocol_version=PROTOCOL_VERSION) - c.connect() - meta = c.metadata.keyspaces[self.keyspace_name] - self.assertIsNotNone(meta._exc_info) - self.assertIn("/*\nWarning:", meta.export_as_string()) - c.shutdown() - finally: - self._run_on_all_nodes('UPDATE system.schema_keyspaces SET strategy_options=%s' + where_cls, (strategy_options,)) + def test_bad_keyspace(self): + with patch.object(self.parser_class, '_build_keyspace_metadata_internal', side_effect=self.BadMetaException): + self.cluster.refresh_keyspace_metadata(self.keyspace_name) + m = self.cluster.metadata.keyspaces[self.keyspace_name] + self.assertIs(m._exc_info[0], self.BadMetaException) + self.assertIn("/*\nWarning:", m.export_as_string()) - def test_keyspace_bad_index(self): + def test_bad_table(self): self.session.execute('CREATE TABLE %s (k int PRIMARY KEY, v int)' % self.function_name) - self.session.execute('CREATE INDEX ON %s(v)' % self.function_name) - where_cls = " WHERE keyspace_name='%s' AND columnfamily_name='%s' AND column_name='v'" \ - % (self.keyspace_name, self.function_name) - index_options = self.session.execute('SELECT index_options FROM system.schema_columns' + where_cls)[0].index_options - try: - self._run_on_all_nodes('UPDATE system.schema_columns SET index_options=%s' + where_cls, ('some bad json',)) - c = Cluster(protocol_version=PROTOCOL_VERSION) - c.connect() - meta = c.metadata.keyspaces[self.keyspace_name].tables[self.function_name] - self.assertIsNotNone(meta._exc_info) - self.assertIn("/*\nWarning:", meta.export_as_string()) - c.shutdown() - finally: - self._run_on_all_nodes('UPDATE system.schema_columns SET index_options=%s' + where_cls, (index_options,)) + with patch.object(self.parser_class, '_build_column_metadata', side_effect=self.BadMetaException): + self.cluster.refresh_table_metadata(self.keyspace_name, self.function_name) + m = self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_name] + self.assertIs(m._exc_info[0], self.BadMetaException) + self.assertIn("/*\nWarning:", m.export_as_string()) - def test_table_bad_comparator(self): + def test_bad_index(self): self.session.execute('CREATE TABLE %s (k int PRIMARY KEY, v int)' % self.function_name) - where_cls = " WHERE keyspace_name='%s' AND columnfamily_name='%s'" % (self.keyspace_name, self.function_name) - comparator = self.session.execute('SELECT comparator FROM system.schema_columnfamilies' + where_cls)[0].comparator - try: - self._run_on_all_nodes('UPDATE system.schema_columnfamilies SET comparator=%s' + where_cls, ('DynamicCompositeType()',)) - c = Cluster(protocol_version=PROTOCOL_VERSION) - c.connect() - meta = c.metadata.keyspaces[self.keyspace_name].tables[self.function_name] - self.assertIsNotNone(meta._exc_info) - self.assertIn("/*\nWarning:", meta.export_as_string()) - c.shutdown() - finally: - self._run_on_all_nodes('UPDATE system.schema_columnfamilies SET comparator=%s' + where_cls, (comparator,)) - - @unittest.skipUnless(PROTOCOL_VERSION >= 3, "Requires protocol version 3+") - def test_user_type_bad_typename(self): + self.session.execute('CREATE INDEX ON %s(v)' % self.function_name) + with patch.object(self.parser_class, '_build_index_metadata', side_effect=self.BadMetaException): + self.cluster.refresh_table_metadata(self.keyspace_name, self.function_name) + m = self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_name] + self.assertIs(m._exc_info[0], self.BadMetaException) + self.assertIn("/*\nWarning:", m.export_as_string()) + + def test_bad_user_type(self): + self._skip_if_not_version((2, 1, 0)) self.session.execute('CREATE TYPE %s (i int, d double)' % self.function_name) - where_cls = " WHERE keyspace_name='%s' AND type_name='%s'" % (self.keyspace_name, self.function_name) - field_types = self.session.execute('SELECT field_types FROM system.schema_usertypes' + where_cls)[0].field_types - try: - self._run_on_all_nodes('UPDATE system.schema_usertypes SET field_types[0]=%s' + where_cls, ('Tr@inWr3ck##)))',)) - c = Cluster(protocol_version=PROTOCOL_VERSION) - c.connect() - meta = c.metadata.keyspaces[self.keyspace_name] - self.assertIsNotNone(meta._exc_info) - self.assertIn("/*\nWarning:", meta.export_as_string()) - c.shutdown() - finally: - self._run_on_all_nodes('UPDATE system.schema_usertypes SET field_types=%s' + where_cls, (field_types,)) + with patch.object(self.parser_class, '_build_user_type', side_effect=self.BadMetaException): + self.cluster.refresh_schema_metadata() # presently do not capture these errors on udt direct refresh -- make sure it's contained during full refresh + m = self.cluster.metadata.keyspaces[self.keyspace_name] + self.assertIs(m._exc_info[0], self.BadMetaException) + self.assertIn("/*\nWarning:", m.export_as_string()) + + def test_bad_user_function(self): + self._skip_if_not_version((2, 2, 0)) + self.session.execute("""CREATE FUNCTION IF NOT EXISTS %s (key int, val int) + RETURNS NULL ON NULL INPUT + RETURNS int + LANGUAGE javascript AS 'key + val';""" % self.function_name) + with patch.object(self.parser_class, '_build_function', side_effect=self.BadMetaException): + self.cluster.refresh_schema_metadata() # presently do not capture these errors on udt direct refresh -- make sure it's contained during full refresh + m = self.cluster.metadata.keyspaces[self.keyspace_name] + self.assertIs(m._exc_info[0], self.BadMetaException) + self.assertIn("/*\nWarning:", m.export_as_string()) + + def test_bad_user_aggregate(self): + self._skip_if_not_version((2, 2, 0)) + self.session.execute("""CREATE FUNCTION IF NOT EXISTS sum_int (key int, val int) + RETURNS NULL ON NULL INPUT + RETURNS int + LANGUAGE javascript AS 'key + val';""") + self.session.execute("""CREATE AGGREGATE %s(int) + SFUNC sum_int + STYPE int + INITCOND 0""" % self.function_name) + with patch.object(self.parser_class, '_build_aggregate', side_effect=self.BadMetaException): + self.cluster.refresh_schema_metadata() # presently do not capture these errors on udt direct refresh -- make sure it's contained during full refresh + m = self.cluster.metadata.keyspaces[self.keyspace_name] + self.assertIs(m._exc_info[0], self.BadMetaException) + self.assertIn("/*\nWarning:", m.export_as_string()) From bc2a947c115d8740f4a48a0e855f8ddd32d5767c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 17 Sep 2015 16:39:35 -0500 Subject: [PATCH 0050/2005] Make test_replicas Py3-compatible --- tests/integration/standard/test_metadata.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 09972833c6..d97e32bf82 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -843,8 +843,8 @@ def test_replicas(self): cluster.connect('test3rf') - self.assertNotEqual(list(cluster.metadata.get_replicas('test3rf', 'key')), []) - host = list(cluster.metadata.get_replicas('test3rf', 'key'))[0] + self.assertNotEqual(list(cluster.metadata.get_replicas('test3rf', six.b('key'))), []) + host = list(cluster.metadata.get_replicas('test3rf', six.b('key')))[0] self.assertEqual(host.datacenter, 'dc1') self.assertEqual(host.rack, 'r1') cluster.shutdown() From fd3bb4ed7ad3ecc94070adee9884977c41086621 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 17 Sep 2015 17:00:47 -0500 Subject: [PATCH 0051/2005] test: version Conditional target value for collection indexes --- tests/integration/standard/test_metadata.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index d97e32bf82..f8e7fbc923 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -330,7 +330,8 @@ def test_collection_indexes(self): % (self.ksname, self.cfname)) tablemeta = self.get_table_metadata() - self.assertIn(' (b)', tablemeta.export_as_string()) + target = ' (b)' if CASS_SERVER_VERSION < (3, 0) else 'values(b))' # explicit values in C* 3+ + self.assertIn(target, tablemeta.export_as_string()) # test full indexes on frozen collections, if available if CASS_SERVER_VERSION >= (2, 1, 3): From bc0fe4423e83877f7831c40a2e6fdd4c0e36c553 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 17 Sep 2015 17:01:07 -0500 Subject: [PATCH 0052/2005] Don't run cql compatibility test for cassandra 3.0+ Nothing should be cql-incompatible --- tests/integration/standard/test_metadata.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index f8e7fbc923..2b58b2334d 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -249,6 +249,9 @@ def test_composite_in_compound_primary_key_compact(self): self.check_create_statement(tablemeta, create_statement) def test_cql_compatibility(self): + if CASS_SERVER_VERSION >= (3, 0): + raise unittest.SkipTest("cql compatibility does not apply Cassandra 3.0+") + # having more than one non-PK column is okay if there aren't any # clustering columns create_statement = self.make_create_statement(["a"], [], ["b", "c", "d"], compact=True) From 7166f3de0105c7a0f3c77647b05712519539ac4f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 17 Sep 2015 17:23:07 -0500 Subject: [PATCH 0053/2005] test: compression enabled: false is explicit in C* 3+ --- tests/integration/standard/test_metadata.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 2b58b2334d..fbb329bb83 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -352,7 +352,8 @@ def test_compression_disabled(self): create_statement += " WITH compression = {}" self.session.execute(create_statement) tablemeta = self.get_table_metadata() - self.assertIn("compression = {}", tablemeta.export_as_string()) + expected = "compression = {}" if CASS_SERVER_VERSION < (3, 0) else "compression = {'enabled': 'false'}" + self.assertIn(expected, tablemeta.export_as_string()) def test_non_size_tiered_compaction(self): """ From 40c4807c56f8f103bf3c496a983cb8d1e17c157f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 17 Sep 2015 17:45:37 -0500 Subject: [PATCH 0054/2005] test: don't compare CQL we don't care about, that might change by version --- tests/integration/standard/test_metadata.py | 47 +++++++-------------- 1 file changed, 15 insertions(+), 32 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index fbb329bb83..35cebc5733 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -669,6 +669,15 @@ def assert_equal_diff(self, received, expected): lineterm='')) self.fail(diff_string) + def assert_startswith_diff(self, received, prefix): + if not received.startswith(prefix): + prefix_lines = previx.split('\n') + diff_string = '\n'.join(difflib.unified_diff(prefix_lines, + received.split('\n')[:len(prefix_lines)], + 'EXPECTED', 'RECEIVED', + lineterm='')) + self.fail(diff_string) + def test_export_keyspace_schema_udts(self): """ Test udt exports @@ -714,7 +723,7 @@ def test_export_keyspace_schema_udts(self): addresses map>) """) - expected_string = """CREATE KEYSPACE export_udts WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} AND durable_writes = true; + expected_prefix = """CREATE KEYSPACE export_udts WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} AND durable_writes = true; CREATE TYPE export_udts.street ( street_number int, @@ -733,43 +742,17 @@ def test_export_keyspace_schema_udts(self): CREATE TABLE export_udts.users ( user text PRIMARY KEY, - addresses map> -) WITH bloom_filter_fp_chance = 0.01 - AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' - AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} - AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99.0PERCENTILE';""" + addresses map>""" - self.assert_equal_diff(cluster.metadata.keyspaces['export_udts'].export_as_string(), expected_string) + self.assert_startswith_diff(cluster.metadata.keyspaces['export_udts'].export_as_string(), expected_prefix) table_meta = cluster.metadata.keyspaces['export_udts'].tables['users'] - expected_string = """CREATE TABLE export_udts.users ( + expected_prefix = """CREATE TABLE export_udts.users ( user text PRIMARY KEY, - addresses map> -) WITH bloom_filter_fp_chance = 0.01 - AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' - AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} - AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99.0PERCENTILE';""" + addresses map>""" - self.assert_equal_diff(table_meta.export_as_string(), expected_string) + self.assert_startswith_diff(table_meta.export_as_string(), expected_prefix) cluster.shutdown() From 15b8b2595fc1b79b142da6a58defe899e02c20ea Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 17 Sep 2015 18:15:11 -0500 Subject: [PATCH 0055/2005] Changing tests to use version neutral metadata for keyspace deletion --- tests/integration/long/test_consistency.py | 8 ++++---- tests/integration/long/test_large_data.py | 2 +- .../long/test_loadbalancingpolicies.py | 20 +++++++++---------- tests/integration/long/utils.py | 7 ++----- 4 files changed, 17 insertions(+), 20 deletions(-) diff --git a/tests/integration/long/test_consistency.py b/tests/integration/long/test_consistency.py index 1c860e9930..f46fedce53 100644 --- a/tests/integration/long/test_consistency.py +++ b/tests/integration/long/test_consistency.py @@ -134,7 +134,7 @@ def _test_tokenaware_one_node_down(self, keyspace, rf, accepted): wait_for_up(cluster, 1, wait=False) wait_for_up(cluster, 2) - create_schema(session, keyspace, replication_factor=rf) + create_schema(cluster, session, keyspace, replication_factor=rf) self._insert(session, keyspace, count=1) self._query(session, keyspace, count=1) self.coordinator_stats.assert_query_count_equals(self, 1, 0) @@ -186,7 +186,7 @@ def test_rfthree_tokenaware_none_down(self): wait_for_up(cluster, 1, wait=False) wait_for_up(cluster, 2) - create_schema(session, keyspace, replication_factor=3) + create_schema(cluster, session, keyspace, replication_factor=3) self._insert(session, keyspace, count=1) self._query(session, keyspace, count=1) self.coordinator_stats.assert_query_count_equals(self, 1, 0) @@ -209,7 +209,7 @@ def _test_downgrading_cl(self, keyspace, rf, accepted): protocol_version=PROTOCOL_VERSION) session = cluster.connect() - create_schema(session, keyspace, replication_factor=rf) + create_schema(cluster, session, keyspace, replication_factor=rf) self._insert(session, keyspace, 1) self._query(session, keyspace, 1) self.coordinator_stats.assert_query_count_equals(self, 1, 0) @@ -264,7 +264,7 @@ def test_rfthree_tokenaware_downgradingcl(self): def rfthree_downgradingcl(self, cluster, keyspace, roundrobin): session = cluster.connect() - create_schema(session, keyspace, replication_factor=2) + create_schema(cluster, session, keyspace, replication_factor=2) self._insert(session, keyspace, count=12) self._query(session, keyspace, count=12) diff --git a/tests/integration/long/test_large_data.py b/tests/integration/long/test_large_data.py index 0b77feb0cb..4316a7b9af 100644 --- a/tests/integration/long/test_large_data.py +++ b/tests/integration/long/test_large_data.py @@ -66,7 +66,7 @@ def make_session_and_keyspace(self): session.default_timeout = 20.0 # increase the default timeout session.row_factory = dict_factory - create_schema(session, self.keyspace) + create_schema(cluster, session, self.keyspace) return session def batch_futures(self, session, statement_generator): diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index 465f9d763d..b1a8d4d251 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -140,7 +140,7 @@ def test_roundrobin(self): wait_for_up(cluster, 2, wait=False) wait_for_up(cluster, 3) - create_schema(session, keyspace, replication_factor=3) + create_schema(cluster, session, keyspace, replication_factor=3) self._insert(session, keyspace) self._query(session, keyspace) @@ -182,7 +182,7 @@ def test_roundrobin_two_dcs(self): wait_for_up(cluster, 3, wait=False) wait_for_up(cluster, 4) - create_schema(session, keyspace, replication_strategy=[2, 2]) + create_schema(cluster, session, keyspace, replication_strategy=[2, 2]) self._insert(session, keyspace) self._query(session, keyspace) @@ -222,7 +222,7 @@ def test_roundrobin_two_dcs_2(self): wait_for_up(cluster, 3, wait=False) wait_for_up(cluster, 4) - create_schema(session, keyspace, replication_strategy=[2, 2]) + create_schema(cluster, session, keyspace, replication_strategy=[2, 2]) self._insert(session, keyspace) self._query(session, keyspace) @@ -263,7 +263,7 @@ def test_dc_aware_roundrobin_two_dcs(self): wait_for_up(cluster, 4, wait=False) wait_for_up(cluster, 5) - create_schema(session, keyspace, replication_strategy=[2, 2]) + create_schema(cluster, session, keyspace, replication_strategy=[2, 2]) self._insert(session, keyspace) self._query(session, keyspace) @@ -288,7 +288,7 @@ def test_dc_aware_roundrobin_two_dcs_2(self): wait_for_up(cluster, 4, wait=False) wait_for_up(cluster, 5) - create_schema(session, keyspace, replication_strategy=[2, 2]) + create_schema(cluster, session, keyspace, replication_strategy=[2, 2]) self._insert(session, keyspace) self._query(session, keyspace) @@ -312,7 +312,7 @@ def test_dc_aware_roundrobin_one_remote_host(self): wait_for_up(cluster, 3, wait=False) wait_for_up(cluster, 4) - create_schema(session, keyspace, replication_strategy=[2, 2]) + create_schema(cluster, session, keyspace, replication_strategy=[2, 2]) self._insert(session, keyspace) self._query(session, keyspace) @@ -403,7 +403,7 @@ def token_aware(self, keyspace, use_prepared=False): wait_for_up(cluster, 2, wait=False) wait_for_up(cluster, 3) - create_schema(session, keyspace, replication_factor=1) + create_schema(cluster, session, keyspace, replication_factor=1) self._insert(session, keyspace) self._query(session, keyspace, use_prepared=use_prepared) @@ -479,7 +479,7 @@ def test_token_aware_composite_key(self): wait_for_up(cluster, 2, wait=False) wait_for_up(cluster, 3) - create_schema(session, keyspace, replication_factor=2) + create_schema(cluster, session, keyspace, replication_factor=2) session.execute('CREATE TABLE %s (' 'k1 int, ' 'k2 int, ' @@ -509,7 +509,7 @@ def test_token_aware_with_rf_2(self, use_prepared=False): wait_for_up(cluster, 2, wait=False) wait_for_up(cluster, 3) - create_schema(session, keyspace, replication_factor=2) + create_schema(cluster, session, keyspace, replication_factor=2) self._insert(session, keyspace) self._query(session, keyspace) @@ -556,7 +556,7 @@ def test_white_list(self): wait_for_up(cluster, 2, wait=False) wait_for_up(cluster, 3) - create_schema(session, keyspace) + create_schema(cluster, session, keyspace) self._insert(session, keyspace) self._query(session, keyspace) diff --git a/tests/integration/long/utils.py b/tests/integration/long/utils.py index a3fb098cb5..08957e4d89 100644 --- a/tests/integration/long/utils.py +++ b/tests/integration/long/utils.py @@ -55,15 +55,12 @@ def assert_query_count_equals(self, testcase, node, expected): expected, ip, self.coordinator_counts[ip], dict(self.coordinator_counts))) -def create_schema(session, keyspace, simple_strategy=True, +def create_schema(cluster, session, keyspace, simple_strategy=True, replication_factor=1, replication_strategy=None): row_factory = session.row_factory session.row_factory = named_tuple_factory - results = session.execute( - 'SELECT keyspace_name FROM system.schema_keyspaces') - existing_keyspaces = [row[0] for row in results] - if keyspace in existing_keyspaces: + if keyspace in cluster.metadata.keyspaces.keys(): session.execute('DROP KEYSPACE %s' % keyspace, timeout=20) if simple_strategy: From 1e954277283f3e80132f9f2a26e50a9694d8e60b Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 17 Sep 2015 20:32:18 -0500 Subject: [PATCH 0056/2005] Fixing prepared statement test to work with changes made for CASSANDRA-6237 --- .../standard/test_prepared_statements.py | 69 +++++++++++-------- 1 file changed, 40 insertions(+), 29 deletions(-) diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index 2978003031..7218045cb5 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -23,6 +23,7 @@ from cassandra.cluster import Cluster from cassandra.query import PreparedStatement, UNSET_VALUE +from tests.integration import get_server_versions def setup_module(): @@ -31,6 +32,10 @@ def setup_module(): class PreparedStatementTests(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.cass_version = get_server_versions() + def test_basic(self): """ Test basic PreparedStatement usage @@ -111,18 +116,21 @@ def test_missing_primary_key(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect() - - prepared = session.prepare( - """ - INSERT INTO test3rf.test (v) VALUES (?) - """) - - self.assertIsInstance(prepared, PreparedStatement) - bound = prepared.bind((1,)) - self.assertRaises(InvalidRequest, session.execute, bound) + self._run_missing_primary_key(session) cluster.shutdown() + def _run_missing_primary_key(self, session): + statement_to_prepare = """INSERT INTO test3rf.test (v) VALUES (?)""" + # logic needed work with changes in CASSANDRA-6237 + if self.cass_version[0] >= (3, 0, 0): + self.assertRaises(InvalidRequest, session.prepare, statement_to_prepare) + else: + prepared = session.prepare(statement_to_prepare) + self.assertIsInstance(prepared, PreparedStatement) + bound = prepared.bind((1,)) + self.assertRaises(InvalidRequest, session.execute, bound) + def test_missing_primary_key_dicts(self): """ Ensure an InvalidRequest is thrown @@ -132,36 +140,39 @@ def test_missing_primary_key_dicts(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect() - - prepared = session.prepare( - """ - INSERT INTO test3rf.test (v) VALUES (?) - """) - - self.assertIsInstance(prepared, PreparedStatement) - bound = prepared.bind({'v': 1}) - self.assertRaises(InvalidRequest, session.execute, bound) - + self._run_missing_primary_key_dicts(session) cluster.shutdown() + def _run_missing_primary_key_dicts(self, session): + statement_to_prepare = """ INSERT INTO test3rf.test (v) VALUES (?)""" + # logic needed work with changes in CASSANDRA-6237 + if self.cass_version[0] >= (3, 0, 0): + self.assertRaises(InvalidRequest, session.prepare, statement_to_prepare) + else: + prepared = session.prepare(statement_to_prepare) + self.assertIsInstance(prepared, PreparedStatement) + bound = prepared.bind({'v': 1}) + self.assertRaises(InvalidRequest, session.execute, bound) + def test_too_many_bind_values(self): """ Ensure a ValueError is thrown when attempting to bind too many variables """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect() - - prepared = session.prepare( - """ - INSERT INTO test3rf.test (v) VALUES (?) - """) - - self.assertIsInstance(prepared, PreparedStatement) - self.assertRaises(ValueError, prepared.bind, (1, 2)) - + self._run_too_many_bind_values(session) cluster.shutdown() + def _run_too_many_bind_values(self, session): + statement_to_prepare = """ INSERT INTO test3rf.test (v) VALUES (?)""" + # logic needed work with changes in CASSANDRA-6237 + if self.cass_version[0] >= (3, 0, 0): + self.assertRaises(InvalidRequest, session.prepare, statement_to_prepare) + else: + prepared = session.prepare(statement_to_prepare) + self.assertIsInstance(prepared, PreparedStatement) + self.assertRaises(ValueError, prepared.bind, (1, 2)) + def test_too_many_bind_values_dicts(self): """ Ensure an error is thrown when attempting to bind the wrong values From eea8b59c7f7d7f3293ee71312a667f4a2baa1113 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 18 Sep 2015 10:06:21 -0500 Subject: [PATCH 0057/2005] Updating keyspace removal logic to be 3.0 compatible --- tests/integration/long/test_schema.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/integration/long/test_schema.py b/tests/integration/long/test_schema.py index 6f165cfdde..51a2c72d56 100644 --- a/tests/integration/long/test_schema.py +++ b/tests/integration/long/test_schema.py @@ -54,9 +54,7 @@ def test_recreates(self): for keyspace_number in range(5): keyspace = "ks_{0}".format(keyspace_number) - results = execute_until_pass(session, "SELECT keyspace_name FROM system.schema_keyspaces") - existing_keyspaces = [row[0] for row in results] - if keyspace in existing_keyspaces: + if keyspace in self.cluster.metadata.keyspaces.keys(): drop = "DROP KEYSPACE {0}".format(keyspace) log.debug(drop) execute_until_pass(session, drop) From 379729d9524fb06eeda493b9dd53806f6a0098f3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 18 Sep 2015 15:04:04 -0500 Subject: [PATCH 0058/2005] add crc_check_chance to 3.0 options --- cassandra/metadata.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 1768b7432f..0b5b70d43a 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2057,6 +2057,7 @@ class SchemaParserV3(SchemaParserV22): 'comment', 'compaction', 'compression', + 'crc_check_chance', 'dclocal_read_repair_chance', 'default_time_to_live', 'gc_grace_seconds', From 2803bb734dbd941fb0bb125003d53ab20fbdb0c3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 14 Sep 2015 17:03:59 -0500 Subject: [PATCH 0059/2005] Add where_clause to system_schema.views PYTHON-399 --- cassandra/metadata.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 0b5b70d43a..82287dd64d 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2193,9 +2193,10 @@ def _build_view_metadata(self, row, col_rows=None): view_name = row["view_name"] base_table_name = row["base_table_name"] include_all_columns = row["include_all_columns"] + where_clause = row["where_clause"] col_rows = col_rows or self.keyspace_table_col_rows[keyspace_name][view_name] view_meta = MaterializedViewMetadata(keyspace_name, view_name, base_table_name, - include_all_columns, self._build_table_options(row)) + include_all_columns, where_clause, self._build_table_options(row)) self._build_table_columns(view_meta, col_rows) return view_meta @@ -2396,16 +2397,19 @@ class MaterializedViewMetadata(object): A dict mapping column names to :class:`.ColumnMetadata` instances. """ - include_all_columns = False + include_all_columns = None """ A flag indicating whether the view was created AS SELECT * """ + where_clause = None + """ String WHERE clause for the view select statement. From server metadata """ + options = None """ A dict mapping table option names to their specific settings for this view. """ - def __init__(self, keyspace_name, view_name, base_table_name, include_all_columns, options): + def __init__(self, keyspace_name, view_name, base_table_name, include_all_columns, where_clause, options): self.keyspace_name = keyspace_name self.name = view_name self.base_table_name = base_table_name @@ -2413,6 +2417,7 @@ def __init__(self, keyspace_name, view_name, base_table_name, include_all_column self.clustering_key = [] self.columns = OrderedDict() self.include_all_columns = include_all_columns + self.where_clause = where_clause self.options = options or {} def as_cql_query(self, formatted=False): @@ -2427,7 +2432,7 @@ def as_cql_query(self, formatted=False): selected_cols = '*' if self.include_all_columns else ', '.join(protect_name(col.name) for col in self.columns.values()) base_table = protect_name(self.base_table_name) - where_clause = " AND ".join("%s IS NOT NULL" % protect_name(col.name) for col in self.partition_key + self.clustering_key) + where_clause = self.where_clause part_key = ', '.join(protect_name(col.name) for col in self.partition_key) if len(self.partition_key) > 1: From 921bb984297fb90c90f4376739a5a0ecd73423b1 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Fri, 18 Sep 2015 15:15:59 -0700 Subject: [PATCH 0060/2005] [PYTHON-371] Tests for materialized view metadata --- tests/integration/standard/test_metadata.py | 134 +++++++++++++++++++- 1 file changed, 133 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 35cebc5733..e801f157d9 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -492,7 +492,7 @@ def test_refresh_keyspace_metadata(self): cluster2.shutdown() - def test_refresh_table_metatadata(self): + def test_refresh_table_metadata(self): """ test for synchronously refreshing table metadata @@ -524,6 +524,44 @@ def test_refresh_table_metatadata(self): cluster2.shutdown() + def test_refresh_table_metadata_for_materialized_views(self): + """ + test for synchronously refreshing materialized view metadata + + test_refresh_table_metadata_for_materialized_views tests that materialized view metadata is refreshed when calling + test_refresh_table_metatadata() with the materialized view name as the table. It creates a second cluster object + with schema_event_refresh_window=-1 such that schema refreshes are disabled for schema change push events. + It then creates a new materialized view , using the first cluster object, and verifies that the materialized view + metadata has not changed in the second cluster object. Finally, it calls test_refresh_table_metatadata() with the + materialized view name as the table name, and verifies that the materialized view metadata is updated in the + second cluster object. + + @since 3.0.0 + @jira_ticket PYTHON-371 + @expected_result Materialized view metadata should be refreshed when refresh_table_metadata() is called. + + @test_category metadata + """ + + if CASS_SERVER_VERSION < (3, 0): + raise unittest.SkipTest("Materialized views require Cassandra 3.0+") + + table_name = "test" + self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b text)".format(self.ksname, table_name)) + + cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) + cluster2.connect() + + self.assertNotIn("mv1", cluster2.metadata.keyspaces[self.ksname].tables[table_name].views) + self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT b FROM {0}.{1} WHERE b IS NOT NULL PRIMARY KEY (a, b)" + .format(self.ksname, table_name)) + self.assertNotIn("mv1", cluster2.metadata.keyspaces[self.ksname].tables[table_name].views) + + cluster2.refresh_table_metadata(self.ksname, "mv1") + self.assertIn("mv1", cluster2.metadata.keyspaces[self.ksname].tables[table_name].views) + + cluster2.shutdown() + def test_refresh_user_type_metadata(self): """ test for synchronously refreshing UDT metadata in keyspace @@ -1840,3 +1878,97 @@ def test_bad_user_aggregate(self): m = self.cluster.metadata.keyspaces[self.keyspace_name] self.assertIs(m._exc_info[0], self.BadMetaException) self.assertIn("/*\nWarning:", m.export_as_string()) + +class MaterializedViewMetadataTest(unittest.TestCase): + + ksname = "materialized_view_test" + + def setUp(self): + if CASS_SERVER_VERSION < (3, 0): + raise unittest.SkipTest("Materialized views require Cassandra 3.0+") + + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.session = self.cluster.connect() + execute_until_pass(self.session, + "CREATE KEYSPACE {0} WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': '1'}}" + .format(self.ksname)) + + def tearDown(self): + execute_until_pass(self.session, "DROP KEYSPACE {0}".format(self.ksname)) + self.cluster.shutdown() + + def test_materialized_view_metadata_creation(self): + """ + test for materialized view metadata creation + + test_materialized_view_metadata_creation tests that materialized view metadata properly created implicitly in + both keyspace and table metadata under "views". It creates a simple base table and then creates a view based + on that table. It then checks that the materialized view metadata is contained in the keyspace and table + metadata. Finally, it checks that the keyspace_name and the base_table_name in the view metadata is properly set. + + @since 3.0.0 + @jira_ticket PYTHON-371 + @expected_result Materialized view metadata in both the ks and table should be created with a new view is created. + + @test_category metadata + """ + + self.session.execute("CREATE TABLE {0}.table1 (pk int PRIMARY KEY, c int)".format(self.ksname)) + self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT c FROM {0}.table1 WHERE c IS NOT NULL PRIMARY KEY (pk, c)".format(self.ksname)) + + self.assertIn("mv1", self.cluster.metadata.keyspaces[self.ksname].views) + self.assertIn("mv1", self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views) + + self.assertEqual(self.ksname, self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views["mv1"].keyspace_name) + self.assertEqual("table1", self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views["mv1"].base_table_name) + + def test_materialized_view_metadata_alter(self): + """ + test for materialized view metadata alteration + + test_materialized_view_metadata_alter tests that materialized view metadata is properly updated implicitly in the + table metadata once that view is updated. It creates a simple base table and then creates a view based + on that table. It then alters that materalized view and checks that the materialized view metadata is altered in + the table metadata. + + @since 3.0.0 + @jira_ticket PYTHON-371 + @expected_result Materialized view metadata should be updated with the view is altered. + + @test_category metadata + """ + + self.session.execute("CREATE TABLE {0}.table1 (pk int PRIMARY KEY, c int)".format(self.ksname)) + self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT c FROM {0}.table1 WHERE c IS NOT NULL PRIMARY KEY (pk, c)".format(self.ksname)) + + self.assertRegex(self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views["mv1"].options["compaction"]["class"], + "SizeTieredCompactionStrategy") + + self.session.execute("ALTER MATERIALIZED VIEW {0}.mv1 WITH compaction = {{ 'class' : 'LeveledCompactionStrategy' }}".format(self.ksname)) + self.assertRegex(self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views["mv1"].options["compaction"]["class"], + "LeveledCompactionStrategy") + + def test_materialized_view_metadata_drop(self): + """ + test for materialized view metadata dropping + + test_materialized_view_metadata_drop tests that materialized view metadata is properly removed implicitly in + both keyspace and table metadata once that view is dropped. It creates a simple base table and then creates a view + based on that table. It then drops that materalized view and checks that the materialized view metadata is removed + from the keyspace and table metadata. + + @since 3.0.0 + @jira_ticket PYTHON-371 + @expected_result Materialized view metadata in both the ks and table should be removed with the view is dropped. + + @test_category metadata + """ + + self.session.execute("CREATE TABLE {0}.table1 (pk int PRIMARY KEY, c int)".format(self.ksname)) + self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT c FROM {0}.table1 WHERE c IS NOT NULL PRIMARY KEY (pk, c)".format(self.ksname)) + self.session.execute("DROP MATERIALIZED VIEW {0}.mv1".format(self.ksname)) + + self.assertNotIn("mv1", self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views) + self.assertNotIn("mv1", self.cluster.metadata.keyspaces[self.ksname].views) + self.assertDictEqual({}, self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views) + self.assertDictEqual({}, self.cluster.metadata.keyspaces[self.ksname].views) \ No newline at end of file From b398d778c046b1ebb09646e2ad73d1aff11cf238 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 18 Sep 2015 19:14:11 -0500 Subject: [PATCH 0061/2005] Adding MV metadata tests --- tests/integration/standard/test_metadata.py | 197 +++++++++++++++++++- 1 file changed, 196 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index e801f157d9..16f617263c 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1879,6 +1879,7 @@ def test_bad_user_aggregate(self): self.assertIs(m._exc_info[0], self.BadMetaException) self.assertIn("/*\nWarning:", m.export_as_string()) + class MaterializedViewMetadataTest(unittest.TestCase): ksname = "materialized_view_test" @@ -1971,4 +1972,198 @@ def test_materialized_view_metadata_drop(self): self.assertNotIn("mv1", self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views) self.assertNotIn("mv1", self.cluster.metadata.keyspaces[self.ksname].views) self.assertDictEqual({}, self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views) - self.assertDictEqual({}, self.cluster.metadata.keyspaces[self.ksname].views) \ No newline at end of file + self.assertDictEqual({}, self.cluster.metadata.keyspaces[self.ksname].views) + + def test_create_view_metadata(self): + """ + test to ensure that materialized view metadata is properly constructed + + test_create_view_metadata tests that materialized views metadata is properly constructed. It runs a simple + query to construct a materialized view, then proceeds to inspect the metadata associated with that MV. + Columns are inspected to insure that all are of the proper type, and in the proper type. + + @since 3.0.0 + @jira_ticket PYTHON-371 + @expected_result Materialized view metadata should be constructed appropriately. + + @test_category metadata + """ + create_table = """CREATE TABLE {0}.scores( + user TEXT, + game TEXT, + year INT, + month INT, + day INT, + score INT, + PRIMARY KEY (user, game, year, month, day) + )""".format(self.ksname) + + self.session.execute(create_table) + + create_mv = """CREATE MATERIALIZED VIEW {0}.monthlyhigh AS + SELECT game, year, month, score, user, day FROM {0}.scores + WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL + PRIMARY KEY ((game, year, month), score, user, day) + WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)""".format(self.ksname) + + self.session.execute(create_mv) + score_table = self.cluster.metadata.keyspaces[self.ksname].tables['scores'] + mv = self.cluster.metadata.keyspaces[self.ksname].views['monthlyhigh'] + + self.assertIsNotNone(score_table.views["monthlyhigh"]) + self.assertIsNotNone(len(score_table.views), 1) + + # Make sure user is a partition key, and not null + self.assertEqual(len(score_table.partition_key), 1) + self.assertIsNotNone(score_table.columns['user']) + self.assertTrue(score_table.columns['user'], score_table.partition_key[0]) + + # Validate clustering keys + self.assertEqual(len(score_table.clustering_key), 4) + + self.assertIsNotNone(score_table.columns['game']) + self.assertTrue(score_table.columns['game'], score_table.clustering_key[0]) + + self.assertIsNotNone(score_table.columns['year']) + self.assertTrue(score_table.columns['year'], score_table.clustering_key[1]) + + self.assertIsNotNone(score_table.columns['month']) + self.assertTrue(score_table.columns['month'], score_table.clustering_key[2]) + + self.assertIsNotNone(score_table.columns['day']) + self.assertTrue(score_table.columns['day'], score_table.clustering_key[3]) + + self.assertIsNotNone(score_table.columns['score']) + + # Validate basic mv information + self.assertEquals(mv.keyspace_name, self.ksname) + self.assertEquals(mv.name, "monthlyhigh") + self.assertEquals(mv.base_table_name, "scores") + self.assertFalse(mv.include_all_columns) + + # Validate that all columns are preset and correct + mv_columns = mv.columns.values() + self.assertEquals(len(mv_columns), 6) + + game_column = mv_columns[0] + self.assertIsNotNone(game_column) + self.assertEquals(game_column.name, 'game') + self.assertEquals(game_column, mv.partition_key[0]) + + year_column = mv_columns[1] + self.assertIsNotNone(year_column) + self.assertEquals(year_column.name, 'year') + self.assertEquals(year_column, mv.partition_key[1]) + + month_column = mv_columns[2] + self.assertIsNotNone(month_column) + self.assertEquals(month_column.name, 'month') + self.assertEquals(month_column, mv.partition_key[2]) + + score_column = mv_columns[3] + self.assertIsNotNone(score_column) + self.assertEquals(score_column.name, 'score') + self.assertEquals(score_column.name, mv.clustering_key[0].name) + self.assertEquals(score_column.table, mv.clustering_key[0].table) + self.assertEquals(score_column.data_type, mv.clustering_key[0].data_type) + self.assertEquals(score_column.index, mv.clustering_key[0].index) + self.assertEquals(score_column.is_static, mv.clustering_key[0].is_static) + self.assertEquals(score_column.is_reversed, mv.clustering_key[0].is_reversed) + + user_column = mv_columns[4] + self.assertIsNotNone(user_column) + self.assertEquals(user_column.name, 'user') + self.assertEquals(user_column.name, mv.clustering_key[1].name) + self.assertEquals(user_column.table, mv.clustering_key[1].table) + self.assertEquals(user_column.data_type, mv.clustering_key[1].data_type) + self.assertEquals(user_column.index, mv.clustering_key[1].index) + self.assertEquals(user_column.is_static, mv.clustering_key[1].is_static) + self.assertEquals(user_column.is_reversed, mv.clustering_key[1].is_reversed) + + day_column = mv_columns[5] + self.assertIsNotNone(day_column) + self.assertEquals(day_column.name, 'day') + self.assertEquals(day_column.name, mv.clustering_key[2].name) + self.assertEquals(day_column.table, mv.clustering_key[2].table) + self.assertEquals(day_column.data_type, mv.clustering_key[2].data_type) + self.assertEquals(day_column.index, mv.clustering_key[2].index) + self.assertEquals(day_column.is_static, mv.clustering_key[2].is_static) + self.assertEquals(day_column.is_reversed, mv.clustering_key[2].is_reversed) + + def test_metadata_with_quoted_identifiers(self): + """ + test to ensure that materialized view metadata is properly constructed when quoted identifiers are used + + test_metadata_with_quoted_identifiers tests that materialized views metadata is properly constructed. + It runs a simple query to construct a materialized view, then proceeds to inspect the metadata associated with + that MV. The caveat here is that the tables and the materialized view both have quoted identifiers + Columns are inspected to insure that all are of the proper type, and in the proper type. + + @since 3.0.0 + @jira_ticket PYTHON-371 + @expected_result Materialized view metadata should be constructed appropriately even with quoted identifiers. + + @test_category metadata + """ + create_table = """CREATE TABLE {0}.t1 ( + "theKey" int, + "the;Clustering" int, + "the Value" int, + PRIMARY KEY ("theKey", "the;Clustering"))""".format(self.ksname) + + self.session.execute(create_table) + + create_mv = """CREATE MATERIALIZED VIEW {0}.mv1 AS + SELECT "theKey", "the;Clustering", "the Value" + FROM {0}.t1 + WHERE "theKey" IS NOT NULL AND "the;Clustering" IS NOT NULL AND "the Value" IS NOT NULL + PRIMARY KEY ("theKey", "the;Clustering")""".format(self.ksname) + + self.session.execute(create_mv) + + t1_table = self.cluster.metadata.keyspaces[self.ksname].tables['t1'] + mv = self.cluster.metadata.keyspaces[self.ksname].views['mv1'] + + self.assertIsNotNone(t1_table.views["mv1"]) + self.assertIsNotNone(len(t1_table.views), 1) + + # Validate partition key, and not null + self.assertEqual(len(t1_table.partition_key), 1) + self.assertIsNotNone(t1_table.columns['theKey']) + self.assertTrue(t1_table.columns['theKey'], t1_table.partition_key[0]) + + # Validate clustering key column + self.assertEqual(len(t1_table.clustering_key), 1) + self.assertIsNotNone(t1_table.columns['the;Clustering']) + self.assertTrue(t1_table.columns['the;Clustering'], t1_table.clustering_key[0]) + + # Validate regular column + self.assertIsNotNone(t1_table.columns['the Value']) + + # Validate basic mv information + self.assertEquals(mv.keyspace_name, self.ksname) + self.assertEquals(mv.name, "mv1") + self.assertEquals(mv.base_table_name, "t1") + self.assertFalse(mv.include_all_columns) + + # Validate that all columns are preset and correct + mv_columns = mv.columns.values() + self.assertEquals(len(mv_columns), 3) + + theKey_column = mv_columns[0] + self.assertIsNotNone(theKey_column) + self.assertEquals(theKey_column.name, 'theKey') + self.assertEquals(theKey_column, mv.partition_key[0]) + + cluster_column = mv_columns[1] + self.assertIsNotNone(cluster_column) + self.assertEquals(cluster_column.name, 'the;Clustering') + self.assertEquals(cluster_column.name, mv.clustering_key[0].name) + self.assertEquals(cluster_column.table, mv.clustering_key[0].table) + self.assertEquals(cluster_column.index, mv.clustering_key[0].index) + self.assertEquals(cluster_column.is_static, mv.clustering_key[0].is_static) + self.assertEquals(cluster_column.is_reversed, mv.clustering_key[0].is_reversed) + + value_column = mv_columns[2] + self.assertIsNotNone(value_column) + self.assertEquals(value_column.name, 'the Value') \ No newline at end of file From 232e3f07317afd20c9726f95a4d677567d9cb7c2 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Sat, 19 Sep 2015 14:55:42 -0500 Subject: [PATCH 0062/2005] Making CQL compatible for 1.2.x --- tests/integration/standard/test_metadata.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 16f617263c..b8dc3c6189 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1804,14 +1804,14 @@ def setup_class(cls): cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) cls.keyspace_name = cls.__name__.lower() cls.session = cls.cluster.connect() - cls.session.execute("CREATE KEYSPACE IF NOT EXISTS %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}" % cls.keyspace_name) + cls.session.execute("CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}" % cls.keyspace_name) cls.session.set_keyspace(cls.keyspace_name) connection = cls.cluster.control_connection._connection cls.parser_class = get_schema_parser(connection, timeout=20).__class__ @classmethod def teardown_class(cls): - cls.session.execute("DROP KEYSPACE IF EXISTS %s" % cls.keyspace_name) + cls.session.execute("DROP KEYSPACE %s" % cls.keyspace_name) cls.cluster.shutdown() def _skip_if_not_version(self, version): From a1574e0297d52f2b910514476d827f4f7045b416 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Sun, 20 Sep 2015 11:25:29 -0500 Subject: [PATCH 0063/2005] Changing to test setup to use protocol V1 instead of protocol downgrade logic --- tests/integration/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index b29aa00ff6..92ebd0a410 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -59,7 +59,7 @@ def get_server_versions(): if cass_version is not None: return (cass_version, cql_version) - c = Cluster() + c = Cluster(protocol_version=1) s = c.connect() row = s.execute('SELECT cql_version, release_version FROM system.local')[0] From 5e7807c167f64dd57fed55fb88a5147aa68fbeb1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 18 Sep 2015 17:06:05 -0500 Subject: [PATCH 0064/2005] cqle: Update for 3.0 compatibility WIP Mostly needs test updates --- cassandra/cqlengine/management.py | 159 ++++-------------- cassandra/cqlengine/models.py | 40 +---- cassandra/metadata.py | 5 +- .../management/test_compaction_settings.py | 60 +++---- tests/integration/cqlengine/test_ttl.py | 2 +- 5 files changed, 66 insertions(+), 200 deletions(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 41d714107b..2861b2a956 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -211,7 +211,7 @@ def sync_table(model): # check for an existing column family if raw_cf_name not in tables: log.debug("sync_table creating new table %s", cf_name) - qs = get_create_table(model) + qs = _get_create_table(model) try: execute(qs) @@ -223,8 +223,7 @@ def sync_table(model): else: log.debug("sync_table checking existing table %s", cf_name) # see if we're missing any columns - fields = get_fields(model) - field_names = [x.name for x in fields] + field_names = _get_non_pk_field_names(tables[raw_cf_name]) model_fields = set() # # TODO: does this work with db_name?? for name, col in model._columns.items(): @@ -242,7 +241,7 @@ def sync_table(model): if db_fields_not_in_model: log.info("Table %s has fields not referenced by model: %s", cf_name, db_fields_not_in_model) - update_compaction(model) + _update_options(model) table = cluster.metadata.keyspaces[ks_name].tables[raw_cf_name] @@ -331,9 +330,14 @@ def get_create_type(type_model, keyspace): return type_meta.as_cql_query() -def get_create_table(model): - cf_name = model.column_family_name() - qs = ['CREATE TABLE {0}'.format(cf_name)] +def _get_create_table(model): + ks_table_name = model.column_family_name() + query_strings = ['CREATE TABLE {0}'.format(ks_table_name)] + + # Need to use an existing table to make sure we're using the right + # metadata class + cluster = get_cluster() + existing_meta = cluster.metadata.keyspaces['system'].tables['local'] # add column types pkeys = [] # primary keys @@ -352,102 +356,30 @@ def add_column(col): qtypes.append('PRIMARY KEY (({0}){1})'.format(', '.join(pkeys), ckeys and ', ' + ', '.join(ckeys) or '')) - qs += ['({0})'.format(', '.join(qtypes))] - - with_qs = [] + query_strings += ['({0})'.format(', '.join(qtypes))] - table_properties = ['bloom_filter_fp_chance', 'caching', 'comment', - 'dclocal_read_repair_chance', 'default_time_to_live', 'gc_grace_seconds', - 'index_interval', 'memtable_flush_period_in_ms', 'populate_io_cache_on_flush', - 'read_repair_chance', 'replicate_on_write'] - for prop_name in table_properties: - prop_value = getattr(model, '__{0}__'.format(prop_name), None) - if prop_value is not None: - # Strings needs to be single quoted - if isinstance(prop_value, six.string_types): - prop_value = "'{0}'".format(prop_value) - with_qs.append("{0} = {1}".format(prop_name, prop_value)) + property_strings = [] _order = ['"{0}" {1}'.format(c.db_field_name, c.clustering_order or 'ASC') for c in model._clustering_keys.values()] if _order: - with_qs.append('clustering order by ({0})'.format(', '.join(_order))) - - compaction_options = get_compaction_options(model) - if compaction_options: - compaction_options = json.dumps(compaction_options).replace('"', "'") - with_qs.append("compaction = {0}".format(compaction_options)) - - # Add table properties. - if with_qs: - qs += ['WITH {0}'.format(' AND '.join(with_qs))] - - qs = ' '.join(qs) - return qs - - -def get_compaction_options(model): - """ - Generates dictionary (later converted to a string) for creating and altering - tables with compaction strategy - - :param model: - :return: - """ - if not model.__compaction__: - return {} - - result = {'class': model.__compaction__} - - def setter(key, limited_to_strategy=None): - """ - sets key in result, checking if the key is limited to either SizeTiered or Leveled - :param key: one of the compaction options, like "bucket_high" - :param limited_to_strategy: SizeTieredCompactionStrategy, LeveledCompactionStrategy - :return: - """ - mkey = "__compaction_{0}__".format(key) - tmp = getattr(model, mkey) - if tmp and limited_to_strategy and limited_to_strategy != model.__compaction__: - raise CQLEngineException("{0} is limited to {1}".format(key, limited_to_strategy)) + property_strings.append('CLUSTERING ORDER BY ({0})'.format(', '.join(_order))) - if tmp: - # Explicitly cast the values to strings to be able to compare the - # values against introspected values from Cassandra. - result[key] = str(tmp) + property_strings += existing_meta._make_option_strings(model.__options__ or {}) - setter('tombstone_compaction_interval') - setter('tombstone_threshold') + if property_strings: + query_strings += ['WITH {0}'.format(' AND '.join(property_strings))] - setter('bucket_high', SizeTieredCompactionStrategy) - setter('bucket_low', SizeTieredCompactionStrategy) - setter('max_threshold', SizeTieredCompactionStrategy) - setter('min_threshold', SizeTieredCompactionStrategy) - setter('min_sstable_size', SizeTieredCompactionStrategy) + return ' '.join(query_strings) - setter('sstable_size_in_mb', LeveledCompactionStrategy) - return result - - -def get_fields(model): +def _get_non_pk_field_names(table_meta): # returns all fields that aren't part of the PK - ks_name = model._get_keyspace() - col_family = model._raw_column_family_name() - field_types = ['regular', 'static'] - query = "select * from system.schema_columns where keyspace_name = %s and columnfamily_name = %s" - tmp = execute(query, [ks_name, col_family]) - - # Tables containing only primary keys do not appear to create - # any entries in system.schema_columns, as only non-primary-key attributes - # appear to be inserted into the schema_columns table - try: - return [Field(x['column_name'], x['validator']) for x in tmp if x['type'] in field_types] - except KeyError: - return [Field(x['column_name'], x['validator']) for x in tmp] - # convert to Field named tuples + pk_names = set(col.name for col in table_meta.primary_key) + field_names = [name for name in table_meta.columns.keys() if name not in pk_names] + return field_names -def get_table_settings(model): +def _get_table_metadata(model): # returns the table as provided by the native driver for a given model cluster = get_cluster() ks = model._get_keyspace() @@ -456,47 +388,26 @@ def get_table_settings(model): return table -def update_compaction(model): - """Updates the compaction options for the given model if necessary. +def _update_options(model): + """Updates the table options for the given model if necessary. :param model: The model to update. - :return: `True`, if the compaction options were modified in Cassandra, + :return: `True`, if the options were modified in Cassandra, `False` otherwise. :rtype: bool """ - log.debug("Checking %s for compaction differences", model) - table = get_table_settings(model) - - existing_options = table.options.copy() - - existing_compaction_strategy = existing_options['compaction_strategy_class'] - - existing_options = json.loads(existing_options['compaction_strategy_options']) - - desired_options = get_compaction_options(model) - - desired_compact_strategy = desired_options.get('class', SizeTieredCompactionStrategy) - - desired_options.pop('class', None) - - do_update = False - - if desired_compact_strategy not in existing_compaction_strategy: - do_update = True + log.debug("Checking %s for option differences", model) + table_meta = _get_table_metadata(model) + existing_options = table_meta.options - for k, v in desired_options.items(): - val = existing_options.pop(k, None) - if val != v: - do_update = True + model_options = model.__options__ or {} + update_options = dict((k, v) for k, v in model_options if existing_options[k] != v) - # check compaction_strategy_options - if do_update: - options = get_compaction_options(model) - # jsonify - options = json.dumps(options).replace('"', "'") - cf_name = model.column_family_name() - query = "ALTER TABLE {0} with compaction = {1}".format(cf_name, options) + if update_options: + option_strings = table_meta._make_option_strings(update_options) + options = ' AND '.join(option_strings) + query = "ALTER TABLE {0} WITH {1}".format(model.column_family_name(), options) execute(query) return True diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index ddb7945995..975773067a 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -310,46 +310,17 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): __keyspace__ = None - __default_ttl__ = None - __polymorphic_key__ = None # DEPRECATED __discriminator_value__ = None - # compaction options - __compaction__ = None - __compaction_tombstone_compaction_interval__ = None - __compaction_tombstone_threshold__ = None - - # compaction - size tiered options - __compaction_bucket_high__ = None - __compaction_bucket_low__ = None - __compaction_max_threshold__ = None - __compaction_min_threshold__ = None - __compaction_min_sstable_size__ = None - - # compaction - leveled options - __compaction_sstable_size_in_mb__ = None + __options__ = None - # end compaction # the queryset class used for this class __queryset__ = query.ModelQuerySet __dmlquery__ = query.DMLQuery __consistency__ = None # can be set per query - # Additional table properties - __bloom_filter_fp_chance__ = None - __caching__ = None - __comment__ = None - __dclocal_read_repair_chance__ = None - __default_time_to_live__ = None - __gc_grace_seconds__ = None - __index_interval__ = None - __memtable_flush_period_in_ms__ = None - __populate_io_cache_on_flush__ = None - __read_repair_chance__ = None - __replicate_on_write__ = None - _timestamp = None # optional timestamp to include with the operation (USING TIMESTAMP) _if_not_exists = False # optional if_not_exists flag to check existence before insertion @@ -791,6 +762,9 @@ def __new__(cls, name, bases, attrs): attrs['__discriminator_value__'] = attrs.get('__discriminator_value__', poly_key) attrs['__polymorphic_key__'] = attrs['__discriminator_value__'] + options = attrs.get('__options__') or {} + attrs['__default_ttl__'] = options.get('default_time_to_live') + def _transform_column(col_name, col_obj): column_dict[col_name] = col_obj if col_obj.primary_key: @@ -965,11 +939,11 @@ class Model(BaseModel): Sets the name of the keyspace used by this model. """ - __default_ttl__ = None + __options__ = None """ - *Optional* The default ttl used by this model. + *Optional* Table options applied with this model - This can be overridden by using the :meth:`~.ttl` method. + (e.g. compaction, default ttl, cache settings, tec.) """ __polymorphic_key__ = None diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 82287dd64d..c0186f72e9 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2292,8 +2292,9 @@ def _make_option_strings(cls, options_map): for option in cls.option_maps: value = options_copy.pop(option, {}) - params = ("'%s': '%s'" % (k, v) for k, v in value.items()) - ret.append("%s = {%s}" % (option, ', '.join(params))) + if value: + params = ("'%s': '%s'" % (k, v) for k, v in value.items()) + ret.append("%s = {%s}" % (option, ', '.join(params))) for name, value in options_copy.items(): if value is not None: diff --git a/tests/integration/cqlengine/management/test_compaction_settings.py b/tests/integration/cqlengine/management/test_compaction_settings.py index df443bf86f..7f037a8ce0 100644 --- a/tests/integration/cqlengine/management/test_compaction_settings.py +++ b/tests/integration/cqlengine/management/test_compaction_settings.py @@ -18,7 +18,7 @@ from cassandra.cqlengine import columns, SizeTieredCompactionStrategy, LeveledCompactionStrategy from cassandra.cqlengine import CQLEngineException -from cassandra.cqlengine.management import get_compaction_options, drop_table, sync_table, get_table_settings +from cassandra.cqlengine.management import drop_table, sync_table, _get_table_metadata from cassandra.cqlengine.models import Model from tests.integration.cqlengine.base import BaseCassEngTestCase @@ -64,34 +64,13 @@ def setUp(self): self.model = copy.deepcopy(CompactionLeveledStrategyModel) def test_simple_leveled(self): - result = get_compaction_options(self.model) assert result['class'] == LeveledCompactionStrategy - - def test_bucket_high_fails(self): - self.assert_option_fails('bucket_high') - - def test_bucket_low_fails(self): - self.assert_option_fails('bucket_low') - - def test_max_threshold_fails(self): - self.assert_option_fails('max_threshold') - - def test_min_threshold_fails(self): - self.assert_option_fails('min_threshold') - - def test_min_sstable_size_fails(self): - self.assert_option_fails('min_sstable_size') - - def test_sstable_size_in_mb(self): - with patch.object(self.model, '__compaction_sstable_size_in_mb__', 32): - result = get_compaction_options(self.model) - assert result['sstable_size_in_mb'] == '32' class LeveledcompactionTestTable(Model): - __compaction__ = LeveledCompactionStrategy + __options__ = {} # TODO __compaction_sstable_size_in_mb__ = 64 user_id = columns.UUID(primary_key=True) @@ -203,13 +182,18 @@ class OptionsTest(BaseCassEngTestCase): def test_all_size_tiered_options(self): class AllSizeTieredOptionsModel(Model): - - __compaction__ = SizeTieredCompactionStrategy - __compaction_bucket_low__ = .3 - __compaction_bucket_high__ = 2 - __compaction_min_threshold__ = 2 - __compaction_max_threshold__ = 64 - __compaction_tombstone_compaction_interval__ = 86400 + #__options__ = {'compaction': {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', + # 'bucket_low': '.3', + # 'bucket_high': '2', + # 'min_threshold': '2', + # 'max_threshold': '64', + # 'tombstone_compaction_interval': '86400'}} + __options__ = {'compaction': """{'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', + 'bucket_low': '.3', + 'bucket_high': '2', + 'min_threshold': '2', + 'max_threshold': '64', + 'tombstone_compaction_interval': '86400'}"""} cid = columns.UUID(primary_key=True) name = columns.Text() @@ -217,18 +201,14 @@ class AllSizeTieredOptionsModel(Model): drop_table(AllSizeTieredOptionsModel) sync_table(AllSizeTieredOptionsModel) - options = get_table_settings(AllSizeTieredOptionsModel).options['compaction_strategy_options'] - options = json.loads(options) - - expected = {u'min_threshold': u'2', - u'bucket_low': u'0.3', - u'tombstone_compaction_interval': u'86400', - u'bucket_high': u'2', - u'max_threshold': u'64'} + table_meta = _get_table_metadata(AllSizeTieredOptionsModel) + cql = table_meta.export_as_string() + expected = table_meta._make_option_strings(AllSizeTieredOptionsModel.__options__) + for option in expected: + self.assertIn(option, cql) - self.assertDictEqual(options, expected) - def test_all_leveled_options(self): + def xtest_all_leveled_options(self): class AllLeveledOptionsModel(Model): diff --git a/tests/integration/cqlengine/test_ttl.py b/tests/integration/cqlengine/test_ttl.py index 3d091e5337..e86b2cbf38 100644 --- a/tests/integration/cqlengine/test_ttl.py +++ b/tests/integration/cqlengine/test_ttl.py @@ -41,7 +41,7 @@ def tearDownClass(cls): class TestDefaultTTLModel(Model): - __default_ttl__ = 20 + __options__ = {'default_time_to_live': 20} id = columns.UUID(primary_key=True, default=lambda:uuid4()) count = columns.Integer() text = columns.Text(required=False) From 6f38f1b950a74f469825323121b954ecdbba1d91 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 21 Sep 2015 07:49:51 -0700 Subject: [PATCH 0065/2005] cqle: more mgmt changes and test upates for updated options api --- cassandra/cqlengine/__init__.py | 4 - cassandra/cqlengine/management.py | 60 +++++-- cassandra/metadata.py | 8 +- .../management/test_compaction_settings.py | 169 ++++++------------ .../cqlengine/management/test_management.py | 121 +++---------- .../cqlengine/model/test_polymorphism.py | 4 +- 6 files changed, 132 insertions(+), 234 deletions(-) diff --git a/cassandra/cqlengine/__init__.py b/cassandra/cqlengine/__init__.py index 38a02fd3ec..aeec9be086 100644 --- a/cassandra/cqlengine/__init__.py +++ b/cassandra/cqlengine/__init__.py @@ -15,10 +15,6 @@ import six -# compaction -SizeTieredCompactionStrategy = "SizeTieredCompactionStrategy" -LeveledCompactionStrategy = "LeveledCompactionStrategy" - # Caching constants. CACHING_ALL = "ALL" CACHING_KEYS_ONLY = "KEYS_ONLY" diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 2861b2a956..648e870554 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -20,7 +20,7 @@ import warnings from cassandra import metadata -from cassandra.cqlengine import CQLEngineException, SizeTieredCompactionStrategy, LeveledCompactionStrategy +from cassandra.cqlengine import CQLEngineException from cassandra.cqlengine import columns from cassandra.cqlengine.connection import execute, get_cluster from cassandra.cqlengine.models import Model @@ -247,11 +247,13 @@ def sync_table(model): indexes = [c for n, c in model._columns.items() if c.index] + # TODO: support multiple indexes in C* 3.0+ for column in indexes: - if table.columns[column.db_field_name].index: + index_name = 'index_{0}_{1}'.format(raw_cf_name, column.db_field_name) + if table.indexes.get(index_name): continue - qs = ['CREATE INDEX index_{0}_{1}'.format(raw_cf_name, column.db_field_name)] + qs = ['CREATE INDEX {0}'.format(index_name)] qs += ['ON {0}'.format(cf_name)] qs += ['("{0}")'.format(column.db_field_name)] qs = ' '.join(qs) @@ -334,11 +336,6 @@ def _get_create_table(model): ks_table_name = model.column_family_name() query_strings = ['CREATE TABLE {0}'.format(ks_table_name)] - # Need to use an existing table to make sure we're using the right - # metadata class - cluster = get_cluster() - existing_meta = cluster.metadata.keyspaces['system'].tables['local'] - # add column types pkeys = [] # primary keys ckeys = [] # clustering keys @@ -364,7 +361,8 @@ def add_column(col): if _order: property_strings.append('CLUSTERING ORDER BY ({0})'.format(', '.join(_order))) - property_strings += existing_meta._make_option_strings(model.__options__ or {}) + # options strings use the V3 format, which matches CQL more closely and does not require mapping + property_strings += metadata.TableMetadataV3._make_option_strings(model.__options__ or {}) if property_strings: query_strings += ['WITH {0}'.format(' AND '.join(property_strings))] @@ -388,6 +386,21 @@ def _get_table_metadata(model): return table +def _options_map_from_strings(option_strings): + # converts options strings to a mapping to strings or dict + options = {} + for option in option_strings: + name, value = option.split('=') + i = value.find('{') + if i >= 0: + value = value[i:value.rfind('}') + 1].replace("'", '"') # from cql single quotes to json double; not aware of any values that would be escaped right now + value = json.loads(value) + else: + value = value.strip() + options[name.strip()] = value + return options + + def _update_options(model): """Updates the table options for the given model if necessary. @@ -398,15 +411,32 @@ def _update_options(model): :rtype: bool """ log.debug("Checking %s for option differences", model) - table_meta = _get_table_metadata(model) - existing_options = table_meta.options - model_options = model.__options__ or {} - update_options = dict((k, v) for k, v in model_options if existing_options[k] != v) + + table_meta = _get_table_metadata(model) + # go to CQL string first to normalize meta from different versions + existing_option_strings = set(table_meta._make_option_strings(table_meta.options)) + existing_options = _options_map_from_strings(existing_option_strings) + model_option_strings = metadata.TableMetadataV3._make_option_strings(model_options) + model_options = _options_map_from_strings(model_option_strings) + + update_options = {} + for name, value in model_options.items(): + existing_value = existing_options[name] + if isinstance(existing_value, six.string_types): + if value != existing_value: + update_options[name] = value + else: + try: + for k, v in value.items(): + if existing_value[k] != v: + update_options[name] = value + break + except KeyError: + update_options[name] = value if update_options: - option_strings = table_meta._make_option_strings(update_options) - options = ' AND '.join(option_strings) + options = ' AND '.join(metadata.TableMetadataV3._make_option_strings(update_options)) query = "ALTER TABLE {0} WITH {1}".format(model.column_family_name(), options) execute(query) return True diff --git a/cassandra/metadata.py b/cassandra/metadata.py index c0186f72e9..4251f00f3b 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -13,7 +13,7 @@ # limitations under the License. from bisect import bisect_right -from collections import defaultdict +from collections import defaultdict, Mapping from hashlib import md5 from itertools import islice, cycle import json @@ -1252,6 +1252,7 @@ class ColumnMetadata(object): the type classes in :mod:`cassandra.cqltypes`. """ + # TODO: probably remove this in favor of the table level mapping index = None """ If an index exists on this column, this is an instance of @@ -2291,8 +2292,9 @@ def _make_option_strings(cls, options_map): options_copy = dict(options_map.items()) for option in cls.option_maps: - value = options_copy.pop(option, {}) - if value: + value = options_copy.get(option) + if isinstance(value, Mapping): + del options_copy[option] params = ("'%s': '%s'" % (k, v) for k, v in value.items()) ret.append("%s = {%s}" % (option, ', '.join(params))) diff --git a/tests/integration/cqlengine/management/test_compaction_settings.py b/tests/integration/cqlengine/management/test_compaction_settings.py index 7f037a8ce0..85196d330b 100644 --- a/tests/integration/cqlengine/management/test_compaction_settings.py +++ b/tests/integration/cqlengine/management/test_compaction_settings.py @@ -13,12 +13,11 @@ # limitations under the License. import copy -import json from mock import patch +import six -from cassandra.cqlengine import columns, SizeTieredCompactionStrategy, LeveledCompactionStrategy -from cassandra.cqlengine import CQLEngineException -from cassandra.cqlengine.management import drop_table, sync_table, _get_table_metadata +from cassandra.cqlengine import columns +from cassandra.cqlengine.management import drop_table, sync_table, _get_table_metadata, _update_options from cassandra.cqlengine.models import Model from tests.integration.cqlengine.base import BaseCassEngTestCase @@ -31,47 +30,10 @@ class CompactionModel(Model): name = columns.Text() -class BaseCompactionTest(BaseCassEngTestCase): - def assert_option_fails(self, key): - # key is a normal_key, converted to - # __compaction_key__ - - key = "__compaction_{0}__".format(key) - - with patch.object(self.model, key, 10): - with self.assertRaises(CQLEngineException): - get_compaction_options(self.model) - - -class SizeTieredCompactionTest(BaseCompactionTest): - - def setUp(self): - self.model = copy.deepcopy(CompactionModel) - self.model.__compaction__ = SizeTieredCompactionStrategy - - def test_size_tiered(self): - result = get_compaction_options(self.model) - assert result['class'] == SizeTieredCompactionStrategy - - def test_min_threshold(self): - self.model.__compaction_min_threshold__ = 2 - result = get_compaction_options(self.model) - assert result['min_threshold'] == '2' - - -class LeveledCompactionTest(BaseCompactionTest): - def setUp(self): - self.model = copy.deepcopy(CompactionLeveledStrategyModel) - - def test_simple_leveled(self): - assert result['class'] == LeveledCompactionStrategy - assert result['sstable_size_in_mb'] == '32' - - class LeveledcompactionTestTable(Model): - __options__ = {} # TODO - __compaction_sstable_size_in_mb__ = 64 + __options__ = {'compaction': {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', + 'sstable_size_in_mb': '64'}} user_id = columns.UUID(primary_key=True) name = columns.Text() @@ -82,118 +44,98 @@ class AlterTableTest(BaseCassEngTestCase): def test_alter_is_called_table(self): drop_table(LeveledcompactionTestTable) sync_table(LeveledcompactionTestTable) - with patch('cassandra.cqlengine.management.update_compaction') as mock: + with patch('cassandra.cqlengine.management._update_options') as mock: sync_table(LeveledcompactionTestTable) assert mock.called == 1 def test_compaction_not_altered_without_changes_leveled(self): - from cassandra.cqlengine.management import update_compaction class LeveledCompactionChangesDetectionTest(Model): - __compaction__ = LeveledCompactionStrategy - __compaction_sstable_size_in_mb__ = 160 - __compaction_tombstone_threshold__ = 0.125 - __compaction_tombstone_compaction_interval__ = 3600 - + __options__ = {'compaction': {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', + 'sstable_size_in_mb': '160', + 'tombstone_threshold': '0.125', + 'tombstone_compaction_interval': '3600'}} pk = columns.Integer(primary_key=True) drop_table(LeveledCompactionChangesDetectionTest) sync_table(LeveledCompactionChangesDetectionTest) - assert not update_compaction(LeveledCompactionChangesDetectionTest) + self.assertFalse(_update_options(LeveledCompactionChangesDetectionTest)) def test_compaction_not_altered_without_changes_sizetiered(self): - from cassandra.cqlengine.management import update_compaction - class SizeTieredCompactionChangesDetectionTest(Model): - __compaction__ = SizeTieredCompactionStrategy - __compaction_bucket_high__ = 20 - __compaction_bucket_low__ = 10 - __compaction_max_threshold__ = 200 - __compaction_min_threshold__ = 100 - __compaction_min_sstable_size__ = 1000 - __compaction_tombstone_threshold__ = 0.125 - __compaction_tombstone_compaction_interval__ = 3600 - + __options__ = {'compaction': {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', + 'bucket_high': '20', + 'bucket_low': '10', + 'max_threshold': '200', + 'min_threshold': '100', + 'min_sstable_size': '1000', + 'tombstone_threshold': '0.125', + 'tombstone_compaction_interval': '3600'}} pk = columns.Integer(primary_key=True) drop_table(SizeTieredCompactionChangesDetectionTest) sync_table(SizeTieredCompactionChangesDetectionTest) - assert not update_compaction(SizeTieredCompactionChangesDetectionTest) + self.assertFalse(_update_options(SizeTieredCompactionChangesDetectionTest)) def test_alter_actually_alters(self): tmp = copy.deepcopy(LeveledcompactionTestTable) drop_table(tmp) sync_table(tmp) - tmp.__compaction__ = SizeTieredCompactionStrategy - tmp.__compaction_sstable_size_in_mb__ = None + tmp.__options__ = {'compaction': {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'}} sync_table(tmp) - table_settings = get_table_settings(tmp) + table_meta = _get_table_metadata(tmp) - self.assertRegexpMatches(table_settings.options['compaction_strategy_class'], '.*SizeTieredCompactionStrategy$') + self.assertRegexpMatches(table_meta.export_as_string(), '.*SizeTieredCompactionStrategy.*') def test_alter_options(self): class AlterTable(Model): - __compaction__ = LeveledCompactionStrategy - __compaction_sstable_size_in_mb__ = 64 - + __options__ = {'compaction': {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', + 'sstable_size_in_mb': '64'}} user_id = columns.UUID(primary_key=True) name = columns.Text() drop_table(AlterTable) sync_table(AlterTable) - AlterTable.__compaction_sstable_size_in_mb__ = 128 + table_meta = _get_table_metadata(AlterTable) + self.assertRegexpMatches(table_meta.export_as_string(), ".*'sstable_size_in_mb': '64'.*") + AlterTable.__options__['compaction']['sstable_size_in_mb'] = '128' sync_table(AlterTable) + table_meta = _get_table_metadata(AlterTable) + self.assertRegexpMatches(table_meta.export_as_string(), ".*'sstable_size_in_mb': '128'.*") -class EmptyCompactionTest(BaseCassEngTestCase): - def test_empty_compaction(self): - class EmptyCompactionModel(Model): - - __compaction__ = None - cid = columns.UUID(primary_key=True) - name = columns.Text() - - result = get_compaction_options(EmptyCompactionModel) - self.assertEqual({}, result) - - -class CompactionLeveledStrategyModel(Model): - - __compaction__ = LeveledCompactionStrategy - cid = columns.UUID(primary_key=True) - name = columns.Text() - - -class CompactionSizeTieredModel(Model): - - __compaction__ = SizeTieredCompactionStrategy - cid = columns.UUID(primary_key=True) - name = columns.Text() +class OptionsTest(BaseCassEngTestCase): + def _verify_options(self, table_meta, expected_options): + cql = table_meta.export_as_string() -class OptionsTest(BaseCassEngTestCase): + for name, value in expected_options.items(): + if isinstance(value, six.string_types): + self.assertIn("%s = '%s'" % (name, value), cql) + else: + start = cql.find("%s = {" % (name,)) + end = cql.find('}', start) + for subname, subvalue in value.items(): + attr = "'%s': '%s'" % (subname, subvalue) + found_at = cql.find(attr, start) + self.assertTrue(found_at > start) + self.assertTrue(found_at < end) def test_all_size_tiered_options(self): class AllSizeTieredOptionsModel(Model): - #__options__ = {'compaction': {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', - # 'bucket_low': '.3', - # 'bucket_high': '2', - # 'min_threshold': '2', - # 'max_threshold': '64', - # 'tombstone_compaction_interval': '86400'}} - __options__ = {'compaction': """{'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', + __options__ = {'compaction': {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'bucket_low': '.3', 'bucket_high': '2', 'min_threshold': '2', 'max_threshold': '64', - 'tombstone_compaction_interval': '86400'}"""} + 'tombstone_compaction_interval': '86400'}} cid = columns.UUID(primary_key=True) name = columns.Text() @@ -202,18 +144,13 @@ class AllSizeTieredOptionsModel(Model): sync_table(AllSizeTieredOptionsModel) table_meta = _get_table_metadata(AllSizeTieredOptionsModel) - cql = table_meta.export_as_string() - expected = table_meta._make_option_strings(AllSizeTieredOptionsModel.__options__) - for option in expected: - self.assertIn(option, cql) - + self._verify_options(table_meta, AllSizeTieredOptionsModel.__options__) - def xtest_all_leveled_options(self): + def test_all_leveled_options(self): class AllLeveledOptionsModel(Model): - - __compaction__ = LeveledCompactionStrategy - __compaction_sstable_size_in_mb__ = 64 + __options__ = {'compaction': {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', + 'sstable_size_in_mb': '64'}} cid = columns.UUID(primary_key=True) name = columns.Text() @@ -221,7 +158,5 @@ class AllLeveledOptionsModel(Model): drop_table(AllLeveledOptionsModel) sync_table(AllLeveledOptionsModel) - settings = get_table_settings(AllLeveledOptionsModel).options - - options = json.loads(settings['compaction_strategy_options']) - self.assertDictEqual(options, {u'sstable_size_in_mb': u'64'}) + table_meta = _get_table_metadata(AllLeveledOptionsModel) + self._verify_options(table_meta, AllLeveledOptionsModel.__options__) diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index e20680d036..4b62fcfcd1 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -23,9 +23,9 @@ from cassandra.cqlengine.connection import get_session, get_cluster from cassandra.cqlengine import CQLEngineException from cassandra.cqlengine import management -from cassandra.cqlengine.management import get_fields, sync_table, drop_table +from cassandra.cqlengine.management import _get_non_pk_field_names, _get_table_metadata, sync_table, drop_table from cassandra.cqlengine.models import Model -from cassandra.cqlengine import columns, SizeTieredCompactionStrategy, LeveledCompactionStrategy +from cassandra.cqlengine import columns from tests.integration import CASSANDRA_VERSION, PROTOCOL_VERSION from tests.integration.cqlengine.base import BaseCassEngTestCase @@ -91,7 +91,7 @@ class CapitalizedKeyModel(Model): class PrimaryKeysOnlyModel(Model): - __compaction__ = LeveledCompactionStrategy + __options__ = {'compaction': {'class': 'LeveledCompactionStrategy'}} first_ey = columns.Integer(primary_key=True) second_key = columns.Integer(primary_key=True) @@ -151,45 +151,35 @@ def setUp(self): def test_add_column(self): sync_table(FirstModel) - fields = get_fields(FirstModel) + fields = _get_non_pk_field_names(_get_table_metadata(FirstModel)) # this should contain the second key self.assertEqual(len(fields), 2) # get schema sync_table(SecondModel) - fields = get_fields(FirstModel) + fields = _get_non_pk_field_names(_get_table_metadata(FirstModel)) self.assertEqual(len(fields), 3) sync_table(ThirdModel) - fields = get_fields(FirstModel) + fields = _get_non_pk_field_names(_get_table_metadata(FirstModel)) self.assertEqual(len(fields), 4) sync_table(FourthModel) - fields = get_fields(FirstModel) + fields = _get_non_pk_field_names(_get_table_metadata(FirstModel)) self.assertEqual(len(fields), 4) class ModelWithTableProperties(Model): - # Set random table properties - __bloom_filter_fp_chance__ = 0.76328 - __caching__ = CACHING_ALL - __comment__ = 'TxfguvBdzwROQALmQBOziRMbkqVGFjqcJfVhwGR' - __gc_grace_seconds__ = 2063 - __populate_io_cache_on_flush__ = True - __read_repair_chance__ = 0.17985 - __replicate_on_write__ = False - __dclocal_read_repair_chance__ = 0.50811 + __options__ = {'bloom_filter_fp_chance': '0.76328', + 'comment': 'TxfguvBdzwROQALmQBOziRMbkqVGFjqcJfVhwGR', + 'gc_grace_seconds': '2063', + 'read_repair_chance': '0.17985', + 'dclocal_read_repair_chance': '0.50811'} key = columns.UUID(primary_key=True) -# kind of a hack, but we only test this property on C >= 2.0 -if CASSANDRA_VERSION >= '2.0.0': - ModelWithTableProperties.__memtable_flush_period_in_ms__ = 43681 - ModelWithTableProperties.__index_interval__ = 98706 - ModelWithTableProperties.__default_time_to_live__ = 4756 - class TablePropertiesTests(BaseCassEngTestCase): @@ -209,63 +199,23 @@ def test_set_table_properties(self): # TODO: due to a bug in the native driver i'm not seeing the local read repair chance show up # 'local_read_repair_chance': 0.50811, } - if CASSANDRA_VERSION <= '2.0.0': - expected['caching'] = CACHING_ALL - expected['replicate_on_write'] = False - - if CASSANDRA_VERSION == '2.0.0': - expected['populate_io_cache_on_flush'] = True - expected['index_interval'] = 98706 - - if CASSANDRA_VERSION >= '2.0.0': - expected['default_time_to_live'] = 4756 - expected['memtable_flush_period_in_ms'] = 43681 - - options = management.get_table_settings(ModelWithTableProperties).options + options = management._get_table_metadata(ModelWithTableProperties).options self.assertEqual(dict([(k, options.get(k)) for k in expected.keys()]), expected) def test_table_property_update(self): - ModelWithTableProperties.__bloom_filter_fp_chance__ = 0.66778 - ModelWithTableProperties.__caching__ = CACHING_NONE - ModelWithTableProperties.__comment__ = 'xirAkRWZVVvsmzRvXamiEcQkshkUIDINVJZgLYSdnGHweiBrAiJdLJkVohdRy' - ModelWithTableProperties.__gc_grace_seconds__ = 96362 + ModelWithTableProperties.__options__['bloom_filter_fp_chance'] = 0.66778 + ModelWithTableProperties.__options__['comment'] = 'xirAkRWZVVvsmzRvXamiEcQkshkUIDINVJZgLYSdnGHweiBrAiJdLJkVohdRy' + ModelWithTableProperties.__options__['gc_grace_seconds'] = 96362 - ModelWithTableProperties.__populate_io_cache_on_flush__ = False - ModelWithTableProperties.__read_repair_chance__ = 0.2989 - ModelWithTableProperties.__replicate_on_write__ = True - ModelWithTableProperties.__dclocal_read_repair_chance__ = 0.12732 - - if CASSANDRA_VERSION >= '2.0.0': - ModelWithTableProperties.__default_time_to_live__ = 65178 - ModelWithTableProperties.__memtable_flush_period_in_ms__ = 60210 - ModelWithTableProperties.__index_interval__ = 94207 + ModelWithTableProperties.__options__['read_repair_chance'] = 0.2989 + ModelWithTableProperties.__options__['dclocal_read_repair_chance'] = 0.12732 sync_table(ModelWithTableProperties) - table_settings = management.get_table_settings(ModelWithTableProperties).options - - expected = {'bloom_filter_fp_chance': 0.66778, - 'comment': 'xirAkRWZVVvsmzRvXamiEcQkshkUIDINVJZgLYSdnGHweiBrAiJdLJkVohdRy', - 'gc_grace_seconds': 96362, - 'read_repair_chance': 0.2989, - # 'local_read_repair_chance': 0.12732, - } - if CASSANDRA_VERSION >= '2.0.0': - expected['memtable_flush_period_in_ms'] = 60210 - expected['default_time_to_live'] = 65178 + table_options = management._get_table_metadata(ModelWithTableProperties).options - if CASSANDRA_VERSION == '2.0.0': - expected['index_interval'] = 94207 - - # these featuers removed in cassandra 2.1 - if CASSANDRA_VERSION <= '2.0.0': - expected['caching'] = CACHING_NONE - expected['replicate_on_write'] = True - expected['populate_io_cache_on_flush'] = False - - self.assertEqual(dict([(k, table_settings.get(k)) for k in expected.keys()]), - expected) + self.assertDictContainsSubset(ModelWithTableProperties.__options__, table_options) class SyncTableTests(BaseCassEngTestCase): @@ -275,32 +225,18 @@ def setUp(self): def test_sync_table_works_with_primary_keys_only_tables(self): - # This is "create table": - sync_table(PrimaryKeysOnlyModel) - - # let's make sure settings persisted correctly: - - assert PrimaryKeysOnlyModel.__compaction__ == LeveledCompactionStrategy # blows up with DoesNotExist if table does not exist - table_settings = management.get_table_settings(PrimaryKeysOnlyModel) - # let make sure the flag we care about + table_meta = management._get_table_metadata(PrimaryKeysOnlyModel) - assert LeveledCompactionStrategy in table_settings.options['compaction_strategy_class'] + self.assertIn('LeveledCompactionStrategy', table_meta.as_cql_query()) - # Now we are "updating" the table: - # setting up something to change - PrimaryKeysOnlyModel.__compaction__ = SizeTieredCompactionStrategy + PrimaryKeysOnlyModel.__options__['compaction']['class'] = 'SizeTieredCompactionStrategy' - # primary-keys-only tables do not create entries in system.schema_columns - # table. Only non-primary keys are added to that table. - # Our code must deal with that eventuality properly (not crash) - # on subsequent runs of sync_table (which runs get_fields internally) - get_fields(PrimaryKeysOnlyModel) sync_table(PrimaryKeysOnlyModel) - table_settings = management.get_table_settings(PrimaryKeysOnlyModel) - assert SizeTieredCompactionStrategy in table_settings.options['compaction_strategy_class'] + table_meta = management._get_table_metadata(PrimaryKeysOnlyModel) + self.assertIn('SizeTieredCompactionStrategy', table_meta.as_cql_query()) class NonModelFailureTest(BaseCassEngTestCase): @@ -328,9 +264,9 @@ class StaticModel(Model): with mock.patch.object(session, "execute", wraps=session.execute) as m: sync_table(StaticModel) - assert m.call_count > 0 + self.assertGreater(m.call_count, 0) statement = m.call_args[0][0].query_string - assert '"name" text static' in statement, statement + self.assertIn('"name" text static', statement) # if we sync again, we should not apply an alter w/ a static sync_table(StaticModel) @@ -338,5 +274,4 @@ class StaticModel(Model): with mock.patch.object(session, "execute", wraps=session.execute) as m2: sync_table(StaticModel) - assert len(m2.call_args_list) == 1 - assert "ALTER" not in m2.call_args[0][0].query_string + self.assertEqual(len(m2.call_args_list), 0) diff --git a/tests/integration/cqlengine/model/test_polymorphism.py b/tests/integration/cqlengine/model/test_polymorphism.py index 431237d2de..6e7a5a690d 100644 --- a/tests/integration/cqlengine/model/test_polymorphism.py +++ b/tests/integration/cqlengine/model/test_polymorphism.py @@ -488,5 +488,5 @@ def tearDownClass(cls): management.drop_table(IndexedInherit2) def test_success_case(self): - assert len(list(IndexedInherit1.objects(partition=self.p1.partition))) == 1 - assert len(list(IndexedInherit2.objects(partition=self.p1.partition))) == 1 + self.assertEqual(len(list(IndexedInherit1.objects(partition=self.p1.partition))), 1) + self.assertEqual(len(list(IndexedInherit2.objects(partition=self.p1.partition))), 1) From 033670f72bfa94a36be9191fa5cbdfd2bfe5e170 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 21 Sep 2015 08:32:05 -0700 Subject: [PATCH 0066/2005] 3.0.0a3 version update --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 7d60b6a292..5c7df1868a 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 0, '0a2', 'post') +__version_info__ = (3, 0, '0a3') __version__ = '.'.join(map(str, __version_info__)) From a56a4c98d026a7fe34aa4fb1d793bd78673fab9a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 21 Sep 2015 08:34:59 -0700 Subject: [PATCH 0067/2005] post-release version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 5c7df1868a..d884b085c9 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 0, '0a3') +__version_info__ = (3, 0, '0a3', 'post0') __version__ = '.'.join(map(str, __version_info__)) From 5a45a0fb70c24c98a406117969e07d4be2e4b139 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 22 Sep 2015 15:34:48 -0700 Subject: [PATCH 0068/2005] Making mv metadata tests python 3 compatible --- tests/integration/standard/test_metadata.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index b8dc3c6189..ee1c485936 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -2042,7 +2042,7 @@ def test_create_view_metadata(self): self.assertFalse(mv.include_all_columns) # Validate that all columns are preset and correct - mv_columns = mv.columns.values() + mv_columns = list(mv.columns.values()) self.assertEquals(len(mv_columns), 6) game_column = mv_columns[0] @@ -2147,7 +2147,7 @@ def test_metadata_with_quoted_identifiers(self): self.assertFalse(mv.include_all_columns) # Validate that all columns are preset and correct - mv_columns = mv.columns.values() + mv_columns = list(mv.columns.values()) self.assertEquals(len(mv_columns), 3) theKey_column = mv_columns[0] From c6d1b8f35e6cfcc0f93299885e7bd7c938206bb1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 29 Sep 2015 17:05:59 -0500 Subject: [PATCH 0069/2005] Proper exc_info handling for Connection.defunct debug log Fixes an issue where exc_info can cause formatting failures when defunct is called while not actually handling an exception. PYTHON-409 --- cassandra/connection.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index c2efc3e08e..29edaaca1c 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -352,8 +352,14 @@ def defunct(self, exc): return self.is_defunct = True - log.debug("Defuncting connection (%s) to %s:", - id(self), self.host, exc_info=exc) + exc_info = sys.exc_info() + # if we are not handling an exception, just use the passed exception, and don't try to format exc_info with the message + if any(exc_info): + log.debug("Defuncting connection (%s) to %s:", + id(self), self.host, exc_info=exc_info) + else: + log.debug("Defuncting connection (%s) to %s: %s", + id(self), self.host, exc) self.last_error = exc self.close() From 849f52e5f976e5f6238d1aa0eeb33e6deb2b401e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 29 Sep 2015 17:09:06 -0500 Subject: [PATCH 0070/2005] Revert "Changing to test setup to use protocol V1 instead of protocol downgrade logic" This reverts commit a1574e0297d52f2b910514476d827f4f7045b416. Following fix for negotiation in nosetest context for Python 3 PYTHON-409 --- tests/integration/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 92ebd0a410..b29aa00ff6 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -59,7 +59,7 @@ def get_server_versions(): if cass_version is not None: return (cass_version, cql_version) - c = Cluster(protocol_version=1) + c = Cluster() s = c.connect() row = s.execute('SELECT cql_version, release_version FROM system.local')[0] From be4e1db9b3ab0ee7ad185d882add97a18a75d425 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 30 Sep 2015 12:27:56 -0500 Subject: [PATCH 0071/2005] cqle: Remove invalid test on limit with count. Limit does not apply to rows counted, but rather returned rows. It was incorrect in earlier versions of Cassandra. See https://issues.apache.org/jira/browse/CASSANDRA-10380 --- tests/integration/cqlengine/query/test_queryset.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 452775203f..db7c0c2306 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -262,14 +262,6 @@ def test_query_expression_count(self): q = TestModel.objects(TestModel.test_id == 0) assert q.count() == 4 - def test_query_limit_count(self): - """ Tests that adding query with a limit affects the count as expected """ - assert TestModel.objects.count() == 12 - - q = TestModel.objects(TestModel.test_id == 0).limit(2) - result = q.count() - self.assertEqual(2, result) - def test_iteration(self): """ Tests that iterating over a query set pulls back all of the expected results """ q = TestModel.objects(test_id=0) From 1048f33604fbc48bd9e476fd7e7ae57b4a6f3032 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 30 Sep 2015 14:15:46 -0500 Subject: [PATCH 0072/2005] cqle: test cleanup following options refactor --- .../management/test_compaction_settings.py | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/tests/integration/cqlengine/management/test_compaction_settings.py b/tests/integration/cqlengine/management/test_compaction_settings.py index 85196d330b..a6717654e1 100644 --- a/tests/integration/cqlengine/management/test_compaction_settings.py +++ b/tests/integration/cqlengine/management/test_compaction_settings.py @@ -23,14 +23,7 @@ from tests.integration.cqlengine.base import BaseCassEngTestCase -class CompactionModel(Model): - - __compaction__ = None - cid = columns.UUID(primary_key=True) - name = columns.Text() - - -class LeveledcompactionTestTable(Model): +class LeveledCompactionTestTable(Model): __options__ = {'compaction': {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'sstable_size_in_mb': '64'}} @@ -42,10 +35,10 @@ class LeveledcompactionTestTable(Model): class AlterTableTest(BaseCassEngTestCase): def test_alter_is_called_table(self): - drop_table(LeveledcompactionTestTable) - sync_table(LeveledcompactionTestTable) + drop_table(LeveledCompactionTestTable) + sync_table(LeveledCompactionTestTable) with patch('cassandra.cqlengine.management._update_options') as mock: - sync_table(LeveledcompactionTestTable) + sync_table(LeveledCompactionTestTable) assert mock.called == 1 def test_compaction_not_altered_without_changes_leveled(self): @@ -82,7 +75,7 @@ class SizeTieredCompactionChangesDetectionTest(Model): self.assertFalse(_update_options(SizeTieredCompactionChangesDetectionTest)) def test_alter_actually_alters(self): - tmp = copy.deepcopy(LeveledcompactionTestTable) + tmp = copy.deepcopy(LeveledCompactionTestTable) drop_table(tmp) sync_table(tmp) tmp.__options__ = {'compaction': {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'}} From 75ddc514617304797626cc69957eb6008695be1e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 30 Sep 2015 14:15:51 -0500 Subject: [PATCH 0073/2005] cqle: update docs for new __options__ model attribute --- docs/api/cassandra/cqlengine/models.rst | 96 +++++++------------------ 1 file changed, 24 insertions(+), 72 deletions(-) diff --git a/docs/api/cassandra/cqlengine/models.rst b/docs/api/cassandra/cqlengine/models.rst index f29dacbf13..f06cd91b56 100644 --- a/docs/api/cassandra/cqlengine/models.rst +++ b/docs/api/cassandra/cqlengine/models.rst @@ -20,8 +20,8 @@ Model person.first_name #returns 'Blake' person.last_name #returns 'Eggleston' - *Model attributes define how the model maps to tables in the database. These are class variables that should be set - when defining Model deriviatives.* + Model attributes define how the model maps to tables in the database. These are class variables that should be set + when defining Model deriviatives. .. autoattribute:: __abstract__ :annotation: = False @@ -39,97 +39,49 @@ Model See :ref:`model_inheritance` for usage examples. - *Each table can have its own set of configuration options. - These can be specified on a model with the following attributes:* + Each table can have its own set of configuration options, including compaction. Unspecified, these default to sensible values in + the server. To override defaults, set options using the model ``__options__`` attribute, which allows options specified a dict. - See the `list of supported table properties for more information - `_. - - .. attribute:: __bloom_filter_fp_chance - - .. attribute:: __caching__ - - .. attribute:: __comment__ - - .. attribute:: __dclocal_read_repair_chance__ - - .. attribute:: __default_time_to_live__ - - .. attribute:: __gc_grace_seconds__ - - .. attribute:: __index_interval__ - - .. attribute:: __memtable_flush_period_in_ms__ - - .. attribute:: __populate_io_cache_on_flush__ - - .. attribute:: __read_repair_chance__ + When a table is synced, it will be altered to match the options set on your table. + This means that if you are changing settings manually they will be changed back on resync. - .. attribute:: __replicate_on_write__ + Do not use the options settings of cqlengine if you want to manage your compaction settings manually. + See the `list of supported table properties for more information + `_. - *Model presently supports specifying compaction options via class attributes. - cqlengine will only use your compaction options if you have a strategy set. - When a table is synced, it will be altered to match the compaction options set on your table. - This means that if you are changing settings manually they will be changed back on resync.* - - *Do not use the compaction settings of cqlengine if you want to manage your compaction settings manually.* - - *cqlengine supports all compaction options as of Cassandra 1.2.8.* - - **These attibutes will likely be replaced by a single options string in a future version, and are therefore deprecated.** - - .. attribute:: __compaction_bucket_high__ - :annotation: Deprecated - - .. attribute:: __compaction_bucket_low__ - :annotation: Deprecated - - .. attribute:: __compaction_max_compaction_threshold__ - :annotation: Deprecated - - .. attribute:: __compaction_min_compaction_threshold__ - :annotation: Deprecated - - .. attribute:: __compaction_min_sstable_size__ - :annotation: Deprecated - - .. attribute:: __compaction_sstable_size_in_mb__ - :annotation: Deprecated - - .. attribute:: __compaction_tombstone_compaction_interval__ - :annotation: Deprecated - .. attribute:: __compaction_tombstone_threshold__ - :annotation: Deprecated + .. attribute:: __options__ For example: .. code-block:: python class User(Model): - __compaction__ = cqlengine.LeveledCompactionStrategy - __compaction_sstable_size_in_mb__ = 64 - __compaction_tombstone_threshold__ = .2 + __options__ = {'compaction': {'class': 'LeveledCompactionStrategy', + 'sstable_size_in_mb': '64', + 'tombstone_threshold': '.2'}, + 'read_repair_chance': '0.5', + 'comment': 'User data stored here'} user_id = columns.UUID(primary_key=True) name = columns.Text() - or for SizeTieredCompaction: + or : .. code-block:: python class TimeData(Model): - __compaction__ = SizeTieredCompactionStrategy - __compaction_bucket_low__ = .3 - __compaction_bucket_high__ = 2 - __compaction_min_threshold__ = 2 - __compaction_max_threshold__ = 64 - __compaction_tombstone_compaction_interval__ = 86400 + __options__ = {'compaction': {'class': 'SizeTieredCompactionStrategy', + 'bucket_low': '.3', + 'bucket_high': '2', + 'min_threshold': '2', + 'max_threshold': '64', + 'tombstone_compaction_interval': '86400'}, + 'gc_grace_seconds': '0'} - Tables may use `LeveledCompactionStrategy` or `SizeTieredCompactionStrategy`. Both options are available in the top level cqlengine module. To reiterate, you will need to set your `__compaction__` option explicitly in order for cqlengine to handle any of your settings. - *The base methods allow creating, storing, and querying modeled objects.* + The base methods allow creating, storing, and querying modeled objects. .. automethod:: create From 0f06126a0ae13804c16ed6f29c122a3630b2f97f Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 30 Sep 2015 16:08:19 -0500 Subject: [PATCH 0074/2005] Adding tests for python-419 --- tests/integration/standard/test_metadata.py | 54 +++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index f822594d4a..4cbbc24d32 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -2111,6 +2111,60 @@ def test_create_view_metadata(self): self.assertEquals(day_column.is_static, mv.clustering_key[2].is_static) self.assertEquals(day_column.is_reversed, mv.clustering_key[2].is_reversed) + def test_create_mv_changes(self): + """ + test to ensure that materialized view metadata is properly updated with base table changes + + test_create_view_metadata tests that materialized views metadata is properly updated when corresponding changes + are made to the base table. We test adding a new column and changing base table columns types. We then ensure + that changes are surfaced in the MV accordingly. + + @since 3.0.0 + @jira_ticket PYTHON-419 + @expected_result Materialized view metadata should be update correctly + + @test_category metadata + """ + create_table = """CREATE TABLE {0}.scores( + user TEXT, + game TEXT, + year INT, + month INT, + day INT, + score TEXT, + PRIMARY KEY (user, game, year, month, day) + )""".format(self.ksname) + + self.session.execute(create_table) + + create_mv = """CREATE MATERIALIZED VIEW {0}.monthlyhigh AS + SELECT game, year, month, score, user, day FROM {0}.scores + WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL + PRIMARY KEY ((game, year, month), score, user, day) + WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)""".format(self.ksname) + + self.session.execute(create_mv) + score_table = self.cluster.metadata.keyspaces[self.ksname].tables['scores'] + + self.assertIsNotNone(score_table.views["monthlyhigh"]) + self.assertEqual(len(self.cluster.metadata.keyspaces[self.ksname].views), 1) + + insert_fouls = """ALTER TABLE {0}.scores ADD fouls INT""".format((self.ksname)) + + self.session.execute(insert_fouls) + self.assertEqual(len(self.cluster.metadata.keyspaces[self.ksname].views), 1) + + alter_scores = """ALTER TABLE {0}.scores ALTER score blob""".format((self.ksname)) + + self.session.execute(alter_scores) + self.assertEqual(len(self.cluster.metadata.keyspaces[self.ksname].views), 1) + + score_column = self.cluster.metadata.keyspaces[self.ksname].tables['scores'].columns['score'] + self.assertEquals(score_column.typestring, 'blob') + + score_mv_column = self.cluster.metadata.keyspaces[self.ksname].views["monthlyhigh"].columns['score'] + self.assertEquals(score_mv_column.typestring, 'blob') + def test_metadata_with_quoted_identifiers(self): """ test to ensure that materialized view metadata is properly constructed when quoted identifiers are used From 4ce576f9bb0abf63c18c28d1255152ce3d6bf021 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 30 Sep 2015 16:11:41 -0500 Subject: [PATCH 0075/2005] doc: adding MaterializedViewMeta to docs --- docs/api/cassandra/metadata.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/api/cassandra/metadata.rst b/docs/api/cassandra/metadata.rst index 291f63d5be..d797f739de 100644 --- a/docs/api/cassandra/metadata.rst +++ b/docs/api/cassandra/metadata.rst @@ -40,6 +40,9 @@ Schemas .. autoclass:: IndexMetadata () :members: +.. autoclass:: MaterializedViewMetadata () + :members: + Tokens and Ring Topology ------------------------ From a82f25e0b94da79afdf4fc00387a697c339733fa Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 30 Sep 2015 16:16:29 -0500 Subject: [PATCH 0076/2005] Unify IndexMetadat model btw C* 3 and earlier. PYTHON-404 --- cassandra/metadata.py | 151 +++++++------------- tests/integration/standard/test_metadata.py | 22 +-- 2 files changed, 61 insertions(+), 112 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 8700c65fa3..ff3c5b36b8 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1300,64 +1300,51 @@ class IndexMetadata(object): """ A representation of a secondary index on a column. """ + keyspace_name = None + """ A string name of the keyspace. """ - column = None - """ - The column (:class:`.ColumnMetadata`) this index is on. - """ + table_name = None + """ A string name of the table this index is on. """ name = None """ A string name for the index. """ - index_type = None - """ A string representing the type of index. """ + kind = None + """ A string representing the kind of index (COMPOSITE, CUSTOM,...). """ index_options = {} """ A dict of index options. """ - def __init__(self, column_metadata, index_name=None, index_type=None, index_options={}): - self.column = column_metadata + def __init__(self, keyspace_name, table_name, index_name, kind, index_options): + self.keyspace_name = keyspace_name + self.table_name = table_name self.name = index_name - self.index_type = index_type + self.kind = kind self.index_options = index_options def as_cql_query(self): """ Returns a CQL query that can be used to recreate this index. """ - table = self.column.table - if self.index_type != "CUSTOM": - index_target = protect_name(self.column.name) - if self.index_options is not None: - option_keys = self.index_options.keys() - if "index_keys" in option_keys: - index_target = 'keys(%s)' % (index_target,) - elif "index_values" in option_keys: - # don't use any "function" for collection values - pass - else: - # it might be a "full" index on a frozen collection, but - # we need to check the data type to verify that, because - # there is no special index option for full-collection - # indexes. - data_type = self.column.data_type - collection_types = ('map', 'set', 'list') - if data_type.typename == "frozen" and data_type.subtypes[0].typename in collection_types: - # no index option for full-collection index - index_target = 'full(%s)' % (index_target,) - + options = dict(self.index_options) + index_target = options.pop("target") + if self.kind != "CUSTOM": return "CREATE INDEX %s ON %s.%s (%s)" % ( self.name, # Cassandra doesn't like quoted index names for some reason - protect_name(table.keyspace.name), - protect_name(table.name), + protect_name(self.keyspace_name), + protect_name(self.table_name), index_target) else: - return "CREATE CUSTOM INDEX %s ON %s.%s (%s) USING '%s'" % ( + class_name = options.pop("class_name") + ret = "CREATE CUSTOM INDEX %s ON %s.%s (%s) USING '%s'" % ( self.name, # Cassandra doesn't like quoted index names for some reason - protect_name(table.keyspace.name), - protect_name(table.name), - protect_name(self.column.name), - self.index_options["class_name"]) + protect_name(self.keyspace_name), + protect_name(self.table_name), + index_target, + class_name) + if options: + ret += " WITH OPTIONS = %s" % Encoder().cql_encode_all_types(options) + return ret def export_as_string(self): """ @@ -1932,13 +1919,32 @@ def _build_column_metadata(table_metadata, row): @staticmethod def _build_index_metadata(column_metadata, row): index_name = row.get("index_name") - index_type = row.get("index_type") - if index_name or index_type: + kind = row.get("index_type") + if index_name or kind: options = row.get("index_options") - index_options = json.loads(options) if options else None - return IndexMetadata(column_metadata, index_name, index_type, index_options) - else: - return None + options = json.loads(options) if options else {} + options = options or {} # if the json parsed to None, init empty dict + + # generate a CQL index identity string + target = protect_name(column_metadata.name) + if kind != "CUSTOM": + if "index_keys" in options: + target = 'keys(%s)' % (target,) + elif "index_values" in options: + # don't use any "function" for collection values + pass + else: + # it might be a "full" index on a frozen collection, but + # we need to check the data type to verify that, because + # there is no special index option for full-collection + # indexes. + data_type = column_metadata.data_type + collection_types = ('map', 'set', 'list') + if data_type.typename == "frozen" and data_type.subtypes[0].typename in collection_types: + # no index option for full-collection index + target = 'full(%s)' % (target,) + options['target'] = target + return IndexMetadata(column_metadata.table.keyspace_name, column_metadata.table.name, index_name, kind, options) @staticmethod def _build_trigger_metadata(table_metadata, row): @@ -2220,7 +2226,7 @@ def _build_index_metadata(table_metadata, row): kind = row.get("kind") if index_name or kind: index_options = row.get("options") - return IndexMetadataV3(table_metadata.keyspace_name, table_metadata.name, index_name, kind, index_options) + return IndexMetadata(table_metadata.keyspace_name, table_metadata.name, index_name, kind, index_options) else: return None @@ -2310,63 +2316,6 @@ def _make_option_strings(cls, options_map): return list(sorted(ret)) -class IndexMetadataV3(object): - """ - A representation of a secondary index on a column. - """ - keyspace_name = None - """ A string name of the keyspace. """ - - table_name = None - """ A string name of the table this index is on. """ - - name = None - """ A string name for the index. """ - - kind = None - """ A string representing the kind of index (COMPOSITE, CUSTOM,...). """ - - index_options = {} - """ A dict of index options. """ - - def __init__(self, keyspace_name, table_name, index_name, kind, index_options): - self.keyspace_name = keyspace_name - self.table_name = table_name - self.name = index_name - self.kind = kind - self.index_options = index_options - - def as_cql_query(self): - """ - Returns a CQL query that can be used to recreate this index. - """ - options = dict(self.index_options) - index_target = options.pop("target") - if self.kind != "CUSTOM": - return "CREATE INDEX %s ON %s.%s (%s)" % ( - self.name, # Cassandra doesn't like quoted index names for some reason - protect_name(self.keyspace_name), - protect_name(self.table_name), - index_target) - else: - class_name = options.pop("class_name") - ret = "CREATE CUSTOM INDEX %s ON %s.%s (%s) USING '%s'" % ( - self.name, # Cassandra doesn't like quoted index names for some reason - protect_name(self.keyspace_name), - protect_name(self.table_name), - index_target, - class_name) - if options: - ret += " WITH OPTIONS = %s" % Encoder().cql_encode_all_types(options) - return ret - - def export_as_string(self): - """ - Returns a CQL query string that can be used to recreate this index. - """ - return self.as_cql_query() + ';' - - class MaterializedViewMetadata(object): """ A representation of a materialized view on a table diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index f822594d4a..c1aaf4646a 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -27,7 +27,7 @@ from cassandra.cluster import Cluster from cassandra.cqltypes import DoubleType, Int32Type, ListType, UTF8Type, MapType, LongType from cassandra.encoder import Encoder -from cassandra.metadata import (Metadata, KeyspaceMetadata, IndexMetadata, IndexMetadataV3, +from cassandra.metadata import (Metadata, KeyspaceMetadata, IndexMetadata, Token, MD5Token, TokenMap, murmur3, Function, Aggregate, protect_name, protect_names, get_schema_parser) from cassandra.policies import SimpleConvictionPolicy @@ -1301,10 +1301,10 @@ def test_index_updates(self): ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name] table_meta = ks_meta.tables[self.table_name] - self.assertIsInstance(ks_meta.indexes['a_idx'], (IndexMetadata, IndexMetadataV3)) - self.assertIsInstance(ks_meta.indexes['b_idx'], (IndexMetadata, IndexMetadataV3)) - self.assertIsInstance(table_meta.indexes['a_idx'], (IndexMetadata, IndexMetadataV3)) - self.assertIsInstance(table_meta.indexes['b_idx'], (IndexMetadata, IndexMetadataV3)) + self.assertIsInstance(ks_meta.indexes['a_idx'], IndexMetadata) + self.assertIsInstance(ks_meta.indexes['b_idx'], IndexMetadata) + self.assertIsInstance(table_meta.indexes['a_idx'], IndexMetadata) + self.assertIsInstance(table_meta.indexes['b_idx'], IndexMetadata) # both indexes updated when index dropped self.session.execute("DROP INDEX a_idx") @@ -1315,9 +1315,9 @@ def test_index_updates(self): ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name] table_meta = ks_meta.tables[self.table_name] self.assertNotIn('a_idx', ks_meta.indexes) - self.assertIsInstance(ks_meta.indexes['b_idx'], (IndexMetadata, IndexMetadataV3)) + self.assertIsInstance(ks_meta.indexes['b_idx'], IndexMetadata) self.assertNotIn('a_idx', table_meta.indexes) - self.assertIsInstance(table_meta.indexes['b_idx'], (IndexMetadata, IndexMetadataV3)) + self.assertIsInstance(table_meta.indexes['b_idx'], IndexMetadata) # keyspace index updated when table dropped self.drop_basic_table() @@ -1333,15 +1333,15 @@ def test_index_follows_alter(self): self.session.execute("CREATE INDEX %s ON %s (a)" % (idx, self.table_name)) ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name] table_meta = ks_meta.tables[self.table_name] - self.assertIsInstance(ks_meta.indexes[idx], (IndexMetadata, IndexMetadataV3)) - self.assertIsInstance(table_meta.indexes[idx], (IndexMetadata, IndexMetadataV3)) + self.assertIsInstance(ks_meta.indexes[idx], IndexMetadata) + self.assertIsInstance(table_meta.indexes[idx], IndexMetadata) self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' % self.keyspace_name) old_meta = ks_meta ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name] self.assertIsNot(ks_meta, old_meta) table_meta = ks_meta.tables[self.table_name] - self.assertIsInstance(ks_meta.indexes[idx], (IndexMetadata, IndexMetadataV3)) - self.assertIsInstance(table_meta.indexes[idx], (IndexMetadata, IndexMetadataV3)) + self.assertIsInstance(ks_meta.indexes[idx], IndexMetadata) + self.assertIsInstance(table_meta.indexes[idx], IndexMetadata) self.drop_basic_table() From 9c200be78a69a10f4ff3f8d447f59cff17006a4d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 30 Sep 2015 16:23:02 -0500 Subject: [PATCH 0077/2005] TestCase.assertRegex is only present in unittest2, not std lib --- tests/integration/standard/test_metadata.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index c1aaf4646a..3ff8634959 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1963,11 +1963,11 @@ def test_materialized_view_metadata_alter(self): self.session.execute("CREATE TABLE {0}.table1 (pk int PRIMARY KEY, c int)".format(self.ksname)) self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT c FROM {0}.table1 WHERE c IS NOT NULL PRIMARY KEY (pk, c)".format(self.ksname)) - self.assertRegex(self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views["mv1"].options["compaction"]["class"], + self.assertRegexpMatches(self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views["mv1"].options["compaction"]["class"], "SizeTieredCompactionStrategy") self.session.execute("ALTER MATERIALIZED VIEW {0}.mv1 WITH compaction = {{ 'class' : 'LeveledCompactionStrategy' }}".format(self.ksname)) - self.assertRegex(self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views["mv1"].options["compaction"]["class"], + self.assertRegexpMatches(self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views["mv1"].options["compaction"]["class"], "LeveledCompactionStrategy") def test_materialized_view_metadata_drop(self): From 3dfac5326edc326bc55caec17ef34ce5ece6ada3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 1 Oct 2015 13:00:03 -0500 Subject: [PATCH 0078/2005] meta: transfer views to new table on table update event Resolves an issue where materialized views were dropped from meta model on table update events. PYTHON-419 --- cassandra/metadata.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 8700c65fa3..82160a2a1a 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -675,12 +675,24 @@ def resolve_user_types(self, key, types, user_type_strings): user_type_strings.append(user_type.as_cql_query(formatted=True)) def _add_table_metadata(self, table_metadata): - self._drop_table_metadata(table_metadata.name) - - self.tables[table_metadata.name] = table_metadata + old_indexes = {} + old_meta = self.tables.get(table_metadata.name, None) + if old_meta: + # views are not queried with table, so they must be transferred to new + table_metadata.views = old_meta.views + # indexes will be updated with what is on the new metadata + old_indexes = old_meta.indexes + + # note the intentional order of add before remove + # this makes sure the maps are never absent something that existed before this update for index_name, index_metadata in six.iteritems(table_metadata.indexes): self.indexes[index_name] = index_metadata + for index_name in (n for n in old_indexes if n not in table_metadata.indexes): + self.indexes.pop(index_name, None) + + self.tables[table_metadata.name] = table_metadata + def _drop_table_metadata(self, table_name): table_meta = self.tables.pop(table_name, None) if table_meta: From 185f906db5f14c0c3242b3776caf9fef5279a63c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 1 Oct 2015 13:00:50 -0500 Subject: [PATCH 0079/2005] Materialized view test cleanup/improvement test_create_mv_changes still fails due to Cassandra issue (noted in code) --- tests/integration/standard/test_metadata.py | 23 +++++++++++---------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 4cbbc24d32..cdd11514a2 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1963,11 +1963,11 @@ def test_materialized_view_metadata_alter(self): self.session.execute("CREATE TABLE {0}.table1 (pk int PRIMARY KEY, c int)".format(self.ksname)) self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT c FROM {0}.table1 WHERE c IS NOT NULL PRIMARY KEY (pk, c)".format(self.ksname)) - self.assertRegex(self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views["mv1"].options["compaction"]["class"], + self.assertRegexpMatches(self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views["mv1"].options["compaction"]["class"], "SizeTieredCompactionStrategy") self.session.execute("ALTER MATERIALIZED VIEW {0}.mv1 WITH compaction = {{ 'class' : 'LeveledCompactionStrategy' }}".format(self.ksname)) - self.assertRegex(self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views["mv1"].options["compaction"]["class"], + self.assertRegexpMatches(self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views["mv1"].options["compaction"]["class"], "LeveledCompactionStrategy") def test_materialized_view_metadata_drop(self): @@ -2137,27 +2137,28 @@ def test_create_mv_changes(self): self.session.execute(create_table) - create_mv = """CREATE MATERIALIZED VIEW {0}.monthlyhigh AS + view_name = 'monthlyhigh' + create_mv = """CREATE MATERIALIZED VIEW {0}.{1} AS SELECT game, year, month, score, user, day FROM {0}.scores WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL PRIMARY KEY ((game, year, month), score, user, day) - WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)""".format(self.ksname) + WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)""".format(self.ksname, view_name) self.session.execute(create_mv) score_table = self.cluster.metadata.keyspaces[self.ksname].tables['scores'] - self.assertIsNotNone(score_table.views["monthlyhigh"]) - self.assertEqual(len(self.cluster.metadata.keyspaces[self.ksname].views), 1) + self.assertIn(view_name, score_table.views) + self.assertIn(view_name, self.cluster.metadata.keyspaces[self.ksname].views) - insert_fouls = """ALTER TABLE {0}.scores ADD fouls INT""".format((self.ksname)) + insert_fouls = """ALTER TABLE {0}.scores ADD fouls INT""".format(self.ksname) self.session.execute(insert_fouls) - self.assertEqual(len(self.cluster.metadata.keyspaces[self.ksname].views), 1) + self.assertIn(view_name, self.cluster.metadata.keyspaces[self.ksname].views) - alter_scores = """ALTER TABLE {0}.scores ALTER score blob""".format((self.ksname)) + alter_scores = """ALTER TABLE {0}.scores ALTER score TYPE blob""".format(self.ksname) - self.session.execute(alter_scores) - self.assertEqual(len(self.cluster.metadata.keyspaces[self.ksname].views), 1) + self.session.execute(alter_scores) # ERROR https://issues.apache.org/jira/browse/CASSANDRA-10424 + self.assertIn(view_name, self.cluster.metadata.keyspaces[self.ksname].views) score_column = self.cluster.metadata.keyspaces[self.ksname].tables['scores'].columns['score'] self.assertEquals(score_column.typestring, 'blob') From f27d7db0270b7f7884227fa8a69c9ecdce45cc2d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 1 Oct 2015 13:56:55 -0500 Subject: [PATCH 0080/2005] meta: export_as_string for UserType PYTHON-405 --- cassandra/metadata.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 8700c65fa3..87e99fd00d 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -672,7 +672,7 @@ def resolve_user_types(self, key, types, user_type_strings): for field_type in user_type.field_types: if field_type.cassname == 'UserType' and field_type.typename in types: self.resolve_user_types(field_type.typename, types, user_type_strings) - user_type_strings.append(user_type.as_cql_query(formatted=True)) + user_type_strings.append(user_type.export_as_string()) def _add_table_metadata(self, table_metadata): self._drop_table_metadata(table_metadata.name) @@ -765,9 +765,12 @@ def as_cql_query(self, formatted=False): fields.append("%s %s" % (protect_name(field_name), field_type.cql_parameterized_type())) ret += field_join.join("%s%s" % (padding, field) for field in fields) - ret += "\n);" if formatted else ");" + ret += "\n)" if formatted else ")" return ret + def export_as_string(self): + return self.as_cql_query(formatted=True) + ';' + class Aggregate(object): """ From dfe97db082799a925f7203054b5adc33a28cf987 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 1 Oct 2015 14:08:55 -0500 Subject: [PATCH 0081/2005] meta: export_as_string for Function and Aggregate PYTHON-405 --- cassandra/metadata.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 87e99fd00d..041aa85fee 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -635,8 +635,8 @@ def export_as_string(self): """ cql = "\n\n".join([self.as_cql_query()] + self.user_type_strings() - + [f.as_cql_query(True) for f in self.functions.values()] - + [a.as_cql_query(True) for a in self.aggregates.values()] + + [f.export_as_string() for f in self.functions.values()] + + [a.export_as_string() for a in self.aggregates.values()] + [t.export_as_string() for t in self.tables.values()]) if self._exc_info: import traceback @@ -855,6 +855,9 @@ def as_cql_query(self, formatted=False): return ret + def export_as_string(self): + return self.as_cql_query(formatted=True) + ';' + @property def signature(self): return SignatureDescriptor.format_signature(self.name, self.type_signature) @@ -943,7 +946,10 @@ def as_cql_query(self, formatted=False): "%(on_null)s ON NULL INPUT%(sep)s" \ "RETURNS %(typ)s%(sep)s" \ "LANGUAGE %(lang)s%(sep)s" \ - "AS $$%(body)s$$;" % locals() + "AS $$%(body)s$$" % locals() + + def export_as_string(self): + return self.as_cql_query(formatted=True) + ';' @property def signature(self): From bfa75d6c2d7a85b0bab27c9ed2f97c7e753c812e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 1 Oct 2015 14:10:01 -0500 Subject: [PATCH 0082/2005] meta: improved format for Function and Aggregate CQL --- cassandra/metadata.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 041aa85fee..336437c484 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -838,7 +838,7 @@ def as_cql_query(self, formatted=False): If `formatted` is set to :const:`True`, extra whitespace will be added to make the query more readable. """ - sep = '\n' if formatted else ' ' + sep = '\n ' if formatted else ' ' keyspace = protect_name(self.keyspace) name = protect_name(self.name) type_list = ', '.join(self.type_signature) @@ -932,7 +932,7 @@ def as_cql_query(self, formatted=False): If `formatted` is set to :const:`True`, extra whitespace will be added to make the query more readable. """ - sep = '\n' if formatted else ' ' + sep = '\n ' if formatted else ' ' keyspace = protect_name(self.keyspace) name = protect_name(self.name) arg_list = ', '.join(["%s %s" % (protect_name(n), t) From 1395401898f47d62d9bfebe858a1d5d288ca3169 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 1 Oct 2015 14:52:42 -0500 Subject: [PATCH 0083/2005] meta: export_as_string for Trigger --- cassandra/metadata.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 336437c484..9ef94065b7 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1571,6 +1571,9 @@ def as_cql_query(self): ) return ret + def export_as_string(self): + return self.as_cql_query() + ';' + class _SchemaParser(object): From 5a09737f9ba6daa3c785224b1d82fcca49390b57 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 2 Oct 2015 12:30:40 -0500 Subject: [PATCH 0084/2005] popitem for util.OrderedMap --- cassandra/util.py | 8 ++++++++ tests/unit/test_orderedmap.py | 6 ++++++ 2 files changed, 14 insertions(+) diff --git a/cassandra/util.py b/cassandra/util.py index 12a8e8dce4..ee8d813082 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -788,6 +788,14 @@ def __repr__(self): def __str__(self): return '{%s}' % ', '.join("%r: %r" % (k, v) for k, v in self._items) + def popitem(self): + try: + kv = self._items.pop() + del self._index[self._serialize_key(kv[0])] + return kv + except IndexError: + raise KeyError() + def _serialize_key(self, key): return cPickle.dumps(key) diff --git a/tests/unit/test_orderedmap.py b/tests/unit/test_orderedmap.py index bb9a405c54..a7fbef13cb 100644 --- a/tests/unit/test_orderedmap.py +++ b/tests/unit/test_orderedmap.py @@ -146,6 +146,12 @@ def test_strings(self): self.assertEqual(str(OrderedMap([('two', 2), ('one', 1), (d, 'value'), (s, 'another')])), "{'two': 2, 'one': 1, %r: 'value', %r: 'another'}" % (d, s)) + def test_popitem(self): + item = (1, 2) + om = OrderedMap((item,)) + self.assertEqual(om.popitem(), item) + self.assertRaises(KeyError, om.popitem) + class OrderedMapSerializedKeyTest(unittest.TestCase): def test_init(self): From 15269484df11bde5c436f2ca0a4362858f227552 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 2 Oct 2015 13:05:38 -0500 Subject: [PATCH 0085/2005] Handle UDTs with non-pythonic identifiers (WIP - needs testing) Fixes issues forming meta types and deserializing UDTs with names or field names which are not valid Python identifiers. PYTHON-413 --- cassandra/cqltypes.py | 72 +++++++++++++++------ cassandra/protocol.py | 12 ++-- tests/integration/standard/test_metadata.py | 29 +++++++++ 3 files changed, 87 insertions(+), 26 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index fabef397e7..a5042230b2 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -842,14 +842,27 @@ class UserType(TupleType): _module = sys.modules[__name__] @classmethod - def make_udt_class(cls, keyspace, udt_name, names_and_types, mapped_class): + def get_udt_class(cls, keyspace, udt_name): + # this is split from make_udt_class to allow protocol read_type + # to get the type without having to read and form all inputs for make_udt_class if six.PY2 and isinstance(udt_name, unicode): udt_name = udt_name.encode('utf-8') try: return cls._cache[(keyspace, udt_name)] except KeyError: - field_names, types = zip(*names_and_types) - instance = type(udt_name, (cls,), {'subtypes': types, + pass + + @classmethod + def make_udt_class(cls, keyspace, udt_name, field_names, field_types, mapped_class=None): + assert len(field_names) == len(field_types) + + if six.PY2 and isinstance(udt_name, unicode): + udt_name = udt_name.encode('utf-8') + + instance = cls.get_udt_class(keyspace, udt_name) + + if not instance: + instance = type(udt_name, (cls,), {'subtypes': field_types, 'cassname': cls.cassname, 'typename': udt_name, 'fieldnames': field_names, @@ -857,7 +870,7 @@ def make_udt_class(cls, keyspace, udt_name, names_and_types, mapped_class): 'mapped_class': mapped_class, 'tuple_type': cls._make_registered_udt_namedtuple(keyspace, udt_name, field_names)}) cls._cache[(keyspace, udt_name)] = instance - return instance + return instance @classmethod def evict_udt_class(cls, keyspace, udt_name): @@ -873,14 +886,7 @@ def apply_parameters(cls, subtypes, names): keyspace = subtypes[0] udt_name = _name_from_hex_string(subtypes[1].cassname) field_names = [_name_from_hex_string(encoded_name) for encoded_name in names[2:]] - assert len(field_names) == len(subtypes[2:]) - return type(udt_name, (cls,), {'subtypes': subtypes[2:], - 'cassname': cls.cassname, - 'typename': udt_name, - 'fieldnames': field_names, - 'keyspace': keyspace, - 'mapped_class': None, - 'tuple_type': namedtuple(udt_name, field_names)}) + return cls.make_udt_class(keyspace, udt_name, field_names, subtypes[2:]) @classmethod def cql_parameterized_type(cls): @@ -891,17 +897,24 @@ def deserialize_safe(cls, byts, protocol_version): values = super(UserType, cls).deserialize_safe(byts, protocol_version) if cls.mapped_class: return cls.mapped_class(**dict(zip(cls.fieldnames, values))) - else: + elif cls.tuple_type: return cls.tuple_type(*values) + else: + return tuple(values) @classmethod def serialize_safe(cls, val, protocol_version): proto_version = max(3, protocol_version) buf = io.BytesIO() - for fieldname, subtype in zip(cls.fieldnames, cls.subtypes): - item = getattr(val, fieldname) + for i, (fieldname, subtype) in enumerate(zip(cls.fieldnames, cls.subtypes)): + # first treat as a tuple, else by custom type + try: + item = val[i] + except TypeError: + item = getattr(val, fieldname) + if item is not None: - packed_item = subtype.to_binary(getattr(val, fieldname), proto_version) + packed_item = subtype.to_binary(item, proto_version) buf.write(int32_pack(len(packed_item))) buf.write(packed_item) else: @@ -914,11 +927,28 @@ def _make_registered_udt_namedtuple(cls, keyspace, name, field_names): # required when unregistered udts are pickled for use as keys in # util.OrderedMap qualified_name = "%s_%s" % (keyspace, name) - nt = getattr(cls._module, qualified_name, None) - if not nt: - nt = namedtuple(qualified_name, field_names) - setattr(cls._module, qualified_name, nt) - return nt + t = getattr(cls._module, qualified_name, None) + if not t: + t = cls._make_udt_tuple_type(qualified_name, field_names) + setattr(cls._module, qualified_name, t) + return t + + @classmethod + def _make_udt_tuple_type(cls, name, field_names): + # fallback to positional named, then unnamed tuples + # for CQL identifiers that aren't valid in Python, + try: + t = namedtuple(name, field_names) + except ValueError: + try: + t = namedtuple(name, field_names, rename=True) + log.warn("could not create a namedtuple for '%s' because one or more field names are not valid Python identifiers (%s); " \ + "returning positionally-named fields" % (name, field_names)) + except ValueError: + t = None + log.warn("could not create a namedtuple for '%s' because the name is not a valid Python identifier; " \ + "will return tuples in its place" % (name,)) + return t class CompositeType(_ParameterizedType): diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 90fdcd091c..4cd8359fb5 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -680,11 +680,13 @@ def read_type(cls, f, user_type_map): ks = read_string(f) udt_name = read_string(f) num_fields = read_short(f) - names_and_types = tuple((read_string(f), cls.read_type(f, user_type_map)) - for _ in range(num_fields)) - mapped_class = user_type_map.get(ks, {}).get(udt_name) - typeclass = typeclass.make_udt_class( - ks, udt_name, names_and_types, mapped_class) + specialized_type = typeclass.get_udt_class(ks, udt_name) + if not specialized_type: + names, types = zip(*((read_string(f), cls.read_type(f, user_type_map)) + for _ in range(num_fields))) + mapped_class = user_type_map.get(ks, {}).get(udt_name) + specialized_type = typeclass.make_udt_class(ks, udt_name, names, types, mapped_class) + typeclass = specialized_type elif typeclass == CUSTOM_TYPE: classname = read_string(f) typeclass = lookup_casstype(classname) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index f822594d4a..3102970f67 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -675,6 +675,10 @@ def test_refresh_user_aggregate_metadata(self): class TestCodeCoverage(unittest.TestCase): + @property + def table_name(self): + return self._testMethodName.lower() + def test_export_schema(self): """ Test export schema functionality @@ -794,6 +798,31 @@ def test_export_keyspace_schema_udts(self): cluster.shutdown() + def test_udt_non_alphanum(self): + """ + PYTHON-413 + """ + s = Cluster(protocol_version=PROTOCOL_VERSION).connect() + non_alphanum_name = 'test.field@#$%@%#!' + type_name = 'type2' + s.execute('CREATE TYPE IF NOT EXISTS test1rf."%s" ("%s" text)' % (non_alphanum_name, non_alphanum_name)) + s.execute('CREATE TYPE IF NOT EXISTS test1rf.%s ("%s" text)' % (type_name, non_alphanum_name)) + # table with types as map keys to make sure the tuple lookup works + s.execute('CREATE TABLE IF NOT EXISTS test1rf.%s (k int PRIMARY KEY, non_alphanum_type_map map, int>, alphanum_type_map map, int>)' % (self.table_name, non_alphanum_name, type_name)) + s.execute('INSERT INTO test1rf.%s (k, non_alphanum_type_map, alphanum_type_map) VALUES (%s, {{"%s": \'nonalphanum\'}: 0}, {{"%s": \'alphanum\'}: 1})' % (self.table_name, 0, non_alphanum_name, non_alphanum_name)) + row = s.execute('SELECT * FROM test1rf.%s' % (self.table_name,))[0] + print row + k, v = row.non_alphanum_type_map.popitem() + self.assertEqual(v, 0) + self.assertEqual(k.__class__, tuple) + self.assertEqual(k[0], 'nonalphanum') + + k, v = row.alphanum_type_map.popitem() + self.assertEqual(v, 1) + self.assertNotEqual(k.__class__, tuple) # should be the namedtuple type + self.assertEqual(k[0], 'alphanum') + self.assertEqual(k._0, 'alphanum') # named tuple with positional field name + def test_case_sensitivity(self): """ Test that names that need to be escaped in CREATE statements are From ce0ab6196aa770e51e12b4ca65428cb689e68a1c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 5 Oct 2015 10:39:55 -0500 Subject: [PATCH 0086/2005] Must read complete UDT (even if known type) to advance stream. --- cassandra/protocol.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 4cd8359fb5..1e4a1b5d85 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -680,10 +680,11 @@ def read_type(cls, f, user_type_map): ks = read_string(f) udt_name = read_string(f) num_fields = read_short(f) + names_and_types = tuple((read_string(f), cls.read_type(f, user_type_map)) + for _ in range(num_fields)) specialized_type = typeclass.get_udt_class(ks, udt_name) if not specialized_type: - names, types = zip(*((read_string(f), cls.read_type(f, user_type_map)) - for _ in range(num_fields))) + names, types = zip(*names_and_types) mapped_class = user_type_map.get(ks, {}).get(udt_name) specialized_type = typeclass.make_udt_class(ks, udt_name, names, types, mapped_class) typeclass = specialized_type From 72aebca30141b7b362ae2199955efac2f7241373 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 5 Oct 2015 10:40:35 -0500 Subject: [PATCH 0087/2005] namedtuple does not support rename in Python 2.6 also fixing an issue passing the wrong name down to _make_udt_type_type --- cassandra/cqltypes.py | 4 ++-- cassandra/query.py | 4 ++-- cassandra/util.py | 16 ++++++++++++++++ tests/integration/standard/test_metadata.py | 2 +- 4 files changed, 21 insertions(+), 5 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index a5042230b2..75c8f48a1c 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -929,7 +929,7 @@ def _make_registered_udt_namedtuple(cls, keyspace, name, field_names): qualified_name = "%s_%s" % (keyspace, name) t = getattr(cls._module, qualified_name, None) if not t: - t = cls._make_udt_tuple_type(qualified_name, field_names) + t = cls._make_udt_tuple_type(name, field_names) setattr(cls._module, qualified_name, t) return t @@ -941,7 +941,7 @@ def _make_udt_tuple_type(cls, name, field_names): t = namedtuple(name, field_names) except ValueError: try: - t = namedtuple(name, field_names, rename=True) + t = namedtuple(name, util._positional_rename_invalid_identifiers(field_names)) log.warn("could not create a namedtuple for '%s' because one or more field names are not valid Python identifiers (%s); " \ "returning positionally-named fields" % (name, field_names)) except ValueError: diff --git a/cassandra/query.py b/cassandra/query.py index 0ba6c5947d..e1f8000894 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -31,7 +31,7 @@ from cassandra.encoder import Encoder import cassandra.encoder from cassandra.protocol import _UNSET_VALUE -from cassandra.util import OrderedDict +from cassandra.util import OrderedDict, _positional_rename_invalid_identifiers import logging log = logging.getLogger(__name__) @@ -123,7 +123,7 @@ def named_tuple_factory(colnames, rows): "Avoid this by choosing different names, using SELECT \"\" AS aliases, " "or specifying a different row_factory on your Session" % (colnames, clean_column_names)) - Row = namedtuple('Row', clean_column_names, rename=True) + Row = namedtuple('Row', _positional_rename_invalid_identifiers(clean_column_names)) return [Row(*row) for row in rows] diff --git a/cassandra/util.py b/cassandra/util.py index ee8d813082..fb2902dfc3 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -1116,3 +1116,19 @@ def inet_ntop(address_family, packed_ip): raise socket.error(ctypes.FormatError()) return ip_string[:ip_string_size.value - 1] + + +import keyword + + +# similar tot a collections.namedtuple, reproduced here because Python 2.6 did not have the rename logic +def _positional_rename_invalid_identifiers(field_names): + names_out = list(field_names) + for index, name in enumerate(field_names): + if (not all(c.isalnum() or c == '_' for c in name) + or keyword.iskeyword(name) + or not name + or name[0].isdigit() + or name.startswith('_')): + names_out[index] = 'field_%d_' % index + return names_out diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 3102970f67..a5e8745f99 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -821,7 +821,7 @@ def test_udt_non_alphanum(self): self.assertEqual(v, 1) self.assertNotEqual(k.__class__, tuple) # should be the namedtuple type self.assertEqual(k[0], 'alphanum') - self.assertEqual(k._0, 'alphanum') # named tuple with positional field name + self.assertEqual(k.field_0_, 'alphanum') # named tuple with positional field name def test_case_sensitivity(self): """ From 047049c305314f8efff1cf4726644875d7ae56a4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 5 Oct 2015 10:54:40 -0500 Subject: [PATCH 0088/2005] Avoid building tuple type when it can be None --- cassandra/cqltypes.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 75c8f48a1c..66102921ea 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -927,8 +927,9 @@ def _make_registered_udt_namedtuple(cls, keyspace, name, field_names): # required when unregistered udts are pickled for use as keys in # util.OrderedMap qualified_name = "%s_%s" % (keyspace, name) - t = getattr(cls._module, qualified_name, None) - if not t: + try: + t = getattr(cls._module, qualified_name) + except AttributeError: t = cls._make_udt_tuple_type(name, field_names) setattr(cls._module, qualified_name, t) return t From 319a3807bcb33f11f79a2b4a5a621f28c446852e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 5 Oct 2015 10:55:43 -0500 Subject: [PATCH 0089/2005] Extract actual keyspace name when applying parameters of UDT types --- cassandra/cqltypes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 66102921ea..7606451c39 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -883,7 +883,7 @@ def evict_udt_class(cls, keyspace, udt_name): @classmethod def apply_parameters(cls, subtypes, names): - keyspace = subtypes[0] + keyspace = subtypes[0].cass_parameterized_type() # when parsed from cassandra type, the keyspace is created as an unrecognized cass type; This gets the name back udt_name = _name_from_hex_string(subtypes[1].cassname) field_names = [_name_from_hex_string(encoded_name) for encoded_name in names[2:]] return cls.make_udt_class(keyspace, udt_name, field_names, subtypes[2:]) From 68a5ed7ca3ab6255f0cc9bdf988231482dc60deb Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 5 Oct 2015 12:48:23 -0500 Subject: [PATCH 0090/2005] test: make format strings Python 2.6 compatible --- tests/integration/standard/test_types.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index e9630a2926..991d6b7845 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -526,11 +526,11 @@ def test_can_insert_tuples_all_collection_datatypes(self): # create list values for datatype in PRIMITIVE_DATATYPES: - values.append('v_{} frozen>>'.format(len(values), datatype)) + values.append('v_{0} frozen>>'.format(len(values), datatype)) # create set values for datatype in PRIMITIVE_DATATYPES: - values.append('v_{} frozen>>'.format(len(values), datatype)) + values.append('v_{0} frozen>>'.format(len(values), datatype)) # create map values for datatype in PRIMITIVE_DATATYPES: @@ -538,7 +538,7 @@ def test_can_insert_tuples_all_collection_datatypes(self): if datatype == 'blob': # unhashable type: 'bytearray' datatype_1 = 'ascii' - values.append('v_{} frozen>>'.format(len(values), datatype_1, datatype_2)) + values.append('v_{0} frozen>>'.format(len(values), datatype_1, datatype_2)) # make sure we're testing all non primitive data types in the future if set(COLLECTION_TYPES) != set(['tuple', 'list', 'map', 'set']): From 438c72a26792bc16bb7095be49f8a42674b0225d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 5 Oct 2015 12:49:15 -0500 Subject: [PATCH 0091/2005] test: clean up keyspaces in test_udts to allow test rerun --- tests/integration/standard/test_udts.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index 72aa658165..573691d765 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -101,6 +101,8 @@ def test_can_insert_unprepared_registered_udts(self): self.assertEqual(True, row.b.is_cool) self.assertTrue(type(row.b) is User) + s.execute("DROP KEYSPACE udt_test_unprepared_registered2") + c.shutdown() def test_can_register_udt_before_connecting(self): @@ -158,6 +160,9 @@ def test_can_register_udt_before_connecting(self): self.assertEqual(True, row.b.is_cool) self.assertTrue(type(row.b) is User2) + s.execute("DROP KEYSPACE udt_test_register_before_connecting") + s.execute("DROP KEYSPACE udt_test_register_before_connecting2") + c.shutdown() def test_can_insert_prepared_unregistered_udts(self): @@ -202,6 +207,8 @@ def test_can_insert_prepared_unregistered_udts(self): self.assertEqual('Texas', row.b.state) self.assertEqual(True, row.b.is_cool) + s.execute("DROP KEYSPACE udt_test_prepared_unregistered2") + c.shutdown() def test_can_insert_prepared_registered_udts(self): @@ -252,6 +259,8 @@ def test_can_insert_prepared_registered_udts(self): self.assertEqual(True, row.b.is_cool) self.assertTrue(type(row.b) is User) + s.execute("DROP KEYSPACE udt_test_prepared_registered2") + c.shutdown() def test_can_insert_udts_with_nulls(self): From a96b4b8fbe02d8264dd243f7212e28cfb8813436 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 5 Oct 2015 13:02:45 -0500 Subject: [PATCH 0092/2005] user type map set for each request Makes type mapping work even if the type is cached already. --- cassandra/cqltypes.py | 4 ++-- cassandra/protocol.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 7606451c39..2dafe53fe9 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -853,7 +853,7 @@ def get_udt_class(cls, keyspace, udt_name): pass @classmethod - def make_udt_class(cls, keyspace, udt_name, field_names, field_types, mapped_class=None): + def make_udt_class(cls, keyspace, udt_name, field_names, field_types): assert len(field_names) == len(field_types) if six.PY2 and isinstance(udt_name, unicode): @@ -867,7 +867,7 @@ def make_udt_class(cls, keyspace, udt_name, field_names, field_types, mapped_cla 'typename': udt_name, 'fieldnames': field_names, 'keyspace': keyspace, - 'mapped_class': mapped_class, + 'mapped_class': None, 'tuple_type': cls._make_registered_udt_namedtuple(keyspace, udt_name, field_names)}) cls._cache[(keyspace, udt_name)] = instance return instance diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 1e4a1b5d85..3bd54fb171 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -685,8 +685,8 @@ def read_type(cls, f, user_type_map): specialized_type = typeclass.get_udt_class(ks, udt_name) if not specialized_type: names, types = zip(*names_and_types) - mapped_class = user_type_map.get(ks, {}).get(udt_name) - specialized_type = typeclass.make_udt_class(ks, udt_name, names, types, mapped_class) + specialized_type = typeclass.make_udt_class(ks, udt_name, names, types) + specialized_type.mapped_class = user_type_map.get(ks, {}).get(udt_name) typeclass = specialized_type elif typeclass == CUSTOM_TYPE: classname = read_string(f) From 5fc175473489d1499b75005ea4e74f99f861605d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 5 Oct 2015 13:59:25 -0500 Subject: [PATCH 0093/2005] make udt class cache consider field names/types --- cassandra/cqltypes.py | 28 +++++++--------------------- cassandra/protocol.py | 9 +++------ 2 files changed, 10 insertions(+), 27 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 2dafe53fe9..aebbd17c93 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -841,17 +841,6 @@ class UserType(TupleType): _cache = {} _module = sys.modules[__name__] - @classmethod - def get_udt_class(cls, keyspace, udt_name): - # this is split from make_udt_class to allow protocol read_type - # to get the type without having to read and form all inputs for make_udt_class - if six.PY2 and isinstance(udt_name, unicode): - udt_name = udt_name.encode('utf-8') - try: - return cls._cache[(keyspace, udt_name)] - except KeyError: - pass - @classmethod def make_udt_class(cls, keyspace, udt_name, field_names, field_types): assert len(field_names) == len(field_types) @@ -859,9 +848,8 @@ def make_udt_class(cls, keyspace, udt_name, field_names, field_types): if six.PY2 and isinstance(udt_name, unicode): udt_name = udt_name.encode('utf-8') - instance = cls.get_udt_class(keyspace, udt_name) - - if not instance: + instance = cls._cache.get((keyspace, udt_name)) + if not instance or instance.fieldnames != field_names or instance.subtypes != field_types: instance = type(udt_name, (cls,), {'subtypes': field_types, 'cassname': cls.cassname, 'typename': udt_name, @@ -885,8 +873,8 @@ def evict_udt_class(cls, keyspace, udt_name): def apply_parameters(cls, subtypes, names): keyspace = subtypes[0].cass_parameterized_type() # when parsed from cassandra type, the keyspace is created as an unrecognized cass type; This gets the name back udt_name = _name_from_hex_string(subtypes[1].cassname) - field_names = [_name_from_hex_string(encoded_name) for encoded_name in names[2:]] - return cls.make_udt_class(keyspace, udt_name, field_names, subtypes[2:]) + field_names = tuple(_name_from_hex_string(encoded_name) for encoded_name in names[2:]) # using tuple here to match what comes into make_udt_class from other sources (for caching equality test) + return cls.make_udt_class(keyspace, udt_name, field_names, tuple(subtypes[2:])) @classmethod def cql_parameterized_type(cls): @@ -926,11 +914,9 @@ def _make_registered_udt_namedtuple(cls, keyspace, name, field_names): # this is required to make the type resolvable via this module... # required when unregistered udts are pickled for use as keys in # util.OrderedMap - qualified_name = "%s_%s" % (keyspace, name) - try: - t = getattr(cls._module, qualified_name) - except AttributeError: - t = cls._make_udt_tuple_type(name, field_names) + t = cls._make_udt_tuple_type(name, field_names) + if t: + qualified_name = "%s_%s" % (keyspace, name) setattr(cls._module, qualified_name, t) return t diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 3bd54fb171..bc87f9e4de 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -680,12 +680,9 @@ def read_type(cls, f, user_type_map): ks = read_string(f) udt_name = read_string(f) num_fields = read_short(f) - names_and_types = tuple((read_string(f), cls.read_type(f, user_type_map)) - for _ in range(num_fields)) - specialized_type = typeclass.get_udt_class(ks, udt_name) - if not specialized_type: - names, types = zip(*names_and_types) - specialized_type = typeclass.make_udt_class(ks, udt_name, names, types) + names, types = zip(*((read_string(f), cls.read_type(f, user_type_map)) + for _ in range(num_fields))) + specialized_type = typeclass.make_udt_class(ks, udt_name, names, types) specialized_type.mapped_class = user_type_map.get(ks, {}).get(udt_name) typeclass = specialized_type elif typeclass == CUSTOM_TYPE: From 048964be20d8fd8ade0d6fa0295d32ae5a062a33 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 5 Oct 2015 14:56:38 -0500 Subject: [PATCH 0094/2005] Move non-alphanum UDT tests to test_udts --- tests/integration/standard/test_metadata.py | 29 ------------------ tests/integration/standard/test_udts.py | 33 +++++++++++++++++++-- 2 files changed, 30 insertions(+), 32 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index a5e8745f99..f822594d4a 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -675,10 +675,6 @@ def test_refresh_user_aggregate_metadata(self): class TestCodeCoverage(unittest.TestCase): - @property - def table_name(self): - return self._testMethodName.lower() - def test_export_schema(self): """ Test export schema functionality @@ -798,31 +794,6 @@ def test_export_keyspace_schema_udts(self): cluster.shutdown() - def test_udt_non_alphanum(self): - """ - PYTHON-413 - """ - s = Cluster(protocol_version=PROTOCOL_VERSION).connect() - non_alphanum_name = 'test.field@#$%@%#!' - type_name = 'type2' - s.execute('CREATE TYPE IF NOT EXISTS test1rf."%s" ("%s" text)' % (non_alphanum_name, non_alphanum_name)) - s.execute('CREATE TYPE IF NOT EXISTS test1rf.%s ("%s" text)' % (type_name, non_alphanum_name)) - # table with types as map keys to make sure the tuple lookup works - s.execute('CREATE TABLE IF NOT EXISTS test1rf.%s (k int PRIMARY KEY, non_alphanum_type_map map, int>, alphanum_type_map map, int>)' % (self.table_name, non_alphanum_name, type_name)) - s.execute('INSERT INTO test1rf.%s (k, non_alphanum_type_map, alphanum_type_map) VALUES (%s, {{"%s": \'nonalphanum\'}: 0}, {{"%s": \'alphanum\'}: 1})' % (self.table_name, 0, non_alphanum_name, non_alphanum_name)) - row = s.execute('SELECT * FROM test1rf.%s' % (self.table_name,))[0] - print row - k, v = row.non_alphanum_type_map.popitem() - self.assertEqual(v, 0) - self.assertEqual(k.__class__, tuple) - self.assertEqual(k[0], 'nonalphanum') - - k, v = row.alphanum_type_map.popitem() - self.assertEqual(v, 1) - self.assertNotEqual(k.__class__, tuple) # should be the namedtuple type - self.assertEqual(k[0], 'alphanum') - self.assertEqual(k.field_0_, 'alphanum') # named tuple with positional field name - def test_case_sensitivity(self): """ Test that names that need to be escaped in CREATE statements are diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index 573691d765..dc4dcf92d0 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -41,6 +41,10 @@ def setup_module(): class UDTTests(unittest.TestCase): + @property + def table_name(self): + return self._testMethodName.lower() + def setUp(self): self._cass_version, self._cql_version = get_server_versions() @@ -51,11 +55,9 @@ def setUp(self): self.session = self.cluster.connect() execute_until_pass(self.session, "CREATE KEYSPACE udttests WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}") - self.cluster.shutdown() + self.session.set_keyspace("udttests") def tearDown(self): - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) - self.session = self.cluster.connect() execute_until_pass(self.session, "DROP KEYSPACE udttests") self.cluster.shutdown() @@ -661,3 +663,28 @@ def test_can_insert_nested_collections(self): validate('map_udt', OrderedMap([(key, value), (key2, value)])) c.shutdown() + + def test_non_alphanum_identifiers(self): + """ + PYTHON-413 + """ + s = self.session + non_alphanum_name = 'test.field@#$%@%#!' + type_name = 'type2' + s.execute('CREATE TYPE "%s" ("%s" text)' % (non_alphanum_name, non_alphanum_name)) + s.execute('CREATE TYPE %s ("%s" text)' % (type_name, non_alphanum_name)) + # table with types as map keys to make sure the tuple lookup works + s.execute('CREATE TABLE %s (k int PRIMARY KEY, non_alphanum_type_map map, int>, alphanum_type_map map, int>)' % (self.table_name, non_alphanum_name, type_name)) + s.execute('INSERT INTO %s (k, non_alphanum_type_map, alphanum_type_map) VALUES (%s, {{"%s": \'nonalphanum\'}: 0}, {{"%s": \'alphanum\'}: 1})' % (self.table_name, 0, non_alphanum_name, non_alphanum_name)) + row = s.execute('SELECT * FROM %s' % (self.table_name,))[0] + + k, v = row.non_alphanum_type_map.popitem() + self.assertEqual(v, 0) + self.assertEqual(k.__class__, tuple) + self.assertEqual(k[0], 'nonalphanum') + + k, v = row.alphanum_type_map.popitem() + self.assertEqual(v, 1) + self.assertNotEqual(k.__class__, tuple) # should be the namedtuple type + self.assertEqual(k[0], 'alphanum') + self.assertEqual(k.field_0_, 'alphanum') # named tuple with positional field name From 4805dfcde96174e1d3094e639f7f024de32d8c3b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 5 Oct 2015 14:57:01 -0500 Subject: [PATCH 0095/2005] Add UDT type alteration test --- tests/integration/standard/test_udts.py | 32 +++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index dc4dcf92d0..d1a8ec1805 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -688,3 +688,35 @@ def test_non_alphanum_identifiers(self): self.assertNotEqual(k.__class__, tuple) # should be the namedtuple type self.assertEqual(k[0], 'alphanum') self.assertEqual(k.field_0_, 'alphanum') # named tuple with positional field name + + def test_type_alteration(self): + s = self.session + type_name = "type_name" + self.assertNotIn(type_name, s.cluster.metadata.keyspaces['udttests'].user_types) + s.execute('CREATE TYPE %s (v0 int)' % (type_name,)) + self.assertIn(type_name, s.cluster.metadata.keyspaces['udttests'].user_types) + + s.execute('CREATE TABLE %s (k int PRIMARY KEY, v frozen<%s>)' % (self.table_name, type_name)) + s.execute('INSERT INTO %s (k, v) VALUES (0, 1)' % (self.table_name,)) + + s.cluster.register_user_type('udttests', type_name, dict) + + val = s.execute('SELECT v FROM %s' % self.table_name)[0][0] + self.assertEqual(val['v0'], 1) + + # add field + s.execute('ALTER TYPE %s ADD v1 text' % (type_name,)) + val = s.execute('SELECT v FROM %s' % self.table_name)[0][0] + self.assertEqual(val['v0'], 1) + self.assertIsNone(val['v1']) + s.execute("INSERT INTO %s (k, v) VALUES (0, (2, 'sometext'))" % (self.table_name,)) + val = s.execute('SELECT v FROM %s' % self.table_name)[0][0] + self.assertEqual(val['v0'], 2) + self.assertEqual(val['v1'], 'sometext') + + # alter field type + s.execute('ALTER TYPE %s ALTER v1 TYPE blob' % (type_name,)) + s.execute("INSERT INTO %s (k, v) VALUES (0, (3, 0xdeadbeef))" % (self.table_name,)) + val = s.execute('SELECT v FROM %s' % self.table_name)[0][0] + self.assertEqual(val['v0'], 3) + self.assertEqual(val['v1'], six.b('\xde\xad\xbe\xef')) From 031ebb0ba9785b4e70c6ba6fda041fdcf5e86d38 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 6 Oct 2015 09:55:02 -0500 Subject: [PATCH 0096/2005] Remove deprecated ResponseFuture timeout parameter --- cassandra/cluster.py | 33 +++---------------- .../standard/test_custom_payload.py | 2 +- tests/integration/standard/test_query.py | 2 +- 3 files changed, 7 insertions(+), 30 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 0f3a554faa..f6bc2d21d3 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3147,26 +3147,14 @@ def _retry_task(self, reuse_connection): # otherwise, move onto another host self.send_request() - def result(self, timeout=_NOT_SET): + def result(self): """ Return the final result or raise an Exception if errors were encountered. If the final result or error has not been set - yet, this method will block until that time. - - .. versionchanged:: 2.6.0 - - **`timeout` is deprecated. Use timeout in the Session execute functions instead. - The following description applies to deprecated behavior:** - - You may set a timeout (in seconds) with the `timeout` parameter. - By default, the :attr:`~.default_timeout` for the :class:`.Session` - this was created through will be used for the timeout on this - operation. - - This timeout applies to the entire request, including any retries - (decided internally by the :class:`.policies.RetryPolicy` used with - the request). + yet, this method will block until it is set, or the timeout + set for the request expires. + Timeout is specified in the Session request execution functions. If the timeout is exceeded, an :exc:`cassandra.OperationTimedOut` will be raised. This is a client-side timeout. For more information about server-side coordinator timeouts, see :class:`.policies.RetryPolicy`. @@ -3184,18 +3172,7 @@ def result(self, timeout=_NOT_SET): ... log.exception("Operation failed:") """ - if timeout is not _NOT_SET and not ResponseFuture._warned_timeout: - msg = "ResponseFuture.result timeout argument is deprecated. Specify the request timeout via Session.execute[_async]." - warnings.warn(msg, DeprecationWarning) - log.warning(msg) - ResponseFuture._warned_timeout = True - else: - timeout = None - - self._event.wait(timeout) - # TODO: remove this conditional when deprecated timeout parameter is removed - if not self._event.is_set(): - self._on_timeout() + self._event.wait() if self._final_result is not _NOT_SET: if self._paging_state is None: return self._final_result diff --git a/tests/integration/standard/test_custom_payload.py b/tests/integration/standard/test_custom_payload.py index 04b0b1f739..e6a80d036e 100644 --- a/tests/integration/standard/test_custom_payload.py +++ b/tests/integration/standard/test_custom_payload.py @@ -162,6 +162,6 @@ def execute_async_validate_custom_payload(self, statement, custom_payload): # Submit the statement with our custom payload. Validate the one # we receive from the server matches response_future = self.session.execute_async(statement, custom_payload=custom_payload) - response_future.result(timeout=10.0) + response_future.result() returned_custom_payload = response_future.custom_payload self.assertEqual(custom_payload, returned_custom_payload) \ No newline at end of file diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 92ffbf68f3..c376bec9e3 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -137,7 +137,7 @@ def test_client_ip_in_trace(self): query = "SELECT * FROM system.local" statement = SimpleStatement(query) response_future = session.execute_async(statement, trace=True) - response_future.result(timeout=10.0) + response_future.result() # Fetch the client_ip from the trace. trace = response_future.get_query_trace(max_wait=2.0) From 419fcdf9beb0090136593935ab683f5a0f8d5f8e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 6 Oct 2015 10:10:37 -0500 Subject: [PATCH 0097/2005] Remove deprecated Cluster.refresh_schema --- cassandra/cluster.py | 40 ---------------------------------- docs/api/cassandra/cluster.rst | 2 -- 2 files changed, 42 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index f6bc2d21d3..86e6521f9d 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1225,46 +1225,6 @@ def _target_type_from_refresh_args(keyspace, table, usertype, function, aggregat return SchemaTargetType.KEYSPACE return None - def refresh_schema(self, keyspace=None, table=None, usertype=None, function=None, aggregate=None, max_schema_agreement_wait=None): - """ - .. deprecated:: 2.6.0 - Use refresh_*_metadata instead - - Synchronously refresh schema metadata. - - {keyspace, table, usertype} are string names of the respective entities. - ``function`` is a :class:`cassandra.UserFunctionDescriptor`. - ``aggregate`` is a :class:`cassandra.UserAggregateDescriptor`. - - If none of ``{keyspace, table, usertype, function, aggregate}`` are specified, the entire schema is refreshed. - - If any of ``{keyspace, table, usertype, function, aggregate}`` are specified, ``keyspace`` is required. - - If only ``keyspace`` is specified, just the top-level keyspace metadata is refreshed (e.g. replication). - - The remaining arguments ``{table, usertype, function, aggregate}`` - are mutually exclusive -- only one may be specified. - - By default, the timeout for this operation is governed by :attr:`~.Cluster.max_schema_agreement_wait` - and :attr:`~.Cluster.control_connection_timeout`. - - Passing max_schema_agreement_wait here overrides :attr:`~.Cluster.max_schema_agreement_wait`. - - Setting max_schema_agreement_wait <= 0 will bypass schema agreement and refresh schema immediately. - - An Exception is raised if schema refresh fails for any reason. - """ - msg = "refresh_schema is deprecated. Use Cluster.refresh_*_metadata instead." - warnings.warn(msg, DeprecationWarning) - log.warning(msg) - - self._validate_refresh_schema(keyspace, table, usertype, function, aggregate) - target_type = self._target_type_from_refresh_args(keyspace, table, usertype, function, aggregate) - if not self.control_connection.refresh_schema(target_type=target_type, keyspace=keyspace, table=table, - type=usertype, function=function, aggregate=aggregate, - schema_agreement_wait=max_schema_agreement_wait): - raise Exception("Schema was not refreshed. See log for details.") - def submit_schema_refresh(self, keyspace=None, table=None, usertype=None, function=None, aggregate=None): """ .. deprecated:: 2.6.0 diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 1ccd9282b7..792a123b0b 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -87,8 +87,6 @@ .. automethod:: refresh_user_aggregate_metadata - .. automethod:: refresh_schema - .. automethod:: refresh_nodes .. automethod:: set_meta_refresh_enabled From 574266d1bf13e56ab4e527fd2cfd209a1a906ffe Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 6 Oct 2015 10:14:12 -0500 Subject: [PATCH 0098/2005] Remove deprecated Cluster.submit_schema_refresh --- cassandra/cluster.py | 18 ------------- tests/integration/standard/test_cluster.py | 30 ---------------------- 2 files changed, 48 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 86e6521f9d..98b56c11b4 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1225,24 +1225,6 @@ def _target_type_from_refresh_args(keyspace, table, usertype, function, aggregat return SchemaTargetType.KEYSPACE return None - def submit_schema_refresh(self, keyspace=None, table=None, usertype=None, function=None, aggregate=None): - """ - .. deprecated:: 2.6.0 - Use refresh_*_metadata instead - - Schedule a refresh of the internal representation of the current - schema for this cluster. See :meth:`~.refresh_schema` for description of parameters. - """ - msg = "submit_schema_refresh is deprecated. Use Cluster.refresh_*_metadata instead." - warnings.warn(msg, DeprecationWarning) - log.warning(msg) - - self._validate_refresh_schema(keyspace, table, usertype, function, aggregate) - target_type = self._target_type_from_refresh_args(keyspace, table, usertype, function, aggregate) - return self.executor.submit( - self.control_connection.refresh_schema, target_type=target_type, keyspace=keyspace, table=table, - type=usertype, function=function, aggregate=aggregate) - def refresh_schema_metadata(self, max_schema_agreement_wait=None): """ Synchronously refresh all schema metadata. diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 5fe978868c..9b56f0045e 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -254,32 +254,6 @@ def test_cluster_settings(self): cluster.set_max_connections_per_host(HostDistance.LOCAL, max_connections_per_host + 1) self.assertEqual(cluster.get_max_connections_per_host(HostDistance.LOCAL), max_connections_per_host + 1) - def test_submit_schema_refresh(self): - """ - Ensure new new schema is refreshed after submit_schema_refresh() - """ - - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - cluster.connect() - self.assertNotIn("newkeyspace", cluster.metadata.keyspaces) - - other_cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = other_cluster.connect() - execute_until_pass(session, - """ - CREATE KEYSPACE newkeyspace - WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} - """) - - future = cluster.submit_schema_refresh() - future.result() - - self.assertIn("newkeyspace", cluster.metadata.keyspaces) - - execute_until_pass(session, "DROP KEYSPACE newkeyspace") - cluster.shutdown() - other_cluster.shutdown() - def test_refresh_schema(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect() @@ -605,10 +579,6 @@ def test_pool_management(self): cluster.refresh_schema_metadata() cluster.refresh_schema_metadata(max_schema_agreement_wait=0) - # submit schema refresh - future = cluster.submit_schema_refresh() - future.result() - assert_quiescent_pool_state(self, cluster) cluster.shutdown() From bb984eedbaa80275041ea9d77b4208878b6444d0 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 6 Oct 2015 10:27:02 -0500 Subject: [PATCH 0099/2005] Remove deprecated cqltypes time/date functions Moved to util --- cassandra/cqltypes.py | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index fabef397e7..f2d0433ec2 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -73,20 +73,6 @@ def trim_if_startswith(s, prefix): return s -def unix_time_from_uuid1(u): - msg = "'cassandra.cqltypes.unix_time_from_uuid1' has moved to 'cassandra.util'. This entry point will be removed in the next major version." - warnings.warn(msg, DeprecationWarning) - log.warning(msg) - return util.unix_time_from_uuid1(u) - - -def datetime_from_timestamp(timestamp): - msg = "'cassandra.cqltypes.datetime_from_timestamp' has moved to 'cassandra.util'. This entry point will be removed in the next major version." - warnings.warn(msg, DeprecationWarning) - log.warning(msg) - return util.datetime_from_timestamp(timestamp) - - _casstypes = {} @@ -538,7 +524,6 @@ class DateType(_CassandraType): @staticmethod def interpret_datestring(val): - # not used internally. deprecate? if val[-5] in ('+', '-'): offset = (int(val[-4:-2]) * 3600 + int(val[-2:]) * 60) * int(val[-5] + '1') val = val[:-5] @@ -582,7 +567,7 @@ class TimeUUIDType(DateType): typename = 'timeuuid' def my_timestamp(self): - return unix_time_from_uuid1(self.val) + return util.unix_time_from_uuid1(self.val) @staticmethod def deserialize(byts, protocol_version): From e16a07351092de3ee4404899c0a41bb260d6b6c9 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 6 Oct 2015 10:28:43 -0500 Subject: [PATCH 0100/2005] remove deprecated decoder module --- cassandra/decoder.py | 58 -------------------------------------------- 1 file changed, 58 deletions(-) delete mode 100644 cassandra/decoder.py diff --git a/cassandra/decoder.py b/cassandra/decoder.py deleted file mode 100644 index 5c44d5b7ec..0000000000 --- a/cassandra/decoder.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2013-2015 DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from functools import wraps -import warnings - -import cassandra.query - -import logging -log = logging.getLogger(__name__) - -_have_warned = False - - -def warn_once(f): - - @wraps(f) - def new_f(*args, **kwargs): - global _have_warned - if not _have_warned: - msg = "cassandra.decoder.%s has moved to cassandra.query.%s" % (f.__name__, f.__name__) - warnings.warn(msg, DeprecationWarning) - log.warning(msg) - _have_warned = True - return f(*args, **kwargs) - - return new_f - -tuple_factory = warn_once(cassandra.query.tuple_factory) -""" -Deprecated: use :meth:`cassandra.query.tuple_factory()` -""" - -named_tuple_factory = warn_once(cassandra.query.named_tuple_factory) -""" -Deprecated: use :meth:`cassandra.query.named_tuple_factory()` -""" - -dict_factory = warn_once(cassandra.query.dict_factory) -""" -Deprecated: use :meth:`cassandra.query.dict_factory()` -""" - -ordered_dict_factory = warn_once(cassandra.query.ordered_dict_factory) -""" -Deprecated: use :meth:`cassandra.query.ordered_dict_factory()` -""" From cc9407366abd5afd4015ee38a3f2dc27956c74be Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 6 Oct 2015 11:47:16 -0500 Subject: [PATCH 0101/2005] Remove deprecated TableMetadata.keyspace attribute --- cassandra/metadata.py | 16 +++------------- tests/integration/standard/test_metadata.py | 2 +- 2 files changed, 4 insertions(+), 14 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 8700c65fa3..27b3aaaad7 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -185,7 +185,6 @@ def _update_table(self, meta): # return one or the other based on the query results. # Here we deal with that. if isinstance(meta, TableMetadata): - meta.keyspace = keyspace_meta # temporary while TableMetadata.keyspace is deprecated keyspace_meta._add_table_metadata(meta) else: keyspace_meta._add_view_metadata(meta) @@ -952,14 +951,6 @@ class TableMetadata(object): A representation of the schema for a single table. """ - keyspace = None - """ - An instance of :class:`~.KeyspaceMetadata`. - - .. deprecated:: 2.7.0 - - """ - keyspace_name = None """ String name of this Table's keyspace """ @@ -1348,13 +1339,13 @@ def as_cql_query(self): return "CREATE INDEX %s ON %s.%s (%s)" % ( self.name, # Cassandra doesn't like quoted index names for some reason - protect_name(table.keyspace.name), + protect_name(table.keyspace_name), protect_name(table.name), index_target) else: return "CREATE CUSTOM INDEX %s ON %s.%s (%s) USING '%s'" % ( self.name, # Cassandra doesn't like quoted index names for some reason - protect_name(table.keyspace.name), + protect_name(table.keyspace_name), protect_name(table.name), protect_name(self.column.name), self.index_options["class_name"]) @@ -1556,7 +1547,7 @@ def __init__(self, table_metadata, trigger_name, options=None): def as_cql_query(self): ret = "CREATE TRIGGER %s ON %s.%s USING %s" % ( protect_name(self.name), - protect_name(self.table.keyspace.name), + protect_name(self.table.keyspace_name), protect_name(self.table.name), protect_value(self.options['class']) ) @@ -1644,7 +1635,6 @@ def get_all_keyspaces(self): try: for table_row in self.keyspace_table_rows.get(keyspace_meta.name, []): table_meta = self._build_table_metadata(table_row) - table_meta.keyspace = keyspace_meta # temporary while TableMetadata.keyspace is deprecated keyspace_meta._add_table_metadata(table_meta) for usertype_row in self.keyspace_type_rows.get(keyspace_meta.name, []): diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index f822594d4a..ac1f9b9a10 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -126,7 +126,7 @@ def test_basic_table_meta_properties(self): self.assertTrue(self.cfname in ksmeta.tables) tablemeta = ksmeta.tables[self.cfname] - self.assertEqual(tablemeta.keyspace, ksmeta) # tablemeta.keyspace is deprecated + self.assertEqual(tablemeta.keyspace_name, ksmeta.name) self.assertEqual(tablemeta.name, self.cfname) self.assertEqual([u'a'], [c.name for c in tablemeta.partition_key]) From 96489ccc835d153c18b745792627f89ba60217be Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 6 Oct 2015 12:19:15 -0500 Subject: [PATCH 0102/2005] Remove deprecated cqlengine.columns.TimeUUID.from_datetime --- cassandra/cqlengine/columns.py | 20 +------------------- docs/api/cassandra/cqlengine/columns.rst | 2 -- 2 files changed, 1 insertion(+), 21 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 11481bcd81..b13069cd73 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -540,6 +540,7 @@ def to_python(self, value): def to_database(self, value): return self.validate(value) + from uuid import UUID as pyUUID, getnode @@ -550,25 +551,6 @@ class TimeUUID(UUID): db_type = 'timeuuid' - @classmethod - def from_datetime(self, dt): - """ - generates a UUID for a given datetime - - :param dt: datetime - :type dt: datetime - :return: uuid1 - - .. deprecated:: 2.6.0 - - Use :func:`cassandra.util.uuid_from_time` - - """ - msg = "cqlengine.columns.TimeUUID.from_datetime is deprecated. Use cassandra.util.uuid_from_time instead." - warnings.warn(msg, DeprecationWarning) - log.warning(msg) - return util.uuid_from_time(dt) - class Boolean(Column): """ diff --git a/docs/api/cassandra/cqlengine/columns.rst b/docs/api/cassandra/cqlengine/columns.rst index 918ff65fa6..7c4695d323 100644 --- a/docs/api/cassandra/cqlengine/columns.rst +++ b/docs/api/cassandra/cqlengine/columns.rst @@ -78,8 +78,6 @@ Columns of all types are initialized by passing :class:`.Column` attributes to t .. autoclass:: TimeUUID(**kwargs) - .. automethod:: from_datetime - .. autoclass:: TinyInt(**kwargs) .. autoclass:: UserDefinedType From a2d3a9862ff80d225b93605e38097d473e22df41 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 6 Oct 2015 13:01:08 -0500 Subject: [PATCH 0103/2005] Remove deprecated cqlengine.columns.Float(double_precision) parameter --- cassandra/cqlengine/columns.py | 11 -------- .../cqlengine/columns/test_value_io.py | 8 +----- .../cqlengine/model/test_model_io.py | 26 ++++++------------- .../integration/cqlengine/model/test_udts.py | 4 +-- 4 files changed, 11 insertions(+), 38 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index b13069cd73..56c178c939 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -594,17 +594,6 @@ class Float(BaseFloat): """ db_type = 'float' - def __init__(self, double_precision=None, **kwargs): - if double_precision is None or bool(double_precision): - msg = "Float(double_precision=True) is deprecated. Use Double() type instead." - double_precision = True - warnings.warn(msg, DeprecationWarning) - log.warning(msg) - - self.db_type = 'double' if double_precision else 'float' - - super(Float, self).__init__(**kwargs) - class Double(BaseFloat): """ diff --git a/tests/integration/cqlengine/columns/test_value_io.py b/tests/integration/cqlengine/columns/test_value_io.py index 3a13b5a4e0..cd9f63f2af 100644 --- a/tests/integration/cqlengine/columns/test_value_io.py +++ b/tests/integration/cqlengine/columns/test_value_io.py @@ -175,15 +175,9 @@ def comparator_converter(self, val): return val if isinstance(val, UUID) else UUID(val) -# until Floats are implicitly single: -class FloatSingle(columns.Float): - def __init__(self, **kwargs): - super(FloatSingle, self).__init__(double_precision=False, **kwargs) - - class TestFloatIO(BaseColumnIOTest): - column = FloatSingle + column = columns.Float pkey_val = 4.75 data_val = -1.5 diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index 3ba2aa9711..55bff3aead 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -156,7 +156,7 @@ class AllDatatypesModel(Model): e = columns.DateTime() f = columns.Decimal() g = columns.Double() - h = columns.Float(double_precision=False) + h = columns.Float() i = columns.Inet() j = columns.Integer() k = columns.Text() @@ -225,13 +225,8 @@ def test_can_insert_double_and_float(self): """ Test for inserting single-precision and double-precision values into a Float and Double columns - test_can_insert_double_and_float tests a Float can only hold a single-precision value, unless - "double_precision" attribute is specified as True or is unspecified. This test first tests that an AttributeError - is raised when attempting to input a double-precision value into a single-precision Float. It then verifies that - Double, Float(double_precision=True) and Float() can hold double-precision values by default. It also verifies that - columns.Float(double_precision=False) can hold a single-precision value, and a Double can hold a single-precision value. - @since 2.6.0 + @changed 3.0.0 removed deprecated Float(double_precision) parameter @jira_ticket PYTHON-246 @expected_result Each floating point column type is able to hold their respective precision values. @@ -240,24 +235,19 @@ def test_can_insert_double_and_float(self): class FloatingPointModel(Model): id = columns.Integer(primary_key=True) - a = columns.Float(double_precision=False) - b = columns.Float(double_precision=True) - c = columns.Float() + f = columns.Float() d = columns.Double() sync_table(FloatingPointModel) - FloatingPointModel.create(id=0, a=2.39) + FloatingPointModel.create(id=0, f=2.39) output = FloatingPointModel.objects().first() - self.assertEqual(2.390000104904175, output.a) + self.assertEqual(2.390000104904175, output.f) # float loses precision - FloatingPointModel.create(id=0, a=3.4028234663852886e+38, b=2.39, c=2.39, d=2.39) + FloatingPointModel.create(id=0, f=3.4028234663852886e+38, d=2.39) output = FloatingPointModel.objects().first() - - self.assertEqual(3.4028234663852886e+38, output.a) - self.assertEqual(2.39, output.b) - self.assertEqual(2.39, output.c) - self.assertEqual(2.39, output.d) + self.assertEqual(3.4028234663852886e+38, output.f) + self.assertEqual(2.39, output.d) # double retains precision FloatingPointModel.create(id=0, d=3.4028234663852886e+38) output = FloatingPointModel.objects().first() diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index bf2f37020c..4c5525f12b 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -214,7 +214,7 @@ class AllDatatypes(UserType): e = columns.DateTime() f = columns.Decimal() g = columns.Double() - h = columns.Float(double_precision=False) + h = columns.Float() i = columns.Inet() j = columns.Integer() k = columns.Text() @@ -261,7 +261,7 @@ class AllDatatypes(UserType): e = columns.DateTime() f = columns.Decimal() g = columns.Double() - h = columns.Float(double_precision=False) + h = columns.Float() i = columns.Inet() j = columns.Integer() k = columns.Text() From 4bd590960a2c5c0fcd2da1c436f535d85ce85b53 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 6 Oct 2015 14:05:41 -0500 Subject: [PATCH 0104/2005] Remove deprecated cqlengine keyspace management functions --- cassandra/cqlengine/management.py | 59 ------------------- .../cqlengine/management/test_management.py | 29 +++------ 2 files changed, 8 insertions(+), 80 deletions(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 648e870554..4d6825c76f 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -37,58 +37,6 @@ schema_columnfamilies = NamedTable('system', 'schema_columnfamilies') -def create_keyspace(name, strategy_class, replication_factor, durable_writes=True, **replication_values): - """ - *Deprecated - use :func:`create_keyspace_simple` or :func:`create_keyspace_network_topology` instead* - - Creates a keyspace - - If the keyspace already exists, it will not be modified. - - **This function should be used with caution, especially in production environments. - Take care to execute schema modifications in a single context (i.e. not concurrently with other clients).** - - *There are plans to guard schema-modifying functions with an environment-driven conditional.* - - :param str name: name of keyspace to create - :param str strategy_class: keyspace replication strategy class (:attr:`~.SimpleStrategy` or :attr:`~.NetworkTopologyStrategy` - :param int replication_factor: keyspace replication factor, used with :attr:`~.SimpleStrategy` - :param bool durable_writes: Write log is bypassed if set to False - :param \*\*replication_values: Additional values to ad to the replication options map - """ - if not _allow_schema_modification(): - return - - msg = "Deprecated. Use create_keyspace_simple or create_keyspace_network_topology instead" - warnings.warn(msg, DeprecationWarning) - log.warning(msg) - - cluster = get_cluster() - - if name not in cluster.metadata.keyspaces: - # try the 1.2 method - replication_map = { - 'class': strategy_class, - 'replication_factor': replication_factor - } - replication_map.update(replication_values) - if strategy_class.lower() != 'simplestrategy': - # Although the Cassandra documentation states for `replication_factor` - # that it is "Required if class is SimpleStrategy; otherwise, - # not used." we get an error if it is present. - replication_map.pop('replication_factor', None) - - query = """ - CREATE KEYSPACE {0} - WITH REPLICATION = {1} - """.format(metadata.protect_name(name), json.dumps(replication_map).replace('"', "'")) - - if strategy_class != 'SimpleStrategy': - query += " AND DURABLE_WRITES = {0}".format('true' if durable_writes else 'false') - - execute(query) - - def create_keyspace_simple(name, replication_factor, durable_writes=True): """ Creates a keyspace with SimpleStrategy for replica placement @@ -140,13 +88,6 @@ def _create_keyspace(name, durable_writes, strategy_class, strategy_options): log.info("Not creating keyspace %s because it already exists", name) -def delete_keyspace(name): - msg = "Deprecated. Use drop_keyspace instead" - warnings.warn(msg, DeprecationWarning) - log.warning(msg) - drop_keyspace(name) - - def drop_keyspace(name): """ Drops a keyspace, if it exists. diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 4b62fcfcd1..cb61e55e16 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -37,33 +37,20 @@ def test_create_drop_succeeeds(self): cluster = get_cluster() keyspace_ss = 'test_ks_ss' - self.assertFalse(keyspace_ss in cluster.metadata.keyspaces) + self.assertNotIn(keyspace_ss, cluster.metadata.keyspaces) management.create_keyspace_simple(keyspace_ss, 2) - self.assertTrue(keyspace_ss in cluster.metadata.keyspaces) + self.assertIn(keyspace_ss, cluster.metadata.keyspaces) management.drop_keyspace(keyspace_ss) - - self.assertFalse(keyspace_ss in cluster.metadata.keyspaces) - with warnings.catch_warnings(record=True) as w: - management.create_keyspace(keyspace_ss, strategy_class="SimpleStrategy", replication_factor=1) - self.assertEqual(len(w), 1) - self.assertEqual(w[-1].category, DeprecationWarning) - self.assertTrue(keyspace_ss in cluster.metadata.keyspaces) - - management.drop_keyspace(keyspace_ss) - self.assertFalse(keyspace_ss in cluster.metadata.keyspaces) + self.assertNotIn(keyspace_ss, cluster.metadata.keyspaces) keyspace_nts = 'test_ks_nts' - self.assertFalse(keyspace_nts in cluster.metadata.keyspaces) - management.create_keyspace_simple(keyspace_nts, 2) - self.assertTrue(keyspace_nts in cluster.metadata.keyspaces) - - with warnings.catch_warnings(record=True) as w: - management.delete_keyspace(keyspace_nts) - self.assertEqual(len(w), 1) - self.assertEqual(w[-1].category, DeprecationWarning) + self.assertNotIn(keyspace_nts, cluster.metadata.keyspaces) + management.create_keyspace_network_topology(keyspace_nts, {'dc1': 1}) + self.assertIn(keyspace_nts, cluster.metadata.keyspaces) - self.assertFalse(keyspace_nts in cluster.metadata.keyspaces) + management.drop_keyspace(keyspace_nts) + self.assertNotIn(keyspace_nts, cluster.metadata.keyspaces) class DropTableTest(BaseCassEngTestCase): From 9d98c8e633d54c052a188adac053637bfd2ab1ed Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 6 Oct 2015 14:34:44 -0500 Subject: [PATCH 0105/2005] Remove deprecated cqlengine.Model.__polymorphic_*__ attributes --- cassandra/cqlengine/columns.py | 17 +- cassandra/cqlengine/models.py | 21 +- docs/api/cassandra/cqlengine/columns.rst | 2 - docs/api/cassandra/cqlengine/models.rst | 2 - .../cqlengine/model/test_polymorphism.py | 237 ------------------ 5 files changed, 4 insertions(+), 275 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 56c178c939..5289184db8 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -120,13 +120,6 @@ class Column(object): determines the order that the clustering keys are sorted on disk """ - polymorphic_key = False - """ - *Deprecated* - - see :attr:`~.discriminator_column` - """ - discriminator_column = False """ boolean, if set to True, this column will be used for discriminating records @@ -151,7 +144,6 @@ def __init__(self, default=None, required=False, clustering_order=None, - polymorphic_key=False, discriminator_column=False, static=False): self.partition_key = partition_key @@ -161,14 +153,7 @@ def __init__(self, self.default = default self.required = required self.clustering_order = clustering_order - - if polymorphic_key: - msg = "polymorphic_key is deprecated. Use discriminator_column instead." - warnings.warn(msg, DeprecationWarning) - log.warning(msg) - - self.discriminator_column = discriminator_column or polymorphic_key - self.polymorphic_key = self.discriminator_column + self.discriminator_column = discriminator_column # the column name in the model definition self.column_name = None diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 975773067a..3b9cd6ec8b 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -310,7 +310,6 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): __keyspace__ = None - __polymorphic_key__ = None # DEPRECATED __discriminator_value__ = None __options__ = None @@ -753,14 +752,7 @@ def __new__(cls, name, bases, attrs): is_abstract = attrs['__abstract__'] = attrs.get('__abstract__', False) # short circuit __discriminator_value__ inheritance - # __polymorphic_key__ is deprecated - poly_key = attrs.get('__polymorphic_key__', None) - if poly_key: - msg = '__polymorphic_key__ is deprecated. Use __discriminator_value__ instead' - warnings.warn(msg, DeprecationWarning) - log.warning(msg) - attrs['__discriminator_value__'] = attrs.get('__discriminator_value__', poly_key) - attrs['__polymorphic_key__'] = attrs['__discriminator_value__'] + attrs['__discriminator_value__'] = attrs.get('__discriminator_value__') options = attrs.get('__options__') or {} attrs['__default_ttl__'] = options.get('default_time_to_live') @@ -782,7 +774,7 @@ def _transform_column(col_name, col_obj): discriminator_columns = [c for c in column_definitions if c[1].discriminator_column] is_polymorphic = len(discriminator_columns) > 0 if len(discriminator_columns) > 1: - raise ModelDefinitionException('only one discriminator_column (polymorphic_key (deprecated)) can be defined in a model, {0} found'.format(len(discriminator_columns))) + raise ModelDefinitionException('only one discriminator_column can be defined in a model, {0} found'.format(len(discriminator_columns))) if attrs['__discriminator_value__'] and not is_polymorphic: raise ModelDefinitionException('__discriminator_value__ specified, but no base columns defined with discriminator_column=True') @@ -790,7 +782,7 @@ def _transform_column(col_name, col_obj): discriminator_column_name, discriminator_column = discriminator_columns[0] if discriminator_columns else (None, None) if isinstance(discriminator_column, (columns.BaseContainerColumn, columns.Counter)): - raise ModelDefinitionException('counter and container columns cannot be used as discriminator columns (polymorphic_key (deprecated)) ') + raise ModelDefinitionException('counter and container columns cannot be used as discriminator columns') # find polymorphic base class polymorphic_base = None @@ -946,13 +938,6 @@ class Model(BaseModel): (e.g. compaction, default ttl, cache settings, tec.) """ - __polymorphic_key__ = None - """ - *Deprecated.* - - see :attr:`~.__discriminator_value__` - """ - __discriminator_value__ = None """ *Optional* Specifies a value for the discriminator column when using model inheritance. diff --git a/docs/api/cassandra/cqlengine/columns.rst b/docs/api/cassandra/cqlengine/columns.rst index 7c4695d323..3f83eac5e6 100644 --- a/docs/api/cassandra/cqlengine/columns.rst +++ b/docs/api/cassandra/cqlengine/columns.rst @@ -29,8 +29,6 @@ Columns .. autoattribute:: clustering_order - .. autoattribute:: polymorphic_key - .. autoattribute:: discriminator_column .. autoattribute:: static diff --git a/docs/api/cassandra/cqlengine/models.rst b/docs/api/cassandra/cqlengine/models.rst index f06cd91b56..b8d8a71a0c 100644 --- a/docs/api/cassandra/cqlengine/models.rst +++ b/docs/api/cassandra/cqlengine/models.rst @@ -33,8 +33,6 @@ Model .. _ttl-change: .. autoattribute:: __default_ttl__ - .. autoattribute:: __polymorphic_key__ - .. autoattribute:: __discriminator_value__ See :ref:`model_inheritance` for usage examples. diff --git a/tests/integration/cqlengine/model/test_polymorphism.py b/tests/integration/cqlengine/model/test_polymorphism.py index 6e7a5a690d..f8969e32e6 100644 --- a/tests/integration/cqlengine/model/test_polymorphism.py +++ b/tests/integration/cqlengine/model/test_polymorphism.py @@ -22,243 +22,6 @@ from cassandra.cqlengine import management -class TestPolymorphicClassConstruction(BaseCassEngTestCase): - - def test_multiple_polymorphic_key_failure(self): - """ Tests that defining a model with more than one polymorphic key fails """ - with self.assertRaises(models.ModelDefinitionException): - class M(models.Model): - - partition = columns.Integer(primary_key=True) - type1 = columns.Integer(polymorphic_key=True) - type2 = columns.Integer(polymorphic_key=True) - - def test_no_polymorphic_key_column_failure(self): - with self.assertRaises(models.ModelDefinitionException): - class M(models.Model): - __polymorphic_key__ = 1 - - def test_polymorphic_key_inheritance(self): - """ Tests that polymorphic_key attribute is not inherited """ - class Base(models.Model): - - partition = columns.Integer(primary_key=True) - type1 = columns.Integer(polymorphic_key=True) - - class M1(Base): - __polymorphic_key__ = 1 - - class M2(M1): - pass - - assert M2.__polymorphic_key__ is None - - def test_polymorphic_metaclass(self): - """ Tests that the model meta class configures polymorphic models properly """ - class Base(models.Model): - - partition = columns.Integer(primary_key=True) - type1 = columns.Integer(polymorphic_key=True) - - class M1(Base): - __polymorphic_key__ = 1 - - assert Base._is_polymorphic - assert M1._is_polymorphic - - assert Base._is_polymorphic_base - assert not M1._is_polymorphic_base - - assert Base._discriminator_column is Base._columns['type1'] - assert M1._discriminator_column is M1._columns['type1'] - - assert Base._discriminator_column_name == 'type1' - assert M1._discriminator_column_name == 'type1' - - def test_table_names_are_inherited_from_poly_base(self): - class Base(models.Model): - - partition = columns.Integer(primary_key=True) - type1 = columns.Integer(polymorphic_key=True) - - class M1(Base): - __polymorphic_key__ = 1 - - assert Base.column_family_name() == M1.column_family_name() - - def test_collection_columns_cant_be_polymorphic_keys(self): - with self.assertRaises(models.ModelDefinitionException): - class Base(models.Model): - - partition = columns.Integer(primary_key=True) - type1 = columns.Set(columns.Integer, polymorphic_key=True) - - -class PolyBase(models.Model): - - partition = columns.UUID(primary_key=True, default=uuid.uuid4) - row_type = columns.Integer(polymorphic_key=True) - - -class Poly1(PolyBase): - __polymorphic_key__ = 1 - data1 = columns.Text() - - -class Poly2(PolyBase): - __polymorphic_key__ = 2 - data2 = columns.Text() - - -class TestPolymorphicModel(BaseCassEngTestCase): - - @classmethod - def setUpClass(cls): - super(TestPolymorphicModel, cls).setUpClass() - management.sync_table(Poly1) - management.sync_table(Poly2) - - @classmethod - def tearDownClass(cls): - super(TestPolymorphicModel, cls).tearDownClass() - management.drop_table(Poly1) - management.drop_table(Poly2) - - def test_saving_base_model_fails(self): - with self.assertRaises(models.PolymorphicModelException): - PolyBase.create() - - def test_saving_subclass_saves_poly_key(self): - p1 = Poly1.create(data1='pickle') - p2 = Poly2.create(data2='bacon') - - assert p1.row_type == Poly1.__polymorphic_key__ - assert p2.row_type == Poly2.__polymorphic_key__ - - def test_query_deserialization(self): - p1 = Poly1.create(data1='pickle') - p2 = Poly2.create(data2='bacon') - - p1r = PolyBase.get(partition=p1.partition) - p2r = PolyBase.get(partition=p2.partition) - - assert isinstance(p1r, Poly1) - assert isinstance(p2r, Poly2) - - def test_delete_on_polymorphic_subclass_does_not_include_polymorphic_key(self): - p1 = Poly1.create() - session = get_session() - with mock.patch.object(session, 'execute') as m: - Poly1.objects(partition=p1.partition).delete() - - # make sure our polymorphic key isn't in the CQL - # not sure how we would even get here if it was in there - # since the CQL would fail. - - self.assertNotIn("row_type", m.call_args[0][0].query_string) - - -class UnindexedPolyBase(models.Model): - - partition = columns.UUID(primary_key=True, default=uuid.uuid4) - cluster = columns.UUID(primary_key=True, default=uuid.uuid4) - row_type = columns.Integer(polymorphic_key=True) - - -class UnindexedPoly1(UnindexedPolyBase): - __polymorphic_key__ = 1 - data1 = columns.Text() - - -class UnindexedPoly2(UnindexedPolyBase): - __polymorphic_key__ = 2 - data2 = columns.Text() - - -class UnindexedPoly3(UnindexedPoly2): - __polymorphic_key__ = 3 - data3 = columns.Text() - - -class TestUnindexedPolymorphicQuery(BaseCassEngTestCase): - - @classmethod - def setUpClass(cls): - super(TestUnindexedPolymorphicQuery, cls).setUpClass() - management.sync_table(UnindexedPoly1) - management.sync_table(UnindexedPoly2) - management.sync_table(UnindexedPoly3) - - cls.p1 = UnindexedPoly1.create(data1='pickle') - cls.p2 = UnindexedPoly2.create(partition=cls.p1.partition, data2='bacon') - cls.p3 = UnindexedPoly3.create(partition=cls.p1.partition, data3='turkey') - - @classmethod - def tearDownClass(cls): - super(TestUnindexedPolymorphicQuery, cls).tearDownClass() - management.drop_table(UnindexedPoly1) - management.drop_table(UnindexedPoly2) - management.drop_table(UnindexedPoly3) - - def test_non_conflicting_type_results_work(self): - p1, p2, p3 = self.p1, self.p2, self.p3 - assert len(list(UnindexedPoly1.objects(partition=p1.partition, cluster=p1.cluster))) == 1 - assert len(list(UnindexedPoly2.objects(partition=p1.partition, cluster=p2.cluster))) == 1 - assert len(list(UnindexedPoly3.objects(partition=p1.partition, cluster=p3.cluster))) == 1 - - def test_subclassed_model_results_work_properly(self): - p1, p2, p3 = self.p1, self.p2, self.p3 - assert len(list(UnindexedPoly2.objects(partition=p1.partition, cluster__in=[p2.cluster, p3.cluster]))) == 2 - - def test_conflicting_type_results(self): - with self.assertRaises(models.PolymorphicModelException): - list(UnindexedPoly1.objects(partition=self.p1.partition)) - with self.assertRaises(models.PolymorphicModelException): - list(UnindexedPoly2.objects(partition=self.p1.partition)) - - -class IndexedPolyBase(models.Model): - - partition = columns.UUID(primary_key=True, default=uuid.uuid4) - cluster = columns.UUID(primary_key=True, default=uuid.uuid4) - row_type = columns.Integer(polymorphic_key=True, index=True) - - -class IndexedPoly1(IndexedPolyBase): - __polymorphic_key__ = 1 - data1 = columns.Text() - - -class IndexedPoly2(IndexedPolyBase): - __polymorphic_key__ = 2 - data2 = columns.Text() - - -class TestIndexedPolymorphicQuery(BaseCassEngTestCase): - - @classmethod - def setUpClass(cls): - super(TestIndexedPolymorphicQuery, cls).setUpClass() - management.sync_table(IndexedPoly1) - management.sync_table(IndexedPoly2) - - cls.p1 = IndexedPoly1.create(data1='pickle') - cls.p2 = IndexedPoly2.create(partition=cls.p1.partition, data2='bacon') - - @classmethod - def tearDownClass(cls): - super(TestIndexedPolymorphicQuery, cls).tearDownClass() - management.drop_table(IndexedPoly1) - management.drop_table(IndexedPoly2) - - def test_success_case(self): - assert len(list(IndexedPoly1.objects(partition=self.p1.partition))) == 1 - assert len(list(IndexedPoly2.objects(partition=self.p1.partition))) == 1 - - -######### -# Repeated tests for 'discriminator' properties, following deprecation of polymorphic variants -######### class TestInheritanceClassConstruction(BaseCassEngTestCase): def test_multiple_discriminator_value_failure(self): From 79efe97787f8aa443d8d4115b7c35f59620243a3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 6 Oct 2015 14:41:53 -0500 Subject: [PATCH 0106/2005] Remove list prepend warning from cqlengine.statements --- cassandra/cqlengine/statements.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index c1a7a51130..4c2f728bc9 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -13,17 +13,13 @@ # limitations under the License. from datetime import datetime, timedelta -import logging import time import six -import warnings from cassandra.cqlengine import UnicodeMixin from cassandra.cqlengine.functions import QueryValue from cassandra.cqlengine.operators import BaseWhereOperator, InOperator -log = logging.getLogger(__name__) - class StatementException(Exception): pass @@ -295,11 +291,6 @@ def update_context(self, ctx): ctx[str(ctx_id)] = self._to_database(self._assignments) ctx_id += 1 if self._prepend is not None: - msg = "Previous versions of cqlengine implicitly reversed prepended lists to account for CASSANDRA-8733. " \ - "THIS VERSION DOES NOT. This warning will be removed in a future release." - warnings.warn(msg) - log.warning(msg) - ctx[str(ctx_id)] = self._to_database(self._prepend) ctx_id += 1 if self._append is not None: From f80c1ae714ce9de111e4b5d321dfad78635d1245 Mon Sep 17 00:00:00 2001 From: yi719 Date: Sat, 3 Oct 2015 09:49:55 +0800 Subject: [PATCH 0107/2005] Fix timeout in geventreactor Fix timeout in geventreactor --- cassandra/io/geventreactor.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index f26e61523c..a063df1b70 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -36,7 +36,8 @@ def is_timeout(err): return ( err in (EINPROGRESS, EALREADY, EWOULDBLOCK) or - (err == EINVAL and os.name in ('nt', 'ce')) + (err == EINVAL and os.name in ('nt', 'ce')) or + isinstance(err, socket.timeout) ) From a9116ac4be286c3dba858c87509c3861fbdfcf42 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 7 Oct 2015 16:34:03 -0500 Subject: [PATCH 0108/2005] always return ResultSet for query results --- cassandra/cluster.py | 67 +++++++++++++------ cassandra/concurrent.py | 6 +- docs/api/cassandra/cluster.rst | 2 +- .../long/test_loadbalancingpolicies.py | 2 - tests/integration/standard/test_concurrent.py | 4 +- .../standard/test_custom_protocol_handler.py | 11 ++- tests/integration/standard/test_metrics.py | 6 +- tests/integration/standard/test_query.py | 12 ++-- .../integration/standard/test_query_paging.py | 32 ++++----- .../standard/test_row_factories.py | 10 +-- tests/integration/standard/test_udts.py | 10 --- 11 files changed, 86 insertions(+), 76 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 0f3a554faa..42328f85f9 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2720,6 +2720,7 @@ class ResponseFuture(object): _start_time = None _metrics = None _paging_state = None + _is_result_kind_rows = False _custom_payload = None _warnings = None _timer = None @@ -2930,7 +2931,8 @@ def _set_result(self, response): self, **response.results) else: results = getattr(response, 'results', None) - if results is not None and response.kind == RESULT_KIND_ROWS: + self._is_result_kind_rows = response.kind ==RESULT_KIND_ROWS + if results is not None and self._is_result_kind_rows: self._paging_state = response.paging_state results = self.row_factory(*results) self._set_final_result(results) @@ -3197,10 +3199,10 @@ def result(self, timeout=_NOT_SET): if not self._event.is_set(): self._on_timeout() if self._final_result is not _NOT_SET: - if self._paging_state is None: - return self._final_result + if self._is_result_kind_rows: + return ResultSet(self, self._final_result) else: - return PagedResult(self, self._final_result) + return self._final_result else: raise self._final_exception @@ -3333,51 +3335,74 @@ class QueryExhausted(Exception): pass -class PagedResult(object): +class ResultSet(object): """ - An iterator over the rows from a paged query result. Whenever the number - of result rows for a query exceed the :attr:`~.query.Statement.fetch_size` - (or :attr:`~.Session.default_fetch_size`, if not set) an instance of this - class will be returned. + An iterator over the rows from a query result. Also supplies basic equality + and indexing methods for backward-compatability. These methods materialize + the entire result set (loading all pages), and should only be used if the + total result size is understood. Warnings are emitted when paged results + are materialized in this fashion. You can treat this as a normal iterator over rows:: >>> from cassandra.query import SimpleStatement >>> statement = SimpleStatement("SELECT * FROM users", fetch_size=10) >>> for user_row in session.execute(statement): - ... process_user(user_row) + ... process_user(user_rowt Whenever there are no more rows in the current page, the next page will be fetched transparently. However, note that it *is* possible for an :class:`Exception` to be raised while fetching the next page, just like you might see on a normal call to ``session.execute()``. - - .. versionadded: 2.0.0 """ - response_future = None - def __init__(self, response_future, initial_response): self.response_future = response_future - self.current_response = iter(initial_response) + self._had_pages = response_future.has_more_pages + self._current_rows = initial_response + self._page_iter = None + + @property + def has_more_pages(self): + return self.response_future.has_more_pages def __iter__(self): + self._page_iter = iter(self._current_rows) return self def next(self): try: - return next(self.current_response) + return next(self._page_iter) except StopIteration: if not self.response_future.has_more_pages: raise self.response_future.start_fetching_next_page() result = self.response_future.result() - if self.response_future.has_more_pages: - self.current_response = result.current_response - else: - self.current_response = iter(result) + self._current_rows = result._current_rows + self._page_iter = iter(self._current_rows) - return next(self.current_response) + return next(self._page_iter) __next__ = next + + def __eq__(self, other): + if self._page_iter and self._had_pages: + raise RuntimeError("Cannot test equality when paged results have been consumed.") + if self.response_future.has_more_pages: + log.warning("Using equality operator on paged results causes entire result set to be materialized.") + self._current_rows = list(self) + return self._current_rows == other + + def __getitem__(self, i): + if self._page_iter and self._had_pages: + raise RuntimeError("Cannot index when paged results have been consumed.") + if self.response_future.has_more_pages: + log.warning("Using indexing on paged results causes entire result set to be materialized.") + self._current_rows = list(self) + return self._current_rows[i] + + def __nonzero__(self): + return bool(self._current_rows) + + __bool__ = __nonzero__ diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index d29ff84bd1..3391fdeaa2 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -20,7 +20,7 @@ from threading import Condition import sys -from cassandra.cluster import PagedResult +from cassandra.cluster import ResultSet import logging log = logging.getLogger(__name__) @@ -134,8 +134,8 @@ def _execute(self, idx, statement, params): self._put_result(e, idx, False) def _on_success(self, result, future, idx): - if future.has_more_pages: - result = PagedResult(future, result) + if future._is_result_kind_rows: + result = ResultSet(future, result) future.clear_callbacks() self._put_result(result, idx, True) diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 1ccd9282b7..3ea0d23091 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -140,7 +140,7 @@ .. automethod:: add_callbacks(callback, errback, callback_args=(), callback_kwargs=None, errback_args=(), errback_args=None) -.. autoclass:: PagedResult () +.. autoclass:: ResultSet () :members: .. autoexception:: QueryExhausted () diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index b1a8d4d251..40f0893944 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -493,7 +493,6 @@ def test_token_aware_composite_key(self): session.execute(prepared.bind((1, 2, 3))) results = session.execute('SELECT * FROM %s WHERE k1 = 1 AND k2 = 2' % table) - self.assertTrue(len(results) == 1) self.assertTrue(results[0].i) cluster.shutdown() @@ -539,7 +538,6 @@ def test_token_aware_with_local_table(self): p = session.prepare("SELECT * FROM system.local WHERE key=?") # this would blow up prior to 61b4fad r = session.execute(p, ('local',)) - self.assertEqual(len(r), 1) self.assertEqual(r[0].key, 'local') cluster.shutdown() diff --git a/tests/integration/standard/test_concurrent.py b/tests/integration/standard/test_concurrent.py index 4d9ce3aeac..23b810c535 100644 --- a/tests/integration/standard/test_concurrent.py +++ b/tests/integration/standard/test_concurrent.py @@ -18,7 +18,7 @@ from cassandra import InvalidRequest, ConsistencyLevel, ReadTimeout, WriteTimeout, OperationTimedOut, \ ReadFailure, WriteFailure -from cassandra.cluster import Cluster, PagedResult +from cassandra.cluster import Cluster from cassandra.concurrent import execute_concurrent, execute_concurrent_with_args from cassandra.policies import HostDistance from cassandra.query import tuple_factory, SimpleStatement @@ -184,7 +184,7 @@ def test_execute_concurrent_paged_result(self): self.assertEqual(1, len(results)) self.assertTrue(results[0][0]) result = results[0][1] - self.assertIsInstance(result, PagedResult) + self.assertTrue(result.has_more_pages) self.assertEqual(num_statements, sum(1 for _ in result)) def test_execute_concurrent_paged_result_generator(self): diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index 36965a36ff..ce16e78c79 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -65,9 +65,8 @@ def test_custom_raw_uuid_row_results(self): # Ensure that we get normal uuid back first session = Cluster(protocol_version=PROTOCOL_VERSION).connect(keyspace="custserdes") session.row_factory = tuple_factory - result_set = session.execute("SELECT schema_version FROM system.local") - result = result_set.pop() - uuid_type = result[0] + result = session.execute("SELECT schema_version FROM system.local") + uuid_type = result[0][0] self.assertEqual(type(uuid_type), uuid.UUID) # use our custom protocol handlder @@ -75,16 +74,14 @@ def test_custom_raw_uuid_row_results(self): session.client_protocol_handler = CustomTestRawRowType session.row_factory = tuple_factory result_set = session.execute("SELECT schema_version FROM system.local") - result = result_set.pop() - raw_value = result.pop() + raw_value = result_set[0][0] self.assertTrue(isinstance(raw_value, binary_type)) self.assertEqual(len(raw_value), 16) # Ensure that we get normal uuid back when we re-connect session.client_protocol_handler = ProtocolHandler result_set = session.execute("SELECT schema_version FROM system.local") - result = result_set.pop() - uuid_type = result[0] + uuid_type = result_set[0][0] self.assertEqual(type(uuid_type), uuid.UUID) session.shutdown() diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 6731e9073e..b2a046bae6 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -77,7 +77,7 @@ def test_write_timeout(self): # Assert read query = SimpleStatement("SELECT * FROM test WHERE k=1", consistency_level=ConsistencyLevel.ALL) results = execute_until_pass(session, query) - self.assertEqual(1, len(results)) + self.assertTrue(results) # Pause node so it shows as unreachable to coordinator get_node(1).pause() @@ -110,7 +110,7 @@ def test_read_timeout(self): # Assert read query = SimpleStatement("SELECT * FROM test WHERE k=1", consistency_level=ConsistencyLevel.ALL) results = execute_until_pass(session, query) - self.assertEqual(1, len(results)) + self.assertTrue(results) # Pause node so it shows as unreachable to coordinator get_node(1).pause() @@ -143,7 +143,7 @@ def test_unavailable(self): # Assert read query = SimpleStatement("SELECT * FROM test WHERE k=1", consistency_level=ConsistencyLevel.ALL) results = execute_until_pass(session, query) - self.assertEqual(1, len(results)) + self.assertTrue(results) # Stop node gracefully get_node(1).stop(wait=True, wait_other_notice=True) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 92ffbf68f3..2364a49f71 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -402,7 +402,7 @@ def test_conditional_update(self): future = self.session.execute_async(statement) result = future.result() self.assertEqual(future.message.serial_consistency_level, ConsistencyLevel.SERIAL) - self.assertEqual(1, len(result)) + self.assertTrue(result) self.assertFalse(result[0].applied) statement = SimpleStatement( @@ -412,7 +412,7 @@ def test_conditional_update(self): future = self.session.execute_async(statement) result = future.result() self.assertEqual(future.message.serial_consistency_level, ConsistencyLevel.LOCAL_SERIAL) - self.assertEqual(1, len(result)) + self.assertTrue(result) self.assertTrue(result[0].applied) def test_conditional_update_with_prepared_statements(self): @@ -424,7 +424,7 @@ def test_conditional_update_with_prepared_statements(self): future = self.session.execute_async(statement) result = future.result() self.assertEqual(future.message.serial_consistency_level, ConsistencyLevel.SERIAL) - self.assertEqual(1, len(result)) + self.assertTrue(result) self.assertFalse(result[0].applied) statement = self.session.prepare( @@ -434,7 +434,7 @@ def test_conditional_update_with_prepared_statements(self): future = self.session.execute_async(bound) result = future.result() self.assertEqual(future.message.serial_consistency_level, ConsistencyLevel.LOCAL_SERIAL) - self.assertEqual(1, len(result)) + self.assertTrue(result) self.assertTrue(result[0].applied) def test_conditional_update_with_batch_statements(self): @@ -445,7 +445,7 @@ def test_conditional_update_with_batch_statements(self): future = self.session.execute_async(statement) result = future.result() self.assertEqual(future.message.serial_consistency_level, ConsistencyLevel.SERIAL) - self.assertEqual(1, len(result)) + self.assertTrue(result) self.assertFalse(result[0].applied) statement = BatchStatement(serial_consistency_level=ConsistencyLevel.LOCAL_SERIAL) @@ -454,7 +454,7 @@ def test_conditional_update_with_batch_statements(self): future = self.session.execute_async(statement) result = future.result() self.assertEqual(future.message.serial_consistency_level, ConsistencyLevel.LOCAL_SERIAL) - self.assertEqual(1, len(result)) + self.assertTrue(result) self.assertTrue(result[0].applied) def test_bad_consistency_level(self): diff --git a/tests/integration/standard/test_query_paging.py b/tests/integration/standard/test_query_paging.py index eeb55e05be..f9e943f93d 100644 --- a/tests/integration/standard/test_query_paging.py +++ b/tests/integration/standard/test_query_paging.py @@ -26,7 +26,7 @@ from six.moves import range from threading import Event -from cassandra.cluster import Cluster, PagedResult +from cassandra.cluster import Cluster from cassandra.concurrent import execute_concurrent, execute_concurrent_with_args from cassandra.policies import HostDistance from cassandra.query import SimpleStatement @@ -301,66 +301,66 @@ def test_fetch_size(self): self.session.default_fetch_size = 10 result = self.session.execute(prepared, []) - self.assertIsInstance(result, PagedResult) + self.assertTrue(result.has_more_pages) self.session.default_fetch_size = 2000 result = self.session.execute(prepared, []) - self.assertIsInstance(result, list) + self.assertFalse(result.has_more_pages) self.session.default_fetch_size = None result = self.session.execute(prepared, []) - self.assertIsInstance(result, list) + self.assertFalse(result.has_more_pages) self.session.default_fetch_size = 10 prepared.fetch_size = 2000 result = self.session.execute(prepared, []) - self.assertIsInstance(result, list) + self.assertFalse(result.has_more_pages) prepared.fetch_size = None result = self.session.execute(prepared, []) - self.assertIsInstance(result, list) + self.assertFalse(result.has_more_pages) prepared.fetch_size = 10 result = self.session.execute(prepared, []) - self.assertIsInstance(result, PagedResult) + self.assertTrue(result.has_more_pages) prepared.fetch_size = 2000 bound = prepared.bind([]) result = self.session.execute(bound, []) - self.assertIsInstance(result, list) + self.assertFalse(result.has_more_pages) prepared.fetch_size = None bound = prepared.bind([]) result = self.session.execute(bound, []) - self.assertIsInstance(result, list) + self.assertFalse(result.has_more_pages) prepared.fetch_size = 10 bound = prepared.bind([]) result = self.session.execute(bound, []) - self.assertIsInstance(result, PagedResult) + self.assertTrue(result.has_more_pages) bound.fetch_size = 2000 result = self.session.execute(bound, []) - self.assertIsInstance(result, list) + self.assertFalse(result.has_more_pages) bound.fetch_size = None result = self.session.execute(bound, []) - self.assertIsInstance(result, list) + self.assertFalse(result.has_more_pages) bound.fetch_size = 10 result = self.session.execute(bound, []) - self.assertIsInstance(result, PagedResult) + self.assertTrue(result.has_more_pages) s = SimpleStatement("SELECT * FROM test3rf.test", fetch_size=None) result = self.session.execute(s, []) - self.assertIsInstance(result, list) + self.assertFalse(result.has_more_pages) s = SimpleStatement("SELECT * FROM test3rf.test") result = self.session.execute(s, []) - self.assertIsInstance(result, PagedResult) + self.assertTrue(result.has_more_pages) s = SimpleStatement("SELECT * FROM test3rf.test") s.fetch_size = None result = self.session.execute(s, []) - self.assertIsInstance(result, list) + self.assertFalse(result.has_more_pages) diff --git a/tests/integration/standard/test_row_factories.py b/tests/integration/standard/test_row_factories.py index 4fe5cf3916..d4391daffb 100644 --- a/tests/integration/standard/test_row_factories.py +++ b/tests/integration/standard/test_row_factories.py @@ -19,7 +19,7 @@ except ImportError: import unittest # noqa -from cassandra.cluster import Cluster +from cassandra.cluster import Cluster, ResultSet from cassandra.query import tuple_factory, named_tuple_factory, dict_factory, ordered_dict_factory from cassandra.util import OrderedDict @@ -72,7 +72,7 @@ def test_tuple_factory(self): result = session.execute(self.select) - self.assertIsInstance(result, list) + self.assertIsInstance(result, ResultSet) self.assertIsInstance(result[0], tuple) for row in result: @@ -93,7 +93,7 @@ def test_named_tuple_factory(self): result = session.execute(self.select) - self.assertIsInstance(result, list) + self.assertIsInstance(result, ResultSet) for row in result: self.assertEqual(row.k, row.v) @@ -113,7 +113,7 @@ def test_dict_factory(self): result = session.execute(self.select) - self.assertIsInstance(result, list) + self.assertIsInstance(result, ResultSet) self.assertIsInstance(result[0], dict) for row in result: @@ -134,7 +134,7 @@ def test_ordered_dict_factory(self): result = session.execute(self.select) - self.assertIsInstance(result, list) + self.assertIsInstance(result, ResultSet) self.assertIsInstance(result[0], OrderedDict) for row in result: diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index 72aa658165..15ed9dcebb 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -75,7 +75,6 @@ def test_can_insert_unprepared_registered_udts(self): s.execute("INSERT INTO mytable (a, b) VALUES (%s, %s)", (0, User(42, 'bob'))) result = s.execute("SELECT b FROM mytable WHERE a=0") - self.assertEqual(1, len(result)) row = result[0] self.assertEqual(42, row.b.age) self.assertEqual('bob', row.b.name) @@ -95,7 +94,6 @@ def test_can_insert_unprepared_registered_udts(self): s.execute("INSERT INTO mytable (a, b) VALUES (%s, %s)", (0, User('Texas', True))) result = s.execute("SELECT b FROM mytable WHERE a=0") - self.assertEqual(1, len(result)) row = result[0] self.assertEqual('Texas', row.b.state) self.assertEqual(True, row.b.is_cool) @@ -142,7 +140,6 @@ def test_can_register_udt_before_connecting(self): s.set_keyspace("udt_test_register_before_connecting") s.execute("INSERT INTO mytable (a, b) VALUES (%s, %s)", (0, User1(42, 'bob'))) result = s.execute("SELECT b FROM mytable WHERE a=0") - self.assertEqual(1, len(result)) row = result[0] self.assertEqual(42, row.b.age) self.assertEqual('bob', row.b.name) @@ -152,7 +149,6 @@ def test_can_register_udt_before_connecting(self): s.set_keyspace("udt_test_register_before_connecting2") s.execute("INSERT INTO mytable (a, b) VALUES (%s, %s)", (0, User2('Texas', True))) result = s.execute("SELECT b FROM mytable WHERE a=0") - self.assertEqual(1, len(result)) row = result[0] self.assertEqual('Texas', row.b.state) self.assertEqual(True, row.b.is_cool) @@ -177,7 +173,6 @@ def test_can_insert_prepared_unregistered_udts(self): select = s.prepare("SELECT b FROM mytable WHERE a=?") result = s.execute(select, (0,)) - self.assertEqual(1, len(result)) row = result[0] self.assertEqual(42, row.b.age) self.assertEqual('bob', row.b.name) @@ -197,7 +192,6 @@ def test_can_insert_prepared_unregistered_udts(self): select = s.prepare("SELECT b FROM mytable WHERE a=?") result = s.execute(select, (0,)) - self.assertEqual(1, len(result)) row = result[0] self.assertEqual('Texas', row.b.state) self.assertEqual(True, row.b.is_cool) @@ -223,7 +217,6 @@ def test_can_insert_prepared_registered_udts(self): select = s.prepare("SELECT b FROM mytable WHERE a=?") result = s.execute(select, (0,)) - self.assertEqual(1, len(result)) row = result[0] self.assertEqual(42, row.b.age) self.assertEqual('bob', row.b.name) @@ -246,7 +239,6 @@ def test_can_insert_prepared_registered_udts(self): select = s.prepare("SELECT b FROM mytable WHERE a=?") result = s.execute(select, (0,)) - self.assertEqual(1, len(result)) row = result[0] self.assertEqual('Texas', row.b.state) self.assertEqual(True, row.b.is_cool) @@ -528,7 +520,6 @@ def test_can_insert_udt_all_datatypes(self): # retrieve and verify data results = s.execute("SELECT * FROM mytable") - self.assertEqual(1, len(results)) row = results[0].b for expected, actual in zip(params, row): @@ -587,7 +578,6 @@ def test_can_insert_udt_all_collection_datatypes(self): # retrieve and verify data results = s.execute("SELECT * FROM mytable") - self.assertEqual(1, len(results)) row = results[0].b for expected, actual in zip(params, row): From 6402f71547cb2774263f37746b670b2c0c6337c2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 8 Oct 2015 16:56:59 -0500 Subject: [PATCH 0109/2005] Make ResultSet list mode explicit PYTHON-368 --- cassandra/cluster.py | 32 +++++++++++++++++++++----------- tests/unit/test_concurrent.py | 2 ++ 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 42328f85f9..ef41b58235 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3358,15 +3358,17 @@ class ResultSet(object): def __init__(self, response_future, initial_response): self.response_future = response_future - self._had_pages = response_future.has_more_pages self._current_rows = initial_response self._page_iter = None + self._list_mode = False @property def has_more_pages(self): return self.response_future.has_more_pages def __iter__(self): + if self._list_mode: + return iter(self._current_rows) self._page_iter = iter(self._current_rows) return self @@ -3375,6 +3377,8 @@ def next(self): return next(self._page_iter) except StopIteration: if not self.response_future.has_more_pages: + if not self._list_mode: + self._current_rows = [] raise self.response_future.start_fetching_next_page() @@ -3386,20 +3390,26 @@ def next(self): __next__ = next - def __eq__(self, other): - if self._page_iter and self._had_pages: - raise RuntimeError("Cannot test equality when paged results have been consumed.") + def _fetch_all(self): + self._current_rows = list(self) + self._page_iter = None + + def _enter_list_mode(self, operator): + if self._list_mode: + return + if self._page_iter: + raise RuntimeError("Cannot use %s when results have been iterated." % operator) if self.response_future.has_more_pages: - log.warning("Using equality operator on paged results causes entire result set to be materialized.") - self._current_rows = list(self) + log.warning("Using %s on paged results causes entire result set to be materialized.", operator) + self._fetch_all() + self._list_mode = True + + def __eq__(self, other): + self._enter_list_mode("equality operator") return self._current_rows == other def __getitem__(self, i): - if self._page_iter and self._had_pages: - raise RuntimeError("Cannot index when paged results have been consumed.") - if self.response_future.has_more_pages: - log.warning("Using indexing on paged results causes entire result set to be materialized.") - self._current_rows = list(self) + self._enter_list_mode("index operator") return self._current_rows[i] def __nonzero__(self): diff --git a/tests/unit/test_concurrent.py b/tests/unit/test_concurrent.py index 0bdb1f9e4d..7465f685c7 100644 --- a/tests/unit/test_concurrent.py +++ b/tests/unit/test_concurrent.py @@ -32,6 +32,8 @@ class MockResponseResponseFuture(): and invoke callback with various timing. """ + _is_result_kind_rows = False + # a list pending callbacks, these will be prioritized in reverse or normal orderd pending_callbacks = PriorityQueue() From 1862c7a0c4bf1447095977b08abfa738f045b8eb Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 8 Oct 2015 16:59:51 -0500 Subject: [PATCH 0110/2005] Add unit test for cluster.ResultSet --- tests/unit/test_resultset.py | 160 +++++++++++++++++++++++++++++++++++ 1 file changed, 160 insertions(+) create mode 100644 tests/unit/test_resultset.py diff --git a/tests/unit/test_resultset.py b/tests/unit/test_resultset.py new file mode 100644 index 0000000000..bb85de47ca --- /dev/null +++ b/tests/unit/test_resultset.py @@ -0,0 +1,160 @@ +# Copyright 2013-2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +from mock import Mock, PropertyMock +import warnings + +from cassandra.cluster import ResultSet + + +class ResultSetTests(unittest.TestCase): + + def test_iter_non_paged(self): + expected = list(range(10)) + rs = ResultSet(Mock(has_more_pages=False), expected) + itr = iter(rs) + self.assertListEqual(list(itr), expected) + + def test_iter_paged(self): + expected = list(range(10)) + response_future = Mock(has_more_pages=True) + response_future.result.side_effect = (ResultSet(Mock(), expected[-5:]), ) # ResultSet is iterable, so it must be protected in order to be returned whole by the Mock + rs = ResultSet(response_future, expected[:5]) + itr = iter(rs) + type(response_future).has_more_pages = PropertyMock(side_effect=(True, False)) # after init to avoid side effects being consumed by init + self.assertListEqual(list(itr), expected) + + def test_list_non_paged(self): + # list access on RS for backwards-compatibility + expected = list(range(10)) + rs = ResultSet(Mock(has_more_pages=False), expected) + for i in range(10): + self.assertEqual(rs[i], expected[i]) + self.assertEqual(list(rs), expected) + + def test_list_paged(self): + # list access on RS for backwards-compatibility + expected = list(range(10)) + response_future = Mock(has_more_pages=True) + response_future.result.side_effect = (ResultSet(Mock(), expected[-5:]), ) # ResultSet is iterable, so it must be protected in order to be returned whole by the Mock + rs = ResultSet(response_future, expected[:5]) + type(response_future).has_more_pages = PropertyMock(side_effect=(True, True, False)) # one True for getitem check/warn, then True, False for two pages + self.assertEqual(rs[9], expected[9]) + self.assertEqual(list(rs), expected) + + def test_has_more_pages(self): + response_future = Mock() + response_future.has_more_pages.side_effect = PropertyMock(side_effect=(True, False)) + rs = ResultSet(response_future, []) + type(response_future).has_more_pages = PropertyMock(side_effect=(True, False)) # after init to avoid side effects being consumed by init + self.assertTrue(rs.has_more_pages) + self.assertFalse(rs.has_more_pages) + + def test_iterate_then_index(self): + # RuntimeError if indexing with no pages + expected = list(range(10)) + rs = ResultSet(Mock(has_more_pages=False), expected) + itr = iter(rs) + # before consuming + with self.assertRaises(RuntimeError): + rs[0] + list(itr) + # after consuming + with self.assertRaises(RuntimeError): + rs[0] + + self.assertFalse(rs) + self.assertFalse(list(rs)) + + # RuntimeError if indexing during or after pages + response_future = Mock(has_more_pages=True) + response_future.result.side_effect = (ResultSet(Mock(), expected[-5:]), ) # ResultSet is iterable, so it must be protected in order to be returned whole by the Mock + rs = ResultSet(response_future, expected[:5]) + type(response_future).has_more_pages = PropertyMock(side_effect=(True, False)) + itr = iter(rs) + # before consuming + with self.assertRaises(RuntimeError): + rs[0] + for row in itr: + # while consuming + with self.assertRaises(RuntimeError): + rs[0] + # after consuming + with self.assertRaises(RuntimeError): + rs[0] + self.assertFalse(rs) + self.assertFalse(list(rs)) + + def test_index_list_mode(self): + # no pages + expected = list(range(10)) + rs = ResultSet(Mock(has_more_pages=False), expected) + + # index access before iteration causes list to be materialized + self.assertEqual(rs[0], expected[0]) + + # resusable iteration + self.assertListEqual(list(rs), expected) + self.assertListEqual(list(rs), expected) + + self.assertTrue(rs) + + # pages + response_future = Mock(has_more_pages=True) + response_future.result.side_effect = (ResultSet(Mock(), expected[-5:]), ) # ResultSet is iterable, so it must be protected in order to be returned whole by the Mock + rs = ResultSet(response_future, expected[:5]) + type(response_future).has_more_pages = PropertyMock(side_effect=(True, True, False)) # First True is consumed on check entering list mode + # index access before iteration causes list to be materialized + self.assertEqual(rs[0], expected[0]) + self.assertEqual(rs[9], expected[9]) + # resusable iteration + self.assertListEqual(list(rs), expected) + self.assertListEqual(list(rs), expected) + + self.assertTrue(rs) + + def test_eq(self): + # no pages + expected = list(range(10)) + rs = ResultSet(Mock(has_more_pages=False), expected) + + # eq before iteration causes list to be materialized + self.assertEqual(rs, expected) + + # results can be iterated or indexed once we're materialized + self.assertListEqual(list(rs), expected) + self.assertEqual(rs[9], expected[9]) + self.assertTrue(rs) + + # pages + response_future = Mock(has_more_pages=True) + response_future.result.side_effect = (ResultSet(Mock(), expected[-5:]), ) # ResultSet is iterable, so it must be protected in order to be returned whole by the Mock + rs = ResultSet(response_future, expected[:5]) + type(response_future).has_more_pages = PropertyMock(side_effect=(True, True, False)) + # eq before iteration causes list to be materialized + self.assertEqual(rs, expected) + + # results can be iterated or indexed once we're materialized + self.assertListEqual(list(rs), expected) + self.assertEqual(rs[9], expected[9]) + self.assertTrue(rs) + + def test_bool(self): + self.assertFalse(ResultSet(Mock(has_more_pages=False), [])) + self.assertTrue(ResultSet(Mock(has_more_pages=False), [1])) From a5c433fc75cc018bd71f36a8f59e1699a2d4c9c6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 9 Oct 2015 09:43:40 -0500 Subject: [PATCH 0111/2005] test: further simplify MV meta compaction assertions --- tests/integration/standard/test_metadata.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index cdd11514a2..18298952e0 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1963,12 +1963,11 @@ def test_materialized_view_metadata_alter(self): self.session.execute("CREATE TABLE {0}.table1 (pk int PRIMARY KEY, c int)".format(self.ksname)) self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT c FROM {0}.table1 WHERE c IS NOT NULL PRIMARY KEY (pk, c)".format(self.ksname)) - self.assertRegexpMatches(self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views["mv1"].options["compaction"]["class"], - "SizeTieredCompactionStrategy") + self.assertIn("SizeTieredCompactionStrategy", self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views["mv1"].options["compaction"]["class"]) self.session.execute("ALTER MATERIALIZED VIEW {0}.mv1 WITH compaction = {{ 'class' : 'LeveledCompactionStrategy' }}".format(self.ksname)) - self.assertRegexpMatches(self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views["mv1"].options["compaction"]["class"], - "LeveledCompactionStrategy") + self.assertIn("LeveledCompactionStrategy", self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views["mv1"].options["compaction"]["class"]) + def test_materialized_view_metadata_drop(self): """ From a063421cf48b3c202bc4f61667b987f648619041 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 9 Oct 2015 09:46:44 -0500 Subject: [PATCH 0112/2005] Correct comment typo on peer review --- cassandra/util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/util.py b/cassandra/util.py index fb2902dfc3..6dfeea011c 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -1121,7 +1121,7 @@ def inet_ntop(address_family, packed_ip): import keyword -# similar tot a collections.namedtuple, reproduced here because Python 2.6 did not have the rename logic +# similar to collections.namedtuple, reproduced here because Python 2.6 did not have the rename logic def _positional_rename_invalid_identifiers(field_names): names_out = list(field_names) for index, name in enumerate(field_names): From 7dbe87b39a81deb12b2a38c377fb175252e6c219 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 9 Oct 2015 10:15:08 -0500 Subject: [PATCH 0113/2005] Don't run cqlengine tests in Travis They are covered similarly in our Jenkins CI integration tests, and the seutp here occasionally causes false negatives. --- .travis.yml | 14 -------------- tox.ini | 18 ++---------------- 2 files changed, 2 insertions(+), 30 deletions(-) diff --git a/.travis.yml b/.travis.yml index f5755e2425..08fb492a9f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -19,20 +19,6 @@ before_install: - sudo apt-get install -y build-essential python-dev - sudo apt-get install -y pypy-dev - sudo apt-get install -y libev4 libev-dev - - sudo echo "deb http://www.apache.org/dist/cassandra/debian ${CASS_VER}x main" | sudo tee -a /etc/apt/sources.list - - sudo echo "deb-src http://www.apache.org/dist/cassandra/debian ${CASS_VER}x main" | sudo tee -a /etc/apt/sources.list - - sudo rm -rf ~/.gnupg - - sudo gpg --keyserver pgp.mit.edu --recv-keys F758CE318D77295D - - sudo gpg --export --armor F758CE318D77295D | sudo apt-key add - - - sudo gpg --keyserver pgp.mit.edu --recv-keys 2B5C1B00 - - sudo gpg --export --armor 2B5C1B00 | sudo apt-key add - - - sudo gpg --keyserver pgp.mit.edu --recv-keys 0353B12C - - sudo gpg --export --armor 0353B12C | sudo apt-key add - - - sudo apt-get update - - sudo apt-get -o Dpkg::Options::="--force-confnew" install -y cassandra - - sudo rm -rf /var/lib/cassandra/* - - sudo sh -c "echo 'JVM_OPTS=\"\${JVM_OPTS} -Djava.net.preferIPv4Stack=false\"' >> /etc/cassandra/cassandra-env.sh" - - sudo service cassandra start install: - pip install tox diff --git a/tox.ini b/tox.ini index a266ed5598..a13469ef9f 100644 --- a/tox.ini +++ b/tox.ini @@ -9,26 +9,12 @@ deps = nose [testenv] deps = {[base]deps} - sure - blist + py{26,27,33,34}: blist cython py26: unittest2 py{26,27}: gevent twisted -setenv = USE_CASS_EXTERNAL=1 - LIBEV_EMBED=0 +setenv = LIBEV_EMBED=0 CARES_EMBED=0 changedir = {envtmpdir} commands = nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/ - nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/integration/cqlengine - -[testenv:pypy] -deps = {[base]deps} -commands = nosetests --verbosity=2 {toxinidir}/tests/unit/ - -[testenv:pypy3] -deps = {[base]deps} -commands = nosetests --verbosity=2 {toxinidir}/tests/unit/ - -# cqlengine/test_timestamp.py uses sure, which fails in pypy presently -# could remove sure usage From 07e61b20dee03ce658f2c3bfe21b3adb7f19a70a Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 2 Oct 2015 16:30:16 -0500 Subject: [PATCH 0114/2005] Adding improved metadata tests. Added additional infrastructure for setup and teardown. Fixed various timing issues in tests. --- build.yaml | 8 +- tests/integration/__init__.py | 147 ++++++++++++++++ tests/integration/cqlengine/test_ttl.py | 38 ++-- tests/integration/standard/test_cluster.py | 4 +- tests/integration/standard/test_metadata.py | 131 +++++++------- tests/integration/standard/test_query.py | 164 +++++++++++++++++- .../standard/test_row_factories.py | 54 +++--- 7 files changed, 429 insertions(+), 117 deletions(-) diff --git a/build.yaml b/build.yaml index 6a39582863..b31eac835d 100644 --- a/build.yaml +++ b/build.yaml @@ -25,7 +25,7 @@ build: sudo apt-get install -y libev4 libev-dev fi pip install -r test-requirements.txt - + pip install nose-ignore-docstring if [[ $CYTHON == 'CYTHON' ]]; then pip install cython @@ -38,12 +38,12 @@ build: fi echo "==========RUNNING CQLENGINE TESTS==========" - CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION nosetests -s -v --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true + CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true echo "==========RUNNING INTEGRATION TESTS==========" - CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION nosetests -s -v --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true + CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true echo "==========RUNNING LONG INTEGRATION TESTS==========" - CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION nosetests -s -v --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true + CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true - xunit: - "*_results.xml" \ No newline at end of file diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index b29aa00ff6..4509cba168 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -271,6 +271,7 @@ def execute_until_pass(session, query): raise RuntimeError("Failed to execute query after 100 attempts: {0}".format(query)) + def setup_keyspace(ipformat=None): # wait for nodes to startup time.sleep(10) @@ -332,3 +333,149 @@ def wait_for_down(self): def wait_for_up(self): self.up_event.wait() + + +class BasicKeyspaceUnitTestCase(unittest.TestCase): + """ + This is basic unit test case that provides various utility methods that can be leveraged for testcase setup and tear + down + """ + @property + def keyspace_name(self): + return self.ks_name + + @property + def class_table_name(self): + return self.ks_name + + @property + def function_table_name(self): + return self._testMethodName.lower() + + @classmethod + def drop_keyspace(cls): + execute_until_pass(cls.session, "DROP KEYSPACE {0}".format(cls.ks_name)) + + @classmethod + def create_keyspace(cls, rf): + ddl = "CREATE KEYSPACE {0} WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': '{1}'}}".format(cls.ks_name, rf) + execute_until_pass(cls.session, ddl) + + @classmethod + def common_setup(cls, rf, create_class_table=False, skip_if_cass_version_less_than=None): + cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.session = cls.cluster.connect() + cls.ks_name = cls.__name__.lower() + cls.create_keyspace(rf) + + if create_class_table: + + ddl = ''' + CREATE TABLE {0}.{1} ( + k int PRIMARY KEY, + v int )'''.format(cls.ks_name, cls.ks_name) + execute_until_pass(cls.session, ddl) + + def create_function_table(self): + ddl = ''' + CREATE TABLE {0}.{1} ( + k int PRIMARY KEY, + v int )'''.format(self.keyspace_name, self.function_table_name) + execute_until_pass(self.session, ddl) + + def drop_function_table(self): + ddl = "DROP TABLE {0}.{1} ".format(self.keyspace_name, self.function_table_name) + execute_until_pass(self.session, ddl) + + +class BasicSharedKeyspaceUnitTestCase(BasicKeyspaceUnitTestCase): + """ + This is basic unit test case that can be leveraged to scope a keyspace to a specific test class. + creates a keyspace named after the testclass with a rf of 1. + """ + @classmethod + def setUpClass(cls): + cls.common_setup(1) + + @classmethod + def tearDownClass(cls): + cls.drop_keyspace() + cls.cluster.shutdown() + + +class BasicSharedKeyspaceUnitTestCaseWTable(BasicSharedKeyspaceUnitTestCase): + """ + This is basic unit test case that can be leveraged to scope a keyspace to a specific test class. + creates a keyspace named after the testclass with a rf of 1, and a table named after the class + """ + @classmethod + def setUpClass(self): + self.common_setup(1, True) + + +class BasicSharedKeyspaceUnitTestCaseRF2(BasicSharedKeyspaceUnitTestCase): + """ + This is basic unit test case that can be leveraged to scope a keyspace to a specific test class. + creates a keyspace named after the test class with a rf of 2, and a table named after the class + """ + @classmethod + def setUpClass(self): + self.common_setup(2) + + +class BasicSharedKeyspaceUnitTestCaseWTable(BasicSharedKeyspaceUnitTestCase): + """ + This is basic unit test case that can be leveraged to scope a keyspace to a specific test class. + creates a keyspace named after the testc lass with a rf of 2, and a table named after the class + """ + @classmethod + def setUpClass(self): + self.common_setup(2, True) + + +class BasicSharedKeyspaceUnitTestCaseRF3(BasicSharedKeyspaceUnitTestCase): + """ + This is basic unit test case that can be leveraged to scope a keyspace to a specific test class. + creates a keyspace named after the test class with a rf of 3 + """ + @classmethod + def setUpClass(self): + self.common_setup(3) + + +class BasicSharedKeyspaceUnitTestCaseRF3WTable(BasicSharedKeyspaceUnitTestCase): + """ + This is basic unit test case that can be leveraged to scope a keyspace to a specific test class. + creates a keyspace named after the test class with a rf of 3 and a table named after the class + """ + @classmethod + def setUpClass(self): + self.common_setup(3, True) + + +class BasicSharedKeyspaceUnitTestCaseWFunctionTable(BasicSharedKeyspaceUnitTestCase): + """" + This is basic unit test case that can be leveraged to scope a keyspace to a specific test class. + creates a keyspace named after the test class with a rf of 3 and a table named after the class + the table is scoped to just the unit test and will be removed. + + """ + def setUp(self): + self.create_function_table() + + def tearDown(self): + self.drop_function_table() + + +class BasicSegregatedKeyspaceUnitTestCase(BasicKeyspaceUnitTestCase): + """ + This unit test will create and teardown a keyspace for each individual unit tests. + It has overhead and should only be used with complex unit test were sharing a keyspace will + cause issues. + """ + def setUp(self): + self.common_setup(1) + + def tearDown(self): + self.drop_keyspace() + self.cluster.shutdown() diff --git a/tests/integration/cqlengine/test_ttl.py b/tests/integration/cqlengine/test_ttl.py index e86b2cbf38..811fdbadab 100644 --- a/tests/integration/cqlengine/test_ttl.py +++ b/tests/integration/cqlengine/test_ttl.py @@ -12,6 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + from cassandra.cqlengine.management import sync_table, drop_table from tests.integration.cqlengine.base import BaseCassEngTestCase from cassandra.cqlengine.models import Model @@ -19,12 +25,13 @@ from cassandra.cqlengine import columns import mock from cassandra.cqlengine.connection import get_session +from tests.integration import CASSANDRA_VERSION class TestTTLModel(Model): - id = columns.UUID(primary_key=True, default=lambda:uuid4()) - count = columns.Integer() - text = columns.Text(required=False) + id = columns.UUID(primary_key=True, default=lambda: uuid4()) + count = columns.Integer() + text = columns.Text(required=False) class BaseTTLTest(BaseCassEngTestCase): @@ -42,24 +49,26 @@ def tearDownClass(cls): class TestDefaultTTLModel(Model): __options__ = {'default_time_to_live': 20} - id = columns.UUID(primary_key=True, default=lambda:uuid4()) - count = columns.Integer() - text = columns.Text(required=False) + id = columns.UUID(primary_key=True, default=lambda:uuid4()) + count = columns.Integer() + text = columns.Text(required=False) class BaseDefaultTTLTest(BaseCassEngTestCase): @classmethod def setUpClass(cls): - super(BaseDefaultTTLTest, cls).setUpClass() - sync_table(TestDefaultTTLModel) - sync_table(TestTTLModel) + if CASSANDRA_VERSION >= '2.0': + super(BaseDefaultTTLTest, cls).setUpClass() + sync_table(TestDefaultTTLModel) + sync_table(TestTTLModel) @classmethod def tearDownClass(cls): - super(BaseDefaultTTLTest, cls).tearDownClass() - drop_table(TestDefaultTTLModel) - drop_table(TestTTLModel) + if CASSANDRA_VERSION >= '2.0': + super(BaseDefaultTTLTest, cls).tearDownClass() + drop_table(TestDefaultTTLModel) + drop_table(TestTTLModel) class TTLQueryTests(BaseTTLTest): @@ -91,7 +100,6 @@ def test_queryset_is_returned_on_class(self): self.assertTrue(isinstance(qs, TestTTLModel.__queryset__), type(qs)) - class TTLInstanceUpdateTest(BaseTTLTest): def test_update_includes_ttl(self): session = get_session() @@ -109,9 +117,6 @@ def test_update_syntax_valid(self): model.ttl(60).update(text="goodbye forever") - - - class TTLInstanceTest(BaseTTLTest): def test_instance_is_returned(self): """ @@ -151,6 +156,7 @@ def test_ttl_included_with_blind_update(self): self.assertIn("USING TTL", query) +@unittest.skipIf(CASSANDRA_VERSION < '2.0', "default_time_to_Live was introduce in C* 2.0, currently running {0}".format(CASSANDRA_VERSION)) class TTLDefaultTest(BaseDefaultTTLTest): def test_default_ttl_not_set(self): session = get_session() diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 5fe978868c..21ceddf4bf 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -503,7 +503,7 @@ def test_string_coverage(self): cluster.shutdown() def test_idle_heartbeat(self): - interval = 1 + interval = 2 cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=interval) if PROTOCOL_VERSION < 3: cluster.set_core_connections_per_host(HostDistance.LOCAL, 1) @@ -520,7 +520,7 @@ def test_idle_heartbeat(self): connection_request_ids[id(c)] = deque(c.request_ids) # copy of request ids # let two heatbeat intervals pass (first one had startup messages in it) - time.sleep(2 * interval + interval/10.) + time.sleep(2 * interval + interval/2) connections = [c for holders in cluster.get_connection_holders() for c in holders.get_connections()] diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 18298952e0..5ec484522a 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -32,8 +32,10 @@ get_schema_parser) from cassandra.policies import SimpleConvictionPolicy from cassandra.pool import Host +from cassandra.query import SimpleStatement, ConsistencyLevel -from tests.integration import get_cluster, use_singledc, PROTOCOL_VERSION, get_server_versions, execute_until_pass +from tests.integration import get_cluster, use_singledc, PROTOCOL_VERSION, get_server_versions, execute_until_pass, \ + BasicSharedKeyspaceUnitTestCase, BasicSegregatedKeyspaceUnitTestCase def setup_module(): @@ -307,8 +309,8 @@ def test_indexes(self): statements = [s.strip() for s in statements.split(';')] statements = list(filter(bool, statements)) self.assertEqual(3, len(statements)) - self.assertEqual(d_index, statements[1]) - self.assertEqual(e_index, statements[2]) + self.assertIn(d_index, statements) + self.assertIn(e_index, statements) # make sure indexes are included in KeyspaceMetadata.export_as_string() ksmeta = self.cluster.metadata.keyspaces[self.ksname] @@ -1901,23 +1903,17 @@ def test_bad_user_aggregate(self): self.assertIn("/*\nWarning:", m.export_as_string()) -class MaterializedViewMetadataTest(unittest.TestCase): - - ksname = "materialized_view_test" +class MaterializedViewMetadataTestSimple(BasicSharedKeyspaceUnitTestCase): def setUp(self): if CASS_SERVER_VERSION < (3, 0): raise unittest.SkipTest("Materialized views require Cassandra 3.0+") - - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) - self.session = self.cluster.connect() - execute_until_pass(self.session, - "CREATE KEYSPACE {0} WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': '1'}}" - .format(self.ksname)) + self.session.execute("CREATE TABLE {0}.{1} (pk int PRIMARY KEY, c int)".format(self.keyspace_name, self.function_table_name)) + self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT c FROM {0}.{1} WHERE c IS NOT NULL PRIMARY KEY (pk, c)".format(self.keyspace_name, self.function_table_name)) def tearDown(self): - execute_until_pass(self.session, "DROP KEYSPACE {0}".format(self.ksname)) - self.cluster.shutdown() + self.session.execute("DROP MATERIALIZED VIEW {0}.mv1".format(self.keyspace_name)) + self.session.execute("DROP TABLE {0}.{1}".format(self.keyspace_name, self.function_table_name)) def test_materialized_view_metadata_creation(self): """ @@ -1935,14 +1931,11 @@ def test_materialized_view_metadata_creation(self): @test_category metadata """ - self.session.execute("CREATE TABLE {0}.table1 (pk int PRIMARY KEY, c int)".format(self.ksname)) - self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT c FROM {0}.table1 WHERE c IS NOT NULL PRIMARY KEY (pk, c)".format(self.ksname)) + self.assertIn("mv1", self.cluster.metadata.keyspaces[self.keyspace_name].views) + self.assertIn("mv1", self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) - self.assertIn("mv1", self.cluster.metadata.keyspaces[self.ksname].views) - self.assertIn("mv1", self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views) - - self.assertEqual(self.ksname, self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views["mv1"].keyspace_name) - self.assertEqual("table1", self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views["mv1"].base_table_name) + self.assertEqual(self.keyspace_name, self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views["mv1"].keyspace_name) + self.assertEqual(self.function_table_name, self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views["mv1"].base_table_name) def test_materialized_view_metadata_alter(self): """ @@ -1959,15 +1952,10 @@ def test_materialized_view_metadata_alter(self): @test_category metadata """ + self.assertIn("SizeTieredCompactionStrategy", self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views["mv1"].options["compaction"]["class"] ) - self.session.execute("CREATE TABLE {0}.table1 (pk int PRIMARY KEY, c int)".format(self.ksname)) - self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT c FROM {0}.table1 WHERE c IS NOT NULL PRIMARY KEY (pk, c)".format(self.ksname)) - - self.assertIn("SizeTieredCompactionStrategy", self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views["mv1"].options["compaction"]["class"]) - - self.session.execute("ALTER MATERIALIZED VIEW {0}.mv1 WITH compaction = {{ 'class' : 'LeveledCompactionStrategy' }}".format(self.ksname)) - self.assertIn("LeveledCompactionStrategy", self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views["mv1"].options["compaction"]["class"]) - + self.session.execute("ALTER MATERIALIZED VIEW {0}.mv1 WITH compaction = {{ 'class' : 'LeveledCompactionStrategy' }}".format(self.keyspace_name)) + self.assertIn("LeveledCompactionStrategy", self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views["mv1"].options["compaction"]["class"]) def test_materialized_view_metadata_drop(self): """ @@ -1985,14 +1973,21 @@ def test_materialized_view_metadata_drop(self): @test_category metadata """ - self.session.execute("CREATE TABLE {0}.table1 (pk int PRIMARY KEY, c int)".format(self.ksname)) - self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT c FROM {0}.table1 WHERE c IS NOT NULL PRIMARY KEY (pk, c)".format(self.ksname)) - self.session.execute("DROP MATERIALIZED VIEW {0}.mv1".format(self.ksname)) + self.session.execute("DROP MATERIALIZED VIEW {0}.mv1".format(self.keyspace_name)) + + self.assertNotIn("mv1", self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) + self.assertNotIn("mv1", self.cluster.metadata.keyspaces[self.keyspace_name].views) + self.assertDictEqual({}, self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) + self.assertDictEqual({}, self.cluster.metadata.keyspaces[self.keyspace_name].views) + + self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT c FROM {0}.{1} WHERE c IS NOT NULL PRIMARY KEY (pk, c)".format(self.keyspace_name, self.function_table_name)) - self.assertNotIn("mv1", self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views) - self.assertNotIn("mv1", self.cluster.metadata.keyspaces[self.ksname].views) - self.assertDictEqual({}, self.cluster.metadata.keyspaces[self.ksname].tables["table1"].views) - self.assertDictEqual({}, self.cluster.metadata.keyspaces[self.ksname].views) + +class MaterializedViewMetadataTestComplex(BasicSegregatedKeyspaceUnitTestCase): + def setUp(self): + if CASS_SERVER_VERSION < (3, 0): + raise unittest.SkipTest("Materialized views require Cassandra 3.0+") + super(MaterializedViewMetadataTestComplex, self).setUp() def test_create_view_metadata(self): """ @@ -2016,7 +2011,7 @@ def test_create_view_metadata(self): day INT, score INT, PRIMARY KEY (user, game, year, month, day) - )""".format(self.ksname) + )""".format(self.keyspace_name) self.session.execute(create_table) @@ -2024,11 +2019,11 @@ def test_create_view_metadata(self): SELECT game, year, month, score, user, day FROM {0}.scores WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL PRIMARY KEY ((game, year, month), score, user, day) - WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)""".format(self.ksname) + WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)""".format(self.keyspace_name) self.session.execute(create_mv) - score_table = self.cluster.metadata.keyspaces[self.ksname].tables['scores'] - mv = self.cluster.metadata.keyspaces[self.ksname].views['monthlyhigh'] + score_table = self.cluster.metadata.keyspaces[self.keyspace_name].tables['scores'] + mv = self.cluster.metadata.keyspaces[self.keyspace_name].views['monthlyhigh'] self.assertIsNotNone(score_table.views["monthlyhigh"]) self.assertIsNotNone(len(score_table.views), 1) @@ -2056,7 +2051,7 @@ def test_create_view_metadata(self): self.assertIsNotNone(score_table.columns['score']) # Validate basic mv information - self.assertEquals(mv.keyspace_name, self.ksname) + self.assertEquals(mv.keyspace_name, self.keyspace_name) self.assertEquals(mv.name, "monthlyhigh") self.assertEquals(mv.base_table_name, "scores") self.assertFalse(mv.include_all_columns) @@ -2110,7 +2105,7 @@ def test_create_view_metadata(self): self.assertEquals(day_column.is_static, mv.clustering_key[2].is_static) self.assertEquals(day_column.is_reversed, mv.clustering_key[2].is_reversed) - def test_create_mv_changes(self): + def test_base_table_mv_changes(self): """ test to ensure that materialized view metadata is properly updated with base table changes @@ -2132,39 +2127,53 @@ def test_create_mv_changes(self): day INT, score TEXT, PRIMARY KEY (user, game, year, month, day) - )""".format(self.ksname) + )""".format(self.keyspace_name) self.session.execute(create_table) - view_name = 'monthlyhigh' - create_mv = """CREATE MATERIALIZED VIEW {0}.{1} AS + create_mv = """CREATE MATERIALIZED VIEW {0}.monthlyhigh AS SELECT game, year, month, score, user, day FROM {0}.scores WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL PRIMARY KEY ((game, year, month), score, user, day) - WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)""".format(self.ksname, view_name) + WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)""".format(self.keyspace_name) + + create_mv_alltime = """CREATE MATERIALIZED VIEW {0}.alltimehigh AS + SELECT * FROM {0}.scores + WHERE game IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND day IS NOT NULL + PRIMARY KEY (game, score, user, year, month, day) + WITH CLUSTERING ORDER BY (score DESC)""".format(self.keyspace_name) self.session.execute(create_mv) - score_table = self.cluster.metadata.keyspaces[self.ksname].tables['scores'] - self.assertIn(view_name, score_table.views) - self.assertIn(view_name, self.cluster.metadata.keyspaces[self.ksname].views) + self.session.execute(create_mv_alltime) - insert_fouls = """ALTER TABLE {0}.scores ADD fouls INT""".format(self.ksname) + score_table = self.cluster.metadata.keyspaces[self.keyspace_name].tables['scores'] + + self.assertIsNotNone(score_table.views["monthlyhigh"]) + self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 2) + + insert_fouls = """ALTER TABLE {0}.scores ADD fouls INT""".format((self.keyspace_name)) self.session.execute(insert_fouls) - self.assertIn(view_name, self.cluster.metadata.keyspaces[self.ksname].views) + self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 2) - alter_scores = """ALTER TABLE {0}.scores ALTER score TYPE blob""".format(self.ksname) + alter_scores = """ALTER TABLE {0}.scores ALTER score blob""".format((self.keyspace_name)) - self.session.execute(alter_scores) # ERROR https://issues.apache.org/jira/browse/CASSANDRA-10424 - self.assertIn(view_name, self.cluster.metadata.keyspaces[self.ksname].views) + self.session.execute(alter_scores) + self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 2) - score_column = self.cluster.metadata.keyspaces[self.ksname].tables['scores'].columns['score'] + score_column = self.cluster.metadata.keyspaces[self.keyspace_name].tables['scores'].columns['score'] self.assertEquals(score_column.typestring, 'blob') - score_mv_column = self.cluster.metadata.keyspaces[self.ksname].views["monthlyhigh"].columns['score'] + score_mv_column = self.cluster.metadata.keyspaces[self.keyspace_name].views["monthlyhigh"].columns['score'] self.assertEquals(score_mv_column.typestring, 'blob') + mv_alltime = self.cluster.metadata.keyspaces[self.keyspace_name].views["alltimehigh"] + self.assertIn("fouls", mv_alltime.columns) + + mv_alltime_fouls_comumn = self.cluster.metadata.keyspaces[self.keyspace_name].views["alltimehigh"].columns['fouls'] + self.assertEquals(mv_alltime_fouls_comumn.typestring, 'int') + def test_metadata_with_quoted_identifiers(self): """ test to ensure that materialized view metadata is properly constructed when quoted identifiers are used @@ -2184,7 +2193,7 @@ def test_metadata_with_quoted_identifiers(self): "theKey" int, "the;Clustering" int, "the Value" int, - PRIMARY KEY ("theKey", "the;Clustering"))""".format(self.ksname) + PRIMARY KEY ("theKey", "the;Clustering"))""".format(self.keyspace_name) self.session.execute(create_table) @@ -2192,12 +2201,12 @@ def test_metadata_with_quoted_identifiers(self): SELECT "theKey", "the;Clustering", "the Value" FROM {0}.t1 WHERE "theKey" IS NOT NULL AND "the;Clustering" IS NOT NULL AND "the Value" IS NOT NULL - PRIMARY KEY ("theKey", "the;Clustering")""".format(self.ksname) + PRIMARY KEY ("theKey", "the;Clustering")""".format(self.keyspace_name) self.session.execute(create_mv) - t1_table = self.cluster.metadata.keyspaces[self.ksname].tables['t1'] - mv = self.cluster.metadata.keyspaces[self.ksname].views['mv1'] + t1_table = self.cluster.metadata.keyspaces[self.keyspace_name].tables['t1'] + mv = self.cluster.metadata.keyspaces[self.keyspace_name].views['mv1'] self.assertIsNotNone(t1_table.views["mv1"]) self.assertIsNotNone(len(t1_table.views), 1) @@ -2216,7 +2225,7 @@ def test_metadata_with_quoted_identifiers(self): self.assertIsNotNone(t1_table.columns['the Value']) # Validate basic mv information - self.assertEquals(mv.keyspace_name, self.ksname) + self.assertEquals(mv.keyspace_name, self.keyspace_name) self.assertEquals(mv.name, "mv1") self.assertEquals(mv.base_table_name, "t1") self.assertFalse(mv.include_all_columns) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 92ffbf68f3..77374d2759 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -26,13 +26,15 @@ from cassandra.cluster import Cluster from cassandra.policies import HostDistance -from tests.integration import use_singledc, PROTOCOL_VERSION +from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, get_server_versions import re def setup_module(): use_singledc() + global CASS_SERVER_VERSION + CASS_SERVER_VERSION = get_server_versions()[0] class QueryTests(unittest.TestCase): @@ -617,3 +619,163 @@ def test_inherit_first_rk_prepared_param(self): self.assertIsNotNone(batch.routing_key) self.assertEqual(batch.routing_key, self.prepared.bind((1, 0)).routing_key) + + +class MaterializedViewQueryTest(BasicSharedKeyspaceUnitTestCase): + + def setUp(self): + if CASS_SERVER_VERSION < (3, 0): + raise unittest.SkipTest("Materialized views require Cassandra 3.0+") + + def test_mv_filtering(self): + """ + Test to ensure that cql filtering where clauses are properly supported in the python driver. + + test_mv_filtering Tests that various complex MV where clauses produce the correct results. It also validates that + these results and the grammar is supported appropriately. + + @since 3.0.0 + @jira_ticket PYTHON-399 + @expected_result Materialized view where clauses should produce the appropriate results. + + @test_category materialized_view + """ + create_table = """CREATE TABLE {0}.scores( + user TEXT, + game TEXT, + year INT, + month INT, + day INT, + score INT, + PRIMARY KEY (user, game, year, month, day) + )""".format(self.ksname) + + self.session.execute(create_table) + + create_mv_alltime = """CREATE MATERIALIZED VIEW {0}.alltimehigh AS + SELECT * FROM {0}.scores + WHERE game IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND day IS NOT NULL + PRIMARY KEY (game, score, user, year, month, day) + WITH CLUSTERING ORDER BY (score DESC)""".format(self.ksname) + + create_mv_dailyhigh = """CREATE MATERIALIZED VIEW {0}.dailyhigh AS + SELECT * FROM {0}.scores + WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND day IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL + PRIMARY KEY ((game, year, month, day), score, user) + WITH CLUSTERING ORDER BY (score DESC)""".format(self.ksname) + + create_mv_monthlyhigh = """CREATE MATERIALIZED VIEW {0}.monthlyhigh AS + SELECT * FROM {0}.scores + WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL + PRIMARY KEY ((game, year, month), score, user, day) + WITH CLUSTERING ORDER BY (score DESC)""".format(self.ksname) + + create_mv_filtereduserhigh = """CREATE MATERIALIZED VIEW {0}.filtereduserhigh AS + SELECT * FROM {0}.scores + WHERE user in ('jbellis', 'pcmanus') AND game IS NOT NULL AND score IS NOT NULL AND year is NOT NULL AND day is not NULL and month IS NOT NULL + PRIMARY KEY (game, score, user, year, month, day) + WITH CLUSTERING ORDER BY (score DESC)""".format(self.ksname) + + self.session.execute(create_mv_alltime) + self.session.execute(create_mv_dailyhigh) + self.session.execute(create_mv_monthlyhigh) + self.session.execute(create_mv_filtereduserhigh) + + prepared_insert = self.session.prepare("""INSERT INTO {0}.scores (user, game, year, month, day, score) VALUES (?, ?, ? ,? ,?, ?)""".format(self.ksname)) + + bound = prepared_insert.bind(('pcmanus', 'Coup', 2015, 5, 1, 4000)) + self.session.execute(bound) + bound = prepared_insert.bind(('jbellis', 'Coup', 2015, 5, 3, 1750)) + self.session.execute(bound) + bound = prepared_insert.bind(('yukim', 'Coup', 2015, 5, 3, 2250)) + self.session.execute(bound) + bound = prepared_insert.bind(('tjake', 'Coup', 2015, 5, 3, 500)) + self.session.execute(bound) + bound = prepared_insert.bind(('iamaleksey', 'Coup', 2015, 6, 1, 2500)) + self.session.execute(bound) + bound = prepared_insert.bind(('tjake', 'Coup', 2015, 6, 2, 1000)) + self.session.execute(bound) + bound = prepared_insert.bind(('pcmanus', 'Coup', 2015, 6, 2, 2000)) + self.session.execute(bound) + bound = prepared_insert.bind(('jmckenzie', 'Coup', 2015, 6, 9, 2700)) + self.session.execute(bound) + bound = prepared_insert.bind(('jbellis', 'Coup', 2015, 6, 20, 3500)) + self.session.execute(bound) + bound = prepared_insert.bind(('jbellis', 'Checkers', 2015, 6, 20, 1200)) + self.session.execute(bound) + bound = prepared_insert.bind(('jbellis', 'Chess', 2015, 6, 21, 3500)) + self.session.execute(bound) + bound = prepared_insert.bind(('pcmanus', 'Chess', 2015, 1, 25, 3200)) + self.session.execute(bound) + + # Test simple statement and alltime high filtering + query_statement = SimpleStatement("SELECT * FROM {0}.alltimehigh WHERE game='Coup'".format(self.ksname), + consistency_level=ConsistencyLevel.QUORUM) + results = self.session.execute(query_statement) + self.assertEquals(results[0].game, 'Coup') + self.assertEquals(results[0].year, 2015) + self.assertEquals(results[0].month, 5) + self.assertEquals(results[0].day, 1) + self.assertEquals(results[0].score, 4000) + self.assertEquals(results[0].user, "pcmanus") + + # Test prepared statement and daily high filtering + prepared_query = self.session.prepare("SELECT * FROM {0}.dailyhigh WHERE game=? AND year=? AND month=? and day=?".format(self.ksname).format(self.ksname)) + bound_query = prepared_query.bind(("Coup", 2015, 6, 2)) + results = self.session.execute(bound_query) + self.assertEquals(results[0].game, 'Coup') + self.assertEquals(results[0].year, 2015) + self.assertEquals(results[0].month, 6) + self.assertEquals(results[0].day, 2) + self.assertEquals(results[0].score, 2000) + self.assertEquals(results[0].user, "pcmanus") + + self.assertEquals(results[1].game, 'Coup') + self.assertEquals(results[1].year, 2015) + self.assertEquals(results[1].month, 6) + self.assertEquals(results[1].day, 2) + self.assertEquals(results[1].score, 1000) + self.assertEquals(results[1].user, "tjake") + + # Test montly high range queries + prepared_query = self.session.prepare("SELECT * FROM {0}.monthlyhigh WHERE game=? AND year=? AND month=? and score >= ? and score <= ?".format(self.ksname).format(self.ksname)) + bound_query = prepared_query.bind(("Coup", 2015, 6, 2500, 3500)) + results = self.session.execute(bound_query) + self.assertEquals(results[0].game, 'Coup') + self.assertEquals(results[0].year, 2015) + self.assertEquals(results[0].month, 6) + self.assertEquals(results[0].day, 20) + self.assertEquals(results[0].score, 3500) + self.assertEquals(results[0].user, "jbellis") + + self.assertEquals(results[1].game, 'Coup') + self.assertEquals(results[1].year, 2015) + self.assertEquals(results[1].month, 6) + self.assertEquals(results[1].day, 9) + self.assertEquals(results[1].score, 2700) + self.assertEquals(results[1].user, "jmckenzie") + + self.assertEquals(results[2].game, 'Coup') + self.assertEquals(results[2].year, 2015) + self.assertEquals(results[2].month, 6) + self.assertEquals(results[2].day, 1) + self.assertEquals(results[2].score, 2500) + self.assertEquals(results[2].user, "iamaleksey") + + # Test filtered user high scores + query_statement = SimpleStatement("SELECT * FROM {0}.filtereduserhigh WHERE game='Chess'".format(self.ksname), + consistency_level=ConsistencyLevel.QUORUM) + results = self.session.execute(query_statement) + self.assertEquals(results[0].game, 'Chess') + self.assertEquals(results[0].year, 2015) + self.assertEquals(results[0].month, 6) + self.assertEquals(results[0].day, 21) + self.assertEquals(results[0].score, 3500) + self.assertEquals(results[0].user, "jbellis") + + self.assertEquals(results[1].game, 'Chess') + self.assertEquals(results[1].year, 2015) + self.assertEquals(results[1].month, 1) + self.assertEquals(results[1].day, 25) + self.assertEquals(results[1].score, 3200) + self.assertEquals(results[1].user, "pcmanus") \ No newline at end of file diff --git a/tests/integration/standard/test_row_factories.py b/tests/integration/standard/test_row_factories.py index 4fe5cf3916..3c056739f2 100644 --- a/tests/integration/standard/test_row_factories.py +++ b/tests/integration/standard/test_row_factories.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION +from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCaseWFunctionTable try: import unittest2 as unittest @@ -28,45 +28,37 @@ def setup_module(): use_singledc() -class RowFactoryTests(unittest.TestCase): +class RowFactoryTests(BasicSharedKeyspaceUnitTestCaseWFunctionTable): """ Test different row_factories and access code """ - def setUp(self): - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) - self.session = self.cluster.connect() + super(RowFactoryTests, self).setUp() + self.insert1 = ''' + INSERT INTO {0}.{1} + ( k , v ) + VALUES + ( 1 , 1 ) + '''.format(self.keyspace_name, self.function_table_name) + + self.insert2 = ''' + INSERT INTO {0}.{1} + ( k , v ) + VALUES + ( 2 , 2 ) + '''.format(self.keyspace_name, self.function_table_name) + + self.select = ''' + SELECT * FROM {0}.{1} + '''.format(self.keyspace_name, self.function_table_name) def tearDown(self): - self.cluster.shutdown() - - truncate = ''' - TRUNCATE test3rf.test - ''' - - insert1 = ''' - INSERT INTO test3rf.test - ( k , v ) - VALUES - ( 1 , 1 ) - ''' - - insert2 = ''' - INSERT INTO test3rf.test - ( k , v ) - VALUES - ( 2 , 2 ) - ''' - - select = ''' - SELECT * FROM test3rf.test - ''' + self.drop_function_table() def test_tuple_factory(self): session = self.session session.row_factory = tuple_factory - session.execute(self.truncate) session.execute(self.insert1) session.execute(self.insert2) @@ -87,7 +79,6 @@ def test_named_tuple_factory(self): session = self.session session.row_factory = named_tuple_factory - session.execute(self.truncate) session.execute(self.insert1) session.execute(self.insert2) @@ -107,7 +98,6 @@ def test_dict_factory(self): session = self.session session.row_factory = dict_factory - session.execute(self.truncate) session.execute(self.insert1) session.execute(self.insert2) @@ -128,7 +118,6 @@ def test_ordered_dict_factory(self): session = self.session session.row_factory = ordered_dict_factory - session.execute(self.truncate) session.execute(self.insert1) session.execute(self.insert2) @@ -154,7 +143,6 @@ class NamedTupleFactoryAndNumericColNamesTests(unittest.TestCase): def setup_class(cls): cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) cls.session = cls.cluster.connect() - cls._cass_version, cls._cql_version = get_server_versions() ddl = ''' CREATE TABLE test1rf.table_num_col ( key blob PRIMARY KEY, "626972746864617465" blob ) From 97bf563b7ec30fed5a1158d815a352b0fbd75a55 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 9 Oct 2015 13:03:14 -0500 Subject: [PATCH 0115/2005] default CL is now LOCAL_QUORUM PYTHON-416 --- cassandra/cluster.py | 6 +++++- docs/api/cassandra/cluster.rst | 3 +++ docs/getting_started.rst | 8 ++++++-- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 0f3a554faa..555acd0c58 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1474,13 +1474,17 @@ class Session(object): .. versionadded:: 2.0.0 """ - default_consistency_level = ConsistencyLevel.ONE + default_consistency_level = ConsistencyLevel.LOCAL_QUORUM """ The default :class:`~ConsistencyLevel` for operations executed through this session. This default may be overridden by setting the :attr:`~.Statement.consistency_level` on individual statements. .. versionadded:: 1.2.0 + + .. versionchanged:: 3.0.0 + + default changed from ONE to LOCAL_QUORUM """ max_trace_wait = 2.0 diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 1ccd9282b7..ead6457d5a 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -98,6 +98,9 @@ .. autoattribute:: default_timeout + .. autoattribute:: default_consistency_level + :annotation: = LOCAL_QUORUM + .. autoattribute:: row_factory .. autoattribute:: default_fetch_size diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 9e4384ce2f..2920b9ef27 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -327,8 +327,12 @@ The consistency level used for a query determines how many of the replicas of the data you are interacting with need to respond for the query to be considered a success. -By default, :attr:`.ConsistencyLevel.ONE` will be used for all queries. To -specify a different consistency level, you will need to wrap your queries +By default, :attr:`.ConsistencyLevel.LOCAL_QUORUM` will be used for all queries. +This is to provide unsurprising default behavior with respect to read-after-write +consistency. However, many applications will prefer :attr:`.ConsistencyLevel.ONE` +to maximize performance and availability. +You can specify a different default for the session on :attr:`.Session.default_consistency_level`. +To specify a different consistency level per request, you will need to wrap your queries in a :class:`~.SimpleStatement`: .. code-block:: python From 3fdf3fb451f237549b79f00fd9295e31c8b11215 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 9 Oct 2015 15:02:59 -0500 Subject: [PATCH 0116/2005] cqle: Handle and return descriptive error for unknown table option PYTHON-406 --- cassandra/cqlengine/management.py | 5 ++++- .../integration/cqlengine/management/test_management.py | 9 +++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 648e870554..d6bf5ad9c9 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -422,7 +422,10 @@ def _update_options(model): update_options = {} for name, value in model_options.items(): - existing_value = existing_options[name] + try: + existing_value = existing_options[name] + except KeyError: + raise KeyError("Invalid table option: '%s'; known options: %s" % (name, existing_options.keys())) if isinstance(existing_value, six.string_types): if value != existing_value: update_options[name] = value diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 4b62fcfcd1..650beded8f 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -217,6 +217,15 @@ def test_table_property_update(self): self.assertDictContainsSubset(ModelWithTableProperties.__options__, table_options) + def test_bogus_option_update(self): + sync_table(ModelWithTableProperties) + option = 'no way will this ever be an option' + try: + ModelWithTableProperties.__options__[option] = 'what was I thinking?' + self.assertRaisesRegexp(KeyError, "Invalid table option.*%s.*" % option, sync_table, ModelWithTableProperties) + finally: + ModelWithTableProperties.__options__.pop(option, None) + class SyncTableTests(BaseCassEngTestCase): From bd0a8647f2492d98bc6af5543a5d7540ef3f95d3 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 9 Oct 2015 15:32:32 -0500 Subject: [PATCH 0117/2005] Changing MV tests to work around async metadata updates --- tests/integration/standard/test_metadata.py | 72 +++++++++++++++++---- 1 file changed, 58 insertions(+), 14 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 5ec484522a..b9c61a5dd8 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -20,6 +20,7 @@ import difflib import six import sys +import time from mock import Mock, patch from cassandra import AlreadyExists, SignatureDescriptor, UserFunctionDescriptor, UserAggregateDescriptor @@ -2105,17 +2106,16 @@ def test_create_view_metadata(self): self.assertEquals(day_column.is_static, mv.clustering_key[2].is_static) self.assertEquals(day_column.is_reversed, mv.clustering_key[2].is_reversed) - def test_base_table_mv_changes(self): + def test_base_table_column_addition_mv(self): """ - test to ensure that materialized view metadata is properly updated with base table changes + test to ensure that materialized view metadata is properly updated with base columns are added - test_create_view_metadata tests that materialized views metadata is properly updated when corresponding changes - are made to the base table. We test adding a new column and changing base table columns types. We then ensure - that changes are surfaced in the MV accordingly. + test_create_view_metadata tests that materialized views metadata is properly updated when columns are added to + the base table. @since 3.0.0 @jira_ticket PYTHON-419 - @expected_result Materialized view metadata should be update correctly + @expected_result Materialized view metadata should be updated correctly @test_category metadata """ @@ -2150,6 +2150,7 @@ def test_base_table_mv_changes(self): score_table = self.cluster.metadata.keyspaces[self.keyspace_name].tables['scores'] self.assertIsNotNone(score_table.views["monthlyhigh"]) + self.assertIsNotNone(score_table.views["alltimehigh"]) self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 2) insert_fouls = """ALTER TABLE {0}.scores ADD fouls INT""".format((self.keyspace_name)) @@ -2157,10 +2158,59 @@ def test_base_table_mv_changes(self): self.session.execute(insert_fouls) self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 2) - alter_scores = """ALTER TABLE {0}.scores ALTER score blob""".format((self.keyspace_name)) + score_table = self.cluster.metadata.keyspaces[self.keyspace_name].tables['scores'] + self.assertIn("fouls", score_table.columns) + + # This is a workaround for mv notifications being separate from base table schema responses. + # This maybe fixed with future protocol changes + for i in range(10): + mv_alltime = self.cluster.metadata.keyspaces[self.keyspace_name].views["alltimehigh"] + if("fouls" in mv_alltime.columns): + break + time.sleep(.2) + + self.assertIn("fouls", mv_alltime.columns) + + mv_alltime_fouls_comumn = self.cluster.metadata.keyspaces[self.keyspace_name].views["alltimehigh"].columns['fouls'] + self.assertEquals(mv_alltime_fouls_comumn.typestring, 'int') + + def test_base_table_type_alter_mv(self): + """ + test to ensure that materialized view metadata is properly updated when a type in the base table + is updated. + + test_create_view_metadata tests that materialized views metadata is properly updated when the type of base table + column is changed. + @since 3.0.0 + @jira_ticket CASSANDRA-10424 + @expected_result Materialized view metadata should be updated correctly + + @test_category metadata + """ + create_table = """CREATE TABLE {0}.scores( + user TEXT, + game TEXT, + year INT, + month INT, + day INT, + score TEXT, + PRIMARY KEY (user, game, year, month, day) + )""".format(self.keyspace_name) + + self.session.execute(create_table) + + create_mv = """CREATE MATERIALIZED VIEW {0}.monthlyhigh AS + SELECT game, year, month, score, user, day FROM {0}.scores + WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL + PRIMARY KEY ((game, year, month), score, user, day) + WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)""".format(self.keyspace_name) + + self.session.execute(create_mv) + self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 1) + alter_scores = """ALTER TABLE {0}.scores ALTER score TYPE blob""".format((self.keyspace_name)) self.session.execute(alter_scores) - self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 2) + self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 1) score_column = self.cluster.metadata.keyspaces[self.keyspace_name].tables['scores'].columns['score'] self.assertEquals(score_column.typestring, 'blob') @@ -2168,12 +2218,6 @@ def test_base_table_mv_changes(self): score_mv_column = self.cluster.metadata.keyspaces[self.keyspace_name].views["monthlyhigh"].columns['score'] self.assertEquals(score_mv_column.typestring, 'blob') - mv_alltime = self.cluster.metadata.keyspaces[self.keyspace_name].views["alltimehigh"] - self.assertIn("fouls", mv_alltime.columns) - - mv_alltime_fouls_comumn = self.cluster.metadata.keyspaces[self.keyspace_name].views["alltimehigh"].columns['fouls'] - self.assertEquals(mv_alltime_fouls_comumn.typestring, 'int') - def test_metadata_with_quoted_identifiers(self): """ test to ensure that materialized view metadata is properly constructed when quoted identifiers are used From 827f6a2c7974f0c158933b54b281a9ff8de63e97 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 12 Oct 2015 11:09:37 -0500 Subject: [PATCH 0118/2005] remove blist sortedset as soft dependency PYTHON-385 --- cassandra/util.py | 360 +++++++++++++++++++++++++--------------------- tox.ini | 1 - 2 files changed, 194 insertions(+), 167 deletions(-) diff --git a/cassandra/util.py b/cassandra/util.py index 6dfeea011c..224ee878d0 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -485,210 +485,238 @@ def union(self, other): def isdisjoint(self, other): return len(self.intersection(other)) == 0 -try: - from blist import sortedset -except ImportError: - from bisect import bisect_left +from bisect import bisect_left - class sortedset(object): - ''' - A sorted set based on sorted list - This set is used in place of blist.sortedset in Python environments - where blist module/extension is not available. +class SortedSet(object): + ''' + A sorted set based on sorted list - A sorted set implementation is used in this case because it does not - require its elements to be immutable/hashable. + A sorted set implementation is used in this case because it does not + require its elements to be immutable/hashable. - #Not implemented: update functions, inplace operators + #Not implemented: update functions, inplace operators + ''' - ''' + def __init__(self, iterable=()): + self._items = [] + self.update(iterable) - def __init__(self, iterable=()): - self._items = [] - self.update(iterable) + def __len__(self): + return len(self._items) - def __len__(self): - return len(self._items) + def __getitem__(self, i): + return self._items[i] - def __getitem__(self, i): - return self._items[i] + def __iter__(self): + return iter(self._items) - def __iter__(self): - return iter(self._items) + def __reversed__(self): + return reversed(self._items) - def __reversed__(self): - return reversed(self._items) + def __repr__(self): + return '%s(%r)' % ( + self.__class__.__name__, + self._items) - def __repr__(self): - return '%s(%r)' % ( - self.__class__.__name__, - self._items) + def __reduce__(self): + return self.__class__, (self._items,) - def __reduce__(self): - return self.__class__, (self._items,) + def __eq__(self, other): + if isinstance(other, self.__class__): + return self._items == other._items + else: + try: + return len(other) == len(self._items) and all(item in self for item in other) + except TypeError: + return NotImplemented - def __eq__(self, other): - if isinstance(other, self.__class__): - return self._items == other._items - else: - if not isinstance(other, set): - return False + def __ne__(self, other): + if isinstance(other, self.__class__): + return self._items != other._items + else: + try: + return len(other) != len(self._items) or any(item not in self for item in other) + except TypeError: + return NotImplemented - return len(other) == len(self._items) and all(item in other for item in self._items) + def __le__(self, other): + return self.issubset(other) - def __ne__(self, other): - if isinstance(other, self.__class__): - return self._items != other._items - else: - if not isinstance(other, set): - return True + def __lt__(self, other): + return len(other) > len(self._items) and self.issubset(other) - return len(other) != len(self._items) or any(item not in other for item in self._items) + def __ge__(self, other): + return self.issuperset(other) - def __le__(self, other): - return self.issubset(other) + def __gt__(self, other): + return len(self._items) > len(other) and self.issuperset(other) - def __lt__(self, other): - return len(other) > len(self._items) and self.issubset(other) + def __and__(self, other): + return self._intersect(other) + __rand__ = __and__ - def __ge__(self, other): - return self.issuperset(other) + def __iand__(self, other): + isect = self._intersect(other) + self._items = isect._items + return self - def __gt__(self, other): - return len(self._items) > len(other) and self.issuperset(other) + def __or__(self, other): + return self.union(other) + __ror__ = __or__ - def __and__(self, other): - return self._intersect(other) + def __ior__(self, other): + union = self.union(other) + self._items = union._items + return self - def __or__(self, other): - return self.union(other) + def __sub__(self, other): + return self._diff(other) - def __sub__(self, other): - return self._diff(other) + def __rsub__(self, other): + return sortedset(other) - self - def __xor__(self, other): - return self.symmetric_difference(other) + def __isub__(self, other): + diff = self._diff(other) + self._items = diff._items + return self - def __contains__(self, item): - i = bisect_left(self._items, item) - return i < len(self._items) and self._items[i] == item + def __xor__(self, other): + return self.symmetric_difference(other) + __rxor__ = __xor__ - def add(self, item): - i = bisect_left(self._items, item) - if i < len(self._items): - if self._items[i] != item: - self._items.insert(i, item) - else: - self._items.append(item) + def __ixor__(self, other): + sym_diff = self.symmetric_difference(other) + self._items = sym_diff._items + return self - def update(self, iterable): - for i in iterable: - self.add(i) + def __contains__(self, item): + i = bisect_left(self._items, item) + return i < len(self._items) and self._items[i] == item - def clear(self): - del self._items[:] + def __delitem__(self, i): + del self._items[i] - def copy(self): - new = sortedset() - new._items = list(self._items) - return new - - def isdisjoint(self, other): - return len(self._intersect(other)) == 0 - - def issubset(self, other): - return len(self._intersect(other)) == len(self._items) - - def issuperset(self, other): - return len(self._intersect(other)) == len(other) - - def pop(self): - if not self._items: - raise KeyError("pop from empty set") - return self._items.pop() - - def remove(self, item): - i = bisect_left(self._items, item) - if i < len(self._items): - if self._items[i] == item: - self._items.pop(i) - return - raise KeyError('%r' % item) - - def union(self, *others): - union = sortedset() - union._items = list(self._items) - for other in others: - if isinstance(other, self.__class__): - i = 0 - for item in other._items: - i = bisect_left(union._items, item, i) - if i < len(union._items): - if item != union._items[i]: - union._items.insert(i, item) - else: - union._items.append(item) - else: - for item in other: - union.add(item) - return union - - def intersection(self, *others): - isect = self.copy() - for other in others: - isect = isect._intersect(other) - if not isect: - break - return isect + def __delslice__(self, i, j): + del self._items[i:j] - def difference(self, *others): - diff = self.copy() - for other in others: - diff = diff._diff(other) - if not diff: - break - return diff + def add(self, item): + i = bisect_left(self._items, item) + if i < len(self._items): + if self._items[i] != item: + self._items.insert(i, item) + else: + self._items.append(item) - def symmetric_difference(self, other): - diff_self_other = self._diff(other) - diff_other_self = other.difference(self) - return diff_self_other.union(diff_other_self) + def update(self, iterable): + for i in iterable: + self.add(i) - def _diff(self, other): - diff = sortedset() - if isinstance(other, self.__class__): - i = 0 - for item in self._items: - i = bisect_left(other._items, item, i) - if i < len(other._items): - if item != other._items[i]: - diff._items.append(item) - else: - diff._items.append(item) - else: - for item in self._items: - if item not in other: - diff.add(item) - return diff + def clear(self): + del self._items[:] + + def copy(self): + new = sortedset() + new._items = list(self._items) + return new + + def isdisjoint(self, other): + return len(self._intersect(other)) == 0 - def _intersect(self, other): - isect = sortedset() + def issubset(self, other): + return len(self._intersect(other)) == len(self._items) + + def issuperset(self, other): + return len(self._intersect(other)) == len(other) + + def pop(self): + if not self._items: + raise KeyError("pop from empty set") + return self._items.pop() + + def remove(self, item): + i = bisect_left(self._items, item) + if i < len(self._items): + if self._items[i] == item: + self._items.pop(i) + return + raise KeyError('%r' % item) + + def union(self, *others): + union = sortedset() + union._items = list(self._items) + for other in others: if isinstance(other, self.__class__): i = 0 - for item in self._items: - i = bisect_left(other._items, item, i) - if i < len(other._items): - if item == other._items[i]: - isect._items.append(item) + for item in other._items: + i = bisect_left(union._items, item, i) + if i < len(union._items): + if item != union._items[i]: + union._items.insert(i, item) else: - break + union._items.append(item) else: - for item in self._items: - if item in other: - isect.add(item) - return isect + for item in other: + union.add(item) + return union + + def intersection(self, *others): + isect = self.copy() + for other in others: + isect = isect._intersect(other) + if not isect: + break + return isect + + def difference(self, *others): + diff = self.copy() + for other in others: + diff = diff._diff(other) + if not diff: + break + return diff + + def symmetric_difference(self, other): + diff_self_other = self._diff(other) + diff_other_self = other.difference(self) + return diff_self_other.union(diff_other_self) + + def _diff(self, other): + diff = sortedset() + if isinstance(other, self.__class__): + i = 0 + for item in self._items: + i = bisect_left(other._items, item, i) + if i < len(other._items): + if item != other._items[i]: + diff._items.append(item) + else: + diff._items.append(item) + else: + for item in self._items: + if item not in other: + diff.add(item) + return diff + + def _intersect(self, other): + isect = sortedset() + if isinstance(other, self.__class__): + i = 0 + for item in self._items: + i = bisect_left(other._items, item, i) + if i < len(other._items): + if item == other._items[i]: + isect._items.append(item) + else: + break + else: + for item in self._items: + if item in other: + isect.add(item) + return isect + +sortedset = SortedSet # backwards-compatibility from collections import Mapping diff --git a/tox.ini b/tox.ini index a13469ef9f..89278ce902 100644 --- a/tox.ini +++ b/tox.ini @@ -9,7 +9,6 @@ deps = nose [testenv] deps = {[base]deps} - py{26,27,33,34}: blist cython py26: unittest2 py{26,27}: gevent From d5ae0d0395fb28ee68457eb2058c90451c95cc20 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 12 Oct 2015 11:10:25 -0500 Subject: [PATCH 0119/2005] doc: remove blist section from installation page --- docs/installation.rst | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/docs/installation.rst b/docs/installation.rst index f81210d6fb..eb252a551f 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -93,19 +93,6 @@ support this:: pip install scales -(*Optional*) blist for Sorted Sets ----------------------------------- -Cassandra can store entire collections within a column. One of those -collection types is a set. Cassandra's sets are actually ordered -sets. By default, the driver will use a pure Python ordered sets to represent -these collections. If you would like to use an optimized implementation, -install the ``blist`` library:: - - pip install blist - -**Note**: This optional dependency is likely to be removed in favor of the internal -implementation in the next major version. - (*Optional*) Non-python Dependencies ------------------------------------ From 4eacbd58c16838e4c0457bf1ea904ea5a60ac5de Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 12 Oct 2015 11:10:46 -0500 Subject: [PATCH 0120/2005] test: expand sortedset test covering new methods --- tests/unit/test_sortedset.py | 115 +++++++++++++++++++++++++++++++++++ 1 file changed, 115 insertions(+) diff --git a/tests/unit/test_sortedset.py b/tests/unit/test_sortedset.py index a2e042c7a0..99d8d365fc 100644 --- a/tests/unit/test_sortedset.py +++ b/tests/unit/test_sortedset.py @@ -29,6 +29,9 @@ def test_init(self): self.assertEqual(len(ss), len(expected)) self.assertEqual(list(ss), expected) + def test_repr(self): + self.assertEqual(repr(sortedset([1, 2, 3, 4])), "SortedSet([1, 2, 3, 4])") + def test_contains(self): input = [5, 4, 3, 2, 1, 1, 1] expected = sorted(set(input)) @@ -62,6 +65,8 @@ def test_equal(self): self.assertEqual(ss1, s1) self.assertEqual(ss12, s12) + self.assertEqual(ss12, s12) + self.assertEqual(ss1.__eq__(None), NotImplemented) self.assertNotEqual(ss1, ss12) self.assertNotEqual(ss12, ss1) self.assertNotEqual(ss1, s12) @@ -198,6 +203,40 @@ def test_remove(self): self.assertRaises(KeyError, ss.remove, 2) self.assertFalse(ss) + def test_getitem(self): + ss = sortedset(range(3)) + for i in range(len(ss)): + self.assertEqual(ss[i], i) + with self.assertRaises(IndexError): + ss[len(ss)] + + def test_delitem(self): + expected = [1,2,3,4] + ss = sortedset(expected) + for i in range(len(ss)): + self.assertListEqual(list(ss), expected[i:]) + del ss[0] + with self.assertRaises(IndexError): + ss[0] + + def test_delslice(self): + expected = [1, 2, 3, 4, 5] + ss = sortedset(expected) + del ss[1:3] + self.assertListEqual(list(ss), [1, 4, 5]) + del ss[-1:] + self.assertListEqual(list(ss), [1, 4]) + del ss[1:] + self.assertListEqual(list(ss), [1]) + del ss[:] + self.assertFalse(ss) + with self.assertRaises(IndexError): + del ss[0] + + def test_reversed(self): + expected = range(10) + self.assertListEqual(list(reversed(sortedset(expected))), list(reversed(expected))) + def test_operators(self): ss1 = sortedset([1]) @@ -232,20 +271,96 @@ def test_operators(self): self.assertEqual(ss12 & ss12, ss12) self.assertEqual(ss12 & set(), sortedset()) + # __iand__ + tmp = sortedset(ss12) + tmp &= ss1 + self.assertEqual(tmp, ss1) + tmp = sortedset(ss1) + tmp &= ss12 + self.assertEqual(tmp, ss1) + tmp = sortedset(ss12) + tmp &= ss12 + self.assertEqual(tmp, ss12) + tmp = sortedset(ss12) + tmp &= set() + self.assertEqual(tmp, sortedset()) + + # __rand__ + self.assertEqual(set([1]) & ss12, ss1) + # __or__ self.assertEqual(ss1 | ss12, ss12) self.assertEqual(ss12 | ss12, ss12) self.assertEqual(ss12 | set(), ss12) self.assertEqual(sortedset() | ss1 | ss12, ss12) + # __ior__ + tmp = sortedset(ss1) + tmp |= ss12 + self.assertEqual(tmp, ss12) + tmp = sortedset(ss12) + tmp |= ss12 + self.assertEqual(tmp, ss12) + tmp = sortedset(ss12) + tmp |= set() + self.assertEqual(tmp, ss12) + tmp = sortedset() + tmp |= ss1 + tmp |= ss12 + self.assertEqual(tmp, ss12) + + # __ror__ + self.assertEqual(set([1]) | ss12, ss12) + # __sub__ self.assertEqual(ss1 - ss12, set()) self.assertEqual(ss12 - ss12, set()) self.assertEqual(ss12 - set(), ss12) self.assertEqual(ss12 - ss1, sortedset([2])) + # __isub__ + tmp = sortedset(ss1) + tmp -= ss12 + self.assertEqual(tmp, set()) + tmp = sortedset(ss12) + tmp -= ss12 + self.assertEqual(tmp, set()) + tmp = sortedset(ss12) + tmp -= set() + self.assertEqual(tmp, ss12) + tmp = sortedset(ss12) + tmp -= ss1 + self.assertEqual(tmp, sortedset([2])) + + # __rsub__ + self.assertEqual(set((1,2,3)) - ss12, set((3,))) + # __xor__ self.assertEqual(ss1 ^ ss12, set([2])) self.assertEqual(ss12 ^ ss1, set([2])) self.assertEqual(ss12 ^ ss12, set()) self.assertEqual(ss12 ^ set(), ss12) + + # __ixor__ + tmp = sortedset(ss1) + tmp ^= ss12 + self.assertEqual(tmp, set([2])) + tmp = sortedset(ss12) + tmp ^= ss1 + self.assertEqual(tmp, set([2])) + tmp = sortedset(ss12) + tmp ^= ss12 + self.assertEqual(tmp, set()) + tmp = sortedset(ss12) + tmp ^= set() + self.assertEqual(tmp, ss12) + + # __rxor__ + self.assertEqual(set([1, 2]) ^ ss1, (set([2]))) + + def test_reduce_pickle(self): + ss = sortedset((4,3,2,1)) + import pickle + s = pickle.dumps(ss) + self.assertEqual(pickle.loads(s), ss) + From bd0563048b7e76a7592f512ee4b5335887b2e9bb Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 12 Oct 2015 11:28:35 -0500 Subject: [PATCH 0121/2005] test: update metadata unit test reflecting change in TableMetadata --- tests/unit/test_metadata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index f5dc86301a..1b3c49378c 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -348,7 +348,7 @@ def test_build_index_as_cql(self): column_meta = Mock() column_meta.name = 'column_name_here' column_meta.table.name = 'table_name_here' - column_meta.table.keyspace.name = 'keyspace_name_here' + column_meta.table.keyspace_name = 'keyspace_name_here' column_meta.table.columns = {column_meta.name: column_meta} connection = Mock() connection.server_version = '2.1.0' From 6ae9533e6273e9f7f2427f486937c1a4580daae7 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 12 Oct 2015 12:52:02 -0500 Subject: [PATCH 0122/2005] Added test for Python-402, changed schmema metadata tests to use common setup/teardown. --- tests/integration/standard/test_metadata.py | 184 ++++++++++---------- 1 file changed, 95 insertions(+), 89 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 90388fef0d..325d6a7bdd 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -36,7 +36,7 @@ from cassandra.query import SimpleStatement, ConsistencyLevel from tests.integration import get_cluster, use_singledc, PROTOCOL_VERSION, get_server_versions, execute_until_pass, \ - BasicSharedKeyspaceUnitTestCase, BasicSegregatedKeyspaceUnitTestCase + BasicSharedKeyspaceUnitTestCase, BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase def setup_module(): @@ -45,29 +45,13 @@ def setup_module(): CASS_SERVER_VERSION = get_server_versions()[0] -class SchemaMetadataTests(unittest.TestCase): - - ksname = "schemametadatatest" - - @property - def cfname(self): - return self._testMethodName.lower() - - def setUp(self): - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) - self.session = self.cluster.connect() - execute_until_pass(self.session, - "CREATE KEYSPACE schemametadatatest WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}") - - def tearDown(self): - execute_until_pass(self.session, "DROP KEYSPACE schemametadatatest") - self.cluster.shutdown() +class SchemaMetadataTests(BasicSegregatedKeyspaceUnitTestCase): def make_create_statement(self, partition_cols, clustering_cols=None, other_cols=None, compact=False): clustering_cols = clustering_cols or [] other_cols = other_cols or [] - statement = "CREATE TABLE %s.%s (" % (self.ksname, self.cfname) + statement = "CREATE TABLE %s.%s (" % (self.keyspace_name, self.function_table_name) if len(partition_cols) == 1 and not clustering_cols: statement += "%s text PRIMARY KEY, " % protect_name(partition_cols[0]) else: @@ -99,17 +83,17 @@ def make_create_statement(self, partition_cols, clustering_cols=None, other_cols def check_create_statement(self, tablemeta, original): recreate = tablemeta.as_cql_query(formatted=False) self.assertEqual(original, recreate[:len(original)]) - execute_until_pass(self.session, "DROP TABLE {0}.{1}".format(self.ksname, self.cfname)) + execute_until_pass(self.session, "DROP TABLE {0}.{1}".format(self.keyspace_name, self.function_table_name)) execute_until_pass(self.session, recreate) # create the table again, but with formatting enabled - execute_until_pass(self.session, "DROP TABLE {0}.{1}".format(self.ksname, self.cfname)) + execute_until_pass(self.session, "DROP TABLE {0}.{1}".format(self.keyspace_name, self.function_table_name)) recreate = tablemeta.as_cql_query(formatted=True) execute_until_pass(self.session, recreate) def get_table_metadata(self): - self.cluster.refresh_table_metadata(self.ksname, self.cfname) - return self.cluster.metadata.keyspaces[self.ksname].tables[self.cfname] + self.cluster.refresh_table_metadata(self.keyspace_name, self.function_table_name) + return self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name] def test_basic_table_meta_properties(self): create_statement = self.make_create_statement(["a"], [], ["b", "c"]) @@ -119,18 +103,18 @@ def test_basic_table_meta_properties(self): meta = self.cluster.metadata self.assertNotEqual(meta.cluster_name, None) - self.assertTrue(self.ksname in meta.keyspaces) - ksmeta = meta.keyspaces[self.ksname] + self.assertTrue(self.keyspace_name in meta.keyspaces) + ksmeta = meta.keyspaces[self.keyspace_name] - self.assertEqual(ksmeta.name, self.ksname) + self.assertEqual(ksmeta.name, self.keyspace_name) self.assertTrue(ksmeta.durable_writes) self.assertEqual(ksmeta.replication_strategy.name, 'SimpleStrategy') self.assertEqual(ksmeta.replication_strategy.replication_factor, 1) - self.assertTrue(self.cfname in ksmeta.tables) - tablemeta = ksmeta.tables[self.cfname] + self.assertTrue(self.function_table_name in ksmeta.tables) + tablemeta = ksmeta.tables[self.function_table_name] self.assertEqual(tablemeta.keyspace, ksmeta) # tablemeta.keyspace is deprecated - self.assertEqual(tablemeta.name, self.cfname) + self.assertEqual(tablemeta.name, self.function_table_name) self.assertEqual([u'a'], [c.name for c in tablemeta.partition_key]) self.assertEqual([], tablemeta.clustering_key) @@ -216,6 +200,28 @@ def test_compound_primary_keys_compact(self): self.check_create_statement(tablemeta, create_statement) + def test_cluster_column_ordering_reversed_metadata(self): + """ + Simple test to ensure that the metatdata associated with cluster ordering is surfaced is surfaced correctly. + + Creates a table with a few clustering keys. Then checks the clustering order associated with clustering columns + and ensure it's set correctly. + @since 3.0.0 + @jira_ticket PYTHON-402 + @expected_result is_reversed is set on DESC order, and is False on ASC + + @test_category metadata + """ + + create_statement = self.make_create_statement(["a"], ["b", "c"], ["d"], compact=True) + create_statement += " AND CLUSTERING ORDER BY (b ASC, c DESC)" + self.session.execute(create_statement) + tablemeta = self.get_table_metadata() + b_column = tablemeta.columns['b'] + self.assertFalse(b_column.is_reversed) + c_column = tablemeta.columns['c'] + self.assertTrue(c_column.is_reversed) + def test_compound_primary_keys_more_columns_compact(self): create_statement = self.make_create_statement(["a"], ["b", "c"], ["d"], compact=True) create_statement += " AND CLUSTERING ORDER BY (b ASC, c ASC)" @@ -300,8 +306,8 @@ def test_indexes(self): create_statement += " WITH CLUSTERING ORDER BY (b ASC, c ASC)" execute_until_pass(self.session, create_statement) - d_index = "CREATE INDEX d_index ON %s.%s (d)" % (self.ksname, self.cfname) - e_index = "CREATE INDEX e_index ON %s.%s (e)" % (self.ksname, self.cfname) + d_index = "CREATE INDEX d_index ON %s.%s (d)" % (self.keyspace_name, self.function_table_name) + e_index = "CREATE INDEX e_index ON %s.%s (e)" % (self.keyspace_name, self.function_table_name) execute_until_pass(self.session, d_index) execute_until_pass(self.session, e_index) @@ -314,7 +320,7 @@ def test_indexes(self): self.assertIn(e_index, statements) # make sure indexes are included in KeyspaceMetadata.export_as_string() - ksmeta = self.cluster.metadata.keyspaces[self.ksname] + ksmeta = self.cluster.metadata.keyspaces[self.keyspace_name] statement = ksmeta.export_as_string() self.assertIn('CREATE INDEX d_index', statement) self.assertIn('CREATE INDEX e_index', statement) @@ -324,16 +330,16 @@ def test_collection_indexes(self): raise unittest.SkipTest("Secondary index on collections were introduced in Cassandra 2.1") self.session.execute("CREATE TABLE %s.%s (a int PRIMARY KEY, b map)" - % (self.ksname, self.cfname)) + % (self.keyspace_name, self.function_table_name)) self.session.execute("CREATE INDEX index1 ON %s.%s (keys(b))" - % (self.ksname, self.cfname)) + % (self.keyspace_name, self.function_table_name)) tablemeta = self.get_table_metadata() self.assertIn('(keys(b))', tablemeta.export_as_string()) - self.session.execute("DROP INDEX %s.index1" % (self.ksname,)) + self.session.execute("DROP INDEX %s.index1" % (self.keyspace_name,)) self.session.execute("CREATE INDEX index2 ON %s.%s (b)" - % (self.ksname, self.cfname)) + % (self.keyspace_name, self.function_table_name)) tablemeta = self.get_table_metadata() target = ' (b)' if CASS_SERVER_VERSION < (3, 0) else 'values(b))' # explicit values in C* 3+ @@ -341,11 +347,11 @@ def test_collection_indexes(self): # test full indexes on frozen collections, if available if CASS_SERVER_VERSION >= (2, 1, 3): - self.session.execute("DROP TABLE %s.%s" % (self.ksname, self.cfname)) + self.session.execute("DROP TABLE %s.%s" % (self.keyspace_name, self.function_table_name)) self.session.execute("CREATE TABLE %s.%s (a int PRIMARY KEY, b frozen>)" - % (self.ksname, self.cfname)) + % (self.keyspace_name, self.function_table_name)) self.session.execute("CREATE INDEX index3 ON %s.%s (full(b))" - % (self.ksname, self.cfname)) + % (self.keyspace_name, self.function_table_name)) tablemeta = self.get_table_metadata() self.assertIn('(full(b))', tablemeta.export_as_string()) @@ -414,49 +420,49 @@ def test_refresh_schema_metadata(self): self.assertIn("new_keyspace", cluster2.metadata.keyspaces) # Keyspace metadata modification - self.session.execute("ALTER KEYSPACE {0} WITH durable_writes = false".format(self.ksname)) - self.assertTrue(cluster2.metadata.keyspaces[self.ksname].durable_writes) + self.session.execute("ALTER KEYSPACE {0} WITH durable_writes = false".format(self.keyspace_name)) + self.assertTrue(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes) cluster2.refresh_schema_metadata() - self.assertFalse(cluster2.metadata.keyspaces[self.ksname].durable_writes) + self.assertFalse(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes) # Table metadata modification table_name = "test" - self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b text)".format(self.ksname, table_name)) + self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b text)".format(self.keyspace_name, table_name)) cluster2.refresh_schema_metadata() - self.session.execute("ALTER TABLE {0}.{1} ADD c double".format(self.ksname, table_name)) - self.assertNotIn("c", cluster2.metadata.keyspaces[self.ksname].tables[table_name].columns) + self.session.execute("ALTER TABLE {0}.{1} ADD c double".format(self.keyspace_name, table_name)) + self.assertNotIn("c", cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns) cluster2.refresh_schema_metadata() - self.assertIn("c", cluster2.metadata.keyspaces[self.ksname].tables[table_name].columns) + self.assertIn("c", cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns) if PROTOCOL_VERSION >= 3: # UDT metadata modification - self.session.execute("CREATE TYPE {0}.user (age int, name text)".format(self.ksname)) - self.assertEqual(cluster2.metadata.keyspaces[self.ksname].user_types, {}) + self.session.execute("CREATE TYPE {0}.user (age int, name text)".format(self.keyspace_name)) + self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].user_types, {}) cluster2.refresh_schema_metadata() - self.assertIn("user", cluster2.metadata.keyspaces[self.ksname].user_types) + self.assertIn("user", cluster2.metadata.keyspaces[self.keyspace_name].user_types) if PROTOCOL_VERSION >= 4: # UDF metadata modification self.session.execute("""CREATE FUNCTION {0}.sum_int(key int, val int) RETURNS NULL ON NULL INPUT RETURNS int - LANGUAGE javascript AS 'key + val';""".format(self.ksname)) + LANGUAGE javascript AS 'key + val';""".format(self.keyspace_name)) - self.assertEqual(cluster2.metadata.keyspaces[self.ksname].functions, {}) + self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].functions, {}) cluster2.refresh_schema_metadata() - self.assertIn("sum_int(int,int)", cluster2.metadata.keyspaces[self.ksname].functions) + self.assertIn("sum_int(int,int)", cluster2.metadata.keyspaces[self.keyspace_name].functions) # UDA metadata modification self.session.execute("""CREATE AGGREGATE {0}.sum_agg(int) SFUNC sum_int STYPE int INITCOND 0""" - .format(self.ksname)) + .format(self.keyspace_name)) - self.assertEqual(cluster2.metadata.keyspaces[self.ksname].aggregates, {}) + self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].aggregates, {}) cluster2.refresh_schema_metadata() - self.assertIn("sum_agg(int)", cluster2.metadata.keyspaces[self.ksname].aggregates) + self.assertIn("sum_agg(int)", cluster2.metadata.keyspaces[self.keyspace_name].aggregates) # Cluster metadata modification self.session.execute("DROP KEYSPACE new_keyspace") @@ -487,11 +493,11 @@ def test_refresh_keyspace_metadata(self): cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) cluster2.connect() - self.assertTrue(cluster2.metadata.keyspaces[self.ksname].durable_writes) - self.session.execute("ALTER KEYSPACE {0} WITH durable_writes = false".format(self.ksname)) - self.assertTrue(cluster2.metadata.keyspaces[self.ksname].durable_writes) - cluster2.refresh_keyspace_metadata(self.ksname) - self.assertFalse(cluster2.metadata.keyspaces[self.ksname].durable_writes) + self.assertTrue(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes) + self.session.execute("ALTER KEYSPACE {0} WITH durable_writes = false".format(self.keyspace_name)) + self.assertTrue(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes) + cluster2.refresh_keyspace_metadata(self.keyspace_name) + self.assertFalse(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes) cluster2.shutdown() @@ -513,21 +519,21 @@ def test_refresh_table_metadata(self): """ table_name = "test" - self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b text)".format(self.ksname, table_name)) + self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b text)".format(self.keyspace_name, table_name)) cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) cluster2.connect() - self.assertNotIn("c", cluster2.metadata.keyspaces[self.ksname].tables[table_name].columns) - self.session.execute("ALTER TABLE {0}.{1} ADD c double".format(self.ksname, table_name)) - self.assertNotIn("c", cluster2.metadata.keyspaces[self.ksname].tables[table_name].columns) + self.assertNotIn("c", cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns) + self.session.execute("ALTER TABLE {0}.{1} ADD c double".format(self.keyspace_name, table_name)) + self.assertNotIn("c", cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns) - cluster2.refresh_table_metadata(self.ksname, table_name) - self.assertIn("c", cluster2.metadata.keyspaces[self.ksname].tables[table_name].columns) + cluster2.refresh_table_metadata(self.keyspace_name, table_name) + self.assertIn("c", cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns) cluster2.shutdown() - def test_refresh_table_metadata_for_materialized_views(self): + def test_refresh_metadata_for_mv(self): """ test for synchronously refreshing materialized view metadata @@ -550,18 +556,18 @@ def test_refresh_table_metadata_for_materialized_views(self): raise unittest.SkipTest("Materialized views require Cassandra 3.0+") table_name = "test" - self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b text)".format(self.ksname, table_name)) + self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b text)".format(self.keyspace_name, self.function_table_name)) cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) cluster2.connect() - self.assertNotIn("mv1", cluster2.metadata.keyspaces[self.ksname].tables[table_name].views) + self.assertNotIn("mv1", cluster2.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT b FROM {0}.{1} WHERE b IS NOT NULL PRIMARY KEY (a, b)" - .format(self.ksname, table_name)) - self.assertNotIn("mv1", cluster2.metadata.keyspaces[self.ksname].tables[table_name].views) + .format(self.keyspace_name, self.function_table_name)) + self.assertNotIn("mv1", cluster2.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) - cluster2.refresh_table_metadata(self.ksname, "mv1") - self.assertIn("mv1", cluster2.metadata.keyspaces[self.ksname].tables[table_name].views) + cluster2.refresh_table_metadata(self.keyspace_name, "mv1") + self.assertIn("mv1", cluster2.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) cluster2.shutdown() @@ -588,12 +594,12 @@ def test_refresh_user_type_metadata(self): cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) cluster2.connect() - self.assertEqual(cluster2.metadata.keyspaces[self.ksname].user_types, {}) - self.session.execute("CREATE TYPE {0}.user (age int, name text)".format(self.ksname)) - self.assertEqual(cluster2.metadata.keyspaces[self.ksname].user_types, {}) + self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].user_types, {}) + self.session.execute("CREATE TYPE {0}.user (age int, name text)".format(self.keyspace_name)) + self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].user_types, {}) - cluster2.refresh_user_type_metadata(self.ksname, "user") - self.assertIn("user", cluster2.metadata.keyspaces[self.ksname].user_types) + cluster2.refresh_user_type_metadata(self.keyspace_name, "user") + self.assertIn("user", cluster2.metadata.keyspaces[self.keyspace_name].user_types) cluster2.shutdown() @@ -621,15 +627,15 @@ def test_refresh_user_function_metadata(self): cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) cluster2.connect() - self.assertEqual(cluster2.metadata.keyspaces[self.ksname].functions, {}) + self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].functions, {}) self.session.execute("""CREATE FUNCTION {0}.sum_int(key int, val int) RETURNS NULL ON NULL INPUT RETURNS int - LANGUAGE javascript AS 'key + val';""".format(self.ksname)) + LANGUAGE javascript AS 'key + val';""".format(self.keyspace_name)) - self.assertEqual(cluster2.metadata.keyspaces[self.ksname].functions, {}) - cluster2.refresh_user_function_metadata(self.ksname, UserFunctionDescriptor("sum_int", ["int", "int"])) - self.assertIn("sum_int(int,int)", cluster2.metadata.keyspaces[self.ksname].functions) + self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].functions, {}) + cluster2.refresh_user_function_metadata(self.keyspace_name, UserFunctionDescriptor("sum_int", ["int", "int"])) + self.assertIn("sum_int(int,int)", cluster2.metadata.keyspaces[self.keyspace_name].functions) cluster2.shutdown() @@ -657,21 +663,21 @@ def test_refresh_user_aggregate_metadata(self): cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) cluster2.connect() - self.assertEqual(cluster2.metadata.keyspaces[self.ksname].aggregates, {}) + self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].aggregates, {}) self.session.execute("""CREATE FUNCTION {0}.sum_int(key int, val int) RETURNS NULL ON NULL INPUT RETURNS int - LANGUAGE javascript AS 'key + val';""".format(self.ksname)) + LANGUAGE javascript AS 'key + val';""".format(self.keyspace_name)) self.session.execute("""CREATE AGGREGATE {0}.sum_agg(int) SFUNC sum_int STYPE int INITCOND 0""" - .format(self.ksname)) + .format(self.keyspace_name)) - self.assertEqual(cluster2.metadata.keyspaces[self.ksname].aggregates, {}) - cluster2.refresh_user_aggregate_metadata(self.ksname, UserAggregateDescriptor("sum_agg", ["int"])) - self.assertIn("sum_agg(int)", cluster2.metadata.keyspaces[self.ksname].aggregates) + self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].aggregates, {}) + cluster2.refresh_user_aggregate_metadata(self.keyspace_name, UserAggregateDescriptor("sum_agg", ["int"])) + self.assertIn("sum_agg(int)", cluster2.metadata.keyspaces[self.keyspace_name].aggregates) cluster2.shutdown() From 97adeca31f40ec73546bde5e41a20b739f34f5bc Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 12 Oct 2015 15:02:30 -0500 Subject: [PATCH 0123/2005] surface query trace in ResultSet API PYTHON-318 --- cassandra/cluster.py | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index ef41b58235..9dae0370db 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3218,7 +3218,8 @@ def get_query_trace(self, max_wait=None): if not self._query_trace: return None - self._query_trace.populate(max_wait) + if not self._query_trace.events: + self._query_trace.populate(max_wait) return self._query_trace def add_callback(self, fn, *args, **kwargs): @@ -3361,9 +3362,13 @@ def __init__(self, response_future, initial_response): self._current_rows = initial_response self._page_iter = None self._list_mode = False + self._traces = [response_future._query_trace] if response_future._query_trace else [] @property def has_more_pages(self): + """ + True if the last response indicated more pages; False otherwise + """ return self.response_future.has_more_pages def __iter__(self): @@ -3383,6 +3388,8 @@ def next(self): self.response_future.start_fetching_next_page() result = self.response_future.result() + if self.response_future._query_trace: + self._traces.append(self.response_future._query_trace) self._current_rows = result._current_rows self._page_iter = iter(self._current_rows) @@ -3416,3 +3423,29 @@ def __nonzero__(self): return bool(self._current_rows) __bool__ = __nonzero__ + + def get_query_trace(self, max_wait_sec=None): + """ + Fetches and returns the query trace of the last response, or `None` if tracing was + not enabled. + + Note that this may raise an exception if there are problems retrieving the trace + details from Cassandra. If the trace is not available after `max_wait_sec`, + :exc:`cassandra.query.TraceUnavailable` will be raised. + """ + if self._traces: + self._get_trace(0, max_wait_sec) + + def get_all_query_traces(self, max_wait_sec=None): + """ + Fetches and returns the query traces for all query pages, if tracing was enabled. + + See note in :meth:`~.get_current_query_trace` regarding possible exceptions. + """ + return [self._get_trace(i, max_wait_sec) for i in range(len(self._traces))] + + def _get_trace(self, i, max_wait): + trace = self._traces[i] + if not trace.events: + trace.populate(max_wait=max_wait) + return trace From 7f7b8cc280fed6332ad73a85a51d3594b8ce3433 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 12 Oct 2015 15:21:49 -0500 Subject: [PATCH 0124/2005] always return ResultSet, even for non-row responses --- cassandra/cluster.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 9dae0370db..6749085fb2 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2720,7 +2720,6 @@ class ResponseFuture(object): _start_time = None _metrics = None _paging_state = None - _is_result_kind_rows = False _custom_payload = None _warnings = None _timer = None @@ -2931,8 +2930,7 @@ def _set_result(self, response): self, **response.results) else: results = getattr(response, 'results', None) - self._is_result_kind_rows = response.kind ==RESULT_KIND_ROWS - if results is not None and self._is_result_kind_rows: + if results is not None and response.kind ==RESULT_KIND_ROWS: self._paging_state = response.paging_state results = self.row_factory(*results) self._set_final_result(results) @@ -3199,10 +3197,7 @@ def result(self, timeout=_NOT_SET): if not self._event.is_set(): self._on_timeout() if self._final_result is not _NOT_SET: - if self._is_result_kind_rows: - return ResultSet(self, self._final_result) - else: - return self._final_result + return ResultSet(self, self._final_result) else: raise self._final_exception @@ -3359,7 +3354,7 @@ class ResultSet(object): def __init__(self, response_future, initial_response): self.response_future = response_future - self._current_rows = initial_response + self._current_rows = initial_response or [] self._page_iter = None self._list_mode = False self._traces = [response_future._query_trace] if response_future._query_trace else [] From 9ca9985f703a872cf6bada1ed4949bf70f04d9c0 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 13 Oct 2015 11:42:35 -0500 Subject: [PATCH 0125/2005] Trace info is now attached to ResultSet --- cassandra/cluster.py | 37 ++++--------------- cassandra/concurrent.py | 6 +-- tests/integration/standard/test_cluster.py | 23 ++++++------ tests/integration/standard/test_concurrent.py | 21 +++++++---- tests/integration/standard/test_query.py | 37 ++++++++++++------- .../standard/test_row_factories.py | 1 + tests/unit/test_concurrent.py | 10 ++--- tests/unit/test_response_future.py | 8 ++-- 8 files changed, 69 insertions(+), 74 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 6749085fb2..8ef7b6610f 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1601,34 +1601,14 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_ no timeout. Please see :meth:`.ResponseFuture.result` for details on the scope and effect of this timeout. - If `trace` is set to :const:`True`, an attempt will be made to - fetch the trace details and attach them to the `query`'s - :attr:`~.Statement.trace` attribute in the form of a :class:`.QueryTrace` - instance. This requires that `query` be a :class:`.Statement` subclass - instance and not just a string. If there is an error fetching the - trace details, the :attr:`~.Statement.trace` attribute will be left as - :const:`None`. + If `trace` is set to :const:`True`, the query will be sent with tracing enabled. + The trace details can be obtained using the returned :class:`.ResultSet` object. `custom_payload` is a :ref:`custom_payload` dict to be passed to the server. If `query` is a Statement with its own custom_payload. The message payload will be a union of the two, with the values specified here taking precedence. """ - if trace and not isinstance(query, Statement): - raise TypeError( - "The query argument must be an instance of a subclass of " - "cassandra.query.Statement when trace=True") - - future = self.execute_async(query, parameters, trace, custom_payload, timeout) - try: - result = future.result() - finally: - if trace: - try: - query.trace = future.get_query_trace(self.max_trace_wait) - except Exception: - log.exception("Unable to fetch query trace:") - - return result + return self.execute_async(query, parameters, trace, custom_payload, timeout).result() def execute_async(self, query, parameters=None, trace=False, custom_payload=None, timeout=_NOT_SET): """ @@ -1638,9 +1618,9 @@ def execute_async(self, query, parameters=None, trace=False, custom_payload=None on the :class:`.ResponseFuture` to syncronously block for results at any time. - If `trace` is set to :const:`True`, you may call - :meth:`.ResponseFuture.get_query_trace()` after the request - completes to retrieve a :class:`.QueryTrace` instance. + If `trace` is set to :const:`True`, you may get the query trace descriptors using + :meth:`.ResultSet.get_query_trace()` or :meth:`.ResultSet.get_all_query_traces()` + on the future result. `custom_payload` is a :ref:`custom_payload` dict to be passed to the server. If `query` is a Statement with its own custom_payload. The message payload @@ -1730,8 +1710,7 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time query.batch_type, query._statements_and_parameters, cl, query.serial_consistency_level, timestamp) - if trace: - message.tracing = True + message.tracing = trace message.update_custom_payload(query.custom_payload) message.update_custom_payload(custom_payload) @@ -3429,7 +3408,7 @@ def get_query_trace(self, max_wait_sec=None): :exc:`cassandra.query.TraceUnavailable` will be raised. """ if self._traces: - self._get_trace(0, max_wait_sec) + return self._get_trace(0, max_wait_sec) def get_all_query_traces(self, max_wait_sec=None): """ diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index 3391fdeaa2..a360ffaaca 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -134,10 +134,8 @@ def _execute(self, idx, statement, params): self._put_result(e, idx, False) def _on_success(self, result, future, idx): - if future._is_result_kind_rows: - result = ResultSet(future, result) - future.clear_callbacks() - self._put_result(result, idx, True) + future.clear_callbacks() + self._put_result(ResultSet(future, result), idx, True) def _on_error(self, result, future, idx): self._put_result(result, idx, False) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 5fe978868c..97adb45f52 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -78,7 +78,7 @@ def test_basic(self): CREATE KEYSPACE clustertests WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} """) - self.assertEqual(None, result) + self.assertFalse(result) result = execute_until_pass(session, """ @@ -89,13 +89,13 @@ def test_basic(self): PRIMARY KEY (a, b) ) """) - self.assertEqual(None, result) + self.assertFalse(result) result = session.execute( """ INSERT INTO clustertests.cf0 (a, b, c) VALUES ('a', 'b', 'c') """) - self.assertEqual(None, result) + self.assertFalse(result) result = session.execute("SELECT * FROM clustertests.cf0") self.assertEqual([('a', 'b', 'c')], result) @@ -152,7 +152,7 @@ def test_connect_on_keyspace(self): """ INSERT INTO test3rf.test (k, v) VALUES (8889, 8889) """) - self.assertEqual(None, result) + self.assertFalse(result) result = session.execute("SELECT * FROM test3rf.test") self.assertEqual([(8889, 8889)], result) @@ -437,8 +437,6 @@ def test_trace(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect() - self.assertRaises(TypeError, session.execute, "SELECT * FROM system.local", trace=True) - def check_trace(trace): self.assertIsNot(None, trace.request_type) self.assertIsNot(None, trace.duration) @@ -446,15 +444,18 @@ def check_trace(trace): self.assertIsNot(None, trace.coordinator) self.assertIsNot(None, trace.events) + result = session.execute( "SELECT * FROM system.local", trace=True) + check_trace(result.get_query_trace()) + query = "SELECT * FROM system.local" statement = SimpleStatement(query) - session.execute(statement, trace=True) - check_trace(statement.trace) + result = session.execute(statement, trace=True) + check_trace(result.get_query_trace()) query = "SELECT * FROM system.local" statement = SimpleStatement(query) - session.execute(statement) - self.assertEqual(None, statement.trace) + result = session.execute(statement) + self.assertIsNone(result.get_query_trace()) statement2 = SimpleStatement(query) future = session.execute_async(statement2, trace=True) @@ -464,7 +465,7 @@ def check_trace(trace): statement2 = SimpleStatement(query) future = session.execute_async(statement2) future.result() - self.assertEqual(None, future.get_query_trace()) + self.assertIsNone(future.get_query_trace()) prepared = session.prepare("SELECT * FROM system.local") future = session.execute_async(prepared, parameters=(), trace=True) diff --git a/tests/integration/standard/test_concurrent.py b/tests/integration/standard/test_concurrent.py index 23b810c535..a0cf79fab8 100644 --- a/tests/integration/standard/test_concurrent.py +++ b/tests/integration/standard/test_concurrent.py @@ -89,7 +89,9 @@ def test_execute_concurrent(self): results = self.execute_concurrent_helper(self.session, list(zip(statements, parameters))) self.assertEqual(num_statements, len(results)) - self.assertEqual([(True, None)] * num_statements, results) + for success, result in results: + self.assertTrue(success) + self.assertFalse(result) # read statement = SimpleStatement( @@ -111,7 +113,9 @@ def test_execute_concurrent_with_args(self): results = self.execute_concurrent_args_helper(self.session, statement, parameters) self.assertEqual(num_statements, len(results)) - self.assertEqual([(True, None)] * num_statements, results) + for success, result in results: + self.assertTrue(success) + self.assertFalse(result) # read statement = SimpleStatement( @@ -143,8 +147,9 @@ def test_execute_concurrent_with_args_generator(self): parameters = [(i, i) for i in range(num_statements)] results = self.execute_concurrent_args_helper(self.session, statement, parameters, results_generator=True) - for result in results: - self.assertEqual((True, None), result) + for success, result in results: + self.assertTrue(success) + self.assertFalse(result) # read statement = SimpleStatement( @@ -172,7 +177,9 @@ def test_execute_concurrent_paged_result(self): results = self.execute_concurrent_args_helper(self.session, statement, parameters) self.assertEqual(num_statements, len(results)) - self.assertEqual([(True, None)] * num_statements, results) + for success, result in results: + self.assertTrue(success) + self.assertFalse(result) # read statement = SimpleStatement( @@ -273,7 +280,7 @@ def test_no_raise_on_first_failure(self): self.assertIsInstance(result, InvalidRequest) else: self.assertTrue(success) - self.assertEqual(None, result) + self.assertFalse(result) def test_no_raise_on_first_failure_client_side(self): statement = SimpleStatement( @@ -292,4 +299,4 @@ def test_no_raise_on_first_failure_client_side(self): self.assertIsInstance(result, TypeError) else: self.assertTrue(success) - self.assertEqual(None, result) + self.assertFalse(result) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 2364a49f71..fbb206b14a 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -65,27 +65,34 @@ def test_trace_prints_okay(self): query = "SELECT * FROM system.local" statement = SimpleStatement(query) - session.execute(statement, trace=True) + rs = session.execute(statement, trace=True) # Ensure this does not throw an exception - str(statement.trace) - for event in statement.trace.events: + trace = rs.get_query_trace() + self.assertTrue(trace.events) + str(trace) + for event in trace.events: str(event) cluster.shutdown() - def test_trace_id_to_query(self): + def test_trace_id_to_resultset(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect() - query = "SELECT * FROM system.local" - statement = SimpleStatement(query) - self.assertIsNone(statement.trace_id) - future = session.execute_async(statement, trace=True) + future = session.execute_async("SELECT * FROM system.local", trace=True) + + # future should have the current trace + rs = future.result() + future_trace = future.get_query_trace() + self.assertIsNotNone(future_trace) + + rs_trace = rs.get_query_trace() + self.assertEqual(rs_trace, future_trace) + self.assertTrue(rs_trace.events) + self.assertEqual(len(rs_trace.events), len(future_trace.events)) - # query should have trace_id, even before trace is obtained - future.result() - self.assertIsNotNone(statement.trace_id) + self.assertListEqual([rs_trace], rs.get_all_query_traces()) cluster.shutdown() @@ -96,11 +103,13 @@ def test_trace_ignores_row_factory(self): query = "SELECT * FROM system.local" statement = SimpleStatement(query) - session.execute(statement, trace=True) + rs = session.execute(statement, trace=True) # Ensure this does not throw an exception - str(statement.trace) - for event in statement.trace.events: + trace = rs.get_query_trace() + self.assertTrue(trace.events) + str(trace) + for event in trace.events: str(event) cluster.shutdown() diff --git a/tests/integration/standard/test_row_factories.py b/tests/integration/standard/test_row_factories.py index d4391daffb..c43fe57e02 100644 --- a/tests/integration/standard/test_row_factories.py +++ b/tests/integration/standard/test_row_factories.py @@ -94,6 +94,7 @@ def test_named_tuple_factory(self): result = session.execute(self.select) self.assertIsInstance(result, ResultSet) + result = list(result) for row in result: self.assertEqual(row.k, row.v) diff --git a/tests/unit/test_concurrent.py b/tests/unit/test_concurrent.py index 7465f685c7..3c2734e415 100644 --- a/tests/unit/test_concurrent.py +++ b/tests/unit/test_concurrent.py @@ -32,7 +32,7 @@ class MockResponseResponseFuture(): and invoke callback with various timing. """ - _is_result_kind_rows = False + _query_trace = None # a list pending callbacks, these will be prioritized in reverse or normal orderd pending_callbacks = PriorityQueue() @@ -106,7 +106,7 @@ def run(self): self._stopper.wait(.1) callback_args = pending_callback[1] fn, args, kwargs, time_added = callback_args - fn(time_added, *args, **kwargs) + fn([time_added], *args, **kwargs) self._stopper.wait(.001) return @@ -222,8 +222,8 @@ def validate_result_ordering(self, results): :param results: """ last_time_added = 0 - for result in results: - current_time_added = result[1] + for success, result in results: + self.assertTrue(success) + current_time_added = list(result)[0] self.assertLess(last_time_added, current_time_added) last_time_added = current_time_added - diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index 3c41c53c4e..5c59259be3 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -94,7 +94,7 @@ def test_set_keyspace_result(self): results="keyspace1") rf._set_result(result) rf._set_keyspace_completed({}) - self.assertEqual(None, rf.result()) + self.assertFalse(rf.result()) def test_schema_change_result(self): session = self.make_session() @@ -113,9 +113,9 @@ def test_other_result_message_kind(self): session = self.make_session() rf = self.make_response_future(session) rf.send_request() - result = object() + result = [1, 2, 3] rf._set_result(Mock(spec=ResultMessage, kind=999, results=result)) - self.assertIs(result, rf.result()) + self.assertListEqual(list(rf.result()), result) def test_read_timeout_error_message(self): session = self.make_session() @@ -172,7 +172,7 @@ def test_retry_policy_says_ignore(self): result = Mock(spec=UnavailableErrorMessage, info={}) rf._set_result(result) - self.assertEqual(None, rf.result()) + self.assertFalse(rf.result()) def test_retry_policy_says_retry(self): session = self.make_session() From ead4f2fc8a478bc7bd85f2fcb82699fb7aaff593 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 13 Oct 2015 11:43:05 -0500 Subject: [PATCH 0126/2005] ignore generated test artifact --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index a874ebcec1..5c9cbec957 100644 --- a/.gitignore +++ b/.gitignore @@ -21,6 +21,7 @@ setuptools*.egg cassandra/*.c !cassandra/cmurmur3.c cassandra/*.html +tests/unit/cython/bytesio_testhelper.c # OSX .DS_Store From 932a1d5272d075f945ea5659a2a642e3e3cdcb58 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 13 Oct 2015 11:43:23 -0500 Subject: [PATCH 0127/2005] Remove trace, trace_id from Statement --- cassandra/cluster.py | 2 -- cassandra/query.py | 12 ------------ 2 files changed, 14 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 8ef7b6610f..208d80de0e 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2880,8 +2880,6 @@ def _set_result(self, response): trace_id = getattr(response, 'trace_id', None) if trace_id: - if self.query: - self.query.trace_id = trace_id self._query_trace = QueryTrace(trace_id, self.session) self._warnings = getattr(response, 'warnings', None) diff --git a/cassandra/query.py b/cassandra/query.py index 0ba6c5947d..f1e0fd8592 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -175,18 +175,6 @@ class Statement(object): will be retried. """ - trace = None - """ - If :meth:`.Session.execute()` is run with `trace` set to :const:`True`, - this will be set to a :class:`.QueryTrace` instance. - """ - - trace_id = None - """ - If :meth:`.Session.execute()` is run with `trace` set to :const:`True`, - this will be set to the tracing ID from the server. - """ - consistency_level = None """ The :class:`.ConsistencyLevel` to be used for this operation. Defaults From 183c8edb16f7509d956e63b13d4c321e104f0438 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 13 Oct 2015 17:03:23 -0500 Subject: [PATCH 0128/2005] query trace tracing all in ResponseFuture also add manual page fetching for ResultSet --- cassandra/cluster.py | 83 ++++++++++++++++++++------------------------ 1 file changed, 38 insertions(+), 45 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 208d80de0e..c236353331 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1619,7 +1619,7 @@ def execute_async(self, query, parameters=None, trace=False, custom_payload=None any time. If `trace` is set to :const:`True`, you may get the query trace descriptors using - :meth:`.ResultSet.get_query_trace()` or :meth:`.ResultSet.get_all_query_traces()` + :meth:`.ResponseFuture.get_query_trace()` or :meth:`.ResponseFuture.get_all_query_traces()` on the future result. `custom_payload` is a :ref:`custom_payload` dict to be passed to the server. @@ -2689,7 +2689,7 @@ class ResponseFuture(object): _req_id = None _final_result = _NOT_SET _final_exception = None - _query_trace = None + _query_traces = None _callbacks = None _errbacks = None _current_host = None @@ -2880,7 +2880,9 @@ def _set_result(self, response): trace_id = getattr(response, 'trace_id', None) if trace_id: - self._query_trace = QueryTrace(trace_id, self.session) + if not self._query_traces: + self._query_traces = [] + self._query_traces.append(QueryTrace(trace_id, self.session)) self._warnings = getattr(response, 'warnings', None) self._custom_payload = getattr(response, 'custom_payload', None) @@ -3180,19 +3182,31 @@ def result(self, timeout=_NOT_SET): def get_query_trace(self, max_wait=None): """ - Returns the :class:`~.query.QueryTrace` instance representing a trace - of the last attempt for this operation, or :const:`None` if tracing was - not enabled for this query. Note that this may raise an exception if - there are problems retrieving the trace details from Cassandra. If the - trace is not available after `max_wait` seconds, + Fetches and returns the query trace of the last response, or `None` if tracing was + not enabled. + + Note that this may raise an exception if there are problems retrieving the trace + details from Cassandra. If the trace is not available after `max_wait_sec`, :exc:`cassandra.query.TraceUnavailable` will be raised. """ - if not self._query_trace: - return None + if self._query_traces: + return self._get_query_trace(len(self._query_traces) - 1, max_wait) - if not self._query_trace.events: - self._query_trace.populate(max_wait) - return self._query_trace + def get_all_query_traces(self, max_wait_per=None): + """ + Fetches and returns the query traces for all query pages, if tracing was enabled. + + See note in :meth:`~.get_current_query_trace` regarding possible exceptions. + """ + if self._query_traces: + return [self._get_query_trace(i, max_wait_per) for i in range(len(self._query_traces))] + return [] + + def _get_query_trace(self, i, max_wait): + trace = self._query_traces[i] + if not trace.events: + trace.populate(max_wait=max_wait) + return trace def add_callback(self, fn, *args, **kwargs): """ @@ -3334,7 +3348,6 @@ def __init__(self, response_future, initial_response): self._current_rows = initial_response or [] self._page_iter = None self._list_mode = False - self._traces = [response_future._query_trace] if response_future._query_trace else [] @property def has_more_pages(self): @@ -3343,6 +3356,10 @@ def has_more_pages(self): """ return self.response_future.has_more_pages + @property + def current_rows(self): + return self._current_rows or [] + def __iter__(self): if self._list_mode: return iter(self._current_rows) @@ -3358,17 +3375,19 @@ def next(self): self._current_rows = [] raise - self.response_future.start_fetching_next_page() - result = self.response_future.result() - if self.response_future._query_trace: - self._traces.append(self.response_future._query_trace) - self._current_rows = result._current_rows + self.fetch_next_page() self._page_iter = iter(self._current_rows) return next(self._page_iter) __next__ = next + def fetch_next_page(self): + if self.response_future.has_more_pages: + self.response_future.start_fetching_next_page() + result = self.response_future.result() + self._current_rows = result._current_rows + def _fetch_all(self): self._current_rows = list(self) self._page_iter = None @@ -3395,29 +3414,3 @@ def __nonzero__(self): return bool(self._current_rows) __bool__ = __nonzero__ - - def get_query_trace(self, max_wait_sec=None): - """ - Fetches and returns the query trace of the last response, or `None` if tracing was - not enabled. - - Note that this may raise an exception if there are problems retrieving the trace - details from Cassandra. If the trace is not available after `max_wait_sec`, - :exc:`cassandra.query.TraceUnavailable` will be raised. - """ - if self._traces: - return self._get_trace(0, max_wait_sec) - - def get_all_query_traces(self, max_wait_sec=None): - """ - Fetches and returns the query traces for all query pages, if tracing was enabled. - - See note in :meth:`~.get_current_query_trace` regarding possible exceptions. - """ - return [self._get_trace(i, max_wait_sec) for i in range(len(self._traces))] - - def _get_trace(self, i, max_wait): - trace = self._traces[i] - if not trace.events: - trace.populate(max_wait=max_wait) - return trace From 0f3d56cbf7a85c60d3a307fc698d7392c8271cab Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 13 Oct 2015 16:37:39 -0500 Subject: [PATCH 0129/2005] Added tests for PYTHON-406. NamedTable tests with materialized views. --- .../integration/cqlengine/query/test_named.py | 87 ++++++++++++++++++- 1 file changed, 86 insertions(+), 1 deletion(-) diff --git a/tests/integration/cqlengine/query/test_named.py b/tests/integration/cqlengine/query/test_named.py index a06aa22d55..6307f6ece7 100644 --- a/tests/integration/cqlengine/query/test_named.py +++ b/tests/integration/cqlengine/query/test_named.py @@ -12,15 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -from cassandra.cqlengine import operators +from cassandra.cqlengine import operators, connection from cassandra.cqlengine.named import NamedKeyspace from cassandra.cqlengine.operators import EqualsOperator, GreaterThanOrEqualOperator from cassandra.cqlengine.query import ResultObject +from cassandra.concurrent import execute_concurrent_with_args from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration.cqlengine.query.test_queryset import BaseQuerySetUsage +from tests.integration import BasicSharedKeyspaceUnitTestCase + + class TestQuerySetOperation(BaseCassEngTestCase): @classmethod @@ -259,3 +263,84 @@ def test_get_multipleobjects_exception(self): self.table.objects.get(test_id=1) +class TestQuerySetCountSelectionAndIteration(BasicSharedKeyspaceUnitTestCase): + + def test_named_table_with_mv(self): + """ + Test NamedTable access to materialized views + + Creates some materialized views using Traditional CQL. Then ensures we can access those materialized view using + the NamedKeyspace, and NamedTable interfaces. Tests basic filtering as well. + + @since 3.0.0 + @jira_ticket PYTHON-406 + @expected_result Named Tables should have access to materialized views + + @test_category materialized_view + """ + connection.setup(['127.0.0.1'], self.keyspace_name) + + # Create a base table and two materialized views + create_table = """CREATE TABLE {0}.scores( + user TEXT, + game TEXT, + year INT, + month INT, + day INT, + score INT, + PRIMARY KEY (user, game, year, month, day) + )""".format(self.keyspace_name) + + self.session.execute(create_table) + create_mv = """CREATE MATERIALIZED VIEW {0}.monthlyhigh AS + SELECT game, year, month, score, user, day FROM {0}.scores + WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL + PRIMARY KEY ((game, year, month), score, user, day) + WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)""".format(self.keyspace_name) + + self.session.execute(create_mv) + + create_mv_alltime = """CREATE MATERIALIZED VIEW {0}.alltimehigh AS + SELECT * FROM {0}.scores + WHERE game IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND day IS NOT NULL + PRIMARY KEY (game, score, user, year, month, day) + WITH CLUSTERING ORDER BY (score DESC)""".format(self.keyspace_name) + + self.session.execute(create_mv_alltime) + + # Populate the base table with data + prepared_insert = self.session.prepare("""INSERT INTO {0}.scores (user, game, year, month, day, score) VALUES (?, ?, ? ,? ,?, ?)""".format(self.keyspace_name)) + parameters = {('pcmanus', 'Coup', 2015, 5, 1, 4000), + ('jbellis', 'Coup', 2015, 5, 3, 1750), + ('yukim', 'Coup', 2015, 5, 3, 2250), + ('tjake', 'Coup', 2015, 5, 3, 500), + ('iamaleksey', 'Coup', 2015, 6, 1, 2500), + ('tjake', 'Coup', 2015, 6, 2, 1000), + ('pcmanus', 'Coup', 2015, 6, 2, 2000), + ('jmckenzie', 'Coup', 2015, 6, 9, 2700), + ('jbellis', 'Coup', 2015, 6, 20, 3500), + ('jbellis', 'Checkers', 2015, 6, 20, 1200), + ('jbellis', 'Chess', 2015, 6, 21, 3500), + ('pcmanus', 'Chess', 2015, 1, 25, 3200)} + execute_concurrent_with_args(self.session, prepared_insert, parameters) + + # Attempt to query the data using Named Table interface + # Also test filtering on mv's + key_space = NamedKeyspace(self.keyspace_name) + table = key_space.table("scores") + mv_monthly = key_space.table("monthlyhigh") + table_objects = table.objects.all() + mv_monthly_objects = mv_monthly.objects.all() + mv_all_time = key_space.table("alltimehigh") + mv_all_objects = mv_all_time.objects.all() + self.assertEqual(len(table_objects), len(parameters)) + self.assertEqual(len(mv_monthly_objects), len(parameters)) + self.assertEqual(len(mv_all_objects), len(parameters)) + + filtered_mv_monthly_objects = mv_monthly.objects.filter(game='Chess', year=2015, month=6) + self.assertEquals(len(filtered_mv_monthly_objects), 1) + self.assertEquals(filtered_mv_monthly_objects[0]['score'], 3500) + self.assertEquals(filtered_mv_monthly_objects[0]['user'], 'jbellis') + filtered_mv_alltime_objects = mv_all_time.objects.filter(game='Chess') + self.assertEquals(len(filtered_mv_alltime_objects), 2) + self.assertEquals(filtered_mv_alltime_objects[0]['score'], 3500) From 88f0afe9b4c1774f3caf9173ef40412da10fcd10 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 14 Oct 2015 10:15:33 -0500 Subject: [PATCH 0130/2005] Add ResultSet get_*query*trace* pass-through to future --- cassandra/cluster.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index c236353331..83307208a0 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3196,7 +3196,7 @@ def get_all_query_traces(self, max_wait_per=None): """ Fetches and returns the query traces for all query pages, if tracing was enabled. - See note in :meth:`~.get_current_query_trace` regarding possible exceptions. + See note in :meth:`~.get_query_trace` regarding possible exceptions. """ if self._query_traces: return [self._get_query_trace(i, max_wait_per) for i in range(len(self._query_traces))] @@ -3387,6 +3387,8 @@ def fetch_next_page(self): self.response_future.start_fetching_next_page() result = self.response_future.result() self._current_rows = result._current_rows + else: + self._current_rows = [] def _fetch_all(self): self._current_rows = list(self) @@ -3414,3 +3416,17 @@ def __nonzero__(self): return bool(self._current_rows) __bool__ = __nonzero__ + + def get_query_trace(self, max_wait_sec=None): + """ + Gets the last query trace from the associated future. + See :meth:`.ResponseFuture.get_query_trace` for details. + """ + return self.response_future.get_query_trace(max_wait_sec) + + def get_all_query_traces(self, max_wait_sec_per=None): + """ + Gets all query traces from the associated future. + See :meth:`.ResponseFuture.get_all_query_traces` for details. + """ + return self.response_future.get_all_query_traces(max_wait_sec_per) From 5957a65ec12167cd83eda10a06ff87c24f3db7ae Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 14 Oct 2015 10:18:05 -0500 Subject: [PATCH 0131/2005] ResponseFuture doc updates --- docs/api/cassandra/cluster.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 3ea0d23091..8075476b8a 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -126,6 +126,8 @@ .. automethod:: get_query_trace() + .. automethod:: get_all_query_traces() + .. autoattribute:: custom_payload() .. autoattribute:: has_more_pages From 385baa1659df81d95b81a6582a10c455f25a181e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 14 Oct 2015 10:18:23 -0500 Subject: [PATCH 0132/2005] Trace test updates for PYTHON-318 --- tests/integration/standard/test_client_warnings.py | 4 ++-- tests/integration/standard/test_cluster.py | 10 +++++----- tests/unit/test_resultset.py | 11 +++++++---- 3 files changed, 14 insertions(+), 11 deletions(-) diff --git a/tests/integration/standard/test_client_warnings.py b/tests/integration/standard/test_client_warnings.py index 4316c9398a..90f224825a 100644 --- a/tests/integration/standard/test_client_warnings.py +++ b/tests/integration/standard/test_client_warnings.py @@ -91,7 +91,7 @@ def test_warning_with_trace(self): future.result() self.assertEqual(len(future.warnings), 1) self.assertRegexpMatches(future.warnings[0], 'Batch.*exceeding.*') - self.assertIsNotNone(future._query_trace) + self.assertIsNotNone(future.get_query_trace()) def test_warning_with_custom_payload(self): """ @@ -127,5 +127,5 @@ def test_warning_with_trace_and_custom_payload(self): future.result() self.assertEqual(len(future.warnings), 1) self.assertRegexpMatches(future.warnings[0], 'Batch.*exceeding.*') - self.assertIsNotNone(future._query_trace) + self.assertIsNotNone(future.get_query_trace()) self.assertDictEqual(future.custom_payload, payload) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 97adb45f52..d6d1394f7d 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -438,11 +438,11 @@ def test_trace(self): session = cluster.connect() def check_trace(trace): - self.assertIsNot(None, trace.request_type) - self.assertIsNot(None, trace.duration) - self.assertIsNot(None, trace.started_at) - self.assertIsNot(None, trace.coordinator) - self.assertIsNot(None, trace.events) + self.assertIsNotNone(trace.request_type) + self.assertIsNotNone(trace.duration) + self.assertIsNotNone(trace.started_at) + self.assertIsNotNone(trace.coordinator) + self.assertIsNotNone(trace.events) result = session.execute( "SELECT * FROM system.local", trace=True) check_trace(result.get_query_trace()) diff --git a/tests/unit/test_resultset.py b/tests/unit/test_resultset.py index bb85de47ca..2a68376767 100644 --- a/tests/unit/test_resultset.py +++ b/tests/unit/test_resultset.py @@ -37,7 +37,8 @@ def test_iter_paged(self): response_future.result.side_effect = (ResultSet(Mock(), expected[-5:]), ) # ResultSet is iterable, so it must be protected in order to be returned whole by the Mock rs = ResultSet(response_future, expected[:5]) itr = iter(rs) - type(response_future).has_more_pages = PropertyMock(side_effect=(True, False)) # after init to avoid side effects being consumed by init + # this is brittle, depends on internal impl details. Would like to find a better way + type(response_future).has_more_pages = PropertyMock(side_effect=(True, True, False)) # after init to avoid side effects being consumed by init self.assertListEqual(list(itr), expected) def test_list_non_paged(self): @@ -54,7 +55,8 @@ def test_list_paged(self): response_future = Mock(has_more_pages=True) response_future.result.side_effect = (ResultSet(Mock(), expected[-5:]), ) # ResultSet is iterable, so it must be protected in order to be returned whole by the Mock rs = ResultSet(response_future, expected[:5]) - type(response_future).has_more_pages = PropertyMock(side_effect=(True, True, False)) # one True for getitem check/warn, then True, False for two pages + # this is brittle, depends on internal impl details. Would like to find a better way + type(response_future).has_more_pages = PropertyMock(side_effect=(True, True, True, False)) # First two True are consumed on check entering list mode self.assertEqual(rs[9], expected[9]) self.assertEqual(list(rs), expected) @@ -119,7 +121,8 @@ def test_index_list_mode(self): response_future = Mock(has_more_pages=True) response_future.result.side_effect = (ResultSet(Mock(), expected[-5:]), ) # ResultSet is iterable, so it must be protected in order to be returned whole by the Mock rs = ResultSet(response_future, expected[:5]) - type(response_future).has_more_pages = PropertyMock(side_effect=(True, True, False)) # First True is consumed on check entering list mode + # this is brittle, depends on internal impl details. Would like to find a better way + type(response_future).has_more_pages = PropertyMock(side_effect=(True, True, True, False)) # First two True are consumed on check entering list mode # index access before iteration causes list to be materialized self.assertEqual(rs[0], expected[0]) self.assertEqual(rs[9], expected[9]) @@ -146,7 +149,7 @@ def test_eq(self): response_future = Mock(has_more_pages=True) response_future.result.side_effect = (ResultSet(Mock(), expected[-5:]), ) # ResultSet is iterable, so it must be protected in order to be returned whole by the Mock rs = ResultSet(response_future, expected[:5]) - type(response_future).has_more_pages = PropertyMock(side_effect=(True, True, False)) + type(response_future).has_more_pages = PropertyMock(side_effect=(True, True, True, False)) # eq before iteration causes list to be materialized self.assertEqual(rs, expected) From f6026d9ffece229fc8b488c14262f0b0dba294dd Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 14 Oct 2015 11:24:57 -0500 Subject: [PATCH 0133/2005] Handle Infinity and NaN in cql string encoding PYTHON-282 --- cassandra/encoder.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cassandra/encoder.py b/cassandra/encoder.py index 2d2433333e..e4022306c1 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -23,6 +23,7 @@ from binascii import hexlify import calendar import datetime +import math import sys import types from uuid import UUID @@ -145,7 +146,12 @@ def cql_encode_float(self, val): """ Encode floats using repr to preserve precision """ - return repr(val) + if math.isinf(val): + return 'Infinity' if val > 0 else '-Infinity' + elif math.isnan(val): + return 'NaN' + else: + return repr(val) def cql_encode_datetime(self, val): """ From d1391d82cc5e2680c25654da839cd32453f92650 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 14 Oct 2015 11:25:29 -0500 Subject: [PATCH 0134/2005] Integration test for inf and nan float values PYTHON-282 --- tests/integration/standard/test_types.py | 37 ++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index 991d6b7845..eb9baefb55 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -18,10 +18,12 @@ import unittest # noqa from datetime import datetime +import math import six from cassandra import InvalidRequest from cassandra.cluster import Cluster +from cassandra.concurrent import execute_concurrent_with_args from cassandra.cqltypes import Int32Type, EMPTY from cassandra.query import dict_factory, ordered_dict_factory from cassandra.util import sortedset @@ -703,3 +705,38 @@ def test_can_read_composite_type(self): result = s.execute("SELECT * FROM composites WHERE a = 0")[0] self.assertEqual(0, result.a) self.assertEqual(('abc',), result.b) + + def test_special_float_cql_encoding(self): + """ + PYTHON-282 + """ + s = self.session + + s.execute(""" + CREATE TABLE float_cql_encoding ( + f float PRIMARY KEY, + d double + )""") + items = (float('nan'), float('inf'), float('-inf')) + + def verify_insert_select(ins_statement, sel_statement): + execute_concurrent_with_args(s, ins_statement, ((f, f) for f in items)) + for f in items: + row = s.execute(sel_statement, (f,))[0] + print row + if math.isnan(f): + self.assertTrue(math.isnan(row.f)) + self.assertTrue(math.isnan(row.d)) + else: + self.assertEqual(row.f, f) + self.assertEqual(row.d, f) + + # cql encoding + verify_insert_select('INSERT INTO float_cql_encoding (f, d) VALUES (%s, %s)', + 'SELECT * FROM float_cql_encoding WHERE f=%s') + + s.execute("TRUNCATE float_cql_encoding") + + # prepared binding + verify_insert_select(s.prepare('INSERT INTO float_cql_encoding (f, d) VALUES (?, ?)'), + s.prepare('SELECT * FROM float_cql_encoding WHERE f=?')) From 3b228cb73a6a8f9878e70903da05b4df99457296 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 14 Oct 2015 15:33:40 -0500 Subject: [PATCH 0135/2005] Fixing issue with ; delimeter on as_cql_query for keyspace. Changing all_as_cql to internal method to avoid confusion --- cassandra/metadata.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 9ef94065b7..fb512eb825 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -656,7 +656,7 @@ def as_cql_query(self): ret = "CREATE KEYSPACE %s WITH replication = %s " % ( protect_name(self.name), self.replication_strategy.export_for_schema()) - return ret + (' AND durable_writes = %s;' % ("true" if self.durable_writes else "false")) + return ret + (' AND durable_writes = %s' % ("true" if self.durable_writes else "false")) def user_type_strings(self): user_type_strings = [] @@ -1079,18 +1079,18 @@ def export_as_string(self): (self.keyspace_name, self.name) for line in traceback.format_exception(*self._exc_info): ret += line - ret += "\nApproximate structure, for reference:\n(this should not be used to reproduce this schema)\n\n%s\n*/" % self.all_as_cql() + ret += "\nApproximate structure, for reference:\n(this should not be used to reproduce this schema)\n\n%s\n*/" % self._all_as_cql() elif not self.is_cql_compatible: # If we can't produce this table with CQL, comment inline ret = "/*\nWarning: Table %s.%s omitted because it has constructs not compatible with CQL (was created via legacy API).\n" % \ (self.keyspace_name, self.name) - ret += "\nApproximate structure, for reference:\n(this should not be used to reproduce this schema)\n\n%s\n*/" % self.all_as_cql() + ret += "\nApproximate structure, for reference:\n(this should not be used to reproduce this schema)\n\n%s\n*/" % self._all_as_cql() else: - ret = self.all_as_cql() + ret = self._all_as_cql() return ret - def all_as_cql(self): + def _all_as_cql(self): ret = self.as_cql_query(formatted=True) ret += ";" From 30f2a2a895ba8bcdd69d684810743a88f874337a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 15 Oct 2015 09:26:08 -0500 Subject: [PATCH 0136/2005] semicolon following keyspace CQL in export_as_string --- cassandra/metadata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index fb512eb825..9750741bbe 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -633,7 +633,7 @@ def export_as_string(self): Returns a CQL query string that can be used to recreate the entire keyspace, including user-defined types and tables. """ - cql = "\n\n".join([self.as_cql_query()] + cql = "\n\n".join([self.as_cql_query() + ';'] + self.user_type_strings() + [f.export_as_string() for f in self.functions.values()] + [a.export_as_string() for a in self.aggregates.values()] From c43f65e1cbc2c334e0de3dc8be4d009fd1230f9a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 15 Oct 2015 09:53:53 -0500 Subject: [PATCH 0137/2005] test: update udt meta string test following PYTHON-405 --- tests/unit/test_metadata.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 1b3c49378c..86758e2332 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -329,17 +329,17 @@ class UserTypesTest(unittest.TestCase): def test_as_cql_query(self): field_types = [IntegerType, AsciiType, TupleType.apply_parameters([IntegerType, AsciiType])] udt = UserType("ks1", "mytype", ["a", "b", "c"], field_types) - self.assertEqual("CREATE TYPE ks1.mytype (a varint, b ascii, c frozen>);", udt.as_cql_query(formatted=False)) + self.assertEqual("CREATE TYPE ks1.mytype (a varint, b ascii, c frozen>)", udt.as_cql_query(formatted=False)) self.assertEqual("""CREATE TYPE ks1.mytype ( a varint, b ascii, c frozen> -);""", udt.as_cql_query(formatted=True)) +);""", udt.export_as_string()) def test_as_cql_query_name_escaping(self): udt = UserType("MyKeyspace", "MyType", ["AbA", "keyspace"], [AsciiType, AsciiType]) - self.assertEqual('CREATE TYPE "MyKeyspace"."MyType" ("AbA" ascii, "keyspace" ascii);', udt.as_cql_query(formatted=False)) + self.assertEqual('CREATE TYPE "MyKeyspace"."MyType" ("AbA" ascii, "keyspace" ascii)', udt.as_cql_query(formatted=False)) class IndexTest(unittest.TestCase): From eb8914f4c8833e0bee51c1152447dcd08e255dd7 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 15 Oct 2015 09:56:26 -0500 Subject: [PATCH 0138/2005] Fixing name collision in integration tests --- tests/integration/cqlengine/query/test_named.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/integration/cqlengine/query/test_named.py b/tests/integration/cqlengine/query/test_named.py index 6307f6ece7..90a6ce32d4 100644 --- a/tests/integration/cqlengine/query/test_named.py +++ b/tests/integration/cqlengine/query/test_named.py @@ -263,7 +263,7 @@ def test_get_multipleobjects_exception(self): self.table.objects.get(test_id=1) -class TestQuerySetCountSelectionAndIteration(BasicSharedKeyspaceUnitTestCase): +class TestTableWithMV(BasicSharedKeyspaceUnitTestCase): def test_named_table_with_mv(self): """ @@ -338,9 +338,9 @@ def test_named_table_with_mv(self): self.assertEqual(len(mv_all_objects), len(parameters)) filtered_mv_monthly_objects = mv_monthly.objects.filter(game='Chess', year=2015, month=6) - self.assertEquals(len(filtered_mv_monthly_objects), 1) - self.assertEquals(filtered_mv_monthly_objects[0]['score'], 3500) - self.assertEquals(filtered_mv_monthly_objects[0]['user'], 'jbellis') + self.assertEqual(len(filtered_mv_monthly_objects), 1) + self.assertEqual(filtered_mv_monthly_objects[0]['score'], 3500) + self.assertEqual(filtered_mv_monthly_objects[0]['user'], 'jbellis') filtered_mv_alltime_objects = mv_all_time.objects.filter(game='Chess') - self.assertEquals(len(filtered_mv_alltime_objects), 2) - self.assertEquals(filtered_mv_alltime_objects[0]['score'], 3500) + self.assertEqual(len(filtered_mv_alltime_objects), 2) + self.assertEqual(filtered_mv_alltime_objects[0]['score'], 3500) From 803e5742fcc2a901a40695a5d5dab81202bf668a Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 15 Oct 2015 15:02:09 -0500 Subject: [PATCH 0139/2005] Fixing issue with default_keyspace being over-ridden in cqlengine tests --- tests/integration/cqlengine/query/test_named.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tests/integration/cqlengine/query/test_named.py b/tests/integration/cqlengine/query/test_named.py index 90a6ce32d4..73b7921bc5 100644 --- a/tests/integration/cqlengine/query/test_named.py +++ b/tests/integration/cqlengine/query/test_named.py @@ -17,6 +17,7 @@ from cassandra.cqlengine.operators import EqualsOperator, GreaterThanOrEqualOperator from cassandra.cqlengine.query import ResultObject from cassandra.concurrent import execute_concurrent_with_args +from cassandra.cqlengine import models from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration.cqlengine.query.test_queryset import BaseQuerySetUsage @@ -263,7 +264,13 @@ def test_get_multipleobjects_exception(self): self.table.objects.get(test_id=1) -class TestTableWithMV(BasicSharedKeyspaceUnitTestCase): +class TestAlpha(BasicSharedKeyspaceUnitTestCase): + + def setUp(self): + self.default_keyspace = models.DEFAULT_KEYSPACE + + def tearDown(self): + models.DEFAULT_KEYSPACE = self.default_keyspace def test_named_table_with_mv(self): """ From 53222eec541205c7a9ebc529e14a025e56fb1e9c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 15 Oct 2015 15:30:16 -0500 Subject: [PATCH 0140/2005] Make bind ignore extra keys in dict values PYTHON-178 --- cassandra/query.py | 15 +++++++-------- tests/unit/test_parameter_binding.py | 6 ++++-- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/cassandra/query.py b/cassandra/query.py index e1f8000894..679f8ef7e1 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -486,23 +486,24 @@ def bind(self, values): * short sequences will be extended to match bind parameters with UNSET_VALUE * names may be omitted from a dict with UNSET_VALUE implied. + .. versionchanged:: 3.0.0 + + method will not throw if extra keys are present in bound dict (PYTHON-178) """ if values is None: values = () proto_version = self.prepared_statement.protocol_version col_meta = self.prepared_statement.column_metadata - col_meta_len = len(col_meta) - value_len = len(values) # special case for binding dicts if isinstance(values, dict): - unbound_values = values.copy() + values_dict = values values = [] # sort values accordingly for col in col_meta: try: - values.append(unbound_values.pop(col.name)) + values.append(values_dict[col.name]) except KeyError: if proto_version >= 4: values.append(UNSET_VALUE) @@ -511,10 +512,8 @@ def bind(self, values): 'Column name `%s` not found in bound dict.' % (col.name)) - value_len = len(values) - - if unbound_values: - raise ValueError("Unexpected arguments provided to bind(): %s" % unbound_values.keys()) + value_len = len(values) + col_meta_len = len(col_meta) if value_len > col_meta_len: raise ValueError( diff --git a/tests/unit/test_parameter_binding.py b/tests/unit/test_parameter_binding.py index a53a9f6dfb..044e032554 100644 --- a/tests/unit/test_parameter_binding.py +++ b/tests/unit/test_parameter_binding.py @@ -25,6 +25,7 @@ from cassandra.util import OrderedDict from six.moves import xrange +import six class ParamBindingTest(unittest.TestCase): @@ -147,8 +148,9 @@ def test_dict_missing_routing_key(self): def test_missing_value(self): self.assertRaises(KeyError, self.bound.bind, {'rk0': 0, 'rk1': 0, 'ck0': 0}) - def test_dict_extra_value(self): - self.assertRaises(ValueError, self.bound.bind, {'rk0': 0, 'rk1': 0, 'ck0': 0, 'v0': 0, 'should_not_be_here': 123}) + def test_extra_value(self): + self.bound.bind({'rk0': 0, 'rk1': 0, 'ck0': 0, 'v0': 0, 'should_not_be_here': 123}) # okay to have extra keys in dict + self.assertEqual(self.bound.values, [six.b('\x00') * 4] * 4) # four encoded zeros self.assertRaises(ValueError, self.bound.bind, (0, 0, 0, 0, 123)) def test_values_none(self): From 55ebfdccc1275a687fdca653d333316b50cb20f5 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 15 Oct 2015 16:17:50 -0500 Subject: [PATCH 0141/2005] Update integration tests allowing unbound keys in dict PYTHON-178 --- tests/integration/standard/test_prepared_statements.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index 7218045cb5..b0be36147c 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -173,7 +173,7 @@ def _run_too_many_bind_values(self, session): self.assertIsInstance(prepared, PreparedStatement) self.assertRaises(ValueError, prepared.bind, (1, 2)) - def test_too_many_bind_values_dicts(self): + def test_imprecise_bind_values_dicts(self): """ Ensure an error is thrown when attempting to bind the wrong values with dict bindings @@ -189,16 +189,16 @@ def test_too_many_bind_values_dicts(self): self.assertIsInstance(prepared, PreparedStatement) - # too many values - self.assertRaises(ValueError, prepared.bind, {'k': 1, 'v': 2, 'v2': 3}) + # too many values is ok - others are ignored + prepared.bind({'k': 1, 'v': 2, 'v2': 3}) # right number, but one does not belong if PROTOCOL_VERSION < 4: # pre v4, the driver bails with key error when 'v' is found missing self.assertRaises(KeyError, prepared.bind, {'k': 1, 'v2': 3}) else: - # post v4, the driver uses UNSET_VALUE for 'v' and bails when 'v2' is unbound - self.assertRaises(ValueError, prepared.bind, {'k': 1, 'v2': 3}) + # post v4, the driver uses UNSET_VALUE for 'v' and 'v2' is ignored + prepared.bind({'k': 1, 'v2': 3}) # also catch too few variables with dicts self.assertIsInstance(prepared, PreparedStatement) From 938150d3bdca146ad22be2e1bdd54906045f1a23 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 15 Oct 2015 16:33:30 -0500 Subject: [PATCH 0142/2005] Adding test for multiple indicies metadata --- tests/integration/standard/test_metadata.py | 31 +++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 325d6a7bdd..c8d726d171 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -681,6 +681,37 @@ def test_refresh_user_aggregate_metadata(self): cluster2.shutdown() + def test_multiple_indices(self): + """ + test multiple indices on the same column. + + Creates a table and two indices. Ensures that both indices metatdata is surface appropriately. + + @since 3.0.0 + @jira_ticket PYTHON-276 + @expected_result IndexMetadata is appropriately surfaced + + @test_category metadata + """ + self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b map)".format(self.keyspace_name, self.function_table_name)) + self.session.execute("CREATE INDEX index_1 ON {0}.{1}(b)".format(self.keyspace_name, self.function_table_name)) + self.session.execute("CREATE INDEX index_2 ON {0}.{1}(keys(b))".format(self.keyspace_name, self.function_table_name)) + + indices = self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].indexes + self.assertEqual(len(indices), 2) + index_1 = indices["index_1"] + index_2 = indices['index_2'] + self.assertEqual(index_1.table_name, "test_multiple_indices") + self.assertEqual(index_1.name, "index_1") + self.assertEqual(index_1.kind, "COMPOSITES") + self.assertEqual(index_1.index_options["target"], "values(b)") + self.assertEqual(index_1.keyspace_name, "schemametadatatests") + self.assertEqual(index_2.table_name, "test_multiple_indices") + self.assertEqual(index_2.name, "index_2") + self.assertEqual(index_2.kind, "COMPOSITES") + self.assertEqual(index_2.index_options["target"], "keys(b)") + self.assertEqual(index_2.keyspace_name, "schemametadatatests") + class TestCodeCoverage(unittest.TestCase): From 1107c5c2ebdbb6e8c49e65d980ff93b54e829227 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 16 Oct 2015 13:14:38 -0500 Subject: [PATCH 0143/2005] Skipping mv tests in cqlengine if c* version is less than 3.0 --- tests/integration/cqlengine/query/test_named.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/tests/integration/cqlengine/query/test_named.py b/tests/integration/cqlengine/query/test_named.py index 73b7921bc5..2cc88ec51d 100644 --- a/tests/integration/cqlengine/query/test_named.py +++ b/tests/integration/cqlengine/query/test_named.py @@ -12,6 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + from cassandra.cqlengine import operators, connection from cassandra.cqlengine.named import NamedKeyspace from cassandra.cqlengine.operators import EqualsOperator, GreaterThanOrEqualOperator @@ -23,7 +28,7 @@ from tests.integration.cqlengine.query.test_queryset import BaseQuerySetUsage -from tests.integration import BasicSharedKeyspaceUnitTestCase +from tests.integration import BasicSharedKeyspaceUnitTestCase, get_server_versions class TestQuerySetOperation(BaseCassEngTestCase): @@ -264,10 +269,14 @@ def test_get_multipleobjects_exception(self): self.table.objects.get(test_id=1) -class TestAlpha(BasicSharedKeyspaceUnitTestCase): +class TestNamedWithMV(BasicSharedKeyspaceUnitTestCase): def setUp(self): self.default_keyspace = models.DEFAULT_KEYSPACE + cass_version = get_server_versions()[0] + if cass_version < (3, 0): + raise unittest.SkipTest("Materialized views require Cassandra 3.0+") + super(TestNamedWithMV, self).setUp() def tearDown(self): models.DEFAULT_KEYSPACE = self.default_keyspace From d4c380cb010fc3396e6217597ead1ef6024be723 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 16 Oct 2015 13:56:36 -0500 Subject: [PATCH 0144/2005] Make ResultSet handle more than just list results for use with custom ProtocolHandlers providing different ResultMessage deserialization PYTHON-430 --- cassandra/cluster.py | 12 +++++++++--- cassandra/obj_parser.pyx | 4 ++++ cassandra/protocol.py | 4 ++++ 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 83307208a0..9d3427b869 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -61,7 +61,7 @@ BatchMessage, RESULT_KIND_PREPARED, RESULT_KIND_SET_KEYSPACE, RESULT_KIND_ROWS, RESULT_KIND_SCHEMA_CHANGE, MIN_SUPPORTED_VERSION, - ProtocolHandler) + ProtocolHandler, _RESULT_SEQUENCE_TYPES) from cassandra.metadata import Metadata, protect_name, murmur3 from cassandra.policies import (TokenAwarePolicy, DCAwareRoundRobinPolicy, SimpleConvictionPolicy, ExponentialReconnectionPolicy, HostDistance, @@ -3345,7 +3345,7 @@ class ResultSet(object): def __init__(self, response_future, initial_response): self.response_future = response_future - self._current_rows = initial_response or [] + self._set_current_rows(initial_response) self._page_iter = None self._list_mode = False @@ -3386,10 +3386,16 @@ def fetch_next_page(self): if self.response_future.has_more_pages: self.response_future.start_fetching_next_page() result = self.response_future.result() - self._current_rows = result._current_rows + self._current_rows = result._current_rows # ResultSet has already _set_current_rows to the appropriate form else: self._current_rows = [] + def _set_current_rows(self, result): + if isinstance(result, _RESULT_SEQUENCE_TYPES): + self._current_rows = result + else: + self._current_rows = [result] if result else [] + def _fetch_all(self): self._current_rows = list(self) self._page_iter = None diff --git a/cassandra/obj_parser.pyx b/cassandra/obj_parser.pyx index 8aa5b3940f..21ce95e0bd 100644 --- a/cassandra/obj_parser.pyx +++ b/cassandra/obj_parser.pyx @@ -38,6 +38,8 @@ cdef class LazyParser(ColumnParser): # supported in cpdef methods return parse_rows_lazy(reader, desc) + cpdef get_cython_generator_type(self): + return get_cython_generator_type() def parse_rows_lazy(BytesIOReader reader, ParseDesc desc): cdef Py_ssize_t i, rowcount @@ -45,6 +47,8 @@ def parse_rows_lazy(BytesIOReader reader, ParseDesc desc): cdef RowParser rowparser = TupleRowParser() return (rowparser.unpack_row(reader, desc) for i in range(rowcount)) +def get_cython_generator_type(): + return type((i for i in range(0))) cdef class TupleRowParser(RowParser): """ diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 90fdcd091c..1c6ea85cf1 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -1000,6 +1000,7 @@ def decode_message(cls, protocol_version, user_type_map, stream_id, flags, opcod return msg +_RESULT_SEQUENCE_TYPES = (list, tuple) # types retuned by ResultMessages def cython_protocol_handler(colparser): """ @@ -1045,6 +1046,9 @@ class CythonProtocolHandler(ProtocolHandler): if HAVE_CYTHON: from cassandra.obj_parser import ListParser, LazyParser ProtocolHandler = cython_protocol_handler(ListParser()) + + lazy_parser = LazyParser() + _RESULT_SEQUENCE_TYPES += (lazy_parser.get_cython_generator_type(),) LazyProtocolHandler = cython_protocol_handler(LazyParser()) else: # Use Python-based ProtocolHandler From 795acbd254987c1f11ff41a78f42ecb26a3fb0f4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 16 Oct 2015 13:57:36 -0500 Subject: [PATCH 0145/2005] test: new tests covering paging for Lazy and NumPy parsers PYTHON-430 --- .../standard/test_cython_protocol_handlers.py | 72 +++++++++++++++++-- 1 file changed, 65 insertions(+), 7 deletions(-) diff --git a/tests/integration/standard/test_cython_protocol_handlers.py b/tests/integration/standard/test_cython_protocol_handlers.py index 985b795302..888f73fed6 100644 --- a/tests/integration/standard/test_cython_protocol_handlers.py +++ b/tests/integration/standard/test_cython_protocol_handlers.py @@ -55,21 +55,79 @@ def test_cython_lazy_parser(self): """ verify_iterator_data(self.assertEqual, get_data(LazyProtocolHandler)) + @numpytest + def test_cython_lazy_results_paged(self): + """ + Test Cython-based parser that returns an iterator, over multiple pages + """ + # arrays = { 'a': arr1, 'b': arr2, ... } + cluster = Cluster(protocol_version=PROTOCOL_VERSION) + session = cluster.connect(keyspace="testspace") + session.row_factory = tuple_factory + session.client_protocol_handler = LazyProtocolHandler + session.default_fetch_size = 2 + + self.assertLess(session.default_fetch_size, self.N_ITEMS) + + results = session.execute("SELECT * FROM test_table") + + self.assertTrue(results.has_more_pages) + self.assertEqual(verify_iterator_data(self.assertEqual, results), self.N_ITEMS) # make sure we see all rows + + cluster.shutdown() + @numpytest def test_numpy_parser(self): """ Test Numpy-based parser that returns a NumPy array """ # arrays = { 'a': arr1, 'b': arr2, ... } - arrays = get_data(NumpyProtocolHandler) + result = get_data(NumpyProtocolHandler) + self.assertFalse(result.has_more_pages) + self._verify_numpy_page(result[0]) + @numpytest + def test_numpy_results_paged(self): + """ + Test Numpy-based parser that returns a NumPy array + """ + # arrays = { 'a': arr1, 'b': arr2, ... } + cluster = Cluster(protocol_version=PROTOCOL_VERSION) + session = cluster.connect(keyspace="testspace") + session.row_factory = tuple_factory + session.client_protocol_handler = NumpyProtocolHandler + session.default_fetch_size = 2 + + expected_pages = (self.N_ITEMS + session.default_fetch_size - 1) // session.default_fetch_size + + self.assertLess(session.default_fetch_size, self.N_ITEMS) + + results = session.execute("SELECT * FROM test_table") + + self.assertTrue(results.has_more_pages) + for count, page in enumerate(results, 1): + self.assertIsInstance(page, dict) + for colname, arr in page.items(): + if count <= expected_pages: + self.assertGreater(len(arr), 0, "page count: %d" % (count,)) + self.assertLessEqual(len(arr), session.default_fetch_size) + else: + # we get one extra item out of this iteration because of the way NumpyParser returns results + # The last page is returned as a dict with zero-length arrays + self.assertEqual(len(arr), 0) + self.assertEqual(self._verify_numpy_page(page), len(arr)) + self.assertEqual(count, expected_pages + 1) # see note about extra 'page' above + + cluster.shutdown() + + def _verify_numpy_page(self, page): colnames = self.colnames datatypes = get_primitive_datatypes() for colname, datatype in zip(colnames, datatypes): - arr = arrays[colname] + arr = page[colname] self.match_dtype(datatype, arr.dtype) - verify_iterator_data(self.assertEqual, arrays_to_list_of_tuples(arrays, colnames)) + return verify_iterator_data(self.assertEqual, arrays_to_list_of_tuples(page, colnames)) def match_dtype(self, datatype, dtype): """Match a string cqltype (e.g. 'int' or 'blob') with a numpy dtype""" @@ -100,9 +158,7 @@ def arrays_to_list_of_tuples(arrays, colnames): def get_data(protocol_handler): """ - Get some data from the test table. - - :param key: if None, get all results (100.000 results), otherwise get only one result + Get data from the test table. """ cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect(keyspace="testspace") @@ -121,9 +177,11 @@ def verify_iterator_data(assertEqual, results): Check the result of get_data() when this is a list or iterator of tuples """ - for result in results: + count = 0 + for count, result in enumerate(results, 1): params = get_all_primitive_params(result[0]) assertEqual(len(params), len(result), msg="Not the right number of columns?") for expected, actual in zip(params, result): assertEqual(actual, expected) + return count From 9e6ee6fcde72681090513206804005d8114ca15f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 16 Oct 2015 15:23:56 -0500 Subject: [PATCH 0146/2005] cython deser: Handle tuple fallback for non-alpha ids in udts --- cassandra/deserializers.pyx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cassandra/deserializers.pyx b/cassandra/deserializers.pyx index e2f2328424..ffbb702a4f 100644 --- a/cassandra/deserializers.pyx +++ b/cassandra/deserializers.pyx @@ -378,8 +378,10 @@ cdef class DesUserType(DesTupleType): values = DesTupleType.deserialize(self, buf, protocol_version) if typ.mapped_class: return typ.mapped_class(**dict(zip(typ.fieldnames, values))) - else: + elif typ.tuple_type: return typ.tuple_type(*values) + else: + return tuple(values) cdef class DesCompositeType(_DesParameterizedType): From e6e4bba3508c261319954e0c7f821dcfdf0e172e Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 19 Oct 2015 10:37:43 -0500 Subject: [PATCH 0147/2005] Adding tests for PYTHON-416 --- tests/unit/test_connection.py | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index 521fab6105..d9e4236d9b 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -22,13 +22,14 @@ import time from threading import Lock -from cassandra.cluster import Cluster +from cassandra.cluster import Cluster, Session from cassandra.connection import (Connection, HEADER_DIRECTION_TO_CLIENT, ProtocolError, locally_supported_compressions, ConnectionHeartbeat, _Frame) from cassandra.marshal import uint8_pack, uint32_pack, int32_pack from cassandra.protocol import (write_stringmultimap, write_int, write_string, SupportedMessage, ProtocolHandler) +import cassandra.cqlengine.connection class ConnectionTest(unittest.TestCase): @@ -413,3 +414,21 @@ def send_msg(msg, req_id, msg_callback): self.assertIsInstance(exc, Exception) self.assertEqual(exc.args, Exception('Connection heartbeat failure').args) holder.return_connection.assert_has_calls([call(connection)] * get_holders.call_count) + + +class ConnectionDefaultTest(unittest.TestCase): + """ + Test to ensure object mapper and base driver default cl's are the same. + + + @since 3.0.0 + @jira_ticket PYTHON-416 + @expected_result cl's matchy between object mapper and base driver. + + @test_category consistency + """ + def test_default_cl(self, *args): + base_driver_cl = Session.default_consistency_level + cqlengine_cl = cassandra.cqlengine.connection.default_consistency_level + self.assertEqual(base_driver_cl, cqlengine_cl) + From 62558919ac81709fb9128fe5f7729d72afd79b13 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 19 Oct 2015 10:51:35 -0500 Subject: [PATCH 0148/2005] Making local quorum default cl for clqengine --- cassandra/cqlengine/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index c6540d4986..03eb28bcc6 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -33,7 +33,7 @@ cluster = None session = None lazy_connect_args = None -default_consistency_level = ConsistencyLevel.ONE +default_consistency_level = ConsistencyLevel.LOCAL_QUORUM # Because type models may be registered before a connection is present, From 8e18d2774911bb33c62c408be49fa8d7cc47442d Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 19 Oct 2015 15:00:53 -0500 Subject: [PATCH 0149/2005] First pass at moving travis over to the new infrastructure --- .travis.yml | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index 08fb492a9f..8e1e587229 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,6 @@ language: python python: 2.7 +sudo: false env: - TOX_ENV=py26 CASS_VER=21 CASS_DRIVER_NO_CYTHON=1 - TOX_ENV=py26 CASS_VER=21 @@ -14,11 +15,14 @@ env: - TOX_ENV=pypy CASS_VER=21 CASS_DRIVER_NO_CYTHON=1 - TOX_ENV=pypy3 CASS_VER=21 CASS_DRIVER_NO_CYTHON=1 -before_install: - - sudo apt-get update -y - - sudo apt-get install -y build-essential python-dev - - sudo apt-get install -y pypy-dev - - sudo apt-get install -y libev4 libev-dev +addons: + apt: + packages: + - build-essential + - python-dev + - pypy-dev + - libev4 + - libev-dev install: - pip install tox From dd4df431b277a27c205766482f6bce6cea2fdad8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 20 Oct 2015 10:13:47 -0500 Subject: [PATCH 0150/2005] cqle: make default CL based on core default + test updates PYTHON-416 --- cassandra/cqlengine/connection.py | 14 +++++--------- tests/integration/cqlengine/__init__.py | 2 ++ .../integration/cqlengine/test_consistency.py | 13 ++++++++++++- tests/unit/test_connection.py | 19 ------------------- 4 files changed, 19 insertions(+), 29 deletions(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 03eb28bcc6..cc9e63d4b3 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -16,7 +16,6 @@ import logging import six -from cassandra import ConsistencyLevel from cassandra.cluster import Cluster, _NOT_SET, NoHostAvailable, UserTypeDoesNotExist from cassandra.query import SimpleStatement, Statement, dict_factory @@ -33,7 +32,6 @@ cluster = None session = None lazy_connect_args = None -default_consistency_level = ConsistencyLevel.LOCAL_QUORUM # Because type models may be registered before a connection is present, @@ -95,7 +93,7 @@ def set_session(s): def setup( hosts, default_keyspace, - consistency=ConsistencyLevel.ONE, + consistency=None, lazy_connect=False, retry_connect=False, **kwargs): @@ -104,12 +102,12 @@ def setup( :param list hosts: list of hosts, (``contact_points`` for :class:`cassandra.cluster.Cluster`) :param str default_keyspace: The default keyspace to use - :param int consistency: The global default :class:`~.ConsistencyLevel` + :param int consistency: The global default :class:`~.ConsistencyLevel` - default is the same as :attr:`.Session.default_consistency_level` :param bool lazy_connect: True if should not connect until first use :param bool retry_connect: True if we should retry to connect even if there was a connection failure initially :param \*\*kwargs: Pass-through keyword arguments for :class:`cassandra.cluster.Cluster` """ - global cluster, session, default_consistency_level, lazy_connect_args + global cluster, session, lazy_connect_args if 'username' in kwargs or 'password' in kwargs: raise CQLEngineException("Username & Password are now handled by using the native driver's auth_provider") @@ -117,7 +115,6 @@ def setup( from cassandra.cqlengine import models models.DEFAULT_KEYSPACE = default_keyspace - default_consistency_level = consistency if lazy_connect: kwargs['default_keyspace'] = default_keyspace kwargs['consistency'] = consistency @@ -139,6 +136,8 @@ def setup( kwargs['retry_connect'] = retry_connect lazy_connect_args = (hosts, kwargs) raise + if consistency is not None: + session.default_consistency_level = consistency session.row_factory = dict_factory _register_known_types(cluster) @@ -151,9 +150,6 @@ def execute(query, params=None, consistency_level=None, timeout=NOT_SET): if not session: raise CQLEngineException("It is required to setup() cqlengine before executing queries") - if consistency_level is None: - consistency_level = default_consistency_level - if isinstance(query, Statement): pass diff --git a/tests/integration/cqlengine/__init__.py b/tests/integration/cqlengine/__init__.py index c53884fcb2..cce715d7c6 100644 --- a/tests/integration/cqlengine/__init__.py +++ b/tests/integration/cqlengine/__init__.py @@ -14,6 +14,7 @@ import os import warnings +from cassandra import ConsistencyLevel from cassandra.cqlengine import connection from cassandra.cqlengine.management import create_keyspace_simple, CQLENG_ALLOW_SCHEMA_MANAGEMENT @@ -29,6 +30,7 @@ def setup_package(): keyspace = 'cqlengine_test' connection.setup(['127.0.0.1'], + consistency=ConsistencyLevel.ONE, protocol_version=PROTOCOL_VERSION, default_keyspace=keyspace) diff --git a/tests/integration/cqlengine/test_consistency.py b/tests/integration/cqlengine/test_consistency.py index 85b2f17289..174f620793 100644 --- a/tests/integration/cqlengine/test_consistency.py +++ b/tests/integration/cqlengine/test_consistency.py @@ -15,8 +15,10 @@ import mock from uuid import uuid4 -from cassandra import ConsistencyLevel as CL +from cassandra import ConsistencyLevel as CL, ConsistencyLevel +from cassandra.cluster import Session from cassandra.cqlengine import columns +from cassandra.cqlengine import connection from cassandra.cqlengine.management import sync_table, drop_table from cassandra.cqlengine.models import Model from cassandra.cqlengine.query import BatchQuery @@ -110,3 +112,12 @@ def test_delete(self): args = m.call_args self.assertEqual(CL.ALL, args[0][0].consistency_level) + + def test_default_consistency(self): + # verify global assumed default + self.assertEqual(Session.default_consistency_level, ConsistencyLevel.LOCAL_QUORUM) + + # verify that this session default is set according to connection.setup + # assumes tests/cqlengine/__init__ setup uses CL.ONE + session = connection.get_session() + self.assertEqual(session.default_consistency_level, ConsistencyLevel.ONE) diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index d9e4236d9b..8055a5e766 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -29,7 +29,6 @@ from cassandra.protocol import (write_stringmultimap, write_int, write_string, SupportedMessage, ProtocolHandler) -import cassandra.cqlengine.connection class ConnectionTest(unittest.TestCase): @@ -414,21 +413,3 @@ def send_msg(msg, req_id, msg_callback): self.assertIsInstance(exc, Exception) self.assertEqual(exc.args, Exception('Connection heartbeat failure').args) holder.return_connection.assert_has_calls([call(connection)] * get_holders.call_count) - - -class ConnectionDefaultTest(unittest.TestCase): - """ - Test to ensure object mapper and base driver default cl's are the same. - - - @since 3.0.0 - @jira_ticket PYTHON-416 - @expected_result cl's matchy between object mapper and base driver. - - @test_category consistency - """ - def test_default_cl(self, *args): - base_driver_cl = Session.default_consistency_level - cqlengine_cl = cassandra.cqlengine.connection.default_consistency_level - self.assertEqual(base_driver_cl, cqlengine_cl) - From 8ecb8d483a41d4ae8fc6470cce410f934861db3e Mon Sep 17 00:00:00 2001 From: Michael Penick Date: Tue, 20 Oct 2015 11:50:59 -0700 Subject: [PATCH 0151/2005] Added space back in condition --- cassandra/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 83307208a0..a5839d9cbd 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2909,7 +2909,7 @@ def _set_result(self, response): self, **response.results) else: results = getattr(response, 'results', None) - if results is not None and response.kind ==RESULT_KIND_ROWS: + if results is not None and response.kind == RESULT_KIND_ROWS: self._paging_state = response.paging_state results = self.row_factory(*results) self._set_final_result(results) From 2ade38b5b90aa923be7393c526af1f32426ffdb6 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 20 Oct 2015 14:44:50 -0500 Subject: [PATCH 0152/2005] Fixing a few broken integration tests --- tests/integration/long/test_loadbalancingpolicies.py | 4 ++-- tests/integration/standard/test_metadata.py | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index 40f0893944..9866392836 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -16,7 +16,7 @@ from cassandra import ConsistencyLevel, Unavailable, OperationTimedOut, ReadTimeout, ReadFailure, \ WriteTimeout, WriteFailure -from cassandra.cluster import Cluster, NoHostAvailable +from cassandra.cluster import Cluster, NoHostAvailable, Session from cassandra.concurrent import execute_concurrent_with_args from cassandra.metadata import murmur3 from cassandra.policies import (RoundRobinPolicy, DCAwareRoundRobinPolicy, @@ -426,7 +426,7 @@ def token_aware(self, keyspace, use_prepared=False): self._query(session, keyspace, use_prepared=use_prepared) self.fail() except Unavailable as e: - self.assertEqual(e.consistency, 1) + self.assertEqual(e.consistency, Session.default_consistency_level) self.assertEqual(e.required_replicas, 1) self.assertEqual(e.alive_replicas, 0) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index c8d726d171..98a9945f47 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -693,6 +693,9 @@ def test_multiple_indices(self): @test_category metadata """ + if CASS_SERVER_VERSION < (3, 0): + raise unittest.SkipTest("Materialized views require Cassandra 3.0+") + self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b map)".format(self.keyspace_name, self.function_table_name)) self.session.execute("CREATE INDEX index_1 ON {0}.{1}(b)".format(self.keyspace_name, self.function_table_name)) self.session.execute("CREATE INDEX index_2 ON {0}.{1}(keys(b))".format(self.keyspace_name, self.function_table_name)) From 6fd3ee34dac695f7bb357cb4ea7d987b74f5ed23 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 20 Oct 2015 16:23:12 -0500 Subject: [PATCH 0153/2005] test: remove print statement --- tests/integration/standard/test_types.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index eb9baefb55..640a1980bc 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -723,7 +723,6 @@ def verify_insert_select(ins_statement, sel_statement): execute_concurrent_with_args(s, ins_statement, ((f, f) for f in items)) for f in items: row = s.execute(sel_statement, (f,))[0] - print row if math.isnan(f): self.assertTrue(math.isnan(row.f)) self.assertTrue(math.isnan(row.d)) From 393f257ca4c3946906f6baf1daca431995a6b982 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 20 Oct 2015 16:23:25 -0500 Subject: [PATCH 0154/2005] test: fix materialized view test errors --- tests/integration/standard/test_query.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 23bad1d779..faffd33a92 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -657,7 +657,7 @@ def test_mv_filtering(self): day INT, score INT, PRIMARY KEY (user, game, year, month, day) - )""".format(self.ksname) + )""".format(self.keyspace_name) self.session.execute(create_table) @@ -665,32 +665,32 @@ def test_mv_filtering(self): SELECT * FROM {0}.scores WHERE game IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND day IS NOT NULL PRIMARY KEY (game, score, user, year, month, day) - WITH CLUSTERING ORDER BY (score DESC)""".format(self.ksname) + WITH CLUSTERING ORDER BY (score DESC)""".format(self.keyspace_name) create_mv_dailyhigh = """CREATE MATERIALIZED VIEW {0}.dailyhigh AS SELECT * FROM {0}.scores WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND day IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL PRIMARY KEY ((game, year, month, day), score, user) - WITH CLUSTERING ORDER BY (score DESC)""".format(self.ksname) + WITH CLUSTERING ORDER BY (score DESC)""".format(self.keyspace_name) create_mv_monthlyhigh = """CREATE MATERIALIZED VIEW {0}.monthlyhigh AS SELECT * FROM {0}.scores WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL PRIMARY KEY ((game, year, month), score, user, day) - WITH CLUSTERING ORDER BY (score DESC)""".format(self.ksname) + WITH CLUSTERING ORDER BY (score DESC)""".format(self.keyspace_name) create_mv_filtereduserhigh = """CREATE MATERIALIZED VIEW {0}.filtereduserhigh AS SELECT * FROM {0}.scores WHERE user in ('jbellis', 'pcmanus') AND game IS NOT NULL AND score IS NOT NULL AND year is NOT NULL AND day is not NULL and month IS NOT NULL PRIMARY KEY (game, score, user, year, month, day) - WITH CLUSTERING ORDER BY (score DESC)""".format(self.ksname) + WITH CLUSTERING ORDER BY (score DESC)""".format(self.keyspace_name) self.session.execute(create_mv_alltime) self.session.execute(create_mv_dailyhigh) self.session.execute(create_mv_monthlyhigh) self.session.execute(create_mv_filtereduserhigh) - prepared_insert = self.session.prepare("""INSERT INTO {0}.scores (user, game, year, month, day, score) VALUES (?, ?, ? ,? ,?, ?)""".format(self.ksname)) + prepared_insert = self.session.prepare("""INSERT INTO {0}.scores (user, game, year, month, day, score) VALUES (?, ?, ? ,? ,?, ?)""".format(self.keyspace_name)) bound = prepared_insert.bind(('pcmanus', 'Coup', 2015, 5, 1, 4000)) self.session.execute(bound) @@ -718,7 +718,7 @@ def test_mv_filtering(self): self.session.execute(bound) # Test simple statement and alltime high filtering - query_statement = SimpleStatement("SELECT * FROM {0}.alltimehigh WHERE game='Coup'".format(self.ksname), + query_statement = SimpleStatement("SELECT * FROM {0}.alltimehigh WHERE game='Coup'".format(self.keyspace_name), consistency_level=ConsistencyLevel.QUORUM) results = self.session.execute(query_statement) self.assertEquals(results[0].game, 'Coup') @@ -729,7 +729,7 @@ def test_mv_filtering(self): self.assertEquals(results[0].user, "pcmanus") # Test prepared statement and daily high filtering - prepared_query = self.session.prepare("SELECT * FROM {0}.dailyhigh WHERE game=? AND year=? AND month=? and day=?".format(self.ksname).format(self.ksname)) + prepared_query = self.session.prepare("SELECT * FROM {0}.dailyhigh WHERE game=? AND year=? AND month=? and day=?".format(self.keyspace_name)) bound_query = prepared_query.bind(("Coup", 2015, 6, 2)) results = self.session.execute(bound_query) self.assertEquals(results[0].game, 'Coup') @@ -747,7 +747,7 @@ def test_mv_filtering(self): self.assertEquals(results[1].user, "tjake") # Test montly high range queries - prepared_query = self.session.prepare("SELECT * FROM {0}.monthlyhigh WHERE game=? AND year=? AND month=? and score >= ? and score <= ?".format(self.ksname).format(self.ksname)) + prepared_query = self.session.prepare("SELECT * FROM {0}.monthlyhigh WHERE game=? AND year=? AND month=? and score >= ? and score <= ?".format(self.keyspace_name)) bound_query = prepared_query.bind(("Coup", 2015, 6, 2500, 3500)) results = self.session.execute(bound_query) self.assertEquals(results[0].game, 'Coup') @@ -772,7 +772,7 @@ def test_mv_filtering(self): self.assertEquals(results[2].user, "iamaleksey") # Test filtered user high scores - query_statement = SimpleStatement("SELECT * FROM {0}.filtereduserhigh WHERE game='Chess'".format(self.ksname), + query_statement = SimpleStatement("SELECT * FROM {0}.filtereduserhigh WHERE game='Chess'".format(self.keyspace_name), consistency_level=ConsistencyLevel.QUORUM) results = self.session.execute(query_statement) self.assertEquals(results[0].game, 'Chess') From 8a52b876856fdca38a83f0a150457c8181131611 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 21 Oct 2015 13:29:47 -0500 Subject: [PATCH 0155/2005] Setting up default CLs for both TokenAware tests --- tests/integration/long/test_loadbalancingpolicies.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index 9866392836..155115d1b1 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -72,7 +72,7 @@ def _query(self, session, keyspace, count=12, query_string = 'SELECT * FROM %s.cf WHERE k = ?' % keyspace if not self.prepared or self.prepared.query_string != query_string: self.prepared = session.prepare(query_string) - + self.prepared.consistency_level=consistency_level for i in range(count): tries = 0 while True: @@ -426,7 +426,7 @@ def token_aware(self, keyspace, use_prepared=False): self._query(session, keyspace, use_prepared=use_prepared) self.fail() except Unavailable as e: - self.assertEqual(e.consistency, Session.default_consistency_level) + self.assertEqual(e.consistency, 1) self.assertEqual(e.required_replicas, 1) self.assertEqual(e.alive_replicas, 0) From 915dc94b7ce9668795f418c81c3f5851da88b76c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 21 Oct 2015 14:19:38 -0500 Subject: [PATCH 0156/2005] setup: remove cython extensions and build io for proper docs --- setup.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/setup.py b/setup.py index 5b8772fa7e..803f7acc6a 100644 --- a/setup.py +++ b/setup.py @@ -104,6 +104,23 @@ def run(self): pass if has_subprocess: + # Prevent run with in-place extensions because cython-generated objects do not carry docstrings + # http://docs.cython.org/src/userguide/special_methods.html#docstrings + import glob + for f in glob.glob("cassandra/*.so"): + print("Removing '%s' to allow docs to run on pure python modules." %(f,)) + os.unlink(f) + + # Build murmur and io extensions to make import and docstrings work + try: + output = subprocess.check_output( + ["python", "setup.py", "build_ext", "--inplace", "--force", "--no-murmur3", "--no-cython"], + stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as exc: + raise RuntimeError("Documentation step '%s' failed: %s: %s" % ("build_ext", exc, exc.output)) + else: + print(output) + try: output = subprocess.check_output( ["sphinx-build", "-b", mode, "docs", path], From db58ba704273240484b2d4cd8f124faf4a3324ff Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 21 Oct 2015 14:19:43 -0500 Subject: [PATCH 0157/2005] doc: mention gevent/eventlet patching assumptions --- cassandra/cluster.py | 11 +++++------ cassandra/io/eventletreactor.py | 2 ++ cassandra/io/geventreactor.py | 3 ++- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 8a4b2ce59f..890930cf50 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -407,18 +407,17 @@ def auth_provider(self, value): * :class:`cassandra.io.asyncorereactor.AsyncoreConnection` * :class:`cassandra.io.libevreactor.LibevConnection` - * :class:`cassandra.io.geventreactor.GeventConnection` (requires monkey-patching) + * :class:`cassandra.io.eventletreactor.EventletConnection` (requires monkey-patching - see doc for details) + * :class:`cassandra.io.geventreactor.GeventConnection` (requires monkey-patching - see doc for details) * :class:`cassandra.io.twistedreactor.TwistedConnection` By default, ``AsyncoreConnection`` will be used, which uses - the ``asyncore`` module in the Python standard library. The - performance is slightly worse than with ``libev``, but it is - supported on a wider range of systems. + the ``asyncore`` module in the Python standard library. If ``libev`` is installed, ``LibevConnection`` will be used instead. - If gevent monkey-patching of the standard library is detected, - GeventConnection will be used automatically. + If ``gevent`` or ``eventlet`` monkey-patching is detected, the corresponding + connection class will be used automatically. """ control_connection_timeout = 2.0 diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index aa90cc9abd..5706f9cb25 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -44,6 +44,8 @@ def is_timeout(err): class EventletConnection(Connection): """ An implementation of :class:`.Connection` that utilizes ``eventlet``. + + This implementation assumes all eventlet monkey patching is active. It is not tested with partial patching. """ _read_watcher = None diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index a063df1b70..1dd013aa7a 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -17,7 +17,6 @@ from gevent import select, socket import gevent.ssl -from collections import defaultdict from functools import partial import logging import os @@ -44,6 +43,8 @@ def is_timeout(err): class GeventConnection(Connection): """ An implementation of :class:`.Connection` that utilizes ``gevent``. + + This implementation assumes all gevent monkey patching is active. It is not tested with partial patching. """ _read_watcher = None From 26f2fafd703df46fe670b167d75b102ee4c46e40 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 22 Oct 2015 09:27:32 -0500 Subject: [PATCH 0158/2005] test: relax timeout commparison Was occasionally failing in Travis CI because of platform slowness. --- tests/unit/io/utils.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index 26307ffaf9..de6a43fe17 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -15,7 +15,7 @@ import time -class TimerCallback(): +class TimerCallback(object): invoked = False created_time = 0 @@ -39,7 +39,7 @@ def get_wait_time(self): return elapsed_time def wait_match_excepted(self): - if self.expected_wait-.01 <= self.get_wait_time() <= self.expected_wait+.01: + if self.expected_wait - .01 <= self.get_wait_time() <= self.expected_wait + .01: return True return False @@ -54,16 +54,16 @@ def get_timeout(gross_time, start, end, precision, split_range): :param split_range: generate values from both ends :return: a timeout value to use """ - if(split_range): - top_num = float(end)/precision - bottom_num = float(start)/precision + if split_range: + top_num = float(end) / precision + bottom_num = float(start) / precision if gross_time % 2 == 0: - timeout = top_num - float(gross_time)/precision + timeout = top_num - float(gross_time) / precision else: - timeout = bottom_num + float(gross_time)/precision + timeout = bottom_num + float(gross_time) / precision else: - timeout = float(gross_time)/precision + timeout = float(gross_time) / precision return timeout @@ -102,6 +102,4 @@ def submit_and_wait_for_completion(unit_test, connection, start, end, increment, # ensure they are all called back in a timely fashion for callback in completed_callbacks: - unit_test.assertAlmostEqual(callback.expected_wait, callback.get_wait_time(), delta=.1) - - + unit_test.assertAlmostEqual(callback.expected_wait, callback.get_wait_time(), delta=.15) From 13f11a1c7c027fcd9d6f9456f568f51007edcb57 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 22 Oct 2015 09:31:58 -0500 Subject: [PATCH 0159/2005] setup: correct inaccurate comment --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 803f7acc6a..c2845e85c9 100644 --- a/setup.py +++ b/setup.py @@ -111,7 +111,7 @@ def run(self): print("Removing '%s' to allow docs to run on pure python modules." %(f,)) os.unlink(f) - # Build murmur and io extensions to make import and docstrings work + # Build io extension to make import and docstrings work try: output = subprocess.check_output( ["python", "setup.py", "build_ext", "--inplace", "--force", "--no-murmur3", "--no-cython"], From 27f98d93e9693abdf9cf50622c11468e1e6275b1 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 22 Oct 2015 11:28:57 -0500 Subject: [PATCH 0160/2005] Updating test documentation for python-282 --- tests/integration/standard/test_types.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index 640a1980bc..a2d22854ea 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -708,7 +708,13 @@ def test_can_read_composite_type(self): def test_special_float_cql_encoding(self): """ - PYTHON-282 + Test to insure that Infinity -Infinity and NaN are supported by the python driver. + + @since 3.0.0 + @jira_ticket PYTHON-282 + @expected_result nan, inf and -inf can be inserted and selected correctly. + + @test_category data_types """ s = self.session From 06c8318b4594d0c72120b76c32d2fa922a8bf096 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 22 Oct 2015 11:36:23 -0500 Subject: [PATCH 0161/2005] ResponseFuture.get_query_trace_ids for getting ids without fetching --- cassandra/cluster.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 890930cf50..ca7572de88 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3102,6 +3102,12 @@ def result(self): else: raise self._final_exception + def get_query_trace_ids(self): + """ + Returns the trace session ids for this future, if tracing was enabled (does not fetch trace data). + """ + return [trace.trace_id for trace in self._query_traces] + def get_query_trace(self, max_wait=None): """ Fetches and returns the query trace of the last response, or `None` if tracing was From f43fd8d41cc037a8c370d51c08c2e7a0157ef499 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 22 Oct 2015 12:01:06 -0500 Subject: [PATCH 0162/2005] Add option to populate parital QueryTrace --- cassandra/query.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/cassandra/query.py b/cassandra/query.py index 95e59b7288..cb54549cf5 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -858,7 +858,7 @@ def __init__(self, trace_id, session): self.trace_id = trace_id self._session = session - def populate(self, max_wait=2.0): + def populate(self, max_wait=2.0, wait_for_complete=True): """ Retrieves the actual tracing details from Cassandra and populates the attributes of this instance. Because tracing details are stored @@ -866,6 +866,9 @@ def populate(self, max_wait=2.0): detail fetch. If the trace is still not available after `max_wait` seconds, :exc:`.TraceUnavailable` will be raised; if `max_wait` is :const:`None`, this will retry forever. + + `wait_for_complete=False` bypasses the wait for duration to be populated. + This can be used to query events from partial sessions. """ attempt = 0 start = time.time() @@ -879,15 +882,19 @@ def populate(self, max_wait=2.0): session_results = self._execute( self._SELECT_SESSIONS_FORMAT, (self.trace_id,), time_spent, max_wait) - if not session_results or session_results[0].duration is None: + is_complete = session_results[0].duration is not None + if not session_results or (wait_for_complete and not is_complete): time.sleep(self._BASE_RETRY_SLEEP * (2 ** attempt)) attempt += 1 continue - log.debug("Fetched trace info for trace ID: %s", self.trace_id) + if is_complete: + log.debug("Fetched trace info for trace ID: %s", self.trace_id) + else: + log.debug("Fetching parital trace info for trace ID: %s", self.trace_id) session_row = session_results[0] self.request_type = session_row.request - self.duration = timedelta(microseconds=session_row.duration) + self.duration = timedelta(microseconds=session_row.duration) if is_complete else None self.started_at = session_row.started_at self.coordinator = session_row.coordinator self.parameters = session_row.parameters From 4bf233efbec8e856cfccac242369a6c3d012d427 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 22 Oct 2015 15:22:08 -0500 Subject: [PATCH 0163/2005] typo in docstring --- cassandra/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index ca7572de88..ffc563e194 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3263,7 +3263,7 @@ class ResultSet(object): >>> from cassandra.query import SimpleStatement >>> statement = SimpleStatement("SELECT * FROM users", fetch_size=10) >>> for user_row in session.execute(statement): - ... process_user(user_rowt + ... process_user(user_row) Whenever there are no more rows in the current page, the next page will be fetched transparently. However, note that it *is* possible for From 95c6008acf4ee2460d5ab1a357fc594d8ef9d69d Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 22 Oct 2015 17:06:51 -0500 Subject: [PATCH 0164/2005] Adding skip to test for auto paging and protocol v1 --- tests/integration/__init__.py | 2 ++ tests/integration/standard/test_cython_protocol_handlers.py | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 4509cba168..7cdb7143f1 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -126,6 +126,8 @@ def _tuple_version(version_string): PROTOCOL_VERSION = int(os.getenv('PROTOCOL_VERSION', default_protocol_version)) +notprotocolv1 = unittest.skipUnless(PROTOCOL_VERSION > 1, 'Protocol v1 not supported') + def get_cluster(): return CCM_CLUSTER diff --git a/tests/integration/standard/test_cython_protocol_handlers.py b/tests/integration/standard/test_cython_protocol_handlers.py index 888f73fed6..a13c5e21bb 100644 --- a/tests/integration/standard/test_cython_protocol_handlers.py +++ b/tests/integration/standard/test_cython_protocol_handlers.py @@ -11,7 +11,7 @@ from cassandra.cluster import Cluster from cassandra.protocol import ProtocolHandler, LazyProtocolHandler, NumpyProtocolHandler -from tests.integration import use_singledc, PROTOCOL_VERSION +from tests.integration import use_singledc, PROTOCOL_VERSION, notprotocolv1 from tests.integration.datatype_utils import update_datatypes from tests.integration.standard.utils import ( create_table_with_all_types, get_all_primitive_params, get_primitive_datatypes) @@ -55,6 +55,7 @@ def test_cython_lazy_parser(self): """ verify_iterator_data(self.assertEqual, get_data(LazyProtocolHandler)) + @notprotocolv1 @numpytest def test_cython_lazy_results_paged(self): """ From 80316f81b66435f6d3b3d89d2c16869f6a3f5209 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 23 Oct 2015 17:01:36 -0500 Subject: [PATCH 0165/2005] Skipping non applicable tests --- tests/integration/standard/test_cython_protocol_handlers.py | 1 + tests/integration/standard/test_types.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_cython_protocol_handlers.py b/tests/integration/standard/test_cython_protocol_handlers.py index a13c5e21bb..f4e0a5a619 100644 --- a/tests/integration/standard/test_cython_protocol_handlers.py +++ b/tests/integration/standard/test_cython_protocol_handlers.py @@ -77,6 +77,7 @@ def test_cython_lazy_results_paged(self): cluster.shutdown() + @notprotocolv1 @numpytest def test_numpy_parser(self): """ diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index a2d22854ea..73fbc89507 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -28,7 +28,7 @@ from cassandra.query import dict_factory, ordered_dict_factory from cassandra.util import sortedset -from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION, execute_until_pass +from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION, execute_until_pass, notprotocolv1 from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, COLLECTION_TYPES, \ get_sample, get_collection_sample @@ -706,6 +706,7 @@ def test_can_read_composite_type(self): self.assertEqual(0, result.a) self.assertEqual(('abc',), result.b) + @notprotocolv1 def test_special_float_cql_encoding(self): """ Test to insure that Infinity -Infinity and NaN are supported by the python driver. From 6d17e2ca91cea7760de0dcd06e7ec9633a61c0ac Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 26 Oct 2015 10:41:50 -0500 Subject: [PATCH 0166/2005] exclude dense surrogate columns from metadata https://datastax-oss.atlassian.net/browse/JAVA-873?focusedCommentId=23630&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-23630 --- cassandra/cqltypes.py | 2 ++ cassandra/metadata.py | 18 +++++++++++------- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index fcc55d69b0..3cb8817294 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -53,6 +53,8 @@ apache_cassandra_type_prefix = 'org.apache.cassandra.db.marshal.' +cassandra_empty_type = 'org.apache.cassandra.db.marshal.EmptyType' + log = logging.getLogger(__name__) if six.PY3: diff --git a/cassandra/metadata.py b/cassandra/metadata.py index d9d765f266..f0b94820f2 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2153,12 +2153,13 @@ def _build_table_metadata(self, row, col_rows=None, trigger_rows=None, index_row if flags: compact_static = False table_meta.is_compact_storage = 'dense' in flags or 'super' in flags or 'compound' not in flags + is_dense = 'dense' in flags else: - # example: create table t (a int, b int, c int, primary key((a,b))) with compact storage compact_static = True table_meta.is_compact_storage = True + is_dense = False - self._build_table_columns(table_meta, col_rows, compact_static) + self._build_table_columns(table_meta, col_rows, compact_static, is_dense) for trigger_row in trigger_rows: trigger_meta = self._build_trigger_metadata(table_meta, trigger_row) @@ -2178,7 +2179,7 @@ def _build_table_options(self, row): """ Setup the mostly-non-schema table options, like caching settings """ return dict((o, row.get(o)) for o in self.recognized_table_options if o in row) - def _build_table_columns(self, meta, col_rows, compact_static=False): + def _build_table_columns(self, meta, col_rows, compact_static=False, is_dense=False): # partition key partition_rows = [r for r in col_rows if r.get('kind', None) == "partition_key"] @@ -2205,12 +2206,15 @@ def _build_table_columns(self, meta, col_rows, compact_static=False): for col_row in (r for r in col_rows if r.get('kind', None) not in ('partition_key', 'clustering_key')): column_meta = self._build_column_metadata(meta, col_row) - if not compact_static or column_meta.is_static: + if is_dense and column_meta.data_type.cassname.endswith(types.cassandra_empty_type): + continue + if compact_static and not column_meta.is_static: # for compact static tables, we omit the clustering key and value, and only add the logical columns. # They are marked not static so that it generates appropriate CQL - if compact_static: - column_meta.is_static = False - meta.columns[column_meta.name] = column_meta + continue + if compact_static: + column_meta.is_static = False + meta.columns[column_meta.name] = column_meta def _build_view_metadata(self, row, col_rows=None): keyspace_name = row["keyspace_name"] From 56508430985f8ce4b2ff0640e73ffc5f0aa4d6b1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 26 Oct 2015 10:48:07 -0500 Subject: [PATCH 0167/2005] exclude unnamed dense surrogate columns from metadata https://datastax-oss.atlassian.net/browse/JAVA-873 --- cassandra/metadata.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index f0b94820f2..8577d8f8dd 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1888,11 +1888,12 @@ def _build_table_metadata(self, row, col_rows=None, trigger_rows=None): # other normal columns for col_row in col_rows: column_meta = self._build_column_metadata(table_meta, col_row) - table_meta.columns[column_meta.name] = column_meta - index_meta = self._build_index_metadata(column_meta, col_row) - column_meta.index = index_meta - if index_meta: - table_meta.indexes[index_meta.name] = index_meta + if column_meta.name: + table_meta.columns[column_meta.name] = column_meta + index_meta = self._build_index_metadata(column_meta, col_row) + column_meta.index = index_meta + if index_meta: + table_meta.indexes[index_meta.name] = index_meta for trigger_row in trigger_rows: trigger_meta = self._build_trigger_metadata(table_meta, trigger_row) From 046cf3d15723440837ff825cd25ada8665711699 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 26 Oct 2015 13:48:59 -0500 Subject: [PATCH 0168/2005] Attach column names to ResultSet PYTHON-439 --- cassandra/cluster.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index ffc563e194..d6360dbfdd 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2633,6 +2633,7 @@ class ResponseFuture(object): _req_id = None _final_result = _NOT_SET + _col_names = None _final_exception = None _query_traces = None _callbacks = None @@ -2856,6 +2857,7 @@ def _set_result(self, response): results = getattr(response, 'results', None) if results is not None and response.kind == RESULT_KIND_ROWS: self._paging_state = response.paging_state + self._col_names = results[0] results = self.row_factory(*results) self._set_final_result(results) elif isinstance(response, ErrorMessage): @@ -3273,6 +3275,7 @@ class ResultSet(object): def __init__(self, response_future, initial_response): self.response_future = response_future + self.column_names = response_future._col_names self._set_current_rows(initial_response) self._page_iter = None self._list_mode = False From 6188d7d7c74c8b4270d16c195e9d244d57602bd7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 26 Oct 2015 15:36:08 -0500 Subject: [PATCH 0169/2005] Fix unit tests following ResultSet update setting col names PYTHON-439 --- tests/unit/test_concurrent.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/unit/test_concurrent.py b/tests/unit/test_concurrent.py index 3c2734e415..d120cd4a6a 100644 --- a/tests/unit/test_concurrent.py +++ b/tests/unit/test_concurrent.py @@ -33,6 +33,7 @@ class MockResponseResponseFuture(): """ _query_trace = None + _col_names = None # a list pending callbacks, these will be prioritized in reverse or normal orderd pending_callbacks = PriorityQueue() From 21ccd5c1e7bc43032cf9fcba74bb5ba7991d8f14 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 27 Oct 2015 16:02:09 -0500 Subject: [PATCH 0170/2005] Handling async materialized view metadata updates in integration tests --- tests/integration/standard/test_metadata.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index d1439b795b..268a9764fd 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -2256,7 +2256,12 @@ def test_base_table_type_alter_mv(self): score_column = self.cluster.metadata.keyspaces[self.keyspace_name].tables['scores'].columns['score'] self.assertEquals(score_column.typestring, 'blob') - score_mv_column = self.cluster.metadata.keyspaces[self.keyspace_name].views["monthlyhigh"].columns['score'] + for i in range(10): + score_mv_column = self.cluster.metadata.keyspaces[self.keyspace_name].views["monthlyhigh"].columns['score'] + if("blob" == score_mv_column.typestring): + break + time.sleep(.2) + self.assertEquals(score_mv_column.typestring, 'blob') def test_metadata_with_quoted_identifiers(self): From 17d50e80bfd663b7b1c315054dc442084213d89a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 27 Oct 2015 17:11:36 -0500 Subject: [PATCH 0171/2005] Schema meta types as CQL (WIP) First pass at CASSANDRA-10365/PYTHON-422 Does not handle deserialization of system_schema.aggregates.initcond No testing outside of metadata integration. --- cassandra/__init__.py | 16 +- cassandra/cqltypes.py | 12 ++ cassandra/metadata.py | 185 ++++++++++++-------- tests/integration/standard/test_metadata.py | 93 +++++----- 4 files changed, 168 insertions(+), 138 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index d884b085c9..2bcc97d70f 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -142,9 +142,9 @@ class SchemaTargetType(object): class SignatureDescriptor(object): - def __init__(self, name, type_signature): + def __init__(self, name, argument_types): self.name = name - self.type_signature = type_signature + self.argument_types = argument_types @property def signature(self): @@ -153,14 +153,14 @@ def signature(self): can be used to uniquely identify overloaded function names within a keyspace """ - return self.format_signature(self.name, self.type_signature) + return self.format_signature(self.name, self.argument_types) @staticmethod - def format_signature(name, type_signature): - return "%s(%s)" % (name, ','.join(t for t in type_signature)) + def format_signature(name, argument_types): + return "%s(%s)" % (name, ','.join(t for t in argument_types)) def __repr__(self): - return "%s(%s, %s)" % (self.__class__.__name__, self.name, self.type_signature) + return "%s(%s, %s)" % (self.__class__.__name__, self.name, self.argument_types) class UserFunctionDescriptor(SignatureDescriptor): @@ -173,7 +173,7 @@ class UserFunctionDescriptor(SignatureDescriptor): name of the function """ - type_signature = None + argument_types = None """ Ordered list of CQL argument type names comprising the type signature """ @@ -189,7 +189,7 @@ class UserAggregateDescriptor(SignatureDescriptor): name of the aggregate """ - type_signature = None + argument_types = None """ Ordered list of CQL argument type names comprising the type signature """ diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 3cb8817294..6d2e330995 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -54,6 +54,7 @@ apache_cassandra_type_prefix = 'org.apache.cassandra.db.marshal.' cassandra_empty_type = 'org.apache.cassandra.db.marshal.EmptyType' +cql_empty_type = 'empty' log = logging.getLogger(__name__) @@ -78,6 +79,17 @@ def trim_if_startswith(s, prefix): _casstypes = {} +cql_type_scanner = re.Scanner(( + ('frozen', None), + (r'[a-zA-Z0-9_]+', lambda s, t: t), + (r'[\s,<>]', None), +)) + + +def cql_types_from_string(cql_type): + return cql_type_scanner.scan(cql_type)[0] + + class CassandraTypeType(type): """ The CassandraType objects in this module will normally be used directly, diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 8577d8f8dd..00181f411c 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -35,7 +35,7 @@ from cassandra.encoder import Encoder from cassandra.marshal import varint_unpack from cassandra.protocol import QueryMessage -from cassandra.query import dict_factory +from cassandra.query import dict_factory, bind_params from cassandra.util import OrderedDict log = logging.getLogger(__name__) @@ -659,18 +659,19 @@ def as_cql_query(self): def user_type_strings(self): user_type_strings = [] - types = self.user_types.copy() - keys = sorted(types.keys()) + user_types = self.user_types.copy() + keys = sorted(user_types.keys()) for k in keys: - if k in types: - self.resolve_user_types(k, types, user_type_strings) + if k in user_types: + self.resolve_user_types(k, user_types, user_type_strings) return user_type_strings - def resolve_user_types(self, key, types, user_type_strings): - user_type = types.pop(key) - for field_type in user_type.field_types: - if field_type.cassname == 'UserType' and field_type.typename in types: - self.resolve_user_types(field_type.typename, types, user_type_strings) + def resolve_user_types(self, key, user_types, user_type_strings): + user_type = user_types.pop(key) + for type_name in user_type.field_types: + for sub_type in types.cql_types_from_string(type_name): + if sub_type in user_types: + self.resolve_user_types(sub_type, user_types, user_type_strings) user_type_strings.append(user_type.export_as_string()) def _add_table_metadata(self, table_metadata): @@ -773,7 +774,7 @@ def as_cql_query(self, formatted=False): fields = [] for field_name, field_type in zip(self.field_names, self.field_types): - fields.append("%s %s" % (protect_name(field_name), field_type.cql_parameterized_type())) + fields.append("%s %s" % (protect_name(field_name), field_type)) ret += field_join.join("%s%s" % (padding, field) for field in fields) ret += "\n)" if formatted else ")" @@ -802,7 +803,7 @@ class Aggregate(object): The name of this aggregate """ - type_signature = None + argument_types = None """ An ordered list of the types for each argument to the aggregate """ @@ -832,11 +833,11 @@ class Aggregate(object): Type of the aggregate state """ - def __init__(self, keyspace, name, type_signature, state_func, + def __init__(self, keyspace, name, argument_types, state_func, state_type, final_func, initial_condition, return_type): self.keyspace = keyspace self.name = name - self.type_signature = type_signature + self.argument_types = argument_types self.state_func = state_func self.state_type = state_type self.final_func = final_func @@ -852,9 +853,9 @@ def as_cql_query(self, formatted=False): sep = '\n ' if formatted else ' ' keyspace = protect_name(self.keyspace) name = protect_name(self.name) - type_list = ', '.join(self.type_signature) + type_list = ', '.join(self.argument_types) state_func = protect_name(self.state_func) - state_type = self.state_type.cql_parameterized_type() + state_type = self.state_type ret = "CREATE AGGREGATE %(keyspace)s.%(name)s(%(type_list)s)%(sep)s" \ "SFUNC %(state_func)s%(sep)s" \ @@ -871,7 +872,7 @@ def export_as_string(self): @property def signature(self): - return SignatureDescriptor.format_signature(self.name, self.type_signature) + return SignatureDescriptor.format_signature(self.name, self.argument_types) class Function(object): @@ -893,7 +894,7 @@ class Function(object): The name of this function """ - type_signature = None + argument_types = None """ An ordered list of the types for each argument to the function """ @@ -924,12 +925,12 @@ class Function(object): (convenience function to avoid handling nulls explicitly if the result will just be null) """ - def __init__(self, keyspace, name, type_signature, argument_names, + def __init__(self, keyspace, name, argument_types, argument_names, return_type, language, body, called_on_null_input): self.keyspace = keyspace self.name = name - self.type_signature = type_signature - # type_signature (frozen>) will always be a list + self.argument_types = argument_types + # argument_types (frozen>) will always be a list # argument_name is not frozen in C* < 3.0 and may return None self.argument_names = argument_names or [] self.return_type = return_type @@ -947,8 +948,8 @@ def as_cql_query(self, formatted=False): keyspace = protect_name(self.keyspace) name = protect_name(self.name) arg_list = ', '.join(["%s %s" % (protect_name(n), t) - for n, t in zip(self.argument_names, self.type_signature)]) - typ = self.return_type.cql_parameterized_type() + for n, t in zip(self.argument_names, self.argument_types)]) + typ = self.return_type lang = self.language body = self.body on_null = "CALLED" if self.called_on_null_input else "RETURNS NULL" @@ -964,7 +965,7 @@ def export_as_string(self): @property def signature(self): - return SignatureDescriptor.format_signature(self.name, self.type_signature) + return SignatureDescriptor.format_signature(self.name, self.argument_types) class TableMetadata(object): @@ -1128,7 +1129,7 @@ def as_cql_query(self, formatted=False): columns = [] for col in self.columns.values(): - columns.append("%s %s%s" % (protect_name(col.name), col.typestring, ' static' if col.is_static else '')) + columns.append("%s %s%s" % (protect_name(col.name), col.cql_type, ' static' if col.is_static else '')) if len(self.partition_key) == 1 and not self.clustering_key: columns[0] += " PRIMARY KEY" @@ -1261,10 +1262,9 @@ class ColumnMetadata(object): name = None """ The string name of this column. """ - data_type = None + cql_type = None """ - The data type for the column in the form of an instance of one of - the type classes in :mod:`cassandra.cqltypes`. + The CQL type for the column. """ # TODO: probably remove this in favor of the table level mapping @@ -1285,27 +1285,17 @@ class ColumnMetadata(object): If this column is reversed (DESC) as in clustering order """ - def __init__(self, table_metadata, column_name, data_type, index_metadata=None, is_static=False, is_reversed=False): + _cass_type = None + + def __init__(self, table_metadata, column_name, cql_type, is_static=False, is_reversed=False): self.table = table_metadata self.name = column_name - self.data_type = data_type - self.index = index_metadata + self.cql_type = cql_type self.is_static = is_static self.is_reversed = is_reversed - @property - def typestring(self): - """ - A string representation of the type for this column, such as "varchar" - or "map". - """ - if issubclass(self.data_type, types.ReversedType): - return self.data_type.subtypes[0].cql_parameterized_type() - else: - return self.data_type.cql_parameterized_type() - def __str__(self): - return "%s %s" % (self.name, self.data_type) + return "%s %s" % (self.name, self.cql_type) class IndexMetadata(object): @@ -1596,6 +1586,8 @@ class SchemaParserV22(_SchemaParser): _table_name_col = 'columnfamily_name' + _function_agg_arument_type_col = 'signature' + recognized_table_options = ( "comment", "read_repair_chance", @@ -1620,6 +1612,8 @@ class SchemaParserV22(_SchemaParser): "compression", "default_time_to_live") + _encoder = Encoder() + def __init__(self, connection, timeout): super(SchemaParserV22, self).__init__(connection, timeout) self.keyspaces_result = [] @@ -1667,7 +1661,7 @@ def get_all_keyspaces(self): def get_table(self, keyspaces, keyspace, table): cl = ConsistencyLevel.ONE - where_clause = " WHERE keyspace_name = '%s' AND %s = '%s'" % (keyspace, self._table_name_col, table) + where_clause = bind_params(" WHERE keyspace_name = %%s AND %s = %%s" % (self._table_name_col,), (keyspace, table), self._encoder) cf_query = QueryMessage(query=self._SELECT_COLUMN_FAMILIES + where_clause, consistency_level=cl) col_query = QueryMessage(query=self._SELECT_COLUMNS + where_clause, consistency_level=cl) triggers_query = QueryMessage(query=self._SELECT_TRIGGERS + where_clause, consistency_level=cl) @@ -1686,21 +1680,24 @@ def get_table(self, keyspaces, keyspace, table): return self._build_table_metadata(table_result[0], col_result, triggers_result) def get_type(self, keyspaces, keyspace, type): - where_clause = " WHERE keyspace_name = '%s' AND type_name = '%s'" % (keyspace, type) + where_clause = bind_params(" WHERE keyspace_name = %s AND type_name = %s", (keyspace, type), self._encoder) return self._query_build_row(self._SELECT_TYPES + where_clause, self._build_user_type) def get_function(self, keyspaces, keyspace, function): - where_clause = " WHERE keyspace_name = '%s' AND function_name = '%s' AND signature = [%s]" \ - % (keyspace, function.name, ','.join("'%s'" % t for t in function.type_signature)) + where_clause = bind_params(" WHERE keyspace_name = %%s AND function_name = %%s AND %s = %%s" % (self._function_agg_arument_type_col,), + (keyspace, function.name, function.argument_types), + self._encoder) return self._query_build_row(self._SELECT_FUNCTIONS + where_clause, self._build_function) def get_aggregate(self, keyspaces, keyspace, aggregate): - where_clause = " WHERE keyspace_name = '%s' AND aggregate_name = '%s' AND signature = [%s]" \ - % (keyspace, aggregate.name, ','.join("'%s'" % t for t in aggregate.type_signature)) + where_clause = bind_params(" WHERE keyspace_name = %%s AND aggregate_name = %%s AND %s = %%s" % (self._function_agg_arument_type_col,), + (keyspace, aggregate.name, aggregate.argument_types), + self._encoder) + return self._query_build_row(self._SELECT_AGGREGATES + where_clause, self._build_aggregate) def get_keyspace(self, keyspaces, keyspace): - where_clause = " WHERE keyspace_name = '%s'" % (keyspace,) + where_clause = bind_params(" WHERE keyspace_name = %s", (keyspace,), self._encoder) return self._query_build_row(self._SELECT_KEYSPACES + where_clause, self._build_keyspace_metadata) @classmethod @@ -1722,29 +1719,36 @@ def _build_keyspace_metadata_internal(row): strategy_options = json.loads(row["strategy_options"]) return KeyspaceMetadata(name, durable_writes, strategy_class, strategy_options) - @staticmethod - def _build_user_type(usertype_row): - type_classes = list(map(types.lookup_casstype, usertype_row['field_types'])) + @classmethod + def _build_user_type(cls, usertype_row): + field_types = list(map(cls._schema_type_to_cql, usertype_row['field_types'])) return UserType(usertype_row['keyspace_name'], usertype_row['type_name'], - usertype_row['field_names'], type_classes) + usertype_row['field_names'], field_types) - @staticmethod - def _build_function(function_row): - return_type = types.lookup_casstype(function_row['return_type']) + @classmethod + def _build_function(cls, function_row): + return_type = cls._schema_type_to_cql(function_row['return_type']) return Function(function_row['keyspace_name'], function_row['function_name'], - function_row['signature'], function_row['argument_names'], + function_row[cls._function_agg_arument_type_col], function_row['argument_names'], return_type, function_row['language'], function_row['body'], function_row['called_on_null_input']) - @staticmethod - def _build_aggregate(aggregate_row): - state_type = types.lookup_casstype(aggregate_row['state_type']) + @classmethod + def _build_aggregate(cls, aggregate_row): + state_data_type = aggregate_row['state_type'] initial_condition = aggregate_row['initcond'] - if initial_condition is not None: - initial_condition = state_type.deserialize(initial_condition, 3) - return_type = types.lookup_casstype(aggregate_row['return_type']) + try: + # TODO: asking if initcond can be the CQL literal in 3.0 + # this is a stopgap pending ^ or developing type parsing + cass_type = types.lookup_casstype(state_data_type) + if initial_condition is not None: + initial_condition = cass_type.deserialize(initial_condition, 3) + except ValueError: + pass + state_type = cls._schema_type_to_cql(state_data_type) + return_type = cls._schema_type_to_cql(aggregate_row['return_type']) return Aggregate(aggregate_row['keyspace_name'], aggregate_row['aggregate_name'], - aggregate_row['signature'], aggregate_row['state_func'], state_type, + aggregate_row[cls._function_agg_arument_type_col], aggregate_row['state_func'], state_type, aggregate_row['final_func'], initial_condition, return_type) def _build_table_metadata(self, row, col_rows=None, trigger_rows=None): @@ -1845,7 +1849,7 @@ def _build_table_metadata(self, row, col_rows=None, trigger_rows=None): else: column_name = "key%d" % i - col = ColumnMetadata(table_meta, column_name, col_type) + col = ColumnMetadata(table_meta, column_name, col_type.cql_parameterized_type()) table_meta.columns[column_name] = col table_meta.partition_key.append(col) @@ -1857,7 +1861,9 @@ def _build_table_metadata(self, row, col_rows=None, trigger_rows=None): column_name = "column%d" % i data_type = column_name_types[i] - col = ColumnMetadata(table_meta, column_name, data_type, is_reversed=types.is_reversed_casstype(data_type)) + cql_type = _cql_from_cass_type(data_type) + is_reversed = types.is_reversed_casstype(data_type) + col = ColumnMetadata(table_meta, column_name, cql_type, is_reversed=is_reversed) table_meta.columns[column_name] = col table_meta.clustering_key.append(col) @@ -1922,13 +1928,16 @@ def _build_table_options(self, row): return options - @staticmethod - def _build_column_metadata(table_metadata, row): + @classmethod + def _build_column_metadata(cls, table_metadata, row): name = row["column_name"] - data_type = types.lookup_casstype(row["validator"]) + type_string = row["validator"] + data_type = types.lookup_casstype(type_string) + cql_type = _cql_from_cass_type(data_type) is_static = row.get("type", None) == "static" is_reversed = types.is_reversed_casstype(data_type) - column_meta = ColumnMetadata(table_metadata, name, data_type, is_static=is_static, is_reversed=is_reversed) + column_meta = ColumnMetadata(table_metadata, name, cql_type, is_static, is_reversed) + column_meta._cass_type = data_type return column_meta @staticmethod @@ -1953,7 +1962,7 @@ def _build_index_metadata(column_metadata, row): # we need to check the data type to verify that, because # there is no special index option for full-collection # indexes. - data_type = column_metadata.data_type + data_type = column_metadata._cass_type collection_types = ('map', 'set', 'list') if data_type.typename == "frozen" and data_type.subtypes[0].typename in collection_types: # no index option for full-collection index @@ -2062,6 +2071,11 @@ def _aggregate_results(self): cfname = row[self._table_name_col] m[ksname][cfname].append(row) + @staticmethod + def _schema_type_to_cql(type_string): + cass_type = types.lookup_casstype(type_string) + return _cql_from_cass_type(cass_type) + class SchemaParserV3(SchemaParserV22): _SELECT_KEYSPACES = "SELECT * FROM system_schema.keyspaces" @@ -2076,6 +2090,8 @@ class SchemaParserV3(SchemaParserV22): _table_name_col = 'table_name' + _function_agg_arument_type_col = 'argument_types' + recognized_table_options = ( 'bloom_filter_fp_chance', 'caching', @@ -2107,14 +2123,15 @@ def get_all_keyspaces(self): def get_table(self, keyspaces, keyspace, table): cl = ConsistencyLevel.ONE - where_clause = " WHERE keyspace_name = '%s' AND %s = '%s'" % (keyspace, self._table_name_col, table) + where_clause = bind_params(" WHERE keyspace_name = %%s AND %s = %%s" % (self._table_name_col), (keyspace, table), self._encoder) cf_query = QueryMessage(query=self._SELECT_TABLES + where_clause, consistency_level=cl) col_query = QueryMessage(query=self._SELECT_COLUMNS + where_clause, consistency_level=cl) indexes_query = QueryMessage(query=self._SELECT_INDEXES + where_clause, consistency_level=cl) triggers_query = QueryMessage(query=self._SELECT_TRIGGERS + where_clause, consistency_level=cl) # in protocol v4 we don't know if this event is a view or a table, so we look for both - view_query = QueryMessage(query=self._SELECT_VIEWS + " WHERE keyspace_name = '%s' AND view_name = '%s'" % (keyspace, table), + where_clause = bind_params(" WHERE keyspace_name = %s AND view_name = %s", (keyspace, table), self._encoder) + view_query = QueryMessage(query=self._SELECT_VIEWS + where_clause, consistency_level=cl) (cf_success, cf_result), (col_success, col_result), (indexes_sucess, indexes_result), \ (triggers_success, triggers_result), (view_success, view_result) \ @@ -2207,7 +2224,7 @@ def _build_table_columns(self, meta, col_rows, compact_static=False, is_dense=Fa for col_row in (r for r in col_rows if r.get('kind', None) not in ('partition_key', 'clustering_key')): column_meta = self._build_column_metadata(meta, col_row) - if is_dense and column_meta.data_type.cassname.endswith(types.cassandra_empty_type): + if is_dense and column_meta.cql_type == types.cql_empty_type: continue if compact_static and not column_meta.is_static: # for compact static tables, we omit the clustering key and value, and only add the logical columns. @@ -2233,10 +2250,10 @@ def _build_view_metadata(self, row, col_rows=None): @staticmethod def _build_column_metadata(table_metadata, row): name = row["column_name"] - data_type = types.lookup_casstype(row["type"]) + cql_type = row["type"] is_static = row.get("kind", None) == "static" is_reversed = row["clustering_order"].upper() == "DESC" - column_meta = ColumnMetadata(table_metadata, name, data_type, is_static=is_static, is_reversed=is_reversed) + column_meta = ColumnMetadata(table_metadata, name, cql_type, is_static, is_reversed) return column_meta @staticmethod @@ -2304,6 +2321,10 @@ def _aggregate_results(self): for row in self.views_result: m[row["keyspace_name"]].append(row) + @staticmethod + def _schema_type_to_cql(type_string): + return type_string + class TableMetadataV3(TableMetadata): compaction_options = {} @@ -2438,3 +2459,13 @@ def get_schema_parser(connection, timeout): # we could further specialize by version. Right now just refactoring the # multi-version parser we have as of C* 2.2.0rc1. return SchemaParserV22(connection, timeout) + +def _cql_from_cass_type(cass_type): + """ + A string representation of the type for this column, such as "varchar" + or "map". + """ + if issubclass(cass_type, types.ReversedType): + return cass_type.subtypes[0].cql_parameterized_type() + else: + return cass_type.cql_parameterized_type() diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 268a9764fd..ee32f34773 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -26,17 +26,15 @@ from cassandra import AlreadyExists, SignatureDescriptor, UserFunctionDescriptor, UserAggregateDescriptor from cassandra.cluster import Cluster -from cassandra.cqltypes import DoubleType, Int32Type, ListType, UTF8Type, MapType, LongType from cassandra.encoder import Encoder from cassandra.metadata import (Metadata, KeyspaceMetadata, IndexMetadata, Token, MD5Token, TokenMap, murmur3, Function, Aggregate, protect_name, protect_names, get_schema_parser) from cassandra.policies import SimpleConvictionPolicy from cassandra.pool import Host -from cassandra.query import SimpleStatement, ConsistencyLevel from tests.integration import get_cluster, use_singledc, PROTOCOL_VERSION, get_server_versions, execute_until_pass, \ - BasicSharedKeyspaceUnitTestCase, BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase + BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase def setup_module(): @@ -753,7 +751,7 @@ def assert_equal_diff(self, received, expected): def assert_startswith_diff(self, received, prefix): if not received.startswith(prefix): - prefix_lines = previx.split('\n') + prefix_lines = prefix.split('\n') diff_string = '\n'.join(difflib.unified_diff(prefix_lines, received.split('\n')[:len(prefix_lines)], 'EXPECTED', 'RECEIVED', @@ -1450,7 +1448,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): @property def signature(self): return SignatureDescriptor.format_signature(self.function_kwargs['name'], - self.function_kwargs['type_signature']) + self.function_kwargs['argument_types']) class VerifiedFunction(Verified): def __init__(self, test_case, **kwargs): @@ -1466,9 +1464,9 @@ class FunctionMetadata(FunctionTest): def make_function_kwargs(self, called_on_null=True): return {'keyspace': self.keyspace_name, 'name': self.function_name, - 'type_signature': ['double', 'int'], + 'argument_types': ['double', 'int'], 'argument_names': ['d', 'i'], - 'return_type': DoubleType, + 'return_type': 'double', 'language': 'java', 'body': 'return new Double(0.0);', 'called_on_null_input': called_on_null} @@ -1485,7 +1483,7 @@ def test_functions_after_udt(self): SEE https://issues.apache.org/jira/browse/CASSANDRA-9186 Maybe update this after release kwargs = self.make_function_kwargs() - kwargs['type_signature'][0] = "frozen<%s>" % udt_name + kwargs['argument_types'][0] = "frozen<%s>" % udt_name expected_meta = Function(**kwargs) with self.VerifiedFunction(self, **kwargs): @@ -1526,16 +1524,16 @@ def test_function_same_name_diff_types(self): with self.VerifiedFunction(self, **kwargs): # another function: same name, different type sig. - self.assertGreater(len(kwargs['type_signature']), 1) + self.assertGreater(len(kwargs['argument_types']), 1) self.assertGreater(len(kwargs['argument_names']), 1) - kwargs['type_signature'] = kwargs['type_signature'][:1] + kwargs['argument_types'] = kwargs['argument_types'][:1] kwargs['argument_names'] = kwargs['argument_names'][:1] # Ensure they are surfaced separately with self.VerifiedFunction(self, **kwargs): functions = [f for f in self.keyspace_function_meta.values() if f.name == self.function_name] self.assertEqual(len(functions), 2) - self.assertNotEqual(functions[0].type_signature, functions[1].type_signature) + self.assertNotEqual(functions[0].argument_types, functions[1].argument_types) def test_function_no_parameters(self): """ @@ -1549,9 +1547,9 @@ def test_function_no_parameters(self): @test_category function """ kwargs = self.make_function_kwargs() - kwargs['type_signature'] = [] + kwargs['argument_types'] = [] kwargs['argument_names'] = [] - kwargs['return_type'] = LongType + kwargs['return_type'] = 'bigint' kwargs['body'] = 'return System.currentTimeMillis() / 1000L;' with self.VerifiedFunction(self, **kwargs) as vf: @@ -1648,7 +1646,7 @@ def setup_class(cls): def make_aggregate_kwargs(self, state_func, state_type, final_func=None, init_cond=None): return {'keyspace': self.keyspace_name, 'name': self.function_name + '_aggregate', - 'type_signature': ['int'], + 'argument_types': ['int'], 'state_func': state_func, 'state_type': state_type, 'final_func': final_func, @@ -1669,8 +1667,8 @@ def test_return_type_meta(self): @test_category aggregate """ - with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', Int32Type, init_cond=1)) as va: - self.assertIs(self.keyspace_aggregate_meta[va.signature].return_type, Int32Type) + with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', 'int', init_cond=1)) as va: + self.assertEqual(self.keyspace_aggregate_meta[va.signature].return_type, 'int') def test_init_cond(self): """ @@ -1694,13 +1692,13 @@ def test_init_cond(self): # int32 for init_cond in (-1, 0, 1): - with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', Int32Type, init_cond=init_cond)) as va: + with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', 'int', init_cond=init_cond)) as va: sum_res = s.execute("SELECT %s(v) AS sum FROM t" % va.function_kwargs['name'])[0].sum self.assertEqual(sum_res, init_cond + sum(expected_values)) # list for init_cond in ([], ['1', '2']): - with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('extend_list', ListType.apply_parameters([UTF8Type]), init_cond=init_cond)) as va: + with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('extend_list', 'list', init_cond=init_cond)) as va: list_res = s.execute("SELECT %s(v) AS list_res FROM t" % va.function_kwargs['name'])[0].list_res self.assertListEqual(list_res[:len(init_cond)], init_cond) self.assertEqual(set(i for i in list_res[len(init_cond):]), @@ -1710,7 +1708,7 @@ def test_init_cond(self): expected_map_values = dict((i, i) for i in expected_values) expected_key_set = set(expected_values) for init_cond in ({}, {1: 2, 3: 4}, {5: 5}): - with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('update_map', MapType.apply_parameters([Int32Type, Int32Type]), init_cond=init_cond)) as va: + with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('update_map', 'map', init_cond=init_cond)) as va: map_res = s.execute("SELECT %s(v) AS map_res FROM t" % va.function_kwargs['name'])[0].map_res self.assertDictContainsSubset(expected_map_values, map_res) init_not_updated = dict((k, init_cond[k]) for k in set(init_cond) - expected_key_set) @@ -1732,7 +1730,7 @@ def test_aggregates_after_functions(self): """ # functions must come before functions in keyspace dump - with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('extend_list', ListType.apply_parameters([UTF8Type]))): + with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('extend_list', 'list')): keyspace_cql = self.cluster.metadata.keyspaces[self.keyspace_name].export_as_string() func_idx = keyspace_cql.find("CREATE FUNCTION") aggregate_idx = keyspace_cql.rfind("CREATE AGGREGATE") @@ -1752,14 +1750,14 @@ def test_same_name_diff_types(self): @test_category function """ - kwargs = self.make_aggregate_kwargs('sum_int', Int32Type, init_cond=0) + kwargs = self.make_aggregate_kwargs('sum_int', 'int', init_cond=0) with self.VerifiedAggregate(self, **kwargs): kwargs['state_func'] = 'sum_int_two' - kwargs['type_signature'] = ['int', 'int'] + kwargs['argument_types'] = ['int', 'int'] with self.VerifiedAggregate(self, **kwargs): aggregates = [a for a in self.keyspace_aggregate_meta.values() if a.name == kwargs['name']] self.assertEqual(len(aggregates), 2) - self.assertNotEqual(aggregates[0].type_signature, aggregates[1].type_signature) + self.assertNotEqual(aggregates[0].argument_types, aggregates[1].argument_types) def test_aggregates_follow_keyspace_alter(self): """ @@ -1775,7 +1773,7 @@ def test_aggregates_follow_keyspace_alter(self): @test_category function """ - with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', Int32Type, init_cond=0)): + with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', 'int', init_cond=0)): original_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name] self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' % self.keyspace_name) try: @@ -1799,7 +1797,7 @@ def test_cql_optional_params(self): @test_category function """ - kwargs = self.make_aggregate_kwargs('extend_list', ListType.apply_parameters([UTF8Type])) + kwargs = self.make_aggregate_kwargs('extend_list', 'list') # no initial condition, final func self.assertIsNone(kwargs['initial_condition']) @@ -2117,35 +2115,23 @@ def test_create_view_metadata(self): self.assertEquals(month_column.name, 'month') self.assertEquals(month_column, mv.partition_key[2]) + def compare_columns(a, b, name): + self.assertEquals(a.name, name) + self.assertEquals(a.name, b.name) + self.assertEquals(a.table, b.table) + self.assertEquals(a.cql_type, b.cql_type) + self.assertEquals(a.index, b.index) + self.assertEquals(a.is_static, b.is_static) + self.assertEquals(a.is_reversed, b.is_reversed) + score_column = mv_columns[3] - self.assertIsNotNone(score_column) - self.assertEquals(score_column.name, 'score') - self.assertEquals(score_column.name, mv.clustering_key[0].name) - self.assertEquals(score_column.table, mv.clustering_key[0].table) - self.assertEquals(score_column.data_type, mv.clustering_key[0].data_type) - self.assertEquals(score_column.index, mv.clustering_key[0].index) - self.assertEquals(score_column.is_static, mv.clustering_key[0].is_static) - self.assertEquals(score_column.is_reversed, mv.clustering_key[0].is_reversed) + compare_columns(score_column, mv.clustering_key[0], 'score') user_column = mv_columns[4] - self.assertIsNotNone(user_column) - self.assertEquals(user_column.name, 'user') - self.assertEquals(user_column.name, mv.clustering_key[1].name) - self.assertEquals(user_column.table, mv.clustering_key[1].table) - self.assertEquals(user_column.data_type, mv.clustering_key[1].data_type) - self.assertEquals(user_column.index, mv.clustering_key[1].index) - self.assertEquals(user_column.is_static, mv.clustering_key[1].is_static) - self.assertEquals(user_column.is_reversed, mv.clustering_key[1].is_reversed) + compare_columns(user_column, mv.clustering_key[1], 'user') day_column = mv_columns[5] - self.assertIsNotNone(day_column) - self.assertEquals(day_column.name, 'day') - self.assertEquals(day_column.name, mv.clustering_key[2].name) - self.assertEquals(day_column.table, mv.clustering_key[2].table) - self.assertEquals(day_column.data_type, mv.clustering_key[2].data_type) - self.assertEquals(day_column.index, mv.clustering_key[2].index) - self.assertEquals(day_column.is_static, mv.clustering_key[2].is_static) - self.assertEquals(day_column.is_reversed, mv.clustering_key[2].is_reversed) + compare_columns(day_column, mv.clustering_key[2], 'day') def test_base_table_column_addition_mv(self): """ @@ -2213,7 +2199,7 @@ def test_base_table_column_addition_mv(self): self.assertIn("fouls", mv_alltime.columns) mv_alltime_fouls_comumn = self.cluster.metadata.keyspaces[self.keyspace_name].views["alltimehigh"].columns['fouls'] - self.assertEquals(mv_alltime_fouls_comumn.typestring, 'int') + self.assertEquals(mv_alltime_fouls_comumn.cql_type, 'int') def test_base_table_type_alter_mv(self): """ @@ -2254,15 +2240,16 @@ def test_base_table_type_alter_mv(self): self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 1) score_column = self.cluster.metadata.keyspaces[self.keyspace_name].tables['scores'].columns['score'] - self.assertEquals(score_column.typestring, 'blob') + self.assertEquals(score_column.cql_type, 'blob') + # until CASSANDRA-9920+CASSANDRA-10500 MV updates are only available later with an async event for i in range(10): score_mv_column = self.cluster.metadata.keyspaces[self.keyspace_name].views["monthlyhigh"].columns['score'] - if("blob" == score_mv_column.typestring): + if "blob" == score_mv_column.cql_type: break time.sleep(.2) - self.assertEquals(score_mv_column.typestring, 'blob') + self.assertEquals(score_mv_column.cql_type, 'blob') def test_metadata_with_quoted_identifiers(self): """ From 644263bfe69b1d1820c1f4d1a79f65dbf7831035 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 28 Oct 2015 11:16:48 -0500 Subject: [PATCH 0172/2005] Use a single CQL encoder for metadata module. --- cassandra/metadata.py | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 00181f411c..37f5b0fa74 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -79,6 +79,8 @@ Set of reserved keywords in CQL. """ +_encoder = Encoder() + class Metadata(object): """ @@ -862,7 +864,7 @@ def as_cql_query(self, formatted=False): "STYPE %(state_type)s" % locals() ret += ''.join((sep, 'FINALFUNC ', protect_name(self.final_func))) if self.final_func else '' - ret += ''.join((sep, 'INITCOND ', Encoder().cql_encode_all_types(self.initial_condition)))\ + ret += ''.join((sep, 'INITCOND ', _encoder.cql_encode_all_types(self.initial_condition)))\ if self.initial_condition is not None else '' return ret @@ -1612,8 +1614,6 @@ class SchemaParserV22(_SchemaParser): "compression", "default_time_to_live") - _encoder = Encoder() - def __init__(self, connection, timeout): super(SchemaParserV22, self).__init__(connection, timeout) self.keyspaces_result = [] @@ -1661,7 +1661,7 @@ def get_all_keyspaces(self): def get_table(self, keyspaces, keyspace, table): cl = ConsistencyLevel.ONE - where_clause = bind_params(" WHERE keyspace_name = %%s AND %s = %%s" % (self._table_name_col,), (keyspace, table), self._encoder) + where_clause = bind_params(" WHERE keyspace_name = %%s AND %s = %%s" % (self._table_name_col,), (keyspace, table), _encoder) cf_query = QueryMessage(query=self._SELECT_COLUMN_FAMILIES + where_clause, consistency_level=cl) col_query = QueryMessage(query=self._SELECT_COLUMNS + where_clause, consistency_level=cl) triggers_query = QueryMessage(query=self._SELECT_TRIGGERS + where_clause, consistency_level=cl) @@ -1680,24 +1680,22 @@ def get_table(self, keyspaces, keyspace, table): return self._build_table_metadata(table_result[0], col_result, triggers_result) def get_type(self, keyspaces, keyspace, type): - where_clause = bind_params(" WHERE keyspace_name = %s AND type_name = %s", (keyspace, type), self._encoder) + where_clause = bind_params(" WHERE keyspace_name = %s AND type_name = %s", (keyspace, type), _encoder) return self._query_build_row(self._SELECT_TYPES + where_clause, self._build_user_type) def get_function(self, keyspaces, keyspace, function): where_clause = bind_params(" WHERE keyspace_name = %%s AND function_name = %%s AND %s = %%s" % (self._function_agg_arument_type_col,), - (keyspace, function.name, function.argument_types), - self._encoder) + (keyspace, function.name, function.argument_types), _encoder) return self._query_build_row(self._SELECT_FUNCTIONS + where_clause, self._build_function) def get_aggregate(self, keyspaces, keyspace, aggregate): where_clause = bind_params(" WHERE keyspace_name = %%s AND aggregate_name = %%s AND %s = %%s" % (self._function_agg_arument_type_col,), - (keyspace, aggregate.name, aggregate.argument_types), - self._encoder) + (keyspace, aggregate.name, aggregate.argument_types), _encoder) return self._query_build_row(self._SELECT_AGGREGATES + where_clause, self._build_aggregate) def get_keyspace(self, keyspaces, keyspace): - where_clause = bind_params(" WHERE keyspace_name = %s", (keyspace,), self._encoder) + where_clause = bind_params(" WHERE keyspace_name = %s", (keyspace,), _encoder) return self._query_build_row(self._SELECT_KEYSPACES + where_clause, self._build_keyspace_metadata) @classmethod @@ -2123,14 +2121,14 @@ def get_all_keyspaces(self): def get_table(self, keyspaces, keyspace, table): cl = ConsistencyLevel.ONE - where_clause = bind_params(" WHERE keyspace_name = %%s AND %s = %%s" % (self._table_name_col), (keyspace, table), self._encoder) + where_clause = bind_params(" WHERE keyspace_name = %%s AND %s = %%s" % (self._table_name_col), (keyspace, table), _encoder) cf_query = QueryMessage(query=self._SELECT_TABLES + where_clause, consistency_level=cl) col_query = QueryMessage(query=self._SELECT_COLUMNS + where_clause, consistency_level=cl) indexes_query = QueryMessage(query=self._SELECT_INDEXES + where_clause, consistency_level=cl) triggers_query = QueryMessage(query=self._SELECT_TRIGGERS + where_clause, consistency_level=cl) # in protocol v4 we don't know if this event is a view or a table, so we look for both - where_clause = bind_params(" WHERE keyspace_name = %s AND view_name = %s", (keyspace, table), self._encoder) + where_clause = bind_params(" WHERE keyspace_name = %s AND view_name = %s", (keyspace, table), _encoder) view_query = QueryMessage(query=self._SELECT_VIEWS + where_clause, consistency_level=cl) (cf_success, cf_result), (col_success, col_result), (indexes_sucess, indexes_result), \ From 394992ab25582705057dfe20c554459b8bd858f7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 28 Oct 2015 14:10:06 -0500 Subject: [PATCH 0173/2005] cython: fix shift overflow issue in varint unpack PYTHON-433 --- cassandra/cython_marshal.pyx | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/cassandra/cython_marshal.pyx b/cassandra/cython_marshal.pyx index 56437e32cd..cdf1bc7c88 100644 --- a/cassandra/cython_marshal.pyx +++ b/cassandra/cython_marshal.pyx @@ -105,19 +105,21 @@ cdef varint_unpack(Buffer *term): # TODO: Optimize these two functions cdef varint_unpack_py3(bytes term): - cdef int64_t one = 1L - val = int(''.join(["%02x" % i for i in term]), 16) if (term[0] & 128) != 0: - # There is a bug in Cython (0.20 - 0.22), where if we do - # '1 << (len(term) * 8)' Cython generates '1' directly into the - # C code, causing integer overflows - val -= one << (len(term) * 8) + shift = len(term) * 8 # * Note below + val -= 1 << shift return val cdef varint_unpack_py2(bytes term): # noqa - cdef int64_t one = 1L val = int(term.encode('hex'), 16) if (ord(term[0]) & 128) != 0: - val = val - (one << (len(term) * 8)) + shift = len(term) * 8 # * Note below + val = val - (1 << shift) return val + +# * Note * +# '1 << (len(term) * 8)' Cython tries to do native +# integer shifts, which overflows. We need this to +# emulate Python shifting, which will expand the long +# to accommodate From 619c56ca57aaacdcbaaa3652018f6b0bf3da48de Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 28 Oct 2015 14:31:39 -0500 Subject: [PATCH 0174/2005] Add synchronous meta refresh for materialized views PYTHON-407 --- cassandra/cluster.py | 9 +++++++++ tests/integration/standard/test_metadata.py | 10 +++++++++- 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index ffc563e194..96b955ef58 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1261,6 +1261,15 @@ def refresh_table_metadata(self, keyspace, table, max_schema_agreement_wait=None if not self.control_connection.refresh_schema(target_type=SchemaTargetType.TABLE, keyspace=keyspace, table=table, schema_agreement_wait=max_schema_agreement_wait): raise Exception("Table metadata was not refreshed. See log for details.") + def refresh_materialized_view_metadata(self, keyspace, view, max_schema_agreement_wait=None): + """ + Synchronously refresh materialized view metadata. + + See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior + """ + if not self.control_connection.refresh_schema(target_type=SchemaTargetType.TABLE, keyspace=keyspace, table=view, schema_agreement_wait=max_schema_agreement_wait): + raise Exception("View metadata was not refreshed. See log for details.") + def refresh_user_type_metadata(self, keyspace, user_type, max_schema_agreement_wait=None): """ Synchronously refresh user defined type metadata. diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 268a9764fd..bbda0c2d56 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -556,7 +556,6 @@ def test_refresh_metadata_for_mv(self): if CASS_SERVER_VERSION < (3, 0): raise unittest.SkipTest("Materialized views require Cassandra 3.0+") - table_name = "test" self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b text)".format(self.keyspace_name, self.function_table_name)) cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) @@ -572,6 +571,15 @@ def test_refresh_metadata_for_mv(self): cluster2.shutdown() + original_meta = self.cluster.metadata.keyspaces[self.keyspace_name].views['mv1'] + self.assertIs(original_meta, self.session.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views['mv1']) + self.cluster.refresh_materialized_view_metadata(self.keyspace_name, 'mv1') + + current_meta = self.cluster.metadata.keyspaces[self.keyspace_name].views['mv1'] + self.assertIsNot(current_meta, original_meta) + self.assertIsNot(original_meta, self.session.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views['mv1']) + self.assertEqual(original_meta.as_cql_query(), current_meta.as_cql_query()) + def test_refresh_user_type_metadata(self): """ test for synchronously refreshing UDT metadata in keyspace From 1a74af426c0f2cd5135065f8e7ca1b0cfbfb3fe7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 29 Oct 2015 10:29:41 -0500 Subject: [PATCH 0175/2005] encode UDT field names to avoid implicit conversion in formatting PYTHON-353 --- cassandra/cluster.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index ffc563e194..16d012c3c1 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1898,11 +1898,17 @@ def user_type_registered(self, keyspace, user_type, klass): raise UserTypeDoesNotExist( 'User type %s does not exist in keyspace %s' % (user_type, keyspace)) + field_names = type_meta.field_names + if six.PY2: + # go from unicode to string to avoid decode errors from implicit + # decode when formatting non-ascii values + field_names = [fn.encode('utf-8') for fn in field_names] + def encode(val): return '{ %s }' % ' , '.join('%s : %s' % ( field_name, self.encoder.cql_encode_all_types(getattr(val, field_name, None)) - ) for field_name in type_meta.field_names) + ) for field_name in field_names) self.encoder.mapping[klass] = encode From 114c051cd3390d986d284aa4ebf818fc9048f7af Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 29 Oct 2015 14:41:02 -0500 Subject: [PATCH 0176/2005] cqle: test for None overriding default --- .../integration/cqlengine/model/test_model_io.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index 9ef2df548c..ae622a2243 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -183,6 +183,21 @@ class AllDatatypesModel(Model): for i, i_char in enumerate(range(ord('a'), ord('a') + 14)): self.assertEqual(input[i], output[chr(i_char)]) + def test_can_specify_none_instead_of_default(self): + self.assertIsNotNone(TestModel.a_bool.column.default) + + # override default + inst = TestModel.create(a_bool=None) + self.assertIsNone(inst.a_bool) + queried = TestModel.objects(id=inst.id).first() + self.assertIsNone(queried.a_bool) + + # letting default be set + inst = TestModel.create() + self.assertEqual(inst.a_bool, TestModel.a_bool.column.default) + queried = TestModel.objects(id=inst.id).first() + self.assertEqual(queried.a_bool, TestModel.a_bool.column.default) + def test_can_insert_model_with_all_protocol_v4_column_types(self): """ Test for inserting all protocol v4 column types into a Model From 4bd472d1a4aa125e231cb27784138416ae49b4ae Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 29 Oct 2015 15:01:13 -0500 Subject: [PATCH 0177/2005] Reintroduce explicit value flag --- cassandra/cqlengine/columns.py | 7 ++----- cassandra/cqlengine/models.py | 3 ++- cassandra/cqlengine/usertype.py | 4 ++-- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index bea4ce5a2d..1778fe659a 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -32,11 +32,8 @@ def __init__(self, instance, column, value): self.instance = instance self.column = column self.value = value - - # Brand new instances do no have previous value yet. - self.previous_value = None - if self.instance._is_persisted: - self.previous_value = deepcopy(value) + self.previous_value = deepcopy(value) if instance._is_persisted else None + self.explicit = False @property def deleted(self): diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index d6b62838dc..8f7f10dd48 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -345,6 +345,7 @@ def __init__(self, **values): if value is not None or isinstance(column, columns.BaseContainerColumn): value = column.to_python(value) value_mngr = column.value_manager(self, column, value) + value_mngr.explicit = name in values self._values[name] = value_mngr def __repr__(self): @@ -509,7 +510,7 @@ def validate(self): """ for name, col in self._columns.items(): v = getattr(self, name) - if v is None and col.has_default: + if v is None and not self._values[name].explicit and col.has_default: v = col.get_default() val = col.validate(v) setattr(self, name, val) diff --git a/cassandra/cqlengine/usertype.py b/cassandra/cqlengine/usertype.py index e080371f26..dc5575389a 100644 --- a/cassandra/cqlengine/usertype.py +++ b/cassandra/cqlengine/usertype.py @@ -34,6 +34,7 @@ def __init__(self, **values): if value is not None or isinstance(field, columns.BaseContainerColumn): value = field.to_python(value) value_mngr = field.value_manager(self, field, value) + value_mngr.explicit = name in values self._values[name] = value_mngr def __eq__(self, other): @@ -130,10 +131,9 @@ def validate(self): """ Cleans and validates the field values """ - pass for name, field in self._fields.items(): v = getattr(self, name) - if v is None and field.has_default: + if v is None and not self._values[name].explicit and field.has_default: v = field.get_default() val = field.validate(v) setattr(self, name, val) From 699460f658775a2ad2755e743054512bf7029101 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 29 Oct 2015 15:52:32 -0500 Subject: [PATCH 0178/2005] cqle: refactor model persisted flag removes previous change adding flag to initializer consolidates valuemanager updates and flag setting removes need of value manager to know about persistence PYTHON-348 --- cassandra/cqlengine/columns.py | 5 ++-- cassandra/cqlengine/models.py | 28 ++++++++----------- cassandra/cqlengine/usertype.py | 1 - .../cqlengine/model/test_model_io.py | 2 +- 4 files changed, 15 insertions(+), 21 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 1778fe659a..45851f27ba 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -16,7 +16,6 @@ from datetime import date, datetime import logging import six -import warnings from cassandra import util from cassandra.cqltypes import DateType, SimpleDateType @@ -32,7 +31,7 @@ def __init__(self, instance, column, value): self.instance = instance self.column = column self.value = value - self.previous_value = deepcopy(value) if instance._is_persisted else None + self.previous_value = None self.explicit = False @property @@ -50,7 +49,7 @@ def changed(self): return self.value != self.previous_value def reset_previous_value(self): - self.previous_value = copy(self.value) + self.previous_value = deepcopy(self.value) def getval(self): return self.value diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 8f7f10dd48..f615068001 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -332,16 +332,11 @@ def __init__(self, **values): self._transaction = None self._batch = None self._timeout = connection.NOT_SET + self._is_persisted = False - # A flag set by the deserializer to indicate that update should be used - # when persisting changes. - self._is_persisted = values.pop('_is_persisted', False) - - # Initialize each volumn to its provided or default right at - # instanciation. See PYTHON-348. self._values = {} for name, column in self._columns.items(): - value = values.get(name, None) + value = values.get(name) if value is not None or isinstance(column, columns.BaseContainerColumn): value = column.to_python(value) value_mngr = column.value_manager(self, column, value) @@ -418,7 +413,14 @@ def _construct_instance(cls, values): else: klass = cls - return klass(_is_persisted=True, **field_dict) + instance = klass(**field_dict) + instance._set_persisted() + return instance + + def _set_persisted(self): + for v in self._values.values(): + v.reset_previous_value() + self._is_persisted = True def _can_update(self): """ @@ -645,10 +647,7 @@ def save(self): transaction=self._transaction, timeout=self._timeout).save() - # reset the value managers - for v in self._values.values(): - v.reset_previous_value() - self._is_persisted = True + self._set_persisted() self._ttl = self.__default_ttl__ self._timestamp = None @@ -694,10 +693,7 @@ def update(self, **values): transaction=self._transaction, timeout=self._timeout).update() - # reset the value managers - for v in self._values.values(): - v.reset_previous_value() - self._is_persisted = True + self._set_persisted() self._ttl = self.__default_ttl__ self._timestamp = None diff --git a/cassandra/cqlengine/usertype.py b/cassandra/cqlengine/usertype.py index dc5575389a..dcc3fc7662 100644 --- a/cassandra/cqlengine/usertype.py +++ b/cassandra/cqlengine/usertype.py @@ -27,7 +27,6 @@ class BaseUserType(object): def __init__(self, **values): self._values = {} - self._is_persisted = False for name, field in self._fields.items(): value = values.get(name, None) diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index ae622a2243..dd00172ac7 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -393,7 +393,7 @@ def test_previous_value_tracking_of_persisted_instance(self): assert self.instance.get_changed_columns() == [] assert self.instance._values['count'].previous_value == 3 - def test_previous_value_tracking_on_instanciation(self): + def test_previous_value_tracking_on_instantiation(self): self.instance = TestMultiKeyModel( partition=random.randint(0, 1000), cluster=random.randint(0, 1000), From 8842b6a63113511f61eb65d97a9a873307fb62cd Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 29 Oct 2015 16:21:25 -0500 Subject: [PATCH 0179/2005] cqle test: don't setup cqlengine connection in individual test --- .../integration/cqlengine/query/test_named.py | 23 ++++++++----------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/tests/integration/cqlengine/query/test_named.py b/tests/integration/cqlengine/query/test_named.py index 2cc88ec51d..ad4f52afc2 100644 --- a/tests/integration/cqlengine/query/test_named.py +++ b/tests/integration/cqlengine/query/test_named.py @@ -269,18 +269,14 @@ def test_get_multipleobjects_exception(self): self.table.objects.get(test_id=1) -class TestNamedWithMV(BasicSharedKeyspaceUnitTestCase): +class TestNamedWithMV(BaseCassEngTestCase): def setUp(self): - self.default_keyspace = models.DEFAULT_KEYSPACE cass_version = get_server_versions()[0] if cass_version < (3, 0): raise unittest.SkipTest("Materialized views require Cassandra 3.0+") super(TestNamedWithMV, self).setUp() - def tearDown(self): - models.DEFAULT_KEYSPACE = self.default_keyspace - def test_named_table_with_mv(self): """ Test NamedTable access to materialized views @@ -294,9 +290,10 @@ def test_named_table_with_mv(self): @test_category materialized_view """ - connection.setup(['127.0.0.1'], self.keyspace_name) - - # Create a base table and two materialized views + ks = models.DEFAULT_KEYSPACE + self.session.execute("DROP MATERIALIZED VIEW IF EXISTS {0}.alltimehigh".format(ks)) + self.session.execute("DROP MATERIALIZED VIEW IF EXISTS {0}.monthlyhigh".format(ks)) + self.session.execute("DROP TABLE IF EXISTS {0}.scores".format(ks)) create_table = """CREATE TABLE {0}.scores( user TEXT, game TEXT, @@ -305,14 +302,14 @@ def test_named_table_with_mv(self): day INT, score INT, PRIMARY KEY (user, game, year, month, day) - )""".format(self.keyspace_name) + )""".format(ks) self.session.execute(create_table) create_mv = """CREATE MATERIALIZED VIEW {0}.monthlyhigh AS SELECT game, year, month, score, user, day FROM {0}.scores WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL PRIMARY KEY ((game, year, month), score, user, day) - WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)""".format(self.keyspace_name) + WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)""".format(ks) self.session.execute(create_mv) @@ -320,12 +317,12 @@ def test_named_table_with_mv(self): SELECT * FROM {0}.scores WHERE game IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND day IS NOT NULL PRIMARY KEY (game, score, user, year, month, day) - WITH CLUSTERING ORDER BY (score DESC)""".format(self.keyspace_name) + WITH CLUSTERING ORDER BY (score DESC)""".format(ks) self.session.execute(create_mv_alltime) # Populate the base table with data - prepared_insert = self.session.prepare("""INSERT INTO {0}.scores (user, game, year, month, day, score) VALUES (?, ?, ? ,? ,?, ?)""".format(self.keyspace_name)) + prepared_insert = self.session.prepare("""INSERT INTO {0}.scores (user, game, year, month, day, score) VALUES (?, ?, ? ,? ,?, ?)""".format(ks)) parameters = {('pcmanus', 'Coup', 2015, 5, 1, 4000), ('jbellis', 'Coup', 2015, 5, 3, 1750), ('yukim', 'Coup', 2015, 5, 3, 2250), @@ -342,7 +339,7 @@ def test_named_table_with_mv(self): # Attempt to query the data using Named Table interface # Also test filtering on mv's - key_space = NamedKeyspace(self.keyspace_name) + key_space = NamedKeyspace(ks) table = key_space.table("scores") mv_monthly = key_space.table("monthlyhigh") table_objects = table.objects.all() From 7fc41a3e6ed9976cacd21d24b3bf036550a64429 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 30 Oct 2015 13:22:22 -0500 Subject: [PATCH 0180/2005] split aggregate parsing for C* 3+, update tests --- cassandra/metadata.py | 25 ++++++++-------- tests/integration/standard/test_metadata.py | 32 ++++++++++++--------- tests/unit/test_metadata.py | 26 +++++------------ 3 files changed, 38 insertions(+), 45 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 37f5b0fa74..ac189201e7 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -864,8 +864,7 @@ def as_cql_query(self, formatted=False): "STYPE %(state_type)s" % locals() ret += ''.join((sep, 'FINALFUNC ', protect_name(self.final_func))) if self.final_func else '' - ret += ''.join((sep, 'INITCOND ', _encoder.cql_encode_all_types(self.initial_condition)))\ - if self.initial_condition is not None else '' + ret += ''.join((sep, 'INITCOND ', self.initial_condition)) if self.initial_condition is not None else '' return ret @@ -1733,20 +1732,14 @@ def _build_function(cls, function_row): @classmethod def _build_aggregate(cls, aggregate_row): - state_data_type = aggregate_row['state_type'] + cass_state_type = types.lookup_casstype(aggregate_row['state_type']) initial_condition = aggregate_row['initcond'] - try: - # TODO: asking if initcond can be the CQL literal in 3.0 - # this is a stopgap pending ^ or developing type parsing - cass_type = types.lookup_casstype(state_data_type) - if initial_condition is not None: - initial_condition = cass_type.deserialize(initial_condition, 3) - except ValueError: - pass - state_type = cls._schema_type_to_cql(state_data_type) + if initial_condition is not None: + initial_condition = _encoder.cql_encode_all_types(cass_state_type.deserialize(initial_condition, 3)) + state_type = _cql_from_cass_type(cass_state_type) return_type = cls._schema_type_to_cql(aggregate_row['return_type']) return Aggregate(aggregate_row['keyspace_name'], aggregate_row['aggregate_name'], - aggregate_row[cls._function_agg_arument_type_col], aggregate_row['state_func'], state_type, + aggregate_row['signature'], aggregate_row['state_func'], state_type, aggregate_row['final_func'], initial_condition, return_type) def _build_table_metadata(self, row, col_rows=None, trigger_rows=None): @@ -2154,6 +2147,12 @@ def _build_keyspace_metadata_internal(row): strategy_class = strategy_options.pop("class") return KeyspaceMetadata(name, durable_writes, strategy_class, strategy_options) + @staticmethod + def _build_aggregate(aggregate_row): + return Aggregate(aggregate_row['keyspace_name'], aggregate_row['aggregate_name'], + aggregate_row['argument_types'], aggregate_row['state_func'], aggregate_row['state_type'], + aggregate_row['final_func'], aggregate_row['initcond'], aggregate_row['return_type']) + def _build_table_metadata(self, row, col_rows=None, trigger_rows=None, index_rows=None): keyspace_name = row["keyspace_name"] table_name = row[self._table_name_col] diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index ee32f34773..b2d8fc1ace 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1667,7 +1667,7 @@ def test_return_type_meta(self): @test_category aggregate """ - with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', 'int', init_cond=1)) as va: + with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', 'int', init_cond='1')) as va: self.assertEqual(self.keyspace_aggregate_meta[va.signature].return_type, 'int') def test_init_cond(self): @@ -1688,17 +1688,21 @@ def test_init_cond(self): c = Cluster(protocol_version=3) s = c.connect(self.keyspace_name) + encoder = Encoder() + expected_values = range(4) # int32 for init_cond in (-1, 0, 1): - with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', 'int', init_cond=init_cond)) as va: + cql_init = encoder.cql_encode_all_types(init_cond) + with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', 'int', init_cond=cql_init)) as va: sum_res = s.execute("SELECT %s(v) AS sum FROM t" % va.function_kwargs['name'])[0].sum - self.assertEqual(sum_res, init_cond + sum(expected_values)) + self.assertEqual(sum_res, int(init_cond) + sum(expected_values)) # list for init_cond in ([], ['1', '2']): - with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('extend_list', 'list', init_cond=init_cond)) as va: + cql_init = encoder.cql_encode_all_types(init_cond) + with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('extend_list', 'list', init_cond=cql_init)) as va: list_res = s.execute("SELECT %s(v) AS list_res FROM t" % va.function_kwargs['name'])[0].list_res self.assertListEqual(list_res[:len(init_cond)], init_cond) self.assertEqual(set(i for i in list_res[len(init_cond):]), @@ -1708,7 +1712,8 @@ def test_init_cond(self): expected_map_values = dict((i, i) for i in expected_values) expected_key_set = set(expected_values) for init_cond in ({}, {1: 2, 3: 4}, {5: 5}): - with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('update_map', 'map', init_cond=init_cond)) as va: + cql_init = encoder.cql_encode_all_types(init_cond) + with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('update_map', 'map', init_cond=cql_init)) as va: map_res = s.execute("SELECT %s(v) AS map_res FROM t" % va.function_kwargs['name'])[0].map_res self.assertDictContainsSubset(expected_map_values, map_res) init_not_updated = dict((k, init_cond[k]) for k in set(init_cond) - expected_key_set) @@ -1750,7 +1755,7 @@ def test_same_name_diff_types(self): @test_category function """ - kwargs = self.make_aggregate_kwargs('sum_int', 'int', init_cond=0) + kwargs = self.make_aggregate_kwargs('sum_int', 'int', init_cond='0') with self.VerifiedAggregate(self, **kwargs): kwargs['state_func'] = 'sum_int_two' kwargs['argument_types'] = ['int', 'int'] @@ -1773,7 +1778,7 @@ def test_aggregates_follow_keyspace_alter(self): @test_category function """ - with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', 'int', init_cond=0)): + with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', 'int', init_cond='0')): original_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name] self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' % self.keyspace_name) try: @@ -1798,6 +1803,7 @@ def test_cql_optional_params(self): """ kwargs = self.make_aggregate_kwargs('extend_list', 'list') + encoder = Encoder() # no initial condition, final func self.assertIsNone(kwargs['initial_condition']) @@ -1811,13 +1817,13 @@ def test_cql_optional_params(self): self.assertEqual(cql.find('FINALFUNC'), -1) # initial condition, no final func - kwargs['initial_condition'] = ['init', 'cond'] + kwargs['initial_condition'] = encoder.cql_encode_all_types(['init', 'cond']) with self.VerifiedAggregate(self, **kwargs) as va: meta = self.keyspace_aggregate_meta[va.signature] - self.assertListEqual(meta.initial_condition, kwargs['initial_condition']) + self.assertEqual(meta.initial_condition, kwargs['initial_condition']) self.assertIsNone(meta.final_func) cql = meta.as_cql_query() - search_string = "INITCOND %s" % Encoder().cql_encode_all_types(kwargs['initial_condition']) + search_string = "INITCOND %s" % kwargs['initial_condition'] self.assertGreater(cql.find(search_string), 0, '"%s" search string not found in cql:\n%s' % (search_string, cql)) self.assertEqual(cql.find('FINALFUNC'), -1) @@ -1834,14 +1840,14 @@ def test_cql_optional_params(self): self.assertGreater(cql.find(search_string), 0, '"%s" search string not found in cql:\n%s' % (search_string, cql)) # both - kwargs['initial_condition'] = ['init', 'cond'] + kwargs['initial_condition'] = encoder.cql_encode_all_types(['init', 'cond']) kwargs['final_func'] = 'List_As_String' with self.VerifiedAggregate(self, **kwargs) as va: meta = self.keyspace_aggregate_meta[va.signature] - self.assertListEqual(meta.initial_condition, kwargs['initial_condition']) + self.assertEqual(meta.initial_condition, kwargs['initial_condition']) self.assertEqual(meta.final_func, kwargs['final_func']) cql = meta.as_cql_query() - init_cond_idx = cql.find("INITCOND %s" % Encoder().cql_encode_all_types(kwargs['initial_condition'])) + init_cond_idx = cql.find("INITCOND %s" % kwargs['initial_condition']) final_func_idx = cql.find('FINALFUNC "%s"' % kwargs['final_func']) self.assertNotIn(-1, (init_cond_idx, final_func_idx)) self.assertGreater(init_cond_idx, final_func_idx) diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 86758e2332..51f4f61357 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -19,15 +19,13 @@ from mock import Mock import os -import random import six import cassandra -from cassandra.cqltypes import IntegerType, AsciiType, TupleType from cassandra.metadata import (Murmur3Token, MD5Token, BytesToken, ReplicationStrategy, NetworkTopologyStrategy, SimpleStrategy, - LocalStrategy, NoMurmur3, protect_name, + LocalStrategy, protect_name, protect_names, protect_value, is_valid_name, UserType, KeyspaceMetadata, get_schema_parser, _UnknownStrategy) @@ -287,17 +285,10 @@ class KeyspaceMetadataTest(unittest.TestCase): def test_export_as_string_user_types(self): keyspace_name = 'test' keyspace = KeyspaceMetadata(keyspace_name, True, 'SimpleStrategy', dict(replication_factor=3)) - keyspace.user_types['a'] = UserType(keyspace_name, 'a', ['one', 'two'], - [self.mock_user_type('UserType', 'c'), - self.mock_user_type('IntType', 'int')]) - keyspace.user_types['b'] = UserType(keyspace_name, 'b', ['one', 'two', 'three'], - [self.mock_user_type('UserType', 'd'), - self.mock_user_type('IntType', 'int'), - self.mock_user_type('UserType', 'a')]) - keyspace.user_types['c'] = UserType(keyspace_name, 'c', ['one'], - [self.mock_user_type('IntType', 'int')]) - keyspace.user_types['d'] = UserType(keyspace_name, 'd', ['one'], - [self.mock_user_type('UserType', 'c')]) + keyspace.user_types['a'] = UserType(keyspace_name, 'a', ['one', 'two'], ['c', 'int']) + keyspace.user_types['b'] = UserType(keyspace_name, 'b', ['one', 'two', 'three'], ['d', 'int', 'a']) + keyspace.user_types['c'] = UserType(keyspace_name, 'c', ['one'], ['int']) + keyspace.user_types['d'] = UserType(keyspace_name, 'd', ['one'], ['c']) self.assertEqual("""CREATE KEYSPACE test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '3'} AND durable_writes = true; @@ -320,14 +311,11 @@ def test_export_as_string_user_types(self): three a );""", keyspace.export_as_string()) - def mock_user_type(self, cassname, typename): - return Mock(**{'cassname': cassname, 'typename': typename, 'cql_parameterized_type.return_value': typename}) - class UserTypesTest(unittest.TestCase): def test_as_cql_query(self): - field_types = [IntegerType, AsciiType, TupleType.apply_parameters([IntegerType, AsciiType])] + field_types = ['varint', 'ascii', 'frozen>'] udt = UserType("ks1", "mytype", ["a", "b", "c"], field_types) self.assertEqual("CREATE TYPE ks1.mytype (a varint, b ascii, c frozen>)", udt.as_cql_query(formatted=False)) @@ -338,7 +326,7 @@ def test_as_cql_query(self): );""", udt.export_as_string()) def test_as_cql_query_name_escaping(self): - udt = UserType("MyKeyspace", "MyType", ["AbA", "keyspace"], [AsciiType, AsciiType]) + udt = UserType("MyKeyspace", "MyType", ["AbA", "keyspace"], ['ascii', 'ascii']) self.assertEqual('CREATE TYPE "MyKeyspace"."MyType" ("AbA" ascii, "keyspace" ascii)', udt.as_cql_query(formatted=False)) From 3e3baa626f931ef78857e9aa56ef0f3227d5928b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 2 Nov 2015 10:50:17 -0600 Subject: [PATCH 0181/2005] accept datetime.date when serializing DateType --- cassandra/cqltypes.py | 11 +++++++---- tests/unit/test_marshalling.py | 7 ++++++- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 3cb8817294..e00196d80e 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -553,10 +553,13 @@ def serialize(v, protocol_version): timestamp_seconds = calendar.timegm(v.utctimetuple()) timestamp = timestamp_seconds * 1e3 + getattr(v, 'microsecond', 0) / 1e3 except AttributeError: - # Ints and floats are valid timestamps too - if type(v) not in _number_types: - raise TypeError('DateType arguments must be a datetime or timestamp') - timestamp = v + try: + timestamp = calendar.timegm(v.timetuple()) * 1e3 + except AttributeError: + # Ints and floats are valid timestamps too + if type(v) not in _number_types: + raise TypeError('DateType arguments must be a datetime, date, or timestamp') + timestamp = v return int64_pack(long(timestamp)) diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index 60ae33fd6b..3b98280c7c 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -23,7 +23,7 @@ from decimal import Decimal from uuid import UUID -from cassandra.cqltypes import lookup_casstype, DecimalType, UTF8Type +from cassandra.cqltypes import lookup_casstype, DecimalType, UTF8Type, DateType from cassandra.util import OrderedMap, OrderedMapSerializedKey, sortedset, Time, Date marshalled_value_pairs = ( @@ -39,6 +39,7 @@ (b'\x80\x00\x00\x00\x00\x00\x00\x00', 'CounterColumnType', -9223372036854775808), (b'', 'CounterColumnType', None), (b'\x00\x00\x013\x7fb\xeey', 'DateType', datetime(2011, 11, 7, 18, 55, 49, 881000)), + (b'\x00\x00\x01P\xc5~L\x00', 'DateType', datetime(2015, 11, 2)), (b'', 'DateType', None), (b'\x00\x00\x00\r\nJ\x04"^\x91\x04\x8a\xb1\x18\xfe', 'DecimalType', Decimal('1243878957943.1234124191998')), (b'\x00\x00\x00\x06\xe5\xde]\x98Y', 'DecimalType', Decimal('-112233.441191')), @@ -134,3 +135,7 @@ def test_bitlength(self): self.assertEqual(bitlength(9), 4) self.assertEqual(bitlength(-10), 0) self.assertEqual(bitlength(0), 0) + + def test_date(self): + # separate test because it will deserialize as datetime + self.assertEqual(DateType.from_binary(DateType.to_binary(date(2015, 11, 2), 1), 1), datetime(2015, 11, 2)) From eff319bbd363f0636c316382836b8f527a82bd51 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 2 Nov 2015 16:47:08 -0600 Subject: [PATCH 0182/2005] consider rack for NTS replica placement --- cassandra/metadata.py | 35 +++++++++++++++++++++++++---------- 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 8577d8f8dd..63cd1875a8 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -475,14 +475,16 @@ def make_token_replica_map(self, token_to_host_owner, ring): ring_len_range = range(ring_len) dc_rf_map = dict((dc, int(rf)) for dc, rf in self.dc_replication_factors.items() if rf > 0) - dcs = dict((h, h.datacenter) for h in set(token_to_host_owner.values())) # build a map of DCs to lists of indexes into `ring` for tokens that # belong to that DC dc_to_token_offset = defaultdict(list) + dc_racks = defaultdict(set) for i, token in enumerate(ring): host = token_to_host_owner[token] - dc_to_token_offset[dcs[host]].append(i) + dc_to_token_offset[host.datacenter].append(i) + if host.datacenter and host.rack: + dc_racks[host.datacenter].add(host.rack) # A map of DCs to an index into the dc_to_token_offset value for that dc. # This is how we keep track of advancing around the ring for each DC. @@ -506,20 +508,33 @@ def make_token_replica_map(self, token_to_host_owner, ring): index += 1 dc_to_current_index[dc] = index - # now add the next RF distinct token owners to the set of - # replicas for this DC + replicas_remaining = remaining[dc] + skipped_hosts = [] + racks_placed = set() + racks_this_dc = dc_racks[dc] for token_offset in islice(cycle(token_offsets), index, index + num_tokens): host = token_to_host_owner[ring[token_offset]] + if replicas_remaining == 0: + break + if host in replicas: continue + if host.rack in racks_placed and len(racks_placed) < len(racks_this_dc): + skipped_hosts.append(host) + continue + replicas.append(host) - dc_remaining = remaining[dc] - 1 - if dc_remaining == 0: - del remaining[dc] - break - else: - remaining[dc] = dc_remaining + replicas_remaining -= 1 + racks_placed.add(host.rack) + + if len(racks_placed) == len(racks_this_dc): + for host in skipped_hosts: + if replicas_remaining == 0: + break + replicas.append(host) + replicas_remaining -= 1 + del skipped_hosts[:] return replica_map From 88c6103282be62cbe463409c890ec86c9d7a249e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 3 Nov 2015 11:20:40 -0600 Subject: [PATCH 0183/2005] test: expand NTS testing to cover multi-rack case --- tests/unit/test_metadata.py | 45 +++++++++++++++++++++++++++++++++++-- 1 file changed, 43 insertions(+), 2 deletions(-) diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 86758e2332..c7a7e7e7ad 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -19,7 +19,6 @@ from mock import Mock import os -import random import six import cassandra @@ -27,7 +26,7 @@ from cassandra.metadata import (Murmur3Token, MD5Token, BytesToken, ReplicationStrategy, NetworkTopologyStrategy, SimpleStrategy, - LocalStrategy, NoMurmur3, protect_name, + LocalStrategy, protect_name, protect_names, protect_value, is_valid_name, UserType, KeyspaceMetadata, get_schema_parser, _UnknownStrategy) @@ -113,6 +112,48 @@ def test_nts_make_token_replica_map(self): self.assertItemsEqual(replica_map[MD5Token(0)], (dc1_1, dc1_2, dc2_1, dc2_2, dc3_1)) + def test_nts_make_token_replica_map_multi_rack(self): + token_to_host_owner = {} + + # (A) not enough distinct racks, first skipped is used + dc1_1 = Host('dc1.1', SimpleConvictionPolicy) + dc1_2 = Host('dc1.2', SimpleConvictionPolicy) + dc1_3 = Host('dc1.3', SimpleConvictionPolicy) + dc1_4 = Host('dc1.4', SimpleConvictionPolicy) + dc1_1.set_location_info('dc1', 'rack1') + dc1_2.set_location_info('dc1', 'rack1') + dc1_3.set_location_info('dc1', 'rack2') + dc1_4.set_location_info('dc1', 'rack2') + token_to_host_owner[MD5Token(0)] = dc1_1 + token_to_host_owner[MD5Token(100)] = dc1_2 + token_to_host_owner[MD5Token(200)] = dc1_3 + token_to_host_owner[MD5Token(300)] = dc1_4 + + # (B) distinct racks, but not contiguous + dc2_1 = Host('dc2.1', SimpleConvictionPolicy) + dc2_2 = Host('dc2.2', SimpleConvictionPolicy) + dc2_3 = Host('dc2.3', SimpleConvictionPolicy) + dc2_1.set_location_info('dc2', 'rack1') + dc2_2.set_location_info('dc2', 'rack1') + dc2_3.set_location_info('dc2', 'rack2') + token_to_host_owner[MD5Token(1)] = dc2_1 + token_to_host_owner[MD5Token(101)] = dc2_2 + token_to_host_owner[MD5Token(201)] = dc2_3 + + ring = [MD5Token(0), + MD5Token(1), + MD5Token(100), + MD5Token(101), + MD5Token(200), + MD5Token(201), + MD5Token(300)] + + nts = NetworkTopologyStrategy({'dc1': 3, 'dc2': 2}) + replica_map = nts.make_token_replica_map(token_to_host_owner, ring) + + token_replicas = replica_map[MD5Token(0)] + self.assertItemsEqual(token_replicas, (dc1_1, dc1_2, dc1_3, dc2_1, dc2_3)) + def test_nts_make_token_replica_map_empty_dc(self): host = Host('1', SimpleConvictionPolicy) host.set_location_info('dc1', 'rack1') From 3b10ce56911c411abe5c9013cab13cc3fe21a39f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 3 Nov 2015 15:12:00 -0600 Subject: [PATCH 0184/2005] Properly serdes nested collections when proto < 3 Inner types are always serialized with v3 + encoding. PYTHON-215 --- cassandra/cqltypes.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index e00196d80e..ef31ed7040 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -687,12 +687,13 @@ def deserialize_safe(cls, byts, protocol_version): numelements = unpack(byts[:length]) p = length result = [] + inner_proto = max(3, protocol_version) for _ in range(numelements): itemlen = unpack(byts[p:p + length]) p += length item = byts[p:p + itemlen] p += itemlen - result.append(subtype.from_binary(item, protocol_version)) + result.append(subtype.from_binary(item, inner_proto)) return cls.adapter(result) @classmethod @@ -704,8 +705,9 @@ def serialize_safe(cls, items, protocol_version): pack = int32_pack if protocol_version >= 3 else uint16_pack buf = io.BytesIO() buf.write(pack(len(items))) + inner_proto = max(3, protocol_version) for item in items: - itembytes = subtype.to_binary(item, protocol_version) + itembytes = subtype.to_binary(item, inner_proto) buf.write(pack(len(itembytes))) buf.write(itembytes) return buf.getvalue() @@ -739,6 +741,7 @@ def deserialize_safe(cls, byts, protocol_version): numelements = unpack(byts[:length]) p = length themap = util.OrderedMapSerializedKey(key_type, protocol_version) + inner_proto = max(3, protocol_version) for _ in range(numelements): key_len = unpack(byts[p:p + length]) p += length @@ -748,8 +751,8 @@ def deserialize_safe(cls, byts, protocol_version): p += length valbytes = byts[p:p + val_len] p += val_len - key = key_type.from_binary(keybytes, protocol_version) - val = value_type.from_binary(valbytes, protocol_version) + key = key_type.from_binary(keybytes, inner_proto) + val = value_type.from_binary(valbytes, inner_proto) themap._insert_unchecked(key, keybytes, val) return themap @@ -763,9 +766,10 @@ def serialize_safe(cls, themap, protocol_version): items = six.iteritems(themap) except AttributeError: raise TypeError("Got a non-map object for a map value") + inner_proto = max(3, protocol_version) for key, val in items: - keybytes = key_type.to_binary(key, protocol_version) - valbytes = value_type.to_binary(val, protocol_version) + keybytes = key_type.to_binary(key, inner_proto) + valbytes = value_type.to_binary(val, inner_proto) buf.write(pack(len(keybytes))) buf.write(keybytes) buf.write(pack(len(valbytes))) From 5efef3013921c07999659225f86e5971e37519ad Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 4 Nov 2015 10:16:08 -0600 Subject: [PATCH 0185/2005] cqle: fix broken UDT management following metadata api update PYTHON-422 --- cassandra/cqlengine/management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 34de5fcb12..d7b6504c4d 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -269,7 +269,7 @@ def get_create_type(type_model, keyspace): type_meta = metadata.UserType(keyspace, type_model.type_name(), (f.db_field_name for f in type_model._fields.values()), - type_model._fields.values()) + (v.db_type for v in type_model._fields.values())) return type_meta.as_cql_query() From 109a6af03ab1b0c8cbb82d00480d34c08e717f80 Mon Sep 17 00:00:00 2001 From: Carl Yeksigian Date: Wed, 4 Nov 2015 13:17:02 -0500 Subject: [PATCH 0186/2005] Handle case where the query trace returns no entries without throwing exception --- cassandra/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/query.py b/cassandra/query.py index 9378d0cad6..e66f875c56 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -884,7 +884,7 @@ def populate(self, max_wait=2.0, wait_for_complete=True): session_results = self._execute( self._SELECT_SESSIONS_FORMAT, (self.trace_id,), time_spent, max_wait) - is_complete = session_results[0].duration is not None + is_complete = session_results and session_results[0].duration is not None if not session_results or (wait_for_complete and not is_complete): time.sleep(self._BASE_RETRY_SLEEP * (2 ** attempt)) attempt += 1 From 4ea3c72d809e92d2b554415b9f35945fbd573d18 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 4 Nov 2015 12:34:02 -0600 Subject: [PATCH 0187/2005] tighten up whitespace in collection cql encoders --- cassandra/encoder.py | 8 ++++---- tests/unit/test_parameter_binding.py | 10 +++++----- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/cassandra/encoder.py b/cassandra/encoder.py index e4022306c1..07c38772a4 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -187,7 +187,7 @@ def cql_encode_sequence(self, val): Converts a sequence to a string of the form ``(item1, item2, ...)``. This is suitable for ``IN`` value lists. """ - return '( %s )' % ' , '.join(self.mapping.get(type(v), self.cql_encode_object)(v) + return '(%s)' % ', '.join(self.mapping.get(type(v), self.cql_encode_object)(v) for v in val) cql_encode_tuple = cql_encode_sequence @@ -201,7 +201,7 @@ def cql_encode_map_collection(self, val): Converts a dict into a string of the form ``{key1: val1, key2: val2, ...}``. This is suitable for ``map`` type columns. """ - return '{ %s }' % ' , '.join('%s : %s' % ( + return '{%s}' % ', '.join('%s: %s' % ( self.mapping.get(type(k), self.cql_encode_object)(k), self.mapping.get(type(v), self.cql_encode_object)(v) ) for k, v in six.iteritems(val)) @@ -211,14 +211,14 @@ def cql_encode_list_collection(self, val): Converts a sequence to a string of the form ``[item1, item2, ...]``. This is suitable for ``list`` type columns. """ - return '[ %s ]' % ' , '.join(self.mapping.get(type(v), self.cql_encode_object)(v) for v in val) + return '[%s]' % ', '.join(self.mapping.get(type(v), self.cql_encode_object)(v) for v in val) def cql_encode_set_collection(self, val): """ Converts a sequence to a string of the form ``{item1, item2, ...}``. This is suitable for ``set`` type columns. """ - return '{ %s }' % ' , '.join(self.mapping.get(type(v), self.cql_encode_object)(v) for v in val) + return '{%s}' % ', '.join(self.mapping.get(type(v), self.cql_encode_object)(v) for v in val) def cql_encode_all_types(self, val): """ diff --git a/tests/unit/test_parameter_binding.py b/tests/unit/test_parameter_binding.py index 044e032554..7da3ed939d 100644 --- a/tests/unit/test_parameter_binding.py +++ b/tests/unit/test_parameter_binding.py @@ -40,11 +40,11 @@ def test_bind_map(self): def test_sequence_param(self): result = bind_params("%s", (ValueSequence((1, "a", 2.0)),), Encoder()) - self.assertEqual(result, "( 1 , 'a' , 2.0 )") + self.assertEqual(result, "(1, 'a', 2.0)") def test_generator_param(self): result = bind_params("%s", ((i for i in xrange(3)),), Encoder()) - self.assertEqual(result, "[ 0 , 1 , 2 ]") + self.assertEqual(result, "[0, 1, 2]") def test_none_param(self): result = bind_params("%s", (None,), Encoder()) @@ -52,11 +52,11 @@ def test_none_param(self): def test_list_collection(self): result = bind_params("%s", (['a', 'b', 'c'],), Encoder()) - self.assertEqual(result, "[ 'a' , 'b' , 'c' ]") + self.assertEqual(result, "['a', 'b', 'c']") def test_set_collection(self): result = bind_params("%s", (set(['a', 'b']),), Encoder()) - self.assertIn(result, ("{ 'a' , 'b' }", "{ 'b' , 'a' }")) + self.assertIn(result, ("{'a', 'b'}", "{'b', 'a'}")) def test_map_collection(self): vals = OrderedDict() @@ -64,7 +64,7 @@ def test_map_collection(self): vals['b'] = 'b' vals['c'] = 'c' result = bind_params("%s", (vals,), Encoder()) - self.assertEqual(result, "{ 'a' : 'a' , 'b' : 'b' , 'c' : 'c' }") + self.assertEqual(result, "{'a': 'a', 'b': 'b', 'c': 'c'}") def test_quote_escaping(self): result = bind_params("%s", ("""'ef''ef"ef""ef'""",), Encoder()) From c6f59c1e754e7666121d36ac23c8e95da41dda0d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 6 Nov 2015 10:22:46 -0600 Subject: [PATCH 0188/2005] correct col type for compact value alias PYTHON-422 --- cassandra/metadata.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index ac189201e7..2e26b3367c 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1878,7 +1878,8 @@ def _build_table_metadata(self, row, col_rows=None, trigger_rows=None): if value_alias_rows: # CASSANDRA-8487 validator = types.lookup_casstype(value_alias_rows[0].get('validator')) - col = ColumnMetadata(table_meta, value_alias, validator) + cql_type = _cql_from_cass_type(validator) + col = ColumnMetadata(table_meta, value_alias, cql_type) if value_alias: # CASSANDRA-8487 table_meta.columns[value_alias] = col From ca959de212cae920c69cee19a80d41e82e68a1a2 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 3 Nov 2015 18:32:43 -0600 Subject: [PATCH 0189/2005] Adding a skip where needed, fixing issue with cross test pollution in cqlengine --- tests/integration/cqlengine/__init__.py | 16 +++++++----- .../integration/cqlengine/query/test_named.py | 26 ++++++++++--------- .../integration/cqlengine/test_consistency.py | 1 - .../standard/test_cython_protocol_handlers.py | 1 + 4 files changed, 24 insertions(+), 20 deletions(-) diff --git a/tests/integration/cqlengine/__init__.py b/tests/integration/cqlengine/__init__.py index cce715d7c6..e89c47789b 100644 --- a/tests/integration/cqlengine/__init__.py +++ b/tests/integration/cqlengine/__init__.py @@ -20,6 +20,7 @@ from cassandra.cqlengine.management import create_keyspace_simple, CQLENG_ALLOW_SCHEMA_MANAGEMENT from tests.integration import get_server_versions, use_single_node, PROTOCOL_VERSION +DEFAULT_KEYSPACE = 'cqlengine_test' def setup_package(): @@ -28,16 +29,17 @@ def setup_package(): use_single_node() - keyspace = 'cqlengine_test' - connection.setup(['127.0.0.1'], - consistency=ConsistencyLevel.ONE, - protocol_version=PROTOCOL_VERSION, - default_keyspace=keyspace) - - create_keyspace_simple(keyspace, 1) + setup_connection(DEFAULT_KEYSPACE) + create_keyspace_simple(DEFAULT_KEYSPACE, 1) def is_prepend_reversed(): # do we have https://issues.apache.org/jira/browse/CASSANDRA-8733 ? ver, _ = get_server_versions() return not (ver >= (2, 0, 13) or ver >= (2, 1, 3)) + +def setup_connection(keyspace_name): + connection.setup(['127.0.0.1'], + consistency=ConsistencyLevel.ONE, + protocol_version=PROTOCOL_VERSION, + default_keyspace=keyspace_name) \ No newline at end of file diff --git a/tests/integration/cqlengine/query/test_named.py b/tests/integration/cqlengine/query/test_named.py index 2cc88ec51d..60076ad834 100644 --- a/tests/integration/cqlengine/query/test_named.py +++ b/tests/integration/cqlengine/query/test_named.py @@ -24,6 +24,7 @@ from cassandra.concurrent import execute_concurrent_with_args from cassandra.cqlengine import models +from tests.integration.cqlengine import setup_connection from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration.cqlengine.query.test_queryset import BaseQuerySetUsage @@ -128,7 +129,7 @@ def setUpClass(cls): from tests.integration.cqlengine.query.test_queryset import TestModel - ks,tn = TestModel.column_family_name().split('.') + ks, tn = TestModel.column_family_name().split('.') cls.keyspace = NamedKeyspace(ks) cls.table = cls.keyspace.table(tn) @@ -149,8 +150,8 @@ def test_query_expression_count(self): def test_iteration(self): """ Tests that iterating over a query set pulls back all of the expected results """ q = self.table.objects(test_id=0) - #tuple of expected attempt_id, expected_result values - compare_set = set([(0,5), (1,10), (2,15), (3,20)]) + # tuple of expected attempt_id, expected_result values + compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)]) for t in q: val = t.attempt_id, t.expected_result assert val in compare_set @@ -160,8 +161,8 @@ def test_iteration(self): # test with regular filtering q = self.table.objects(attempt_id=3).allow_filtering() assert len(q) == 3 - #tuple of expected test_id, expected_result values - compare_set = set([(0,20), (1,20), (2,75)]) + # tuple of expected test_id, expected_result values + compare_set = set([(0, 20), (1, 20), (2, 75)]) for t in q: val = t.test_id, t.expected_result assert val in compare_set @@ -171,8 +172,8 @@ def test_iteration(self): # test with query method q = self.table.objects(self.table.column('attempt_id') == 3).allow_filtering() assert len(q) == 3 - #tuple of expected test_id, expected_result values - compare_set = set([(0,20), (1,20), (2,75)]) + # tuple of expected test_id, expected_result values + compare_set = set([(0, 20), (1, 20), (2, 75)]) for t in q: val = t.test_id, t.expected_result assert val in compare_set @@ -183,16 +184,16 @@ def test_multiple_iterations_work_properly(self): """ Tests that iterating over a query set more than once works """ # test with both the filtering method and the query method for q in (self.table.objects(test_id=0), self.table.objects(self.table.column('test_id') == 0)): - #tuple of expected attempt_id, expected_result values - compare_set = set([(0,5), (1,10), (2,15), (3,20)]) + # tuple of expected attempt_id, expected_result values + compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)]) for t in q: val = t.attempt_id, t.expected_result assert val in compare_set compare_set.remove(val) assert len(compare_set) == 0 - #try it again - compare_set = set([(0,5), (1,10), (2,15), (3,20)]) + # try it again + compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)]) for t in q: val = t.attempt_id, t.expected_result assert val in compare_set @@ -205,7 +206,7 @@ def test_multiple_iterators_are_isolated(self): """ for q in (self.table.objects(test_id=0), self.table.objects(self.table.column('test_id') == 0)): q = q.order_by('attempt_id') - expected_order = [0,1,2,3] + expected_order = [0, 1, 2, 3] iter1 = iter(q) iter2 = iter(q) for attempt_id in expected_order: @@ -280,6 +281,7 @@ def setUp(self): def tearDown(self): models.DEFAULT_KEYSPACE = self.default_keyspace + setup_connection(models.DEFAULT_KEYSPACE) def test_named_table_with_mv(self): """ diff --git a/tests/integration/cqlengine/test_consistency.py b/tests/integration/cqlengine/test_consistency.py index 174f620793..47e1661941 100644 --- a/tests/integration/cqlengine/test_consistency.py +++ b/tests/integration/cqlengine/test_consistency.py @@ -97,7 +97,6 @@ def test_blind_update(self): args = m.call_args self.assertEqual(CL.ALL, args[0][0].consistency_level) - def test_delete(self): # ensures we always carry consistency through on delete statements t = TestConsistencyModel.create(text="bacon and eggs") diff --git a/tests/integration/standard/test_cython_protocol_handlers.py b/tests/integration/standard/test_cython_protocol_handlers.py index f4e0a5a619..a61a1fc210 100644 --- a/tests/integration/standard/test_cython_protocol_handlers.py +++ b/tests/integration/standard/test_cython_protocol_handlers.py @@ -88,6 +88,7 @@ def test_numpy_parser(self): self.assertFalse(result.has_more_pages) self._verify_numpy_page(result[0]) + @notprotocolv1 @numpytest def test_numpy_results_paged(self): """ From 42dcf04479d53f71b3c1142748e9767885ee3ecc Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 5 Nov 2015 11:57:32 -0600 Subject: [PATCH 0190/2005] Removing long tests and setting ccm dependency explicitly --- build.yaml | 3 --- test-requirements.txt | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/build.yaml b/build.yaml index b31eac835d..1d729fcbbd 100644 --- a/build.yaml +++ b/build.yaml @@ -4,7 +4,6 @@ python: os: - ubuntu/trusty64 cassandra: - - 1.2 - 2.0 - 2.1 - 2.2 @@ -43,7 +42,5 @@ build: echo "==========RUNNING INTEGRATION TESTS==========" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true - echo "==========RUNNING LONG INTEGRATION TESTS==========" - CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true - xunit: - "*_results.xml" \ No newline at end of file diff --git a/test-requirements.txt b/test-requirements.txt index f3cc411c24..aa1aaa0584 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -3,7 +3,7 @@ blist scales nose mock<=1.0.1 -ccm>=2.0 +ccm==2.0.4.1 unittest2 PyYAML pytz From 76712df0ce0a34e820f8bfe7539bd6ebf9a6ff5e Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 5 Nov 2015 12:03:02 -0600 Subject: [PATCH 0191/2005] removing 3.0 from run configuraiton --- build.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/build.yaml b/build.yaml index 1d729fcbbd..3ec06294e2 100644 --- a/build.yaml +++ b/build.yaml @@ -7,7 +7,6 @@ cassandra: - 2.0 - 2.1 - 2.2 - - 3.0 env: EVENT_LOOP_MANAGER: - libev From c91615a931ae501a8c94c731fc4c983c79b8be5f Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 5 Nov 2015 17:21:47 -0600 Subject: [PATCH 0192/2005] First Pass at hardening test harness, against leaky session and adding and removal issues --- tests/integration/__init__.py | 54 +++++++++++++---- tests/integration/standard/test_cluster.py | 6 +- .../standard/test_custom_protocol_handler.py | 5 +- .../standard/test_cython_protocol_handlers.py | 5 +- tests/integration/standard/test_metadata.py | 6 +- tests/integration/standard/test_metrics.py | 58 ++++++++----------- 6 files changed, 76 insertions(+), 58 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 7cdb7143f1..8a87381395 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -22,7 +22,7 @@ from subprocess import call from itertools import groupby -from cassandra import OperationTimedOut, ReadTimeout, ReadFailure, WriteTimeout, WriteFailure +from cassandra import OperationTimedOut, ReadTimeout, ReadFailure, WriteTimeout, WriteFailure, AlreadyExists from cassandra.cluster import Cluster from cassandra.protocol import ConfigurationException @@ -262,7 +262,8 @@ def execute_until_pass(session, query): while tries < 100: try: return session.execute(query) - except ConfigurationException: + except (ConfigurationException, AlreadyExists): + log.warn("Recieved already exists from query {0} not exiting".format(query)) # keyspace/table was already created/dropped return except (OperationTimedOut, ReadTimeout, ReadFailure, WriteTimeout, WriteFailure): @@ -274,6 +275,36 @@ def execute_until_pass(session, query): raise RuntimeError("Failed to execute query after 100 attempts: {0}".format(query)) +def execute_with_long_wait_retry(session, query, timeout=30): + tries = 0 + while tries < 10: + try: + return session.execute(query, timeout=timeout) + except (ConfigurationException, AlreadyExists): + log.warn("Recieved already exists from query {0} not exiting".format(query)) + # keyspace/table was already created/dropped + return + except (OperationTimedOut, ReadTimeout, ReadFailure, WriteTimeout, WriteFailure): + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb + tries += 1 + + raise RuntimeError("Failed to execute query after 100 attempts: {0}".format(query)) + + +def drop_keyspace_shutdown_cluster(keyspace_name, session, cluster): + try: + execute_with_long_wait_retry(session, "DROP KEYSPACE {0}".format(keyspace_name)) + except: + log.warn("Error encountered when droping keyspace {0}".format(keyspace_name)) + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb + log.warn("Shutting down cluster") + cluster.shutdown() + + def setup_keyspace(ipformat=None): # wait for nodes to startup time.sleep(10) @@ -292,23 +323,23 @@ def setup_keyspace(ipformat=None): ddl = ''' CREATE KEYSPACE test3rf WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '3'}''' - execute_until_pass(session, ddl) + execute_with_long_wait_retry(session, ddl) ddl = ''' CREATE KEYSPACE test2rf WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '2'}''' - execute_until_pass(session, ddl) + execute_with_long_wait_retry(session, ddl) ddl = ''' CREATE KEYSPACE test1rf WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}''' - execute_until_pass(session, ddl) + execute_with_long_wait_retry(session, ddl) ddl = ''' CREATE TABLE test3rf.test ( k int PRIMARY KEY, v int )''' - execute_until_pass(session, ddl) + execute_with_long_wait_retry(session, ddl) except Exception: traceback.print_exc() @@ -356,12 +387,12 @@ def function_table_name(self): @classmethod def drop_keyspace(cls): - execute_until_pass(cls.session, "DROP KEYSPACE {0}".format(cls.ks_name)) + execute_with_long_wait_retry(cls.session, "DROP KEYSPACE {0}".format(cls.ks_name)) @classmethod def create_keyspace(cls, rf): ddl = "CREATE KEYSPACE {0} WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': '{1}'}}".format(cls.ks_name, rf) - execute_until_pass(cls.session, ddl) + execute_with_long_wait_retry(cls.session, ddl) @classmethod def common_setup(cls, rf, create_class_table=False, skip_if_cass_version_less_than=None): @@ -369,6 +400,7 @@ def common_setup(cls, rf, create_class_table=False, skip_if_cass_version_less_th cls.session = cls.cluster.connect() cls.ks_name = cls.__name__.lower() cls.create_keyspace(rf) + cls.cass_version = get_server_versions() if create_class_table: @@ -401,8 +433,7 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - cls.drop_keyspace() - cls.cluster.shutdown() + drop_keyspace_shutdown_cluster(cls.keyspace_name, cls.session, cls.cluster) class BasicSharedKeyspaceUnitTestCaseWTable(BasicSharedKeyspaceUnitTestCase): @@ -479,5 +510,4 @@ def setUp(self): self.common_setup(1) def tearDown(self): - self.drop_keyspace() - self.cluster.shutdown() + drop_keyspace_shutdown_cluster(self.keyspace_name, self.session, self.cluster) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index fb5ccf6f82..9baaf9fb78 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -31,7 +31,7 @@ from cassandra.protocol import MAX_SUPPORTED_VERSION from cassandra.query import SimpleStatement, TraceUnavailable -from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, get_node, CASSANDRA_VERSION, execute_until_pass +from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, get_node, CASSANDRA_VERSION, execute_until_pass, execute_with_long_wait_retry from tests.integration.util import assert_quiescent_pool_state @@ -80,7 +80,7 @@ def test_basic(self): """) self.assertFalse(result) - result = execute_until_pass(session, + result = execute_with_long_wait_retry(session, """ CREATE TABLE clustertests.cf0 ( a text, @@ -100,7 +100,7 @@ def test_basic(self): result = session.execute("SELECT * FROM clustertests.cf0") self.assertEqual([('a', 'b', 'c')], result) - execute_until_pass(session, "DROP KEYSPACE clustertests") + execute_with_long_wait_retry(session, "DROP KEYSPACE clustertests") cluster.shutdown() diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index ce16e78c79..1427b3b09b 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -20,7 +20,7 @@ from cassandra.protocol import ProtocolHandler, ResultMessage, UUIDType, read_int, EventMessage from cassandra.query import tuple_factory from cassandra.cluster import Cluster -from tests.integration import use_singledc, PROTOCOL_VERSION, execute_until_pass +from tests.integration import use_singledc, PROTOCOL_VERSION, drop_keyspace_shutdown_cluster from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES from tests.integration.standard.utils import create_table_with_all_types, get_all_primitive_params from six import binary_type @@ -44,8 +44,7 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - cls.session.execute("DROP KEYSPACE custserdes") - cls.cluster.shutdown() + drop_keyspace_shutdown_cluster("custserdes", cls.session, cls.cluster) def test_custom_raw_uuid_row_results(self): """ diff --git a/tests/integration/standard/test_cython_protocol_handlers.py b/tests/integration/standard/test_cython_protocol_handlers.py index a61a1fc210..7532426e5c 100644 --- a/tests/integration/standard/test_cython_protocol_handlers.py +++ b/tests/integration/standard/test_cython_protocol_handlers.py @@ -11,7 +11,7 @@ from cassandra.cluster import Cluster from cassandra.protocol import ProtocolHandler, LazyProtocolHandler, NumpyProtocolHandler -from tests.integration import use_singledc, PROTOCOL_VERSION, notprotocolv1 +from tests.integration import use_singledc, PROTOCOL_VERSION, notprotocolv1, drop_keyspace_shutdown_cluster from tests.integration.datatype_utils import update_datatypes from tests.integration.standard.utils import ( create_table_with_all_types, get_all_primitive_params, get_primitive_datatypes) @@ -38,8 +38,7 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - cls.session.execute("DROP KEYSPACE testspace") - cls.cluster.shutdown() + drop_keyspace_shutdown_cluster("testspace", cls.session, cls.session) @cythontest def test_cython_parser(self): diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index b2d8fc1ace..052d831e09 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -34,7 +34,7 @@ from cassandra.pool import Host from tests.integration import get_cluster, use_singledc, PROTOCOL_VERSION, get_server_versions, execute_until_pass, \ - BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase + BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, drop_keyspace_shutdown_cluster def setup_module(): @@ -1863,7 +1863,6 @@ class BadMetaTest(unittest.TestCase): class BadMetaException(Exception): pass - @property def function_name(self): return self._testMethodName.lower() @@ -1880,8 +1879,7 @@ def setup_class(cls): @classmethod def teardown_class(cls): - cls.session.execute("DROP KEYSPACE %s" % cls.keyspace_name) - cls.cluster.shutdown() + drop_keyspace_shutdown_cluster(cls.keyspace_name, cls.session, cls.cluster) def _skip_if_not_version(self, version): if CASS_SERVER_VERSION < version: diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index b2a046bae6..70da6710fb 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -32,18 +32,23 @@ def setup_module(): class MetricsTests(unittest.TestCase): + def setUp(self): + self.cluster = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION) + self.session = self.cluster.connect("test3rf") + + def tearDown(self): + self.cluster.shutdown() + def test_connection_error(self): """ Trigger and ensure connection_errors are counted Stop all node with the driver knowing about the "DOWN" states. """ - cluster = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION) - session = cluster.connect("test3rf") # Test writes for i in range(0, 100): - session.execute_async("INSERT INTO test (k, v) VALUES ({0}, {1})".format(i, i)) + self.session.execute_async("INSERT INTO test (k, v) VALUES ({0}, {1})".format(i, i)) # Stop the cluster get_cluster().stop(wait=True, gently=False) @@ -52,14 +57,13 @@ def test_connection_error(self): # Ensure the nodes are actually down query = SimpleStatement("SELECT * FROM test", consistency_level=ConsistencyLevel.ALL) with self.assertRaises(NoHostAvailable): - session.execute(query) + self.session.execute(query) finally: get_cluster().start(wait_for_binary_proto=True, wait_other_notice=True) # Give some time for the cluster to come back up, for the next test time.sleep(5) - self.assertGreater(cluster.metrics.stats.connection_errors, 0) - cluster.shutdown() + self.assertGreater(self.cluster.metrics.stats.connection_errors, 0) def test_write_timeout(self): """ @@ -68,15 +72,12 @@ def test_write_timeout(self): Attempt a write at cl.ALL and receive a WriteTimeout. """ - cluster = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION) - session = cluster.connect("test3rf") - # Test write - session.execute("INSERT INTO test (k, v) VALUES (1, 1)") + self.session.execute("INSERT INTO test (k, v) VALUES (1, 1)") # Assert read query = SimpleStatement("SELECT * FROM test WHERE k=1", consistency_level=ConsistencyLevel.ALL) - results = execute_until_pass(session, query) + results = execute_until_pass(self.session, query) self.assertTrue(results) # Pause node so it shows as unreachable to coordinator @@ -86,14 +87,12 @@ def test_write_timeout(self): # Test write query = SimpleStatement("INSERT INTO test (k, v) VALUES (2, 2)", consistency_level=ConsistencyLevel.ALL) with self.assertRaises(WriteTimeout): - session.execute(query, timeout=None) - self.assertEqual(1, cluster.metrics.stats.write_timeouts) + self.session.execute(query, timeout=None) + self.assertEqual(1, self.cluster.metrics.stats.write_timeouts) finally: get_node(1).resume() - cluster.shutdown() - def test_read_timeout(self): """ Trigger and ensure read_timeouts are counted @@ -101,15 +100,13 @@ def test_read_timeout(self): Attempt a read at cl.ALL and receive a ReadTimeout. """ - cluster = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION) - session = cluster.connect("test3rf") # Test write - session.execute("INSERT INTO test (k, v) VALUES (1, 1)") + self.session.execute("INSERT INTO test (k, v) VALUES (1, 1)") # Assert read query = SimpleStatement("SELECT * FROM test WHERE k=1", consistency_level=ConsistencyLevel.ALL) - results = execute_until_pass(session, query) + results = execute_until_pass(self.session, query) self.assertTrue(results) # Pause node so it shows as unreachable to coordinator @@ -119,14 +116,12 @@ def test_read_timeout(self): # Test read query = SimpleStatement("SELECT * FROM test", consistency_level=ConsistencyLevel.ALL) with self.assertRaises(ReadTimeout): - session.execute(query, timeout=None) - self.assertEqual(1, cluster.metrics.stats.read_timeouts) + self.session.execute(query, timeout=None) + self.assertEqual(1, self.cluster.metrics.stats.read_timeouts) finally: get_node(1).resume() - cluster.shutdown() - def test_unavailable(self): """ Trigger and ensure unavailables are counted @@ -134,15 +129,12 @@ def test_unavailable(self): Attempt an insert/read at cl.ALL and receive a Unavailable Exception. """ - cluster = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION) - session = cluster.connect("test3rf") - # Test write - session.execute("INSERT INTO test (k, v) VALUES (1, 1)") + self.session.execute("INSERT INTO test (k, v) VALUES (1, 1)") # Assert read query = SimpleStatement("SELECT * FROM test WHERE k=1", consistency_level=ConsistencyLevel.ALL) - results = execute_until_pass(session, query) + results = execute_until_pass(self.session, query) self.assertTrue(results) # Stop node gracefully @@ -152,20 +144,20 @@ def test_unavailable(self): # Test write query = SimpleStatement("INSERT INTO test (k, v) VALUES (2, 2)", consistency_level=ConsistencyLevel.ALL) with self.assertRaises(Unavailable): - session.execute(query) - self.assertEqual(1, cluster.metrics.stats.unavailables) + self.session.execute(query) + self.assertEqual(1, self.cluster.metrics.stats.unavailables) # Test write query = SimpleStatement("SELECT * FROM test", consistency_level=ConsistencyLevel.ALL) with self.assertRaises(Unavailable): - session.execute(query, timeout=None) - self.assertEqual(2, cluster.metrics.stats.unavailables) + self.session.execute(query, timeout=None) + self.assertEqual(2, self.cluster.metrics.stats.unavailables) finally: get_node(1).start(wait_other_notice=True, wait_for_binary_proto=True) # Give some time for the cluster to come back up, for the next test time.sleep(5) - cluster.shutdown() + self.cluster.shutdown() # def test_other_error(self): # # TODO: Bootstrapping or Overloaded cases From 4c0ce059a35d64fbaff32c2270f175c5ad496fe3 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 5 Nov 2015 17:30:52 -0600 Subject: [PATCH 0193/2005] Changing PreparedStatementTests over to use common setup and teardown methods to prevent session leaks --- .../standard/test_prepared_statements.py | 151 ++++++------------ 1 file changed, 52 insertions(+), 99 deletions(-) diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index b0be36147c..854caee9cd 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -36,21 +36,26 @@ class PreparedStatementTests(unittest.TestCase): def setUpClass(cls): cls.cass_version = get_server_versions() + def setUp(self): + self.cluster = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION) + self.session = self.cluster.connect() + + def tearDown(self): + self.cluster.shutdown() + def test_basic(self): """ Test basic PreparedStatement usage """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - session.execute( + self.session.execute( """ CREATE KEYSPACE preparedtests WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} """) - session.set_keyspace("preparedtests") - session.execute( + self.session.set_keyspace("preparedtests") + self.session.execute( """ CREATE TABLE cf0 ( a text, @@ -60,7 +65,7 @@ def test_basic(self): ) """) - prepared = session.prepare( + prepared = self.session.prepare( """ INSERT INTO cf0 (a, b, c) VALUES (?, ?, ?) """) @@ -68,20 +73,20 @@ def test_basic(self): self.assertIsInstance(prepared, PreparedStatement) bound = prepared.bind(('a', 'b', 'c')) - session.execute(bound) + self.session.execute(bound) - prepared = session.prepare( + prepared = self.session.prepare( """ SELECT * FROM cf0 WHERE a=? """) self.assertIsInstance(prepared, PreparedStatement) bound = prepared.bind(('a')) - results = session.execute(bound) + results = self.session.execute(bound) self.assertEqual(results, [('a', 'b', 'c')]) # test with new dict binding - prepared = session.prepare( + prepared = self.session.prepare( """ INSERT INTO cf0 (a, b, c) VALUES (?, ?, ?) """) @@ -93,9 +98,9 @@ def test_basic(self): 'c': 'z' }) - session.execute(bound) + self.session.execute(bound) - prepared = session.prepare( + prepared = self.session.prepare( """ SELECT * FROM cf0 WHERE a=? """) @@ -103,22 +108,16 @@ def test_basic(self): self.assertIsInstance(prepared, PreparedStatement) bound = prepared.bind({'a': 'x'}) - results = session.execute(bound) + results = self.session.execute(bound) self.assertEqual(results, [('x', 'y', 'z')]) - cluster.shutdown() - def test_missing_primary_key(self): """ Ensure an InvalidRequest is thrown when prepared statements are missing the primary key """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - self._run_missing_primary_key(session) - - cluster.shutdown() + self._run_missing_primary_key(self.session) def _run_missing_primary_key(self, session): statement_to_prepare = """INSERT INTO test3rf.test (v) VALUES (?)""" @@ -137,11 +136,7 @@ def test_missing_primary_key_dicts(self): when prepared statements are missing the primary key with dict bindings """ - - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - self._run_missing_primary_key_dicts(session) - cluster.shutdown() + self._run_missing_primary_key_dicts(self.session) def _run_missing_primary_key_dicts(self, session): statement_to_prepare = """ INSERT INTO test3rf.test (v) VALUES (?)""" @@ -158,10 +153,7 @@ def test_too_many_bind_values(self): """ Ensure a ValueError is thrown when attempting to bind too many variables """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - self._run_too_many_bind_values(session) - cluster.shutdown() + self._run_too_many_bind_values(self.session) def _run_too_many_bind_values(self, session): statement_to_prepare = """ INSERT INTO test3rf.test (v) VALUES (?)""" @@ -179,10 +171,7 @@ def test_imprecise_bind_values_dicts(self): with dict bindings """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - - prepared = session.prepare( + prepared = self.session.prepare( """ INSERT INTO test3rf.test (k, v) VALUES (?, ?) """) @@ -208,37 +197,30 @@ def test_imprecise_bind_values_dicts(self): # post v4, the driver attempts to use UNSET_VALUE for unspecified keys self.assertRaises(ValueError, prepared.bind, {}) - cluster.shutdown() - def test_none_values(self): """ Ensure binding None is handled correctly """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - - prepared = session.prepare( + prepared = self.session.prepare( """ INSERT INTO test3rf.test (k, v) VALUES (?, ?) """) self.assertIsInstance(prepared, PreparedStatement) bound = prepared.bind((1, None)) - session.execute(bound) + self.session.execute(bound) - prepared = session.prepare( + prepared = self.session.prepare( """ SELECT * FROM test3rf.test WHERE k=? """) self.assertIsInstance(prepared, PreparedStatement) bound = prepared.bind((1,)) - results = session.execute(bound) + results = self.session.execute(bound) self.assertEqual(results[0].v, None) - cluster.shutdown() - def test_unset_values(self): """ Test to validate that UNSET_VALUEs are bound, and have the expected effect @@ -256,13 +238,10 @@ def test_unset_values(self): if PROTOCOL_VERSION < 4: raise unittest.SkipTest("Binding UNSET values is not supported in protocol version < 4") - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - # table with at least two values so one can be used as a marker - session.execute("CREATE TABLE IF NOT EXISTS test1rf.test_unset_values (k int PRIMARY KEY, v0 int, v1 int)") - insert = session.prepare("INSERT INTO test1rf.test_unset_values (k, v0, v1) VALUES (?, ?, ?)") - select = session.prepare("SELECT * FROM test1rf.test_unset_values WHERE k=?") + self.session.execute("CREATE TABLE IF NOT EXISTS test1rf.test_unset_values (k int PRIMARY KEY, v0 int, v1 int)") + insert = self.session.prepare("INSERT INTO test1rf.test_unset_values (k, v0, v1) VALUES (?, ?, ?)") + select = self.session.prepare("SELECT * FROM test1rf.test_unset_values WHERE k=?") bind_expected = [ # initial condition @@ -281,127 +260,105 @@ def test_unset_values(self): ] for params, expected in bind_expected: - session.execute(insert, params) - results = session.execute(select, (0,)) + self.session.execute(insert, params) + results = self.session.execute(select, (0,)) self.assertEqual(results[0], expected) - self.assertRaises(ValueError, session.execute, select, (UNSET_VALUE, 0, 0)) - - cluster.shutdown() + self.assertRaises(ValueError, self.session.execute, select, (UNSET_VALUE, 0, 0)) def test_no_meta(self): - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - prepared = session.prepare( + prepared = self.session.prepare( """ INSERT INTO test3rf.test (k, v) VALUES (0, 0) """) self.assertIsInstance(prepared, PreparedStatement) bound = prepared.bind(None) - session.execute(bound) + self.session.execute(bound) - prepared = session.prepare( + prepared = self.session.prepare( """ SELECT * FROM test3rf.test WHERE k=0 """) self.assertIsInstance(prepared, PreparedStatement) bound = prepared.bind(None) - results = session.execute(bound) + results = self.session.execute(bound) self.assertEqual(results[0].v, 0) - cluster.shutdown() - def test_none_values_dicts(self): """ Ensure binding None is handled correctly with dict bindings """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - # test with new dict binding - prepared = session.prepare( + prepared = self.session.prepare( """ INSERT INTO test3rf.test (k, v) VALUES (?, ?) """) self.assertIsInstance(prepared, PreparedStatement) bound = prepared.bind({'k': 1, 'v': None}) - session.execute(bound) + self.session.execute(bound) - prepared = session.prepare( + prepared = self.session.prepare( """ SELECT * FROM test3rf.test WHERE k=? """) self.assertIsInstance(prepared, PreparedStatement) bound = prepared.bind({'k': 1}) - results = session.execute(bound) + results = self.session.execute(bound) self.assertEqual(results[0].v, None) - cluster.shutdown() - def test_async_binding(self): """ Ensure None binding over async queries """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - - prepared = session.prepare( + prepared = self.session.prepare( """ INSERT INTO test3rf.test (k, v) VALUES (?, ?) """) self.assertIsInstance(prepared, PreparedStatement) - future = session.execute_async(prepared, (873, None)) + future = self.session.execute_async(prepared, (873, None)) future.result() - prepared = session.prepare( + prepared = self.session.prepare( """ SELECT * FROM test3rf.test WHERE k=? """) self.assertIsInstance(prepared, PreparedStatement) - future = session.execute_async(prepared, (873,)) + future = self.session.execute_async(prepared, (873,)) results = future.result() self.assertEqual(results[0].v, None) - cluster.shutdown() - def test_async_binding_dicts(self): """ Ensure None binding over async queries with dict bindings """ - - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - - prepared = session.prepare( + prepared = self.session.prepare( """ INSERT INTO test3rf.test (k, v) VALUES (?, ?) """) self.assertIsInstance(prepared, PreparedStatement) - future = session.execute_async(prepared, {'k': 873, 'v': None}) + future = self.session.execute_async(prepared, {'k': 873, 'v': None}) future.result() - prepared = session.prepare( + prepared = self.session.prepare( """ SELECT * FROM test3rf.test WHERE k=? """) self.assertIsInstance(prepared, PreparedStatement) - future = session.execute_async(prepared, {'k': 873}) + future = self.session.execute_async(prepared, {'k': 873}) results = future.result() self.assertEqual(results[0].v, None) - cluster.shutdown() - def test_raise_error_on_prepared_statement_execution_dropped_table(self): """ test for error in executing prepared statement on a dropped table @@ -418,14 +375,10 @@ def test_raise_error_on_prepared_statement_execution_dropped_table(self): @test_category prepared_statements """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect("test3rf") - session.execute("CREATE TABLE error_test (k int PRIMARY KEY, v int)") - prepared = session.prepare("SELECT * FROM error_test WHERE k=?") - session.execute("DROP TABLE error_test") + self.session.execute("CREATE TABLE test3rf.error_test (k int PRIMARY KEY, v int)") + prepared = self.session.prepare("SELECT * FROM test3rf.error_test WHERE k=?") + self.session.execute("DROP TABLE test3rf.error_test") with self.assertRaises(InvalidRequest): - session.execute(prepared, [0]) - - cluster.shutdown() \ No newline at end of file + self.session.execute(prepared, [0]) From 43cc46710dadcadf9e279343c1fa954ab1d42437 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 5 Nov 2015 21:09:36 -0600 Subject: [PATCH 0194/2005] Expanding test cases in build.yaml --- build.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/build.yaml b/build.yaml index 3ec06294e2..b31eac835d 100644 --- a/build.yaml +++ b/build.yaml @@ -4,9 +4,11 @@ python: os: - ubuntu/trusty64 cassandra: + - 1.2 - 2.0 - 2.1 - 2.2 + - 3.0 env: EVENT_LOOP_MANAGER: - libev @@ -41,5 +43,7 @@ build: echo "==========RUNNING INTEGRATION TESTS==========" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true + echo "==========RUNNING LONG INTEGRATION TESTS==========" + CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true - xunit: - "*_results.xml" \ No newline at end of file From 3e4ecf1b583d7c60ec9bfafe9b2c59511fc120d4 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 6 Nov 2015 11:51:03 -0600 Subject: [PATCH 0195/2005] Changing ccm to use installed version of ccm, rather then pypy --- build.yaml | 1 + test-requirements.txt | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/build.yaml b/build.yaml index b31eac835d..42d64f4d0b 100644 --- a/build.yaml +++ b/build.yaml @@ -20,6 +20,7 @@ build: export JAVA_HOME=$CCM_JAVA_HOME export PATH=$JAVA_HOME/bin:$PATH + sudo python /home/jenkins/ccm/setup.py install # Install dependencies if [[ $EVENT_LOOP_MANAGER == 'libev' ]]; then sudo apt-get install -y libev4 libev-dev diff --git a/test-requirements.txt b/test-requirements.txt index aa1aaa0584..f3cc411c24 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -3,7 +3,7 @@ blist scales nose mock<=1.0.1 -ccm==2.0.4.1 +ccm>=2.0 unittest2 PyYAML pytz From badc600d8a3449a098116b57e3c3f7ac2c7525e1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 6 Nov 2015 13:21:58 -0600 Subject: [PATCH 0196/2005] docstrings for ResultSet methods --- cassandra/cluster.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index ffc563e194..530f47ba95 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3286,6 +3286,10 @@ def has_more_pages(self): @property def current_rows(self): + """ + :return: the list of current page rows. May be empty if the result was empty, + or this is the last page. + """ return self._current_rows or [] def __iter__(self): @@ -3311,6 +3315,11 @@ def next(self): __next__ = next def fetch_next_page(self): + """ + Manually, synchronously fetch the next page. Supplied for manually retrieving pages + and inspecting :meth:`~.current_page`. It is not necessary to call this when iterating + through results; paging happens implicitly in iteration. + """ if self.response_future.has_more_pages: self.response_future.start_fetching_next_page() result = self.response_future.result() From 78635fa7b47d4e099ad3b9932cb12129a8f82f0e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 6 Nov 2015 13:22:29 -0600 Subject: [PATCH 0197/2005] pre 3.0c1 changelog update --- CHANGELOG.rst | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f0efa299ec..08a492253a 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,25 @@ +3.0.0c1 +======= + +Features +-------- +* Process Modernized Schema Tables for Cassandra 3.0 (PYTHON-276, PYTHON-408, PYTHON-400, PYTHON-422) +* Remove deprecated features (PYTHON-292) +* Don't assign trace data to Statements (PYTHON-318) +* Normalize results return (PYTHON-368) +* Process Materialized View Metadata/Events (PYTHON-371) +* Remove blist as soft dependency (PYTHON-385) +* Change default consistency level to LOCAL_QUORUM (PYTHON-416) +* Normalize CQL query/export in metadata model (PYTHON-405) + +Bug Fixes +--------- +* Implementation of named arguments bind is non-pythonic (PYTHON-178) +* CQL encoding is incorrect for NaN and Infinity floats (PYTHON-282) +* Protocol downgrade issue with C* 2.0.x, 2.1.x, and python3, with non-default logging (PYTHON-409) +* ValueError when accessing usertype with non-alphanumeric field names (PYTHON-413) +* NumpyProtocolHandler does not play well with PagedResult (PYTHON-430) + 2.7.2 ===== Setpember 14, 2015 From 6e6d3d8aabb77bc459bc10e7923a298c1ef16d07 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 6 Nov 2015 13:43:42 -0600 Subject: [PATCH 0198/2005] Remove overlooked ColumnMetadata.index attr PYTHON-276 --- cassandra/metadata.py | 8 -------- tests/integration/standard/test_metadata.py | 2 -- 2 files changed, 10 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 2e26b3367c..51d9768539 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1268,13 +1268,6 @@ class ColumnMetadata(object): The CQL type for the column. """ - # TODO: probably remove this in favor of the table level mapping - index = None - """ - If an index exists on this column, this is an instance of - :class:`.IndexMetadata`, otherwise :const:`None`. - """ - is_static = False """ If this column is static (available in Cassandra 2.1+), this will @@ -1889,7 +1882,6 @@ def _build_table_metadata(self, row, col_rows=None, trigger_rows=None): if column_meta.name: table_meta.columns[column_meta.name] = column_meta index_meta = self._build_index_metadata(column_meta, col_row) - column_meta.index = index_meta if index_meta: table_meta.indexes[index_meta.name] = index_meta diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index b2d8fc1ace..fb20aeca30 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -2126,7 +2126,6 @@ def compare_columns(a, b, name): self.assertEquals(a.name, b.name) self.assertEquals(a.table, b.table) self.assertEquals(a.cql_type, b.cql_type) - self.assertEquals(a.index, b.index) self.assertEquals(a.is_static, b.is_static) self.assertEquals(a.is_reversed, b.is_reversed) @@ -2327,7 +2326,6 @@ def test_metadata_with_quoted_identifiers(self): self.assertEquals(cluster_column.name, 'the;Clustering') self.assertEquals(cluster_column.name, mv.clustering_key[0].name) self.assertEquals(cluster_column.table, mv.clustering_key[0].table) - self.assertEquals(cluster_column.index, mv.clustering_key[0].index) self.assertEquals(cluster_column.is_static, mv.clustering_key[0].is_static) self.assertEquals(cluster_column.is_reversed, mv.clustering_key[0].is_reversed) From ebbe4198fbba05c8d23101819bcd4b5557a9a831 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 6 Nov 2015 14:04:39 -0600 Subject: [PATCH 0199/2005] 3.0 upgrade guide --- docs/upgrading.rst | 116 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 116 insertions(+) diff --git a/docs/upgrading.rst b/docs/upgrading.rst index 06c29c25ed..686a7dee7d 100644 --- a/docs/upgrading.rst +++ b/docs/upgrading.rst @@ -4,6 +4,122 @@ Upgrading .. toctree:: :maxdepth: 1 +Upgrading to 3.0 +---------------- +Version 3.0 of the DataStax Python driver for Apache Cassandra +adds support for Cassandra 3.0 while maintaining support for +all earlier versions. In addition to substantial internal rework, +there are several updates to the API that integrators will need +to consider: + +Default consistency is now ``LOCAL_QUORUM`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Previous value was ``ONE``. ``ONE`` (or ``LOCAL_ONE``) is still appropriate for +many applications, but the new default was chosen to be least surprising for +new users not specifying a consistency level. + +Execution API Updates +^^^^^^^^^^^^^^^^^^^^^ +Result return normalization +~~~~~~~~~~~~~~~~~~~~~~~~~~~ +`PYTHON-368 `_ +Previously results would be returned as a ``list`` of rows for result rows +up to ``fetch_size``, and ``PagedResult`` afterward. This could break +application code that assumed one type and got another. + +Now, all results are returned as an iterable :class:`~.ResultSet`. + +The preferred way to consume results of unknown size is to iterate through +them, letting automatic paging occur as they are consumed. + +.. code-block:: python + + results = session.execute("SELECT * FROM system.local") + for row in results: + process(row) + +If the expected size of the results is known, it is still possible to +materialize a list using the iterator: + +.. code-block:: python + + results = session.execute("SELECT * FROM system.local") + row_list = list(results) + +For backward compatability, :class:`~.ResultSet` supports indexing. If +the result is paged, all pages will be materialized. A warning will +be logged if a paged query is implicitly materialized. + +Trace information is not attached to executed Statements +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +`PYTHON-318 `_ +Previously trace data was attached to Statements if tracing was enabled. This +could lead to confusion if the same statement was used for multiple executions. + +Now, trace data is associated with the ``ResponseFuture`` and ``ResultSet`` +returned for each query: + +:meth:`.ResponseFuture.get_query_trace()` + +:meth:`.ResponseFuture.get_all_query_traces()` + +:meth:`.ResultSet.get_query_trace()` + +:meth:`.ResultSet.get_all_query_traces()` + +Binding named parameters now ignores extra names +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +`PYTHON-178 `_ +Previously, :meth:`.BoundStatement.bind()` would raise if a mapping +was passed with extra names not found in the prepared statement. + +Behavior in 3.0+ is to ignore extra names. + +blist removed as soft dependency +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +`PYTHON-385 `_ +Previously the driver had a soft dependency on ``blist sortedset``, using +that where available and using an internal fallback where possible. + +Now, the driver never chooses the ``blist`` variant, instead returning the +internal :class:`.util.SortedSet` for all ``set`` results. The class implements +all standard set operations, so no integration code should need to change unless +it explicitly checks for ``sortedset`` type. + +Metadata API Updates +^^^^^^^^^^^^^^^^^^^^ +`PYTHON-276 `_, `PYTHON-408 `_, `PYTHON-400 `_, `PYTHON-422 `_ + +Cassandra 3.0 brought a substantial overhaul to the internal schema metadata representation. +This version of the driver supports that metadata in addition to the legacy version. Doing so +also brought some changes to the metadata model. + +The present API is documented: :any:`cassandra.metadata`. Changes highlighted below: + +* All types are now exposed as CQL types instead of types derived from the internal server implementation +* Some metadata attributes have changed names to match current nomenclature (for example, :attr:`.Index.kind` in place of ``Index.type``). +* Some metadata attributes removed + + * ``TableMetadata.keyspace`` reference replaced with :attr:`.TableMetadata.keyspace_name` + * ``ColumnMetadata.index`` is removed table- and keyspace-level mappings are still maintained + +Several deprecated features are removed +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +`PYTHON-292 `_ + +* ``ResponseFuture.result`` timeout parameter is removed, use ``Session.execute`` timeout instead (`031ebb0 `_) +* ``Cluster.refresh_schema`` removed, use ``Cluster.refresh_*_metadata`` instead (`419fcdf `_) +* ``Cluster.submit_schema_refresh`` removed (`574266d `_) +* ``cqltypes`` time/date functions removed, use ``util`` entry points instead (`bb984ee `_) +* ``decoder`` module removed (`e16a073 `_) +* ``TableMetadata.keyspace`` attribute replaced with ``keyspace_name`` (`cc94073 `_) +* ``cqlengine.columns.TimeUUID.from_datetime`` removed, use ``util`` variant instead (`96489cc `_) +* ``cqlengine.columns.Float(double_precision)`` parameter removed, use ``columns.Double`` instead (`a2d3a98 `_) +* ``cqlengine`` keyspace management functions are removed in favor of the strategy-specific entry points (`4bd5909 `_) +* ``cqlengine.Model.__polymorphic_*__`` attributes removed, use ``__discriminator*`` attributes instead (`9d98c8e `_) +* ``cqlengine.statements`` will no longer warn about list list prepend behavior (`79efe97 `_) + + Upgrading to 2.1 from 2.0 ------------------------- Version 2.1 of the DataStax Python driver for Apache Cassandra From 9536eee7b633afb632c1e5ba72db0bedf9b60ae3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 6 Nov 2015 14:27:05 -0600 Subject: [PATCH 0200/2005] add a note about tcp_nodelay --- cassandra/cluster.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 530f47ba95..6656376a4e 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -386,6 +386,10 @@ def auth_provider(self, value): """ An optional list of tuples which will be used as arguments to ``socket.setsockopt()`` for all created sockets. + + Note: some drivers find setting TCPNODELAY beneficial in the context of + their execution model. It was not found generally beneficial for this driver. + To try with your own workload, set ``sockopts = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]`` """ max_schema_agreement_wait = 10 From 83d6f6a87651c428b0e4fb8db47b1acca0cb7da4 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 9 Nov 2015 11:24:40 -0600 Subject: [PATCH 0201/2005] Updating build.yaml to reduce c* version scope --- build.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/build.yaml b/build.yaml index 42d64f4d0b..0fbaf89a0a 100644 --- a/build.yaml +++ b/build.yaml @@ -8,7 +8,6 @@ cassandra: - 2.0 - 2.1 - 2.2 - - 3.0 env: EVENT_LOOP_MANAGER: - libev From a1d3dfaac8403cb3ff46f31f783cb28f67991bd8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 9 Nov 2015 14:56:59 -0600 Subject: [PATCH 0202/2005] post-release version update --- CHANGELOG.rst | 1 + cassandra/__init__.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 08a492253a..987210811c 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,5 +1,6 @@ 3.0.0c1 ======= +November 9, 2015 Features -------- diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 2bcc97d70f..fd3fbf1aff 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 0, '0a3', 'post0') +__version_info__ = (3, 0, '0rc1', 'post0) __version__ = '.'.join(map(str, __version_info__)) From c552b85d8b11048b5e4a7023040d9e7532ad4f8f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 9 Nov 2015 14:57:28 -0600 Subject: [PATCH 0203/2005] Update PEP version reference PEP 440 replaces 386, has different specification for RC releases. 386 obsolete because setuptools now normalizes the version when registering --- README-dev.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README-dev.rst b/README-dev.rst index aa784ac4c7..22a1443f12 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -6,7 +6,7 @@ Releasing * For beta releases, use a version like ``(2, 1, '0b1')`` * For release candidates, use a version like ``(2, 1, '0c1')`` - * When in doubt, follow PEP 386 versioning + * When in doubt, follow PEP 440 versioning * Commit the changelog and version changes * Tag the release. For example: ``git tag -a 1.0.0 -m 'version 1.0.0'`` From 15dff70b28aee8a804aced91cc56a11c5ed64b0b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 9 Nov 2015 15:03:41 -0600 Subject: [PATCH 0204/2005] correct changelog to reflect normalized version --- CHANGELOG.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 987210811c..83c235066b 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,5 +1,5 @@ -3.0.0c1 -======= +3.0.0rc1 +======== November 9, 2015 Features From 1ee8cd15f4916da475ef6ace5578865c010f44c3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 9 Nov 2015 15:06:30 -0600 Subject: [PATCH 0205/2005] close the string --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index fd3fbf1aff..5686621355 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 0, '0rc1', 'post0) +__version_info__ = (3, 0, '0rc1', 'post0') __version__ = '.'.join(map(str, __version_info__)) From 03781ecb5b8c862d7649473673ea191024725d45 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 9 Nov 2015 15:41:32 -0600 Subject: [PATCH 0206/2005] another tweak to version guidance --- README-dev.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README-dev.rst b/README-dev.rst index 22a1443f12..c5901534c7 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -5,7 +5,7 @@ Releasing * Update the version in ``cassandra/__init__.py`` * For beta releases, use a version like ``(2, 1, '0b1')`` - * For release candidates, use a version like ``(2, 1, '0c1')`` + * For release candidates, use a version like ``(2, 1, '0rc1')`` * When in doubt, follow PEP 440 versioning * Commit the changelog and version changes From 8b5b0c2dffc6cb1afdda4bc23d3b0067e07448b2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 10 Nov 2015 16:09:47 -0600 Subject: [PATCH 0207/2005] Don't reference cython genexp from core modules Fixes a not-well-understood issue that would cause segfualts when building with Cython < 0.22 --- cassandra/cluster.py | 12 ++++++++---- cassandra/obj_parser.pyx | 4 ---- cassandra/protocol.py | 5 ----- 3 files changed, 8 insertions(+), 13 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 5bacfe8635..2be1193061 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -19,7 +19,7 @@ from __future__ import absolute_import import atexit -from collections import defaultdict +from collections import defaultdict, Mapping from concurrent.futures import ThreadPoolExecutor import logging from random import random @@ -61,7 +61,7 @@ BatchMessage, RESULT_KIND_PREPARED, RESULT_KIND_SET_KEYSPACE, RESULT_KIND_ROWS, RESULT_KIND_SCHEMA_CHANGE, MIN_SUPPORTED_VERSION, - ProtocolHandler, _RESULT_SEQUENCE_TYPES) + ProtocolHandler) from cassandra.metadata import Metadata, protect_name, murmur3 from cassandra.policies import (TokenAwarePolicy, DCAwareRoundRobinPolicy, SimpleConvictionPolicy, ExponentialReconnectionPolicy, HostDistance, @@ -3348,9 +3348,13 @@ def fetch_next_page(self): self._current_rows = [] def _set_current_rows(self, result): - if isinstance(result, _RESULT_SEQUENCE_TYPES): + if isinstance(result, Mapping): + self._current_rows = [result] if result else [] + return + try: + iter(result) # can't check directly for generator types because cython generators are different self._current_rows = result - else: + except TypeError: self._current_rows = [result] if result else [] def _fetch_all(self): diff --git a/cassandra/obj_parser.pyx b/cassandra/obj_parser.pyx index 21ce95e0bd..8aa5b3940f 100644 --- a/cassandra/obj_parser.pyx +++ b/cassandra/obj_parser.pyx @@ -38,8 +38,6 @@ cdef class LazyParser(ColumnParser): # supported in cpdef methods return parse_rows_lazy(reader, desc) - cpdef get_cython_generator_type(self): - return get_cython_generator_type() def parse_rows_lazy(BytesIOReader reader, ParseDesc desc): cdef Py_ssize_t i, rowcount @@ -47,8 +45,6 @@ def parse_rows_lazy(BytesIOReader reader, ParseDesc desc): cdef RowParser rowparser = TupleRowParser() return (rowparser.unpack_row(reader, desc) for i in range(rowcount)) -def get_cython_generator_type(): - return type((i for i in range(0))) cdef class TupleRowParser(RowParser): """ diff --git a/cassandra/protocol.py b/cassandra/protocol.py index eec9ac3747..53a9a55bdf 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -1000,8 +1000,6 @@ def decode_message(cls, protocol_version, user_type_map, stream_id, flags, opcod return msg -_RESULT_SEQUENCE_TYPES = (list, tuple) # types retuned by ResultMessages - def cython_protocol_handler(colparser): """ Given a column parser to deserialize ResultMessages, return a suitable @@ -1046,9 +1044,6 @@ class CythonProtocolHandler(ProtocolHandler): if HAVE_CYTHON: from cassandra.obj_parser import ListParser, LazyParser ProtocolHandler = cython_protocol_handler(ListParser()) - - lazy_parser = LazyParser() - _RESULT_SEQUENCE_TYPES += (lazy_parser.get_cython_generator_type(),) LazyProtocolHandler = cython_protocol_handler(LazyParser()) else: # Use Python-based ProtocolHandler From 81eddb89faa8654e2ffaed30887fb82fd3c78730 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 10 Nov 2015 16:56:36 -0600 Subject: [PATCH 0208/2005] Updating build.yaml for current supported c* versions. --- build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.yaml b/build.yaml index 0fbaf89a0a..2f1e2f6d5e 100644 --- a/build.yaml +++ b/build.yaml @@ -4,10 +4,10 @@ python: os: - ubuntu/trusty64 cassandra: - - 1.2 - 2.0 - 2.1 - 2.2 + - 3.0 env: EVENT_LOOP_MANAGER: - libev From 403a0cda2018954bb11c5df8d896c9fcec9c6966 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 12 Nov 2015 11:59:05 -0600 Subject: [PATCH 0209/2005] Fixing issue where sessions can leak upon test failure. Improving validation --- tests/integration/standard/test_metadata.py | 29 +++++++++++++++------ 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 03b3290a6c..fa9f037447 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -559,15 +559,16 @@ def test_refresh_metadata_for_mv(self): cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) cluster2.connect() - self.assertNotIn("mv1", cluster2.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) - self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT b FROM {0}.{1} WHERE b IS NOT NULL PRIMARY KEY (a, b)" - .format(self.keyspace_name, self.function_table_name)) - self.assertNotIn("mv1", cluster2.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) - - cluster2.refresh_table_metadata(self.keyspace_name, "mv1") - self.assertIn("mv1", cluster2.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) + try: + self.assertNotIn("mv1", cluster2.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) + self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT b FROM {0}.{1} WHERE b IS NOT NULL PRIMARY KEY (a, b)" + .format(self.keyspace_name, self.function_table_name)) + self.assertNotIn("mv1", cluster2.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) - cluster2.shutdown() + cluster2.refresh_table_metadata(self.keyspace_name, "mv1") + self.assertIn("mv1", cluster2.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) + finally: + cluster2.shutdown() original_meta = self.cluster.metadata.keyspaces[self.keyspace_name].views['mv1'] self.assertIs(original_meta, self.session.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views['mv1']) @@ -578,6 +579,18 @@ def test_refresh_metadata_for_mv(self): self.assertIsNot(original_meta, self.session.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views['mv1']) self.assertEqual(original_meta.as_cql_query(), current_meta.as_cql_query()) + cluster3 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) + cluster3.connect() + try: + self.assertNotIn("mv2", cluster3.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) + self.session.execute("CREATE MATERIALIZED VIEW {0}.mv2 AS SELECT b FROM {0}.{1} WHERE b IS NOT NULL PRIMARY KEY (a, b)" + .format(self.keyspace_name, self.function_table_name)) + self.assertNotIn("mv2", cluster3.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) + cluster3.refresh_materialized_view_metadata(self.keyspace_name, 'mv2') + self.assertIn("mv2", cluster3.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) + finally: + cluster3.shutdown() + def test_refresh_user_type_metadata(self): """ test for synchronously refreshing UDT metadata in keyspace From e76c142c2701afc332af537a63e66f42af8e89e5 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 12 Nov 2015 16:12:00 -0600 Subject: [PATCH 0210/2005] concurrent: raise error even if no wait on condition Fixes an issue where execute concurrent with raise_on_first_error=True would not raise if all results were back before going to wait for results. --- cassandra/concurrent.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index a360ffaaca..ffc0f9081e 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -199,6 +199,8 @@ def _results(self): self._condition.wait() if self._exception and self._fail_fast: self._raise(self._exception) + if self._exception and self._fail_fast: # raise the exception even if there was no wait + self._raise(self._exception) return [r[1] for r in sorted(self._results_queue)] From 699801ad9d8a57e62854b83663630e111e44245a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 12 Nov 2015 16:20:28 -0600 Subject: [PATCH 0211/2005] NTS: remove extra vars; eliminate unnecessary map copy peer review PYHTON-378 --- cassandra/metadata.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 63cd1875a8..a29b1338be 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -471,8 +471,6 @@ def __init__(self, dc_replication_factors): def make_token_replica_map(self, token_to_host_owner, ring): # note: this does not account for hosts having different racks replica_map = defaultdict(list) - ring_len = len(ring) - ring_len_range = range(ring_len) dc_rf_map = dict((dc, int(rf)) for dc, rf in self.dc_replication_factors.items() if rf > 0) @@ -490,13 +488,12 @@ def make_token_replica_map(self, token_to_host_owner, ring): # This is how we keep track of advancing around the ring for each DC. dc_to_current_index = defaultdict(int) - for i in ring_len_range: - remaining = dc_rf_map.copy() + for i in range(len(ring)): replicas = replica_map[ring[i]] # go through each DC and find the replicas in that DC for dc in dc_to_token_offset.keys(): - if dc not in remaining: + if dc not in dc_rf_map: continue # advance our per-DC index until we're up to at least the @@ -508,7 +505,7 @@ def make_token_replica_map(self, token_to_host_owner, ring): index += 1 dc_to_current_index[dc] = index - replicas_remaining = remaining[dc] + replicas_remaining = dc_rf_map[dc] skipped_hosts = [] racks_placed = set() racks_this_dc = dc_racks[dc] From 8c3d0b468026b9bcb757074307da6e2fa4e09640 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 12 Nov 2015 17:45:05 -0600 Subject: [PATCH 0212/2005] Adding test for PYTHON-353 --- .../integration/cqlengine/model/test_udts.py | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index 4c5525f12b..e39fc2190e 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -368,3 +368,38 @@ class Container(Model): self.assertEqual(1, Container.objects.count()) names_output = Container.objects().first().names self.assertEqual(names_output, names) + + def test_udts_with_unicode(self): + """ + Test for inserting models with unicode and udt columns. + + test_udts_with_unicode constructs a model with a user defined type. It then attempts to insert that model with + a unicode primary key. It will also attempt to upsert a udt that contains unicode text. + + @since 3.0.0 + @jira_ticket PYTHON-353 + @expected_result No exceptions thrown + + @test_category data_types:udt + """ + ascii_name = 'normal name' + unicode_name = u'Fran\u00E7ois' + + class User(UserType): + age = columns.Integer() + name = columns.Text() + + class UserModelText(Model): + id = columns.Text(primary_key=True) + info = columns.UserDefinedType(User) + + sync_table(UserModelText) + # Two udt instances one with a unicode one with ascii + user_template_ascii = User(age=25, name=ascii_name) + user_template_unicode = User(age=25, name=unicode_name) + + UserModelText.create(id=ascii_name, info=user_template_unicode) + UserModelText.create(id=unicode_name, info=user_template_ascii) + UserModelText.create(id=unicode_name, info=user_template_unicode) + + From af7f4449acf64cdaf488bc17b77fd84acd0141d8 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 12 Nov 2015 16:00:27 -0600 Subject: [PATCH 0213/2005] Added multi-fields to previous value tests. Misc cleanup. Cleaned up pep8 issues, refactored tests to use unit asserts. --- .../cqlengine/model/test_model_io.py | 158 ++++++++++-------- 1 file changed, 90 insertions(+), 68 deletions(-) diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index dd00172ac7..96c1f63b00 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -32,12 +32,13 @@ from tests.integration import PROTOCOL_VERSION from tests.integration.cqlengine.base import BaseCassEngTestCase + class TestModel(Model): - id = columns.UUID(primary_key=True, default=lambda: uuid4()) - count = columns.Integer() - text = columns.Text(required=False) - a_bool = columns.Boolean(default=False) + id = columns.UUID(primary_key=True, default=lambda: uuid4()) + count = columns.Integer() + text = columns.Text(required=False) + a_bool = columns.Boolean(default=False) class TestModelIO(BaseCassEngTestCase): @@ -122,8 +123,8 @@ def test_column_deleting_works_properly(self): tm2 = TestModel.objects(id=tm.pk).first() self.assertIsInstance(tm2, TestModel) - assert tm2.text is None - assert tm2._values['text'].previous_value is None + self.assertTrue(tm2.text is None) + self.assertTrue(tm2._values['text'].previous_value is None) def test_a_sensical_error_is_raised_if_you_try_to_create_a_table_twice(self): """ @@ -271,10 +272,10 @@ class FloatingPointModel(Model): class TestMultiKeyModel(Model): - partition = columns.Integer(primary_key=True) - cluster = columns.Integer(primary_key=True) - count = columns.Integer(required=False) - text = columns.Text(required=False) + partition = columns.Integer(primary_key=True) + cluster = columns.Integer(primary_key=True) + count = columns.Integer(required=False) + text = columns.Text(required=False) class TestDeleting(BaseCassEngTestCase): @@ -295,11 +296,11 @@ def test_deleting_only_deletes_one_object(self): for i in range(5): TestMultiKeyModel.create(partition=partition, cluster=i, count=i, text=str(i)) - assert TestMultiKeyModel.filter(partition=partition).count() == 5 + self.assertTrue(TestMultiKeyModel.filter(partition=partition).count() == 5) TestMultiKeyModel.get(partition=partition, cluster=0).delete() - assert TestMultiKeyModel.filter(partition=partition).count() == 4 + self.assertTrue(TestMultiKeyModel.filter(partition=partition).count() == 4) TestMultiKeyModel.filter(partition=partition).delete() @@ -331,8 +332,8 @@ def test_vanilla_update(self): self.instance.save() check = TestMultiKeyModel.get(partition=self.instance.partition, cluster=self.instance.cluster) - assert check.count == 5 - assert check.text == 'happy' + self.assertTrue(check.count == 5) + self.assertTrue(check.text == 'happy') def test_deleting_only(self): self.instance.count = None @@ -340,58 +341,79 @@ def test_deleting_only(self): self.instance.save() check = TestMultiKeyModel.get(partition=self.instance.partition, cluster=self.instance.cluster) - assert check.count is None - assert check.text is None + self.assertTrue(check.count is None) + self.assertTrue(check.text is None) def test_get_changed_columns(self): - assert self.instance.get_changed_columns() == [] + self.assertTrue(self.instance.get_changed_columns() == []) self.instance.count = 1 changes = self.instance.get_changed_columns() - assert len(changes) == 1 - assert changes == ['count'] + self.assertTrue(len(changes) == 1) + self.assertTrue(changes == ['count']) self.instance.save() - assert self.instance.get_changed_columns() == [] + self.assertTrue(self.instance.get_changed_columns() == []) def test_previous_value_tracking_of_persisted_instance(self): # Check initial internal states. - assert self.instance.get_changed_columns() == [] - assert self.instance._values['count'].previous_value == 0 + self.assertTrue(self.instance.get_changed_columns() == []) + self.assertTrue(self.instance._values['count'].previous_value == 0) # Change value and check internal states. self.instance.count = 1 - assert self.instance.get_changed_columns() == ['count'] - assert self.instance._values['count'].previous_value == 0 + self.assertTrue(self.instance.get_changed_columns() == ['count']) + self.assertTrue(self.instance._values['count'].previous_value == 0) # Internal states should be updated on save. self.instance.save() - assert self.instance.get_changed_columns() == [] - assert self.instance._values['count'].previous_value == 1 + self.assertTrue(self.instance.get_changed_columns() == []) + self.assertTrue(self.instance._values['count'].previous_value == 1) # Change value twice. self.instance.count = 2 - assert self.instance.get_changed_columns() == ['count'] - assert self.instance._values['count'].previous_value == 1 + self.assertTrue(self.instance.get_changed_columns() == ['count']) + self.assertTrue(self.instance._values['count'].previous_value == 1) self.instance.count = 3 - assert self.instance.get_changed_columns() == ['count'] - assert self.instance._values['count'].previous_value == 1 + self.assertTrue(self.instance.get_changed_columns() == ['count']) + self.assertTrue(self.instance._values['count'].previous_value == 1) # Internal states updated on save. self.instance.save() - assert self.instance.get_changed_columns() == [] - assert self.instance._values['count'].previous_value == 3 + self.assertTrue(self.instance.get_changed_columns() == []) + self.assertTrue(self.instance._values['count'].previous_value == 3) # Change value and reset it. self.instance.count = 2 - assert self.instance.get_changed_columns() == ['count'] - assert self.instance._values['count'].previous_value == 3 + self.assertTrue(self.instance.get_changed_columns() == ['count']) + self.assertTrue(self.instance._values['count'].previous_value == 3) self.instance.count = 3 - assert self.instance.get_changed_columns() == [] - assert self.instance._values['count'].previous_value == 3 + self.assertTrue(self.instance.get_changed_columns() == []) + self.assertTrue(self.instance._values['count'].previous_value == 3) # Nothing to save: values in initial conditions. self.instance.save() - assert self.instance.get_changed_columns() == [] - assert self.instance._values['count'].previous_value == 3 + self.assertTrue(self.instance.get_changed_columns() == []) + self.assertTrue(self.instance._values['count'].previous_value == 3) + + # Change Multiple values + self.instance.count = 4 + self.instance.text = "changed" + self.assertTrue(len(self.instance.get_changed_columns()) == 2) + self.assertTrue('text' in self.instance.get_changed_columns()) + self.assertTrue('count' in self.instance.get_changed_columns()) + self.instance.save() + self.assertTrue(self.instance.get_changed_columns() == []) + + # Reset Multiple Values + self.instance.count = 5 + self.instance.text = "changed" + self.assertTrue(self.instance.get_changed_columns() == ['count']) + self.instance.text = "changed2" + self.assertTrue(len(self.instance.get_changed_columns()) == 2) + self.assertTrue('text' in self.instance.get_changed_columns()) + self.assertTrue('count' in self.instance.get_changed_columns()) + self.instance.count = 4 + self.instance.text = "changed" + self.assertTrue(self.instance.get_changed_columns() == []) def test_previous_value_tracking_on_instantiation(self): self.instance = TestMultiKeyModel( @@ -401,30 +423,30 @@ def test_previous_value_tracking_on_instantiation(self): text='happy') # Columns of instances not persisted yet should be marked as changed. - assert set(self.instance.get_changed_columns()) == set([ - 'partition', 'cluster', 'count', 'text']) - assert self.instance._values['partition'].previous_value is None - assert self.instance._values['cluster'].previous_value is None - assert self.instance._values['count'].previous_value is None - assert self.instance._values['text'].previous_value is None + self.assertTrue(set(self.instance.get_changed_columns()) == set([ + 'partition', 'cluster', 'count', 'text'])) + self.assertTrue(self.instance._values['partition'].previous_value is None) + self.assertTrue(self.instance._values['cluster'].previous_value is None) + self.assertTrue(self.instance._values['count'].previous_value is None) + self.assertTrue(self.instance._values['text'].previous_value is None) # Value changes doesn't affect internal states. self.instance.count = 1 - assert 'count' in self.instance.get_changed_columns() - assert self.instance._values['count'].previous_value is None + self.assertTrue('count' in self.instance.get_changed_columns()) + self.assertTrue(self.instance._values['count'].previous_value is None) self.instance.count = 2 - assert 'count' in self.instance.get_changed_columns() - assert self.instance._values['count'].previous_value is None + self.assertTrue('count' in self.instance.get_changed_columns()) + self.assertTrue(self.instance._values['count'].previous_value is None) # Value reset is properly tracked. self.instance.count = None - assert 'count' not in self.instance.get_changed_columns() - assert self.instance._values['count'].previous_value == None + self.assertTrue('count' not in self.instance.get_changed_columns()) + self.assertTrue(self.instance._values['count'].previous_value is None) self.instance.save() - assert self.instance.get_changed_columns() == [] - assert self.instance._values['count'].previous_value == None - assert self.instance.count is None + self.assertTrue(self.instance.get_changed_columns() == []) + self.assertTrue(self.instance._values['count'].previous_value is None) + self.assertTrue(self.instance.count is None) class TestCanUpdate(BaseCassEngTestCase): @@ -445,34 +467,34 @@ def test_success_case(self): # object hasn't been saved, # shouldn't be able to update - assert not tm._is_persisted - assert not tm._can_update() + self.assertTrue(not tm._is_persisted) + self.assertTrue(not tm._can_update()) tm.save() # object has been saved, # should be able to update - assert tm._is_persisted - assert tm._can_update() + self.assertTrue(tm._is_persisted) + self.assertTrue(tm._can_update()) tm.count = 200 # primary keys haven't changed, # should still be able to update - assert tm._can_update() + self.assertTrue(tm._can_update()) tm.save() tm.id = uuid4() # primary keys have changed, # should not be able to update - assert not tm._can_update() + self.assertTrue(not tm._can_update()) class IndexDefinitionModel(Model): - key = columns.UUID(primary_key=True) - val = columns.Text(index=True) + key = columns.UUID(primary_key=True) + val = columns.Text(index=True) class TestIndexedColumnDefinition(BaseCassEngTestCase): @@ -484,8 +506,8 @@ def test_exception_isnt_raised_if_an_index_is_defined_more_than_once(self): class ReservedWordModel(Model): - token = columns.Text(primary_key=True) - insert = columns.Integer(index=True) + token = columns.Text(primary_key=True) + insert = columns.Integer(index=True) class TestQueryQuoting(BaseCassEngTestCase): @@ -499,9 +521,9 @@ def test_reserved_cql_words_can_be_used_as_column_names(self): model2 = ReservedWordModel.filter(token='1') - assert len(model2) == 1 - assert model1.token == model2[0].token - assert model1.insert == model2[0].insert + self.assertTrue(len(model2) == 1) + self.assertTrue(model1.token == model2[0].token) + self.assertTrue(model1.insert == model2[0].insert) class TestQueryModel(Model): @@ -545,8 +567,8 @@ def test_query_with_date(self): TestQueryModel.test_id == uid, TestQueryModel.date == day).limit(1).first() - assert inst.test_id == uid - assert inst.date == day + self.assertTrue(inst.test_id == uid) + self.assertTrue(inst.date == day) def test_none_filter_fails(): From d1273b416bd803da91a2415c2e49d29c380114d6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 13 Nov 2015 12:35:22 -0600 Subject: [PATCH 0214/2005] 3.0 changelog update --- CHANGELOG.rst | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 83c235066b..6b2531db8f 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,24 @@ +3.0.0 +===== + +Features +-------- +* Support datetime.date objects as a DateType (PYTHON-212) +* Add Cluster.update_view_metadata (PYTHON-407) +* QueryTrace option to populate partial trace sessions (PYTHON-438) +* Attach column names to ResultSet (PYTHON-439) + +Bug Fixes +--------- +* Properly SerDes nested collections when protocol_version < 3 (PYTHON-215) +* Evict UDTs from UserType cache on change (PYTHON-226) +* Make sure query strings are always encoded UTF-8 (PYTHON-334) +* Track previous value of columns at instantiation in CQLengine (PYTHON-348) +* UDT CQL encoding does not work for unicode values (PYTHON-353) +* NetworkTopologyStrategy#make_token_replica_map does not account for multiple racks in a DC (PYTHON-378) +* Cython integer overflow on decimal type deserialization (PYTHON-433) +* Query trace: if session hasn't been logged, query trace can throw exception (PYTHON-442) + 3.0.0rc1 ======== November 9, 2015 From b902ec42e98699bb50a78687cb93a1c46915936c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 13 Nov 2015 15:09:21 -0600 Subject: [PATCH 0215/2005] doc: tweak previous supported wording --- docs/upgrading.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/upgrading.rst b/docs/upgrading.rst index 686a7dee7d..25622c346c 100644 --- a/docs/upgrading.rst +++ b/docs/upgrading.rst @@ -8,7 +8,7 @@ Upgrading to 3.0 ---------------- Version 3.0 of the DataStax Python driver for Apache Cassandra adds support for Cassandra 3.0 while maintaining support for -all earlier versions. In addition to substantial internal rework, +previously supported versions. In addition to substantial internal rework, there are several updates to the API that integrators will need to consider: From 114a8e06cc70f1a6bd62ecdb34d0bc8821209d3e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 13 Nov 2015 15:12:55 -0600 Subject: [PATCH 0216/2005] doc: upgrade guide formatting --- docs/upgrading.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/upgrading.rst b/docs/upgrading.rst index 25622c346c..21e2abcd58 100644 --- a/docs/upgrading.rst +++ b/docs/upgrading.rst @@ -23,6 +23,7 @@ Execution API Updates Result return normalization ~~~~~~~~~~~~~~~~~~~~~~~~~~~ `PYTHON-368 `_ + Previously results would be returned as a ``list`` of rows for result rows up to ``fetch_size``, and ``PagedResult`` afterward. This could break application code that assumed one type and got another. @@ -53,6 +54,7 @@ be logged if a paged query is implicitly materialized. Trace information is not attached to executed Statements ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ `PYTHON-318 `_ + Previously trace data was attached to Statements if tracing was enabled. This could lead to confusion if the same statement was used for multiple executions. @@ -70,6 +72,7 @@ returned for each query: Binding named parameters now ignores extra names ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ `PYTHON-178 `_ + Previously, :meth:`.BoundStatement.bind()` would raise if a mapping was passed with extra names not found in the prepared statement. @@ -78,6 +81,7 @@ Behavior in 3.0+ is to ignore extra names. blist removed as soft dependency ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `PYTHON-385 `_ + Previously the driver had a soft dependency on ``blist sortedset``, using that where available and using an internal fallback where possible. From 7f85c7d506a43c1c00e2ab00c275d62aeb4e3f41 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 13 Nov 2015 14:25:39 -0600 Subject: [PATCH 0217/2005] Adding test for python-334 --- tests/integration/standard/test_query.py | 53 +++++++++++++++++++++++- 1 file changed, 51 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index faffd33a92..99add1a99d 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -283,7 +283,7 @@ def test_prepared_statement(self): cluster.shutdown() -class BatchStatementTests(unittest.TestCase): +class BatchStatementTests(BasicSharedKeyspaceUnitTestCase): def setUp(self): if PROTOCOL_VERSION < 2: @@ -296,7 +296,6 @@ def setUp(self): self.cluster.set_core_connections_per_host(HostDistance.LOCAL, 1) self.session = self.cluster.connect() - self.session.execute("TRUNCATE test3rf.test") def tearDown(self): self.cluster.shutdown() @@ -386,6 +385,21 @@ def test_no_parameters_many_times(self): self.test_no_parameters() self.session.execute("TRUNCATE test3rf.test") + def test_unicode(self): + ddl = ''' + CREATE TABLE test3rf.testtext ( + k int PRIMARY KEY, + v text )''' + self.session.execute(ddl) + unicode_text = u'Fran\u00E7ois' + query = u'INSERT INTO test3rf.testtext (k, v) VALUES (%s, %s)' + try: + batch = BatchStatement(BatchType.LOGGED) + batch.add(u"INSERT INTO test3rf.testtext (k, v) VALUES (%s, %s)", (0, unicode_text)) + self.session.execute(batch) + finally: + self.session.execute("DROP TABLE test3rf.testtext") + class SerialConsistencyTests(unittest.TestCase): def setUp(self): @@ -788,3 +802,38 @@ def test_mv_filtering(self): self.assertEquals(results[1].day, 25) self.assertEquals(results[1].score, 3200) self.assertEquals(results[1].user, "pcmanus") + + +class UnicodeQueryTest(BasicSharedKeyspaceUnitTestCase): + + def setUp(self): + ddl = ''' + CREATE TABLE {0}.{1} ( + k int PRIMARY KEY, + v text )'''.format(self.keyspace_name, self.function_table_name) + self.session.execute(ddl) + + def tearDown(self): + self.session.execute("DROP TABLE {0}.{1}".format(self.keyspace_name,self.function_table_name)) + + def test_unicode(self): + """ + Test to validate that unicode query strings are handled appropriately by various query types + + @since 3.0.0 + @jira_ticket PYTHON-334 + @expected_result no unicode exceptions are thrown + + @test_category query + """ + + unicode_text = u'Fran\u00E7ois' + batch = BatchStatement(BatchType.LOGGED) + batch.add(u"INSERT INTO {0}.{1} (k, v) VALUES (%s, %s)".format(self.keyspace_name, self.function_table_name), (0, unicode_text)) + self.session.execute(batch) + self.session.execute(u"INSERT INTO {0}.{1} (k, v) VALUES (%s, %s)".format(self.keyspace_name, self.function_table_name), (0, unicode_text)) + prepared = self.session.prepare(u"INSERT INTO {0}.{1} (k, v) VALUES (?, ?)".format(self.keyspace_name, self.function_table_name)) + bound = prepared.bind((1, unicode_text)) + self.session.execute(bound) + + From e2319577ec56bc36437cedca07c2f35e298155f8 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 16 Nov 2015 13:13:48 -0600 Subject: [PATCH 0218/2005] Added tests for Python-215, 226, 212, 439. Refactoring tests to use common setup and teardown. Fixing broken testcases. --- tests/integration/__init__.py | 44 ++++- .../integration/cqlengine/query/test_named.py | 21 ++- .../standard/test_cython_protocol_handlers.py | 1 + tests/integration/standard/test_query.py | 71 ++++---- tests/integration/standard/test_types.py | 162 +++++++++++++++--- tests/integration/standard/test_udts.py | 102 ++++++----- 6 files changed, 284 insertions(+), 117 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 8a87381395..1fd1a6709c 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -127,6 +127,12 @@ def _tuple_version(version_string): PROTOCOL_VERSION = int(os.getenv('PROTOCOL_VERSION', default_protocol_version)) notprotocolv1 = unittest.skipUnless(PROTOCOL_VERSION > 1, 'Protocol v1 not supported') +lessthenprotocolv4 = unittest.skipUnless(PROTOCOL_VERSION < 4, 'Protocol versions 4 or greater no supported') +greaterthanprotocolv3 = unittest.skipUnless(PROTOCOL_VERSION >= 4, 'Protocol versions less than 4 are not supported') + +greaterthancass20 = unittest.skipUnless(CASSANDRA_VERSION >= '2.1', 'Cassandra version 2.1 or greater required') +greaterthanorequalcass30 = unittest.skipUnless(CASSANDRA_VERSION >= '3.0', 'Cassandra version 3.0 or greater required') +lessthancass30 = unittest.skipUnless(CASSANDRA_VERSION < '3.0', 'Cassandra version less then 3.0 required') def get_cluster(): @@ -171,6 +177,7 @@ def remove_cluster(): raise RuntimeError("Failed to remove cluster after 100 attempts") + def is_current_cluster(cluster_name, node_counts): global CCM_CLUSTER if CCM_CLUSTER and CCM_CLUSTER.name == cluster_name: @@ -395,12 +402,13 @@ def create_keyspace(cls, rf): execute_with_long_wait_retry(cls.session, ddl) @classmethod - def common_setup(cls, rf, create_class_table=False, skip_if_cass_version_less_than=None): + def common_setup(cls, rf, keyspace_creation=True, create_class_table=False): cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) cls.session = cls.cluster.connect() cls.ks_name = cls.__name__.lower() - cls.create_keyspace(rf) - cls.cass_version = get_server_versions() + if keyspace_creation: + cls.create_keyspace(rf) + cls.cass_version, cls.cql_version = get_server_versions() if create_class_table: @@ -422,6 +430,19 @@ def drop_function_table(self): execute_until_pass(self.session, ddl) +class BasicExistingKeyspaceUnitTestCase(BasicKeyspaceUnitTestCase): + """ + This is basic unit test defines class level teardown and setup methods. It assumes that keyspace is already defined, or created as part of the test. + """ + @classmethod + def setUpClass(cls): + cls.common_setup(1, keyspace_creation=False) + + @classmethod + def tearDownClass(cls): + cls.cluster.shutdown() + + class BasicSharedKeyspaceUnitTestCase(BasicKeyspaceUnitTestCase): """ This is basic unit test case that can be leveraged to scope a keyspace to a specific test class. @@ -433,7 +454,7 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - drop_keyspace_shutdown_cluster(cls.keyspace_name, cls.session, cls.cluster) + drop_keyspace_shutdown_cluster(cls.ks_name, cls.session, cls.cluster) class BasicSharedKeyspaceUnitTestCaseWTable(BasicSharedKeyspaceUnitTestCase): @@ -510,4 +531,17 @@ def setUp(self): self.common_setup(1) def tearDown(self): - drop_keyspace_shutdown_cluster(self.keyspace_name, self.session, self.cluster) + drop_keyspace_shutdown_cluster(self.ks_name, self.session, self.cluster) + + +class BasicExistingSegregatedKeyspaceUnitTestCase(BasicKeyspaceUnitTestCase): + """ + This unit test will create and teardown or each individual unit tests. It assumes that keyspace is existing + or created as part of a test. + It has some overhead and should only be used when sharing cluster/session is not feasible. + """ + def setUp(self): + self.common_setup(1, keyspace_creation=False) + + def tearDown(self): + self.cluster.shutdown() diff --git a/tests/integration/cqlengine/query/test_named.py b/tests/integration/cqlengine/query/test_named.py index 9f744cfddc..12b02003c1 100644 --- a/tests/integration/cqlengine/query/test_named.py +++ b/tests/integration/cqlengine/query/test_named.py @@ -29,7 +29,7 @@ from tests.integration.cqlengine.query.test_queryset import BaseQuerySetUsage -from tests.integration import BasicSharedKeyspaceUnitTestCase, get_server_versions +from tests.integration import BasicSharedKeyspaceUnitTestCase, greaterthanorequalcass30 class TestQuerySetOperation(BaseCassEngTestCase): @@ -270,18 +270,21 @@ def test_get_multipleobjects_exception(self): self.table.objects.get(test_id=1) -class TestNamedWithMV(BaseCassEngTestCase): +class TestNamedWithMV(BasicSharedKeyspaceUnitTestCase): - def setUp(self): - cass_version = get_server_versions()[0] - if cass_version < (3, 0): - raise unittest.SkipTest("Materialized views require Cassandra 3.0+") - super(TestNamedWithMV, self).setUp() + @classmethod + def setUpClass(cls): + super(TestNamedWithMV, cls).setUpClass() + cls.default_keyspace = models.DEFAULT_KEYSPACE + models.DEFAULT_KEYSPACE = cls.ks_name - def tearDown(self): - models.DEFAULT_KEYSPACE = self.default_keyspace + @classmethod + def tearDownClass(cls): + models.DEFAULT_KEYSPACE = cls.default_keyspace setup_connection(models.DEFAULT_KEYSPACE) + super(TestNamedWithMV, cls).tearDownClass() + @greaterthanorequalcass30 def test_named_table_with_mv(self): """ Test NamedTable access to materialized views diff --git a/tests/integration/standard/test_cython_protocol_handlers.py b/tests/integration/standard/test_cython_protocol_handlers.py index 7532426e5c..dc24d0a3e6 100644 --- a/tests/integration/standard/test_cython_protocol_handlers.py +++ b/tests/integration/standard/test_cython_protocol_handlers.py @@ -18,6 +18,7 @@ from tests.unit.cython.utils import cythontest, numpytest + def setup_module(): use_singledc() update_datatypes() diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 99add1a99d..ade54bcfe3 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -26,48 +26,42 @@ from cassandra.cluster import Cluster from cassandra.policies import HostDistance -from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, get_server_versions +from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, get_server_versions, greaterthanprotocolv3 import re def setup_module(): + print("Setting up module") use_singledc() global CASS_SERVER_VERSION CASS_SERVER_VERSION = get_server_versions()[0] -class QueryTests(unittest.TestCase): +class QueryTests(BasicSharedKeyspaceUnitTestCase): def test_query(self): - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - prepared = session.prepare( + prepared = self.session.prepare( """ INSERT INTO test3rf.test (k, v) VALUES (?, ?) - """) + """.format(self.keyspace_name)) self.assertIsInstance(prepared, PreparedStatement) bound = prepared.bind((1, None)) self.assertIsInstance(bound, BoundStatement) self.assertEqual(2, len(bound.values)) - session.execute(bound) + self.session.execute(bound) self.assertEqual(bound.routing_key, b'\x00\x00\x00\x01') - cluster.shutdown() - def test_trace_prints_okay(self): """ Code coverage to ensure trace prints to string without error """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - query = "SELECT * FROM system.local" statement = SimpleStatement(query) - rs = session.execute(statement, trace=True) + rs = self.session.execute(statement, trace=True) # Ensure this does not throw an exception trace = rs.get_query_trace() @@ -76,13 +70,9 @@ def test_trace_prints_okay(self): for event in trace.events: str(event) - cluster.shutdown() - def test_trace_id_to_resultset(self): - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - future = session.execute_async("SELECT * FROM system.local", trace=True) + future = self.session.execute_async("SELECT * FROM system.local", trace=True) # future should have the current trace rs = future.result() @@ -96,16 +86,12 @@ def test_trace_id_to_resultset(self): self.assertListEqual([rs_trace], rs.get_all_query_traces()) - cluster.shutdown() - def test_trace_ignores_row_factory(self): - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - session.row_factory = dict_factory + self.session.row_factory = dict_factory query = "SELECT * FROM system.local" statement = SimpleStatement(query) - rs = session.execute(statement, trace=True) + rs = self.session.execute(statement, trace=True) # Ensure this does not throw an exception trace = rs.get_query_trace() @@ -114,8 +100,7 @@ def test_trace_ignores_row_factory(self): for event in trace.events: str(event) - cluster.shutdown() - + @greaterthanprotocolv3 def test_client_ip_in_trace(self): """ Test to validate that client trace contains client ip information. @@ -136,18 +121,10 @@ def test_client_ip_in_trace(self): # raise unittest.SkipTest("Client IP was not present in trace until C* 2.2") """ - if PROTOCOL_VERSION < 4: - raise unittest.SkipTest( - "Protocol 4+ is required for client ip tracing, currently testing against %r" - % (PROTOCOL_VERSION,)) - - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - # Make simple query with trace enabled query = "SELECT * FROM system.local" statement = SimpleStatement(query) - response_future = session.execute_async(statement, trace=True) + response_future = self.session.execute_async(statement, trace=True) response_future.result() # Fetch the client_ip from the trace. @@ -161,7 +138,29 @@ def test_client_ip_in_trace(self): self.assertIsNotNone(client_ip, "Client IP was not set in trace with C* >= 2.2") self.assertTrue(pat.match(client_ip), "Client IP from trace did not match the expected value") - cluster.shutdown() + def test_column_names(self): + """ + Test to validate the columns are present on the result set. + Preforms a simple query against a table then checks to ensure column names are correct and present and correct. + + @since 3.0.0 + @jira_ticket PYTHON-439 + @expected_result column_names should be preset. + + @test_category queries basic + """ + create_table = """CREATE TABLE {0}.{1}( + user TEXT, + game TEXT, + year INT, + month INT, + day INT, + score INT, + PRIMARY KEY (user, game, year, month, day) + )""".format(self.keyspace_name, self.function_table_name) + self.session.execute(create_table) + result_set = self.session.execute("SELECT * FROM {0}.{1}".format(self.keyspace_name, self.function_table_name)) + self.assertEqual(result_set.column_names, [u'user', u'game', u'year', u'month', u'day', u'score']) class PreparedStatementTests(unittest.TestCase): diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index 73fbc89507..5d14f4be9d 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -27,8 +27,10 @@ from cassandra.cqltypes import Int32Type, EMPTY from cassandra.query import dict_factory, ordered_dict_factory from cassandra.util import sortedset +from tests.unit.cython.utils import cythontest -from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION, execute_until_pass, notprotocolv1 +from tests.integration import use_singledc, PROTOCOL_VERSION, execute_until_pass, notprotocolv1, \ + BasicSharedKeyspaceUnitTestCase, greaterthancass20, lessthancass30 from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, COLLECTION_TYPES, \ get_sample, get_collection_sample @@ -38,21 +40,14 @@ def setup_module(): update_datatypes() -class TypeTests(unittest.TestCase): +class TypeTests(BasicSharedKeyspaceUnitTestCase): @classmethod def setUpClass(cls): - cls._cass_version, cls._cql_version = get_server_versions() - cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) - cls.session = cls.cluster.connect() - cls.session.execute("CREATE KEYSPACE typetests WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}") - cls.session.set_keyspace("typetests") + # cls._cass_version, cls. = get_server_versions() + super(TypeTests, cls).setUpClass() + cls.session.set_keyspace(cls.ks_name) - @classmethod - def tearDownClass(cls): - execute_until_pass(cls.session, "DROP KEYSPACE typetests") - cls.cluster.shutdown() - def test_can_insert_blob_type_as_string(self): """ Tests that byte strings in Python maps to blob type in Cassandra @@ -66,10 +61,10 @@ def test_can_insert_blob_type_as_string(self): # In python2, with Cassandra > 2.0, we don't treat the 'byte str' type as a blob, so we'll encode it # as a string literal and have the following failure. - if six.PY2 and self._cql_version >= (3, 1, 0): + if six.PY2 and self.cql_version >= (3, 1, 0): # Blob values can't be specified using string notation in CQL 3.1.0 and # above which is used by default in Cassandra 2.0. - if self._cass_version >= (2, 1, 0): + if self.cass_version >= (2, 1, 0): msg = r'.*Invalid STRING constant \(.*?\) for "b" of type blob.*' else: msg = r'.*Invalid STRING constant \(.*?\) for b of type blob.*' @@ -108,7 +103,7 @@ def test_can_insert_primitive_datatypes(self): Test insertion of all datatype primitives """ c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("typetests") + s = c.connect(self.keyspace_name) # create table alpha_type_list = ["zz int PRIMARY KEY"] @@ -167,7 +162,7 @@ def test_can_insert_collection_datatypes(self): """ c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("typetests") + s = c.connect(self.keyspace_name) # use tuple encoding, to convert native python tuple into raw CQL s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple @@ -394,11 +389,11 @@ def test_can_insert_tuples(self): Basic test of tuple functionality """ - if self._cass_version < (2, 1, 0): + if self.cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("typetests") + s = c.connect(self.keyspace_name) # use this encoder in order to insert tuples s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple @@ -446,11 +441,11 @@ def test_can_insert_tuples_with_varying_lengths(self): as expected. """ - if self._cass_version < (2, 1, 0): + if self.cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("typetests") + s = c.connect(self.keyspace_name) # set the row_factory to dict_factory for programmatic access # set the encoder for tuples for the ability to write tuples @@ -485,11 +480,11 @@ def test_can_insert_tuples_all_primitive_datatypes(self): Ensure tuple subtypes are appropriately handled. """ - if self._cass_version < (2, 1, 0): + if self.cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("typetests") + s = c.connect(self.keyspace_name) s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple s.execute("CREATE TABLE tuple_primitive (" @@ -513,11 +508,11 @@ def test_can_insert_tuples_all_collection_datatypes(self): Ensure tuple subtypes are appropriately handled for maps, sets, and lists. """ - if self._cass_version < (2, 1, 0): + if self.cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("typetests") + s = c.connect(self.keyspace_name) # set the row_factory to dict_factory for programmatic access # set the encoder for tuples for the ability to write tuples @@ -612,11 +607,11 @@ def test_can_insert_nested_tuples(self): Ensure nested are appropriately handled. """ - if self._cass_version < (2, 1, 0): + if self.cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("typetests") + s = c.connect(self.keyspace_name) # set the row_factory to dict_factory for programmatic access # set the encoder for tuples for the ability to write tuples @@ -652,7 +647,7 @@ def test_can_insert_tuples_with_nulls(self): Test tuples with null and empty string fields. """ - if self._cass_version < (2, 1, 0): + if self.cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") s = self.session @@ -746,3 +741,116 @@ def verify_insert_select(ins_statement, sel_statement): # prepared binding verify_insert_select(s.prepare('INSERT INTO float_cql_encoding (f, d) VALUES (?, ?)'), s.prepare('SELECT * FROM float_cql_encoding WHERE f=?')) + + @cythontest + def test_cython_decimal(self): + """ + Test to validate that decimal deserialization works correctly in with our cython extensions + + @since 3.0.0 + @jira_ticket PYTHON-212 + @expected_result no exceptions are thrown, decimal is decoded correctly + + @test_category data_types serialization + """ + + self.session.execute("CREATE TABLE {0} (dc decimal PRIMARY KEY)".format(self.function_table_name)) + try: + self.session.execute("INSERT INTO {0} (dc) VALUES (-1.08430792318105707)".format(self.function_table_name)) + results = self.session.execute("SELECT * FROM {0}".format(self.function_table_name)) + self.assertTrue(str(results[0].dc) == '-1.08430792318105707') + finally: + self.session.execute("DROP TABLE {0}".format(self.function_table_name)) + + +class TypeTestsProtocol(BasicSharedKeyspaceUnitTestCase): + + @greaterthancass20 + @lessthancass30 + def test_nested_types_with_protocol_version(self): + """ + Test to validate that nested type serialization works on various protocol versions. Provided + the version of cassandra is greater the 2.1.3 we would expect to nested to types to work at all protocol versions. + + @since 3.0.0 + @jira_ticket PYTHON-215 + @expected_result no exceptions are thrown + + @test_category data_types serialization + """ + ddl = '''CREATE TABLE {0}.t ( + k int PRIMARY KEY, + v list>>)'''.format(self.keyspace_name) + + self.session.execute(ddl) + ddl = '''CREATE TABLE {0}.u ( + k int PRIMARY KEY, + v set>>)'''.format(self.keyspace_name) + self.session.execute(ddl) + ddl = '''CREATE TABLE {0}.v ( + k int PRIMARY KEY, + v map>, frozen>>, + v1 frozen>)'''.format(self.keyspace_name) + self.session.execute(ddl) + + self.session.execute("CREATE TYPE {0}.typ (v0 frozen>>>, v1 frozen>)".format(self.keyspace_name)) + + ddl = '''CREATE TABLE {0}.w ( + k int PRIMARY KEY, + v frozen)'''.format(self.keyspace_name) + + self.session.execute(ddl) + + for pvi in range(1, 5): + self.run_inserts_at_version(pvi) + for pvr in range(1, 5): + self.read_inserts_at_level(pvr) + + def print_results(self, results): + print("printing results") + print(str(results.v)) + + def read_inserts_at_level(self, proto_ver): + session = Cluster(protocol_version=proto_ver).connect(self.keyspace_name) + try: + print("reading at version {0}".format(proto_ver)) + results = session.execute('select * from t')[0] + self.print_results(results) + self.assertEqual("[SortedSet([1, 2]), SortedSet([3, 5])]", str(results.v)) + + results = session.execute('select * from u')[0] + self.print_results(results) + self.assertEqual("SortedSet([[1, 2], [3, 5]])", str(results.v)) + + results = session.execute('select * from v')[0] + self.print_results(results) + self.assertEqual("{SortedSet([1, 2]): [1, 2, 3], SortedSet([3, 5]): [4, 5, 6]}", str(results.v)) + + results = session.execute('select * from w')[0] + self.print_results(results) + self.assertEqual("typ(v0=OrderedMapSerializedKey([(1, [1, 2, 3]), (2, [4, 5, 6])]), v1=[7, 8, 9])", str(results.v)) + + finally: + session.cluster.shutdown() + + def run_inserts_at_version(self, proto_ver): + session = Cluster(protocol_version=proto_ver).connect(self.keyspace_name) + try: + print("running inserts with protocol version {0}".format(str(proto_ver))) + p = session.prepare('insert into t (k, v) values (?, ?)') + session.execute(p, (0, [{1, 2}, {3, 5}])) + + p = session.prepare('insert into u (k, v) values (?, ?)') + session.execute(p, (0, {(1, 2), (3, 5)})) + + p = session.prepare('insert into v (k, v, v1) values (?, ?, ?)') + session.execute(p, (0, {(1, 2): [1, 2, 3], (3, 5): [4, 5, 6]}, (123, 'four'))) + + p = session.prepare('insert into w (k, v) values (?, ?)') + session.execute(p, (0, ({1: [1, 2, 3], 2: [4, 5, 6]}, [7, 8, 9]))) + + finally: + session.cluster.shutdown() + + + diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index 6cebf97bd2..5c6b0d663c 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -26,7 +26,7 @@ from cassandra.query import dict_factory from cassandra.util import OrderedMap -from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION, execute_until_pass +from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION, execute_until_pass, BasicSegregatedKeyspaceUnitTestCase, greaterthancass20 from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, COLLECTION_TYPES, \ get_sample, get_collection_sample @@ -39,27 +39,16 @@ def setup_module(): update_datatypes() -class UDTTests(unittest.TestCase): +@greaterthancass20 +class UDTTests(BasicSegregatedKeyspaceUnitTestCase): @property def table_name(self): return self._testMethodName.lower() def setUp(self): - self._cass_version, self._cql_version = get_server_versions() - - if self._cass_version < (2, 1, 0): - raise unittest.SkipTest("User Defined Types were introduced in Cassandra 2.1") - - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) - self.session = self.cluster.connect() - execute_until_pass(self.session, - "CREATE KEYSPACE udttests WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}") - self.session.set_keyspace("udttests") - - def tearDown(self): - execute_until_pass(self.session, "DROP KEYSPACE udttests") - self.cluster.shutdown() + super(UDTTests, self).setUp() + self.session.set_keyspace(self.keyspace_name) def test_can_insert_unprepared_registered_udts(self): """ @@ -67,13 +56,13 @@ def test_can_insert_unprepared_registered_udts(self): """ c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("udttests") + s = c.connect(self.keyspace_name) s.execute("CREATE TYPE user (age int, name text)") s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen)") User = namedtuple('user', ('age', 'name')) - c.register_user_type("udttests", "user", User) + c.register_user_type(self.keyspace_name, "user", User) s.execute("INSERT INTO mytable (a, b) VALUES (%s, %s)", (0, User(42, 'bob'))) result = s.execute("SELECT b FROM mytable WHERE a=0") @@ -169,7 +158,7 @@ def test_can_insert_prepared_unregistered_udts(self): """ c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("udttests") + s = c.connect(self.keyspace_name) s.execute("CREATE TYPE user (age int, name text)") s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen)") @@ -213,11 +202,11 @@ def test_can_insert_prepared_registered_udts(self): """ c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("udttests") + s = c.connect(self.keyspace_name) s.execute("CREATE TYPE user (age int, name text)") User = namedtuple('user', ('age', 'name')) - c.register_user_type("udttests", "user", User) + c.register_user_type(self.keyspace_name, "user", User) s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen)") @@ -263,11 +252,11 @@ def test_can_insert_udts_with_nulls(self): """ c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("udttests") + s = c.connect(self.keyspace_name) s.execute("CREATE TYPE user (a text, b int, c uuid, d blob)") User = namedtuple('user', ('a', 'b', 'c', 'd')) - c.register_user_type("udttests", "user", User) + c.register_user_type(self.keyspace_name, "user", User) s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen)") @@ -293,7 +282,7 @@ def test_can_insert_udts_with_varying_lengths(self): """ c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("udttests") + s = c.connect(self.keyspace_name) MAX_TEST_LENGTH = 254 @@ -310,7 +299,7 @@ def test_can_insert_udts_with_varying_lengths(self): # create and register the seed udt type udt = namedtuple('lengthy_udt', tuple(['v_{0}'.format(i) for i in range(MAX_TEST_LENGTH)])) - c.register_user_type("udttests", "lengthy_udt", udt) + c.register_user_type(self.keyspace_name, "lengthy_udt", udt) # verify inserts and reads for i in (0, 1, 2, 3, MAX_TEST_LENGTH): @@ -377,7 +366,7 @@ def test_can_insert_nested_registered_udts(self): """ c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("udttests") + s = c.connect(self.keyspace_name) s.row_factory = dict_factory MAX_NESTING_DEPTH = 16 @@ -389,13 +378,13 @@ def test_can_insert_nested_registered_udts(self): udts = [] udt = namedtuple('depth_0', ('age', 'name')) udts.append(udt) - c.register_user_type("udttests", "depth_0", udts[0]) + c.register_user_type(self.keyspace_name, "depth_0", udts[0]) # create and register the nested udt types for i in range(MAX_NESTING_DEPTH): udt = namedtuple('depth_{0}'.format(i + 1), ('value')) udts.append(udt) - c.register_user_type("udttests", "depth_{0}".format(i + 1), udts[i + 1]) + c.register_user_type(self.keyspace_name, "depth_{0}".format(i + 1), udts[i + 1]) # insert udts and verify inserts with reads self.nested_udt_verification_helper(s, MAX_NESTING_DEPTH, udts) @@ -408,7 +397,7 @@ def test_can_insert_nested_unregistered_udts(self): """ c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("udttests") + s = c.connect(self.keyspace_name) s.row_factory = dict_factory MAX_NESTING_DEPTH = 16 @@ -448,7 +437,7 @@ def test_can_insert_nested_registered_udts_with_different_namedtuples(self): """ c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("udttests") + s = c.connect(self.keyspace_name) s.row_factory = dict_factory MAX_NESTING_DEPTH = 16 @@ -460,13 +449,13 @@ def test_can_insert_nested_registered_udts_with_different_namedtuples(self): udts = [] udt = namedtuple('level_0', ('age', 'name')) udts.append(udt) - c.register_user_type("udttests", "depth_0", udts[0]) + c.register_user_type(self.keyspace_name, "depth_0", udts[0]) # create and register the nested udt types for i in range(MAX_NESTING_DEPTH): udt = namedtuple('level_{0}'.format(i + 1), ('value')) udts.append(udt) - c.register_user_type("udttests", "depth_{0}".format(i + 1), udts[i + 1]) + c.register_user_type(self.keyspace_name, "depth_{0}".format(i + 1), udts[i + 1]) # insert udts and verify inserts with reads self.nested_udt_verification_helper(s, MAX_NESTING_DEPTH, udts) @@ -479,7 +468,7 @@ def test_raise_error_on_nonexisting_udts(self): """ c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("udttests") + s = c.connect(self.keyspace_name) User = namedtuple('user', ('age', 'name')) with self.assertRaises(UserTypeDoesNotExist): @@ -499,7 +488,7 @@ def test_can_insert_udt_all_datatypes(self): """ c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("udttests") + s = c.connect(self.keyspace_name) # create UDT alpha_type_list = [] @@ -510,7 +499,7 @@ def test_can_insert_udt_all_datatypes(self): s.execute(""" CREATE TYPE alldatatypes ({0}) """.format(', '.join(alpha_type_list)) - ) + ) s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen)") @@ -519,7 +508,7 @@ def test_can_insert_udt_all_datatypes(self): for i in range(ord('a'), ord('a') + len(PRIMITIVE_DATATYPES)): alphabet_list.append('{0}'.format(chr(i))) Alldatatypes = namedtuple("alldatatypes", alphabet_list) - c.register_user_type("udttests", "alldatatypes", Alldatatypes) + c.register_user_type(self.keyspace_name, "alldatatypes", Alldatatypes) # insert UDT data params = [] @@ -544,7 +533,7 @@ def test_can_insert_udt_all_collection_datatypes(self): """ c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("udttests") + s = c.connect(self.keyspace_name) # create UDT alpha_type_list = [] @@ -576,7 +565,7 @@ def test_can_insert_udt_all_collection_datatypes(self): alphabet_list.append('{0}_{1}'.format(chr(i), chr(j))) Alldatatypes = namedtuple("alldatatypes", alphabet_list) - c.register_user_type("udttests", "alldatatypes", Alldatatypes) + c.register_user_type(self.keyspace_name, "alldatatypes", Alldatatypes) # insert UDT data params = [] @@ -607,11 +596,11 @@ def test_can_insert_nested_collections(self): Test for inserting various types of nested COLLECTION_TYPES into tables and UDTs """ - if self._cass_version < (2, 1, 3): + if self.cass_version < (2, 1, 3): raise unittest.SkipTest("Support for nested collections was introduced in Cassandra 2.1.3") c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("udttests") + s = c.connect(self.keyspace_name) s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple name = self._testMethodName @@ -710,3 +699,36 @@ def test_type_alteration(self): val = s.execute('SELECT v FROM %s' % self.table_name)[0][0] self.assertEqual(val['v0'], 3) self.assertEqual(val['v1'], six.b('\xde\xad\xbe\xef')) + + def test_alter_udt(self): + """ + Test to ensure that altered UDT's are properly surfaced without needing to restart the underlying session. + + @since 3.0.0 + @jira_ticket PYTHON-226 + @expected_result UDT's will reflect added columns without a session restart. + + @test_category data_types, udt + """ + + # Create udt ensure it has the proper column names. + self.session.set_keyspace(self.keyspace_name) + self.session.execute("CREATE TYPE typetoalter (a int)") + typetoalter = namedtuple('typetoalter', ('a')) + self.session.execute("CREATE TABLE {0} (pk int primary key, typetoalter frozen)".format(self.function_table_name)) + insert_statement = self.session.prepare("INSERT INTO {0} (pk, typetoalter) VALUES (?, ?)".format(self.function_table_name)) + self.session.execute(insert_statement, [1, typetoalter(1)]) + results = self.session.execute("SELECT * from {0}".format(self.function_table_name)) + for result in results: + self.assertTrue(hasattr(result.typetoalter, 'a')) + self.assertFalse(hasattr(result.typetoalter, 'b')) + + # Alter UDT and ensure the alter is honored in results + self.session.execute("ALTER TYPE typetoalter add b int") + typetoalter = namedtuple('typetoalter', ('a', 'b')) + self.session.execute(insert_statement, [2, typetoalter(2, 2)]) + results = self.session.execute("SELECT * from {0}".format(self.function_table_name)) + for result in results: + self.assertTrue(hasattr(result.typetoalter, 'a')) + self.assertTrue(hasattr(result.typetoalter, 'b')) + From b0bb19bd5eff8f2d0f56140bd7d6833f08c2ff0f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 23 Nov 2015 11:14:12 -0600 Subject: [PATCH 0219/2005] Fix nested type deserialization for proto < 3 in cython PYTHON-215 --- cassandra/deserializers.pyx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cassandra/deserializers.pyx b/cassandra/deserializers.pyx index ffbb702a4f..061d340a3f 100644 --- a/cassandra/deserializers.pyx +++ b/cassandra/deserializers.pyx @@ -238,7 +238,7 @@ cdef list _deserialize_list_or_set(itemlen_t dummy_version, _unpack_len[itemlen_t](0, &numelements, buf) idx = sizeof(itemlen_t) - + protocol_version = max(3, protocol_version) for _ in range(numelements): subelem(buf, &elem_buf, &idx) result.append(from_binary(deserializer, &elem_buf, protocol_version)) @@ -325,6 +325,7 @@ cdef _deserialize_map(itemlen_t dummy_version, _unpack_len[itemlen_t](0, &numelements, buf) idx = sizeof(itemlen_t) themap = util.OrderedMapSerializedKey(key_type, protocol_version) + protocol_version = max(3, protocol_version) for _ in range(numelements): subelem(buf, &key_buf, &idx) subelem(buf, &val_buf, &idx) From 47b842cc3dac1e36dbb53ff4777872366b05bd6c Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 23 Nov 2015 12:35:21 -0600 Subject: [PATCH 0220/2005] Removing superfluous debug statements in test case --- tests/integration/__init__.py | 2 +- tests/integration/standard/test_types.py | 10 ---------- 2 files changed, 1 insertion(+), 11 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 1fd1a6709c..904c57e673 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -127,7 +127,7 @@ def _tuple_version(version_string): PROTOCOL_VERSION = int(os.getenv('PROTOCOL_VERSION', default_protocol_version)) notprotocolv1 = unittest.skipUnless(PROTOCOL_VERSION > 1, 'Protocol v1 not supported') -lessthenprotocolv4 = unittest.skipUnless(PROTOCOL_VERSION < 4, 'Protocol versions 4 or greater no supported') +lessthenprotocolv4 = unittest.skipUnless(PROTOCOL_VERSION < 4, 'Protocol versions 4 or greater not supported') greaterthanprotocolv3 = unittest.skipUnless(PROTOCOL_VERSION >= 4, 'Protocol versions less than 4 are not supported') greaterthancass20 = unittest.skipUnless(CASSANDRA_VERSION >= '2.1', 'Cassandra version 2.1 or greater required') diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index 5d14f4be9d..e296286cc8 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -806,28 +806,19 @@ def test_nested_types_with_protocol_version(self): for pvr in range(1, 5): self.read_inserts_at_level(pvr) - def print_results(self, results): - print("printing results") - print(str(results.v)) - def read_inserts_at_level(self, proto_ver): session = Cluster(protocol_version=proto_ver).connect(self.keyspace_name) try: - print("reading at version {0}".format(proto_ver)) results = session.execute('select * from t')[0] - self.print_results(results) self.assertEqual("[SortedSet([1, 2]), SortedSet([3, 5])]", str(results.v)) results = session.execute('select * from u')[0] - self.print_results(results) self.assertEqual("SortedSet([[1, 2], [3, 5]])", str(results.v)) results = session.execute('select * from v')[0] - self.print_results(results) self.assertEqual("{SortedSet([1, 2]): [1, 2, 3], SortedSet([3, 5]): [4, 5, 6]}", str(results.v)) results = session.execute('select * from w')[0] - self.print_results(results) self.assertEqual("typ(v0=OrderedMapSerializedKey([(1, [1, 2, 3]), (2, [4, 5, 6])]), v1=[7, 8, 9])", str(results.v)) finally: @@ -836,7 +827,6 @@ def read_inserts_at_level(self, proto_ver): def run_inserts_at_version(self, proto_ver): session = Cluster(protocol_version=proto_ver).connect(self.keyspace_name) try: - print("running inserts with protocol version {0}".format(str(proto_ver))) p = session.prepare('insert into t (k, v) values (?, ?)') session.execute(p, (0, [{1, 2}, {3, 5}])) From 223709aaae876580f938b651b94134f4a9af26b9 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 24 Nov 2015 11:24:56 -0600 Subject: [PATCH 0221/2005] default consistency is now LOCAL_ONE --- cassandra/cluster.py | 4 ++-- docs/api/cassandra/cluster.rst | 2 +- docs/getting_started.rst | 7 ++----- docs/upgrading.rst | 7 +++---- tests/integration/cqlengine/test_consistency.py | 2 +- 5 files changed, 9 insertions(+), 13 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 2be1193061..402a4d3822 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1428,7 +1428,7 @@ class Session(object): .. versionadded:: 2.0.0 """ - default_consistency_level = ConsistencyLevel.LOCAL_QUORUM + default_consistency_level = ConsistencyLevel.LOCAL_ONE """ The default :class:`~ConsistencyLevel` for operations executed through this session. This default may be overridden by setting the @@ -1438,7 +1438,7 @@ class Session(object): .. versionchanged:: 3.0.0 - default changed from ONE to LOCAL_QUORUM + default changed from ONE to LOCAL_ONE """ max_trace_wait = 2.0 diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 324d7348c3..ac2cbcd399 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -97,7 +97,7 @@ .. autoattribute:: default_timeout .. autoattribute:: default_consistency_level - :annotation: = LOCAL_QUORUM + :annotation: = LOCAL_ONE .. autoattribute:: row_factory diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 2920b9ef27..9733fb97ce 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -327,12 +327,9 @@ The consistency level used for a query determines how many of the replicas of the data you are interacting with need to respond for the query to be considered a success. -By default, :attr:`.ConsistencyLevel.LOCAL_QUORUM` will be used for all queries. -This is to provide unsurprising default behavior with respect to read-after-write -consistency. However, many applications will prefer :attr:`.ConsistencyLevel.ONE` -to maximize performance and availability. +By default, :attr:`.ConsistencyLevel.LOCAL_ONE` will be used for all queries. You can specify a different default for the session on :attr:`.Session.default_consistency_level`. -To specify a different consistency level per request, you will need to wrap your queries +To specify a different consistency level per request, wrap queries in a :class:`~.SimpleStatement`: .. code-block:: python diff --git a/docs/upgrading.rst b/docs/upgrading.rst index 21e2abcd58..2fa86b8fc7 100644 --- a/docs/upgrading.rst +++ b/docs/upgrading.rst @@ -12,11 +12,10 @@ previously supported versions. In addition to substantial internal rework, there are several updates to the API that integrators will need to consider: -Default consistency is now ``LOCAL_QUORUM`` +Default consistency is now ``LOCAL_ONE`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Previous value was ``ONE``. ``ONE`` (or ``LOCAL_ONE``) is still appropriate for -many applications, but the new default was chosen to be least surprising for -new users not specifying a consistency level. +Previous value was ``ONE``. The new value is introduced to mesh with the default +DC-aware load balancing policy and to match other drivers. Execution API Updates ^^^^^^^^^^^^^^^^^^^^^ diff --git a/tests/integration/cqlengine/test_consistency.py b/tests/integration/cqlengine/test_consistency.py index 47e1661941..6d8e038847 100644 --- a/tests/integration/cqlengine/test_consistency.py +++ b/tests/integration/cqlengine/test_consistency.py @@ -114,7 +114,7 @@ def test_delete(self): def test_default_consistency(self): # verify global assumed default - self.assertEqual(Session.default_consistency_level, ConsistencyLevel.LOCAL_QUORUM) + self.assertEqual(Session.default_consistency_level, ConsistencyLevel.LOCAL_ONE) # verify that this session default is set according to connection.setup # assumes tests/cqlengine/__init__ setup uses CL.ONE From e9ac05b549ea1cda4b97cbc2a3e74609ee3811cb Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 24 Nov 2015 11:27:22 -0600 Subject: [PATCH 0222/2005] mention default CL in changelog --- CHANGELOG.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 6b2531db8f..504af2babc 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -7,6 +7,7 @@ Features * Add Cluster.update_view_metadata (PYTHON-407) * QueryTrace option to populate partial trace sessions (PYTHON-438) * Attach column names to ResultSet (PYTHON-439) +* Change default consistency level to LOCAL_ONE Bug Fixes --------- From 4e42780f30399fd52c68df5363685486ab93364f Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 24 Nov 2015 11:34:57 -0600 Subject: [PATCH 0223/2005] Adding test for python-438 --- tests/integration/__init__.py | 4 +++ tests/integration/standard/test_query.py | 37 ++++++++++++++++++++++-- 2 files changed, 39 insertions(+), 2 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 904c57e673..34606f6360 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -392,6 +392,10 @@ def class_table_name(self): def function_table_name(self): return self._testMethodName.lower() + @property + def keyspace_table_name(self): + return "{0}.{1}".format(self.keyspace_name, self._testMethodName.lower()) + @classmethod def drop_keyspace(cls): execute_with_long_wait_retry(cls.session, "DROP KEYSPACE {0}".format(cls.ks_name)) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index ade54bcfe3..5bb165c08e 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -22,7 +22,7 @@ from cassandra import ConsistencyLevel from cassandra.query import (PreparedStatement, BoundStatement, SimpleStatement, - BatchStatement, BatchType, dict_factory) + BatchStatement, BatchType, dict_factory, TraceUnavailable) from cassandra.cluster import Cluster from cassandra.policies import HostDistance @@ -138,6 +138,40 @@ def test_client_ip_in_trace(self): self.assertIsNotNone(client_ip, "Client IP was not set in trace with C* >= 2.2") self.assertTrue(pat.match(client_ip), "Client IP from trace did not match the expected value") + def test_incomplete_query_trace(self): + """ + Tests to ensure that partial tracing works. + + Creates a table and runs an insert. Then attempt a query with tracing enabled. After the query is run we delete the + duration information associated with the trace, and attempt to populate the tracing information. We should receive tracing + information without any trace enabled. + + @since 3.0.0 + @jira_ticket PYTHON-438 + @expected_result tracing comes back sans duration + + @test_category tracing + """ + + # Create table and run insert, then select + self.session.execute("CREATE TABLE {0} (k INT, i INT, PRIMARY KEY(k, i))".format(self.keyspace_table_name)) + self.session.execute("INSERT INTO {0} (k, i) VALUES (0, 1)".format(self.keyspace_table_name)) + response_future = self.session.execute_async("SELECT i FROM {0} WHERE k=0".format(self.keyspace_table_name), trace=True) + response_future.result() + + # Delete trace duration from latest select session. + DELETE_TRACES = "DELETE duration FROM system_traces.sessions WHERE session_id = {0}" + + for trace in response_future._query_traces: + self.session.execute(DELETE_TRACES.format(trace.trace_id)) + self.assertRaises(TraceUnavailable,trace.populate(wait_for_complete=False)) + self.assertIsNone(trace.duration) + self.assertIsNotNone(trace.trace_id) + self.assertIsNotNone(trace.request_type) + self.assertIsNotNone(trace.parameters) + self.assertIsNotNone(trace.events) + self.assertIsNotNone(trace.started_at) + def test_column_names(self): """ Test to validate the columns are present on the result set. @@ -295,7 +329,6 @@ def setUp(self): self.cluster.set_core_connections_per_host(HostDistance.LOCAL, 1) self.session = self.cluster.connect() - def tearDown(self): self.cluster.shutdown() From eda07cf6c56ea844261139a6d510ec7f11baabae Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 24 Nov 2015 11:55:34 -0600 Subject: [PATCH 0224/2005] Additional verification for partial trace session --- tests/integration/standard/test_query.py | 35 ++++++++++++++---------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 5bb165c08e..1693f171d7 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -32,7 +32,6 @@ def setup_module(): - print("Setting up module") use_singledc() global CASS_SERVER_VERSION CASS_SERVER_VERSION = get_server_versions()[0] @@ -143,8 +142,8 @@ def test_incomplete_query_trace(self): Tests to ensure that partial tracing works. Creates a table and runs an insert. Then attempt a query with tracing enabled. After the query is run we delete the - duration information associated with the trace, and attempt to populate the tracing information. We should receive tracing - information without any trace enabled. + duration information associated with the trace, and attempt to populate the tracing information. + Should fail with wait_for_complete=True, succeed for False. @since 3.0.0 @jira_ticket PYTHON-438 @@ -159,18 +158,24 @@ def test_incomplete_query_trace(self): response_future = self.session.execute_async("SELECT i FROM {0} WHERE k=0".format(self.keyspace_table_name), trace=True) response_future.result() - # Delete trace duration from latest select session. - DELETE_TRACES = "DELETE duration FROM system_traces.sessions WHERE session_id = {0}" - - for trace in response_future._query_traces: - self.session.execute(DELETE_TRACES.format(trace.trace_id)) - self.assertRaises(TraceUnavailable,trace.populate(wait_for_complete=False)) - self.assertIsNone(trace.duration) - self.assertIsNotNone(trace.trace_id) - self.assertIsNotNone(trace.request_type) - self.assertIsNotNone(trace.parameters) - self.assertIsNotNone(trace.events) - self.assertIsNotNone(trace.started_at) + self.assertEqual(len(response_future._query_traces), 1) + trace = response_future._query_traces[0] + + # Delete trace duration from the session (this is what the driver polls for "complete") + self.session.execute("DELETE duration FROM system_traces.sessions WHERE session_id = %s", (trace.trace_id,)) + + # should raise because duration is not set + self.assertRaises(TraceUnavailable, trace.populate, max_wait=0.2, wait_for_complete=True) + self.assertFalse(trace.events) + + # should get the events with wait False + trace.populate(wait_for_complete=False) + self.assertIsNone(trace.duration) + self.assertIsNotNone(trace.trace_id) + self.assertIsNotNone(trace.request_type) + self.assertIsNotNone(trace.parameters) + self.assertTrue(trace.events) # non-zero list len + self.assertIsNotNone(trace.started_at) def test_column_names(self): """ From 6af642dc6ad34c927f4c13c6c632e5f13a0b4dbe Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 24 Nov 2015 14:39:55 -0600 Subject: [PATCH 0225/2005] finalize changelog and version for 3.0 release --- CHANGELOG.rst | 1 + cassandra/__init__.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 504af2babc..79ff1ff980 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,5 +1,6 @@ 3.0.0 ===== +November 24, 2015 Features -------- diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 5686621355..88898842e7 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 0, '0rc1', 'post0') +__version_info__ = (3, 0, 0) __version__ = '.'.join(map(str, __version_info__)) From cbbd024e14985af258eeeadaadb5d5dff6b1bce8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 24 Nov 2015 14:46:36 -0600 Subject: [PATCH 0226/2005] post-release version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 88898842e7..cdcd34550b 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 0, 0) +__version_info__ = (3, 0, 0, 'post0') __version__ = '.'.join(map(str, __version_info__)) From c488c57e005c3aff29946c52cabb3c327ca9ac25 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 25 Nov 2015 10:03:11 -0600 Subject: [PATCH 0227/2005] Add instructions for bundling driver for Cassandra --- README-dev.rst | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/README-dev.rst b/README-dev.rst index c5901534c7..bc38f2b997 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -131,3 +131,17 @@ To run the benchmarks, pick one of the files under the ``benchmarks/`` dir and r There are a few options. Use ``--help`` to see them all:: python benchmarks/future_batches.py --help + +Packaging for Cassandra +======================= +A source distribution is included in Cassandra, which uses the driver internally for ``cqlsh``. +To package a released version, checkout the tag and build a source zip archive:: + + python setup.py sdist --formats=zip + +If packaging a pre-release (untagged) version, it is useful to include a commit hash in the archive +name to specify the built version:: + + python setup.py egg_info -b-`git rev-parse --short HEAD` sdist --formats=zip + +The file (``dist/cassandra-driver-.zip``) is packaged with Cassandra in ``cassandra/lib/cassandra-driver-internal-only*zip``. From bbba912a886c1b2437bfef45511fec7ff83a307e Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 25 Nov 2015 12:15:50 -0600 Subject: [PATCH 0228/2005] skipping bad keyspace metadata until python-440 is resolved --- tests/integration/standard/test_metadata.py | 2 ++ tests/unit/cython/utils.py | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index fa9f037447..38a4cecb95 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -36,6 +36,7 @@ from tests.integration import get_cluster, use_singledc, PROTOCOL_VERSION, get_server_versions, execute_until_pass, \ BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, drop_keyspace_shutdown_cluster +from tests.unit.cython.utils import notcython def setup_module(): use_singledc() @@ -1906,6 +1907,7 @@ def _skip_if_not_version(self, version): if CASS_SERVER_VERSION < version: raise unittest.SkipTest("Requires server version >= %s" % (version,)) + @notcython def test_bad_keyspace(self): with patch.object(self.parser_class, '_build_keyspace_metadata_internal', side_effect=self.BadMetaException): self.cluster.refresh_keyspace_metadata(self.keyspace_name) diff --git a/tests/unit/cython/utils.py b/tests/unit/cython/utils.py index f8de215c27..0d9a4c2b77 100644 --- a/tests/unit/cython/utils.py +++ b/tests/unit/cython/utils.py @@ -35,4 +35,5 @@ def cyimport(import_path): # @cythontest # def test_something(self): ... cythontest = unittest.skipUnless(HAVE_CYTHON, 'Cython is not available') -numpytest = unittest.skipUnless(HAVE_CYTHON and HAVE_NUMPY, 'NumPy is not available') +notcython = unittest.skipIf(HAVE_CYTHON, 'Cython not supported') +numpytest = unittest.skipUnless(HAVE_CYTHON and HAVE_NUMPY, 'NumPy is not available') From 1e857176e44e168e4e613fd9d42676557f2ca307 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 25 Nov 2015 14:34:20 -0600 Subject: [PATCH 0229/2005] capture exc_info early in keyspace meta handling Resolves an issue where exc_info is being cleared in nosetest context, causing tests to fail. --- cassandra/metadata.py | 4 ++-- tests/integration/standard/test_metadata.py | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index cf5a26b11c..19bf5feac1 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1708,9 +1708,9 @@ def _build_keyspace_metadata(cls, row): ksm = cls._build_keyspace_metadata_internal(row) except Exception: name = row["keyspace_name"] - log.exception("Error while parsing metadata for keyspace %s row(%s)", name, row) ksm = KeyspaceMetadata(name, False, 'UNKNOWN', {}) - ksm._exc_info = sys.exc_info() + ksm._exc_info = sys.exc_info() # capture exc_info before log because nose (test) logging clears it in certain circumstances + log.exception("Error while parsing metadata for keyspace %s row(%s)", name, row) return ksm @staticmethod diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 38a4cecb95..78589d5b3a 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1907,7 +1907,6 @@ def _skip_if_not_version(self, version): if CASS_SERVER_VERSION < version: raise unittest.SkipTest("Requires server version >= %s" % (version,)) - @notcython def test_bad_keyspace(self): with patch.object(self.parser_class, '_build_keyspace_metadata_internal', side_effect=self.BadMetaException): self.cluster.refresh_keyspace_metadata(self.keyspace_name) From 1f46791fa502c80149b9e6f17386d013dc054066 Mon Sep 17 00:00:00 2001 From: Kevin Gallardo Date: Fri, 20 Nov 2015 15:02:45 +0000 Subject: [PATCH 0230/2005] First Python support for DSE Graph. --- cassandra/cluster.py | 46 ++++++++++++++++++++++++++++++++++++++++++-- cassandra/query.py | 10 ++++++++++ 2 files changed, 54 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 402a4d3822..4ddccc948b 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -19,7 +19,7 @@ from __future__ import absolute_import import atexit -from collections import defaultdict, Mapping +from collections import defaultdict, Mapping, namedtuple from concurrent.futures import ThreadPoolExecutor import logging from random import random @@ -33,6 +33,8 @@ from six.moves import range from six.moves import queue as Queue +import json + import weakref from weakref import WeakValueDictionary try: @@ -71,7 +73,7 @@ NoConnectionsAvailable) from cassandra.query import (SimpleStatement, PreparedStatement, BoundStatement, BatchStatement, bind_params, QueryTrace, Statement, - named_tuple_factory, dict_factory, FETCH_SIZE_UNSET) + GraphStatement, named_tuple_factory, dict_factory, FETCH_SIZE_UNSET) def _is_eventlet_monkey_patched(): @@ -1934,6 +1936,22 @@ def get_pool_state(self): def get_pools(self): return self._pools.values() +class GraphSession(object): + + default_graph_options = {'graph-source':b'default', 'graph-language':b'gremlin-groovy'} + + def __init__(self, session, default_graph_options=None): + self.session = session + if default_graph_options: + self.default_graph_options.update(default_graph_options) + + def execute(self, query, *args, **kwargs): + if (isinstance(query, GraphStatement)): + z = self.default_graph_options.copy() + z.update(query.graph_options) + return GraphResultSet(self.session.execute(query, custom_payload=z)) + else: + return GraphResultSet(self.session.execute(query, custom_payload=self.default_graph_options)) class UserTypeDoesNotExist(Exception): """ @@ -3268,6 +3286,30 @@ class QueryExhausted(Exception): """ pass +class GraphResultSet(object): + wrapped_rs = None + + def __init__(self, result_set): + self.wrapped_rs = result_set + + def __iter__(self): + # Not great but doing iter(self.wrapped_rs); return self; Does not work- + # -with the paging and __getitem__ on the wrapped result set. + return GraphResultSet(iter(self.wrapped_rs)) + + def __getitem__(self, i): + return self._generate_traversal_result_tuple(self.wrapped_rs[i].gremlin) + + def next(self): + return self._generate_traversal_result_tuple(self.wrapped_rs.next().gremlin) + + __next__ = next + + def _generate_traversal_result_tuple(self, result): + json_result = json.loads(result)['result'] + GraphTraversalResult = namedtuple('GraphTraversalResult', json_result.keys()) + return GraphTraversalResult(*json_result.values()) + class ResultSet(object): """ diff --git a/cassandra/query.py b/cassandra/query.py index e66f875c56..0ec02d6233 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -335,6 +335,16 @@ def __str__(self): (self.query_string, consistency)) __repr__ = __str__ +class GraphStatement(SimpleStatement): + """ + A simple, un-prepared graph query. + """ + + def __init__(self, query_string, *args, **kwargs): + """ + """ + SimpleStatement.__init__(self, query_string, *args, **kwargs) + self.graph_options = {} class PreparedStatement(object): """ From 8618bf9e1484669595d6e62cc39658645b1756d3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 2 Dec 2015 15:09:32 -0600 Subject: [PATCH 0231/2005] additional setup envvar disabling all extensions --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index c2845e85c9..4fcbe35000 100644 --- a/setup.py +++ b/setup.py @@ -185,7 +185,7 @@ def __init__(self, ext): elif not is_supported_arch: sys.stderr.write(arch_unsupported_msg) -try_extensions = "--no-extensions" not in sys.argv and is_supported_platform and is_supported_arch +try_extensions = "--no-extensions" not in sys.argv and is_supported_platform and is_supported_arch and not os.environ.get('CASS_DRIVER_NO_EXTENSIONS') try_murmur3 = try_extensions and "--no-murmur3" not in sys.argv try_libev = try_extensions and "--no-libev" not in sys.argv and not is_pypy and not is_windows try_cython = try_extensions and "--no-cython" not in sys.argv and not is_pypy and not os.environ.get('CASS_DRIVER_NO_CYTHON') From 4cbf170d42a0ecadee5d09ae2b327521b348b814 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 2 Dec 2015 15:52:31 -0600 Subject: [PATCH 0232/2005] doc: more detail about ways to skip cython --- cassandra/cluster.py | 2 +- docs/installation.rst | 21 +++++++++++++++++++-- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 402a4d3822..dfeb70263d 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3307,7 +3307,7 @@ def has_more_pages(self): @property def current_rows(self): """ - :return: the list of current page rows. May be empty if the result was empty, + The list of current page rows. May be empty if the result was empty, or this is the last page. """ return self._current_rows or [] diff --git a/docs/installation.rst b/docs/installation.rst index eb252a551f..9c5e52ff61 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -129,16 +129,33 @@ By default, this package uses `Cython `_ to optimize core mo This is not a hard requirement, but is engaged by default to build extensions offering better performance than the pure Python implementation. +This is a costly build phase, especially in clean environments where the Cython compiler must be built This build phase can be avoided using the build switch, or an environment variable:: python setup.py install --no-cython - -or- - pip install --install-option="--no-cython" Alternatively, an environment variable can be used to switch this option regardless of context:: CASS_DRIVER_NO_CYTHON=1 + - or, to disable all extensions: + CASS_DRIVER_NO_EXTENSIONS=1 + +This method is required when using pip, which provides no other way of injecting user options in a single command:: + + CASS_DRIVER_NO_CYTHON=1 pip install cassandra-driver + CASS_DRIVER_NO_CYTHON=1 sudo -E pip install ~/python-driver + +The environment variable is the preferred option because it spans all invocations of setup.py, and will +prevent Cython from being materialized as a setup requirement. + +If your sudo configuration does not allow SETENV, you must push the option flag down via pip. However, pip +applies these options to all dependencies (which break on the custom flag). Therefore, you must first install +dependencies, then use install-option:: + + sudo pip install six futures + sudo pip install --install-option="--no-cython" + libev support ^^^^^^^^^^^^^ From d8fff462863104a8b883612f80f91e8e20699f40 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 2 Dec 2015 15:56:14 -0600 Subject: [PATCH 0233/2005] short-circuit setup_requires for egg_info in pip context PYTHON-448 --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index 4fcbe35000..2ece7e6db8 100644 --- a/setup.py +++ b/setup.py @@ -189,6 +189,7 @@ def __init__(self, ext): try_murmur3 = try_extensions and "--no-murmur3" not in sys.argv try_libev = try_extensions and "--no-libev" not in sys.argv and not is_pypy and not is_windows try_cython = try_extensions and "--no-cython" not in sys.argv and not is_pypy and not os.environ.get('CASS_DRIVER_NO_CYTHON') +try_cython &= 'egg_info' not in sys.argv # bypass setup_requires for pip egg_info calls, which will never have --install-option"--no-cython" coming fomr pip sys.argv = [a for a in sys.argv if a not in ("--no-murmur3", "--no-libev", "--no-cython", "--no-extensions")] From 1ff7eb37e5b23c1cfa4c2e2b7a9487244cf98ed5 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 2 Dec 2015 16:34:37 -0600 Subject: [PATCH 0234/2005] Twisted gave up on Python 2.6 and 3.3. Lock down for testing --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 89278ce902..f4d21ba5df 100644 --- a/tox.ini +++ b/tox.ini @@ -12,7 +12,7 @@ deps = {[base]deps} cython py26: unittest2 py{26,27}: gevent - twisted + twisted <15.5.0 setenv = LIBEV_EMBED=0 CARES_EMBED=0 changedir = {envtmpdir} From 5ce7dbee305dee92a4b0145cf33015c32ac28a7d Mon Sep 17 00:00:00 2001 From: Kevin Gallardo Date: Fri, 4 Dec 2015 11:06:58 +0000 Subject: [PATCH 0235/2005] Moved Graph components in the dse module. --- cassandra/cluster.py | 47 ++---------------------------------- cassandra/query.py | 11 --------- dse/__init__.py | 0 dse/graph.py | 57 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 59 insertions(+), 56 deletions(-) create mode 100644 dse/__init__.py create mode 100644 dse/graph.py diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 4ddccc948b..8b8749172d 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -19,7 +19,7 @@ from __future__ import absolute_import import atexit -from collections import defaultdict, Mapping, namedtuple +from collections import defaultdict, Mapping from concurrent.futures import ThreadPoolExecutor import logging from random import random @@ -33,8 +33,6 @@ from six.moves import range from six.moves import queue as Queue -import json - import weakref from weakref import WeakValueDictionary try: @@ -73,7 +71,7 @@ NoConnectionsAvailable) from cassandra.query import (SimpleStatement, PreparedStatement, BoundStatement, BatchStatement, bind_params, QueryTrace, Statement, - GraphStatement, named_tuple_factory, dict_factory, FETCH_SIZE_UNSET) + named_tuple_factory, dict_factory, FETCH_SIZE_UNSET) def _is_eventlet_monkey_patched(): @@ -1936,23 +1934,6 @@ def get_pool_state(self): def get_pools(self): return self._pools.values() -class GraphSession(object): - - default_graph_options = {'graph-source':b'default', 'graph-language':b'gremlin-groovy'} - - def __init__(self, session, default_graph_options=None): - self.session = session - if default_graph_options: - self.default_graph_options.update(default_graph_options) - - def execute(self, query, *args, **kwargs): - if (isinstance(query, GraphStatement)): - z = self.default_graph_options.copy() - z.update(query.graph_options) - return GraphResultSet(self.session.execute(query, custom_payload=z)) - else: - return GraphResultSet(self.session.execute(query, custom_payload=self.default_graph_options)) - class UserTypeDoesNotExist(Exception): """ An attempt was made to use a user-defined type that does not exist. @@ -3286,30 +3267,6 @@ class QueryExhausted(Exception): """ pass -class GraphResultSet(object): - wrapped_rs = None - - def __init__(self, result_set): - self.wrapped_rs = result_set - - def __iter__(self): - # Not great but doing iter(self.wrapped_rs); return self; Does not work- - # -with the paging and __getitem__ on the wrapped result set. - return GraphResultSet(iter(self.wrapped_rs)) - - def __getitem__(self, i): - return self._generate_traversal_result_tuple(self.wrapped_rs[i].gremlin) - - def next(self): - return self._generate_traversal_result_tuple(self.wrapped_rs.next().gremlin) - - __next__ = next - - def _generate_traversal_result_tuple(self, result): - json_result = json.loads(result)['result'] - GraphTraversalResult = namedtuple('GraphTraversalResult', json_result.keys()) - return GraphTraversalResult(*json_result.values()) - class ResultSet(object): """ diff --git a/cassandra/query.py b/cassandra/query.py index 0ec02d6233..af3ebd8f1a 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -335,17 +335,6 @@ def __str__(self): (self.query_string, consistency)) __repr__ = __str__ -class GraphStatement(SimpleStatement): - """ - A simple, un-prepared graph query. - """ - - def __init__(self, query_string, *args, **kwargs): - """ - """ - SimpleStatement.__init__(self, query_string, *args, **kwargs) - self.graph_options = {} - class PreparedStatement(object): """ A statement that has been prepared against at least one Cassandra node. diff --git a/dse/__init__.py b/dse/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/dse/graph.py b/dse/graph.py new file mode 100644 index 0000000000..2ea3512c7d --- /dev/null +++ b/dse/graph.py @@ -0,0 +1,57 @@ +from cassandra.query import SimpleStatement +from collections import namedtuple + +import json + + +class GraphStatement(SimpleStatement): + """ + A simple, un-prepared graph query. + """ + + def __init__(self, query_string, *args, **kwargs): + """ + """ + SimpleStatement.__init__(self, query_string, *args, **kwargs) + self.graph_options = {} + +class GraphResultSet(object): + wrapped_rs = None + + def __init__(self, result_set): + self.wrapped_rs = result_set + + def __iter__(self): + # Not great but doing iter(self.wrapped_rs); return self; Does not work- + # -with the paging and __getitem__ on the wrapped result set. + return GraphResultSet(iter(self.wrapped_rs)) + + def __getitem__(self, i): + return self._generate_traversal_result_tuple(self.wrapped_rs[i].gremlin) + + def next(self): + return self._generate_traversal_result_tuple(self.wrapped_rs.next().gremlin) + + __next__ = next + + def _generate_traversal_result_tuple(self, result): + json_result = json.loads(result)['result'] + GraphTraversalResult = namedtuple('GraphTraversalResult', json_result.keys()) + return GraphTraversalResult(*json_result.values()) + +class GraphSession(object): + + default_graph_options = {'graph-source':b'default', 'graph-language':b'gremlin-groovy'} + + def __init__(self, session, default_graph_options=None): + self.session = session + if default_graph_options: + self.default_graph_options.update(default_graph_options) + + def execute(self, query, *args, **kwargs): + if (isinstance(query, GraphStatement)): + z = self.default_graph_options.copy() + z.update(query.graph_options) + return GraphResultSet(self.session.execute(query, custom_payload=z)) + else: + return GraphResultSet(self.session.execute(query, custom_payload=self.default_graph_options)) From 7e31136c3a527374504c65891a0cee1f9ce62ba3 Mon Sep 17 00:00:00 2001 From: Kevin Gallardo Date: Fri, 4 Dec 2015 19:07:05 +0000 Subject: [PATCH 0236/2005] Added AbstractGraphStatement, to add more statement classes (Prepared, Bound) --- dse/graph.py | 59 ++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 43 insertions(+), 16 deletions(-) diff --git a/dse/graph.py b/dse/graph.py index 2ea3512c7d..f31ee30e8c 100644 --- a/dse/graph.py +++ b/dse/graph.py @@ -3,34 +3,61 @@ import json - -class GraphStatement(SimpleStatement): +class AbstractGraphStatement: """ - A simple, un-prepared graph query. """ + _wrapped = None + graph_options = {} + + def __init__(self): + pass - def __init__(self, query_string, *args, **kwargs): + def configure(self, session_graph_options): """ + Handles the general tranformations to apply on the wrapped statement. """ - SimpleStatement.__init__(self, query_string, *args, **kwargs) - self.graph_options = {} + + merged_options = session_graph_options.copy() + merged_options.update(self.graph_options) + self._wrapped.custom_payload = merged_options + + def configure_and_get_wrapped(self, graph_options): + raise NotImplementedError() + + +class GraphStatement(AbstractGraphStatement): + """ + A simple, un-prepared graph query. + """ + + def __init__(self, query_string, graph_options=None, *args, **kwargs): + AbstractGraphStatement.__init__(self) + self.query_string = query_string + self.args = args + + def configure_and_get_wrapped(self, session_graph_options): + self._wrapped = SimpleStatement(self.query_string) + self.configure(session_graph_options) + return self._wrapped + class GraphResultSet(object): - wrapped_rs = None + """ + """ def __init__(self, result_set): - self.wrapped_rs = result_set + self._wrapped_rs = result_set def __iter__(self): - # Not great but doing iter(self.wrapped_rs); return self; Does not work- + # Not great but doing iter(self._wrapped_rs); return self; Does not work- # -with the paging and __getitem__ on the wrapped result set. - return GraphResultSet(iter(self.wrapped_rs)) + return GraphResultSet(iter(self._wrapped_rs)) def __getitem__(self, i): - return self._generate_traversal_result_tuple(self.wrapped_rs[i].gremlin) + return self._generate_traversal_result_tuple(self._wrapped_rs[i].gremlin) def next(self): - return self._generate_traversal_result_tuple(self.wrapped_rs.next().gremlin) + return self._generate_traversal_result_tuple(self._wrapped_rs.next().gremlin) __next__ = next @@ -40,6 +67,8 @@ def _generate_traversal_result_tuple(self, result): return GraphTraversalResult(*json_result.values()) class GraphSession(object): + """ + """ default_graph_options = {'graph-source':b'default', 'graph-language':b'gremlin-groovy'} @@ -49,9 +78,7 @@ def __init__(self, session, default_graph_options=None): self.default_graph_options.update(default_graph_options) def execute(self, query, *args, **kwargs): - if (isinstance(query, GraphStatement)): - z = self.default_graph_options.copy() - z.update(query.graph_options) - return GraphResultSet(self.session.execute(query, custom_payload=z)) + if (isinstance(query, AbstractGraphStatement)): + return GraphResultSet(self.session.execute(query.configure_and_get_wrapped(self.default_graph_options))) else: return GraphResultSet(self.session.execute(query, custom_payload=self.default_graph_options)) From 5c6cd5f9b77fe99f6906b785e332f74df260ce74 Mon Sep 17 00:00:00 2001 From: Kevin Gallardo Date: Mon, 7 Dec 2015 14:02:51 +0000 Subject: [PATCH 0237/2005] Added some documentation about the new classes. --- dse/graph.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/dse/graph.py b/dse/graph.py index f31ee30e8c..a18f6bef1d 100644 --- a/dse/graph.py +++ b/dse/graph.py @@ -5,6 +5,11 @@ class AbstractGraphStatement: """ + Parent implementation of the Graph specific statements. It encapsulate the necessary work + to produce a statement recognizable to the DSEQueryHandler. + Child implementations will contain the graph_options field and will have to call self.configure() + before being executed. They also need to implement the configure_and_get_wrapped() method that + will be called by the GraphSession to transparently execute the wrapped statement. """ _wrapped = None graph_options = {} @@ -16,7 +21,6 @@ def configure(self, session_graph_options): """ Handles the general tranformations to apply on the wrapped statement. """ - merged_options = session_graph_options.copy() merged_options.update(self.graph_options) self._wrapped.custom_payload = merged_options @@ -43,6 +47,7 @@ def configure_and_get_wrapped(self, session_graph_options): class GraphResultSet(object): """ + A results set containing multiple GraphTraversalResult objects. """ def __init__(self, result_set): @@ -50,7 +55,7 @@ def __init__(self, result_set): def __iter__(self): # Not great but doing iter(self._wrapped_rs); return self; Does not work- - # -with the paging and __getitem__ on the wrapped result set. + # -because of the paging and __getitem__ on the wrapped result set. return GraphResultSet(iter(self._wrapped_rs)) def __getitem__(self, i): @@ -68,6 +73,7 @@ def _generate_traversal_result_tuple(self, result): class GraphSession(object): """ + A session object allowing to execute statements against a DSEGraph cluster. """ default_graph_options = {'graph-source':b'default', 'graph-language':b'gremlin-groovy'} From 4bdf8a7632a2309d8097cee1f1b4df01c94b3f70 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 7 Dec 2015 08:26:31 -0600 Subject: [PATCH 0238/2005] doc: missing ')' in code block --- docs/getting_started.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 9733fb97ce..2d9c7ea461 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -397,6 +397,6 @@ level on that: .. code-block:: python # override the QUORUM default - user3_lookup = user_lookup_stmt.bind([user_id3] + user3_lookup = user_lookup_stmt.bind([user_id3]) user3_lookup.consistency_level = ConsistencyLevel.ALL user3 = session.execute(user3_lookup) From 2bc5548c27f0c7bfd30c40db17be0ee4d729a3e6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 2 Dec 2015 16:34:37 -0600 Subject: [PATCH 0239/2005] Twisted gave up on Python 2.6 and 3.3. Lock down for testing --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 89278ce902..f4d21ba5df 100644 --- a/tox.ini +++ b/tox.ini @@ -12,7 +12,7 @@ deps = {[base]deps} cython py26: unittest2 py{26,27}: gevent - twisted + twisted <15.5.0 setenv = LIBEV_EMBED=0 CARES_EMBED=0 changedir = {envtmpdir} From a53af681ad1832e9cfcaa8a9169b45623340fd92 Mon Sep 17 00:00:00 2001 From: Kevin Gallardo Date: Wed, 9 Dec 2015 18:12:34 +0000 Subject: [PATCH 0240/2005] Added PreparedGraphStatement and BoundGraphStatement classes. - Once DSE Graph adds the possibility to prepare Gremlin queries, these classes should allow preparing, binding and executing binded statements. Tested with a sort of mock prepared statement, the behaviour of Prepared and BoundGraphStatements is validated in the sense that they serialize the parameters in the expected way defined in the specs, and returns results of the executions. - Minor GraphResultSet method signature change. --- dse/graph.py | 77 ++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 72 insertions(+), 5 deletions(-) diff --git a/dse/graph.py b/dse/graph.py index a18f6bef1d..abcef57bfc 100644 --- a/dse/graph.py +++ b/dse/graph.py @@ -45,6 +45,56 @@ def configure_and_get_wrapped(self, session_graph_options): return self._wrapped +class PreparedGraphStatement(object): + """ + """ + + def __init__(self, prepared_statement, graph_statement): + self.ps = prepared_statement + self.gs = graph_statement + + def bind(self, values=None): + return BoundGraphStatement(self.ps, self.gs).bind(values) + + +class BoundGraphStatement(AbstractGraphStatement): + """ + """ + + prepared_statement = None + graph_statement = None + + def __init__(self, prepared_statement, graph_statement): + self.ps = prepared_statement + self.gs = graph_statement + self.values = None + + def bind(self, values_param=None): + values = None + if values_param is None: + values = {} + else: + values = values_param + + if not isinstance(values, dict): + # TODO: change the exception class + raise Exception('The values parameter can only be a dict, unnamed parameters are not authorized in Gremlin queries.') + self.values = values + return self + + def configure_and_get_wrapped(self, graph_options): + # Transform the dict of parameters in a [{'name':name, 'value':value}] as imposed in the specs + values_list = [] + for key, value in self.values.iteritems(): + json_param = json.dumps({'name':key, 'value':value}) + values_list.append(json_param) + + self._wrapped = self.ps.bind(values_list) + self.configure(graph_options) + + return self._wrapped + + class GraphResultSet(object): """ A results set containing multiple GraphTraversalResult objects. @@ -66,14 +116,14 @@ def next(self): __next__ = next - def _generate_traversal_result_tuple(self, result): + def _generate_traversal_result_tuple(result): json_result = json.loads(result)['result'] GraphTraversalResult = namedtuple('GraphTraversalResult', json_result.keys()) return GraphTraversalResult(*json_result.values()) class GraphSession(object): """ - A session object allowing to execute statements against a DSEGraph cluster. + A session object allowing to execute Gremlin queries against a DSEGraph cluster. """ default_graph_options = {'graph-source':b'default', 'graph-language':b'gremlin-groovy'} @@ -84,7 +134,24 @@ def __init__(self, session, default_graph_options=None): self.default_graph_options.update(default_graph_options) def execute(self, query, *args, **kwargs): - if (isinstance(query, AbstractGraphStatement)): - return GraphResultSet(self.session.execute(query.configure_and_get_wrapped(self.default_graph_options))) + if isinstance(query, AbstractGraphStatement): + return self._execute(query, args, kwargs) + else: + graph_statement = GraphStatement(query, args) + return self._execute(graph_statement, args, kwargs) + + def _execute(self, graph_statement, *args, **kwargs): + statement = graph_statement.configure_and_get_wrapped(self.default_graph_options) + return GraphResultSet(self.session.execute(statement, args, kwargs)) + + def prepare(self, query, *args, **kwargs): + # Should maybe only accept GraphStatement instances instead of AbstractGraphStatement. + if isinstance(query, AbstractGraphStatement): + return self._prepare(query, args, kwargs) else: - return GraphResultSet(self.session.execute(query, custom_payload=self.default_graph_options)) + graph_statement = GraphStatement(query, args) + return self._prepare(graph_statement, args, kwargs) + + def _prepare(self, graph_statement, *args, **kwargs): + return PreparedGraphStatement(self.session.prepare(graph_statement.configure_and_get_wrapped(self.default_graph_options), args), graph_statement) + From 6412db55cd793fa0d5fdd94c9feeccb517529983 Mon Sep 17 00:00:00 2001 From: Kevin Gallardo Date: Thu, 10 Dec 2015 14:13:11 +0000 Subject: [PATCH 0241/2005] Docs for all public fields and methods, misc code changes. --- dse/graph.py | 127 +++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 92 insertions(+), 35 deletions(-) diff --git a/dse/graph.py b/dse/graph.py index abcef57bfc..e507a2889d 100644 --- a/dse/graph.py +++ b/dse/graph.py @@ -8,24 +8,31 @@ class AbstractGraphStatement: Parent implementation of the Graph specific statements. It encapsulate the necessary work to produce a statement recognizable to the DSEQueryHandler. Child implementations will contain the graph_options field and will have to call self.configure() - before being executed. They also need to implement the configure_and_get_wrapped() method that + before being executed. They also need to implement the _configure_and_get_wrapped() method that will be called by the GraphSession to transparently execute the wrapped statement. """ _wrapped = None + + """ + The graph specific options that will be included in the execution of the statement. + The 4 available graph options are : + - graph-keyspace : define the keyspace the graph is defined with. + - graph-source : choose the graph traversal source, configured on the server side. + - graph-language : the language used in the queries. + - graph-rebinding : rename the traversal source. + """ graph_options = {} def __init__(self): pass - def configure(self, session_graph_options): - """ - Handles the general tranformations to apply on the wrapped statement. - """ + def _configure(self, session_graph_options): + # Handles the general tranformations to apply on the wrapped statement. merged_options = session_graph_options.copy() merged_options.update(self.graph_options) self._wrapped.custom_payload = merged_options - def configure_and_get_wrapped(self, graph_options): + def _configure_and_get_wrapped(self, graph_options): raise NotImplementedError() @@ -34,42 +41,70 @@ class GraphStatement(AbstractGraphStatement): A simple, un-prepared graph query. """ - def __init__(self, query_string, graph_options=None, *args, **kwargs): + """ + The query string used in this statement. + """ + query_string = None + + + def __init__(self, query_string, graph_options=None): AbstractGraphStatement.__init__(self) self.query_string = query_string - self.args = args - def configure_and_get_wrapped(self, session_graph_options): + def _configure_and_get_wrapped(self, session_graph_options): self._wrapped = SimpleStatement(self.query_string) - self.configure(session_graph_options) + self._configure(session_graph_options) return self._wrapped class PreparedGraphStatement(object): """ + A prepared graph statement returned from the execution of a GraphSession.prepare() + method. This query corresponding to this statement is supposed to contain Gremlin + parameters that will eventually be bound. """ + _prepared_statement = None + _graph_statement = None + def __init__(self, prepared_statement, graph_statement): - self.ps = prepared_statement - self.gs = graph_statement + self._prepared_statement = prepared_statement + self._graph_statement = graph_statement def bind(self, values=None): - return BoundGraphStatement(self.ps, self.gs).bind(values) + """ + Add parameters values to this prepared statement. This will create a BoundGraphStatement + instance. It is possible to not provide values for now, but call bind() later on the + BoundGraphStatement. + """ + return BoundGraphStatement(self._prepared_statement, self._graph_statement).bind(values) class BoundGraphStatement(AbstractGraphStatement): """ + A statement bound with values resulting from the preparation of a simple graph statement. """ - prepared_statement = None - graph_statement = None + _prepared_statement = None + _graph_statement = None + + + """ + The values of the parameters to be sent with the statement. + """ + values = None def __init__(self, prepared_statement, graph_statement): - self.ps = prepared_statement - self.gs = graph_statement + self._prepared_statement = prepared_statement + self._graph_statement = graph_statement self.values = None def bind(self, values_param=None): + """ + Add values that values will be added to the execution of the previously prepared statement. + The values bound can only be in the form of a dictionary, meaning that each Gremlin parameter + must be named, and bound with its name as well. + """ values = None if values_param is None: values = {} @@ -78,19 +113,22 @@ def bind(self, values_param=None): if not isinstance(values, dict): # TODO: change the exception class - raise Exception('The values parameter can only be a dict, unnamed parameters are not authorized in Gremlin queries.') + raise Exception('The values parameter can only be a dictionary, unnamed parameters are not authorized in Gremlin queries.') + self.values = values return self - def configure_and_get_wrapped(self, graph_options): + def _configure_and_get_wrapped(self, graph_options): # Transform the dict of parameters in a [{'name':name, 'value':value}] as imposed in the specs values_list = [] for key, value in self.values.iteritems(): json_param = json.dumps({'name':key, 'value':value}) values_list.append(json_param) - self._wrapped = self.ps.bind(values_list) - self.configure(graph_options) + self._wrapped = self._prepared_statement.bind(values_list) + + # We want to propagate the options from the original GraphStatement. + self._configure(self._graph_statement.graph_options) return self._wrapped @@ -116,7 +154,7 @@ def next(self): __next__ = next - def _generate_traversal_result_tuple(result): + def _generate_traversal_result_tuple(self, result): json_result = json.loads(result)['result'] GraphTraversalResult = namedtuple('GraphTraversalResult', json_result.keys()) return GraphTraversalResult(*json_result.values()) @@ -126,32 +164,51 @@ class GraphSession(object): A session object allowing to execute Gremlin queries against a DSEGraph cluster. """ + """ + The default options to be used for all graph statements executions. Is initialised with : + - graph-source = 'default' + - graph-language = 'gremlin-groovy' + + These options can be over redefined, or removed. + """ default_graph_options = {'graph-source':b'default', 'graph-language':b'gremlin-groovy'} + # Expose the session or private? + session = None + def __init__(self, session, default_graph_options=None): self.session = session if default_graph_options: self.default_graph_options.update(default_graph_options) - def execute(self, query, *args, **kwargs): + def execute(self, query): + """ + Executes a Gremlin query string, a GraphStatement, or a BoundGraphStatement synchronously, + and returns a GraphResultSet from this execution. + """ if isinstance(query, AbstractGraphStatement): - return self._execute(query, args, kwargs) + return self._execute(query) else: - graph_statement = GraphStatement(query, args) - return self._execute(graph_statement, args, kwargs) + graph_statement = GraphStatement(query) + return self._execute(graph_statement) - def _execute(self, graph_statement, *args, **kwargs): - statement = graph_statement.configure_and_get_wrapped(self.default_graph_options) - return GraphResultSet(self.session.execute(statement, args, kwargs)) + def _execute(self, graph_statement): + statement = graph_statement._configure_and_get_wrapped(self.default_graph_options) + return GraphResultSet(self.session.execute(statement)) - def prepare(self, query, *args, **kwargs): + def prepare(self, query): + """ + Prepares a Gremlin query string, or a GraphStatement. This returns a PreparedGraphStatement + resulting of the Cassandra prepare phase. + """ # Should maybe only accept GraphStatement instances instead of AbstractGraphStatement. if isinstance(query, AbstractGraphStatement): - return self._prepare(query, args, kwargs) + return self._prepare(query) else: - graph_statement = GraphStatement(query, args) - return self._prepare(graph_statement, args, kwargs) + graph_statement = GraphStatement(query) + return self._prepare(graph_statement) - def _prepare(self, graph_statement, *args, **kwargs): - return PreparedGraphStatement(self.session.prepare(graph_statement.configure_and_get_wrapped(self.default_graph_options), args), graph_statement) + def _prepare(self, graph_statement): + statement = graph_statement._configure_and_get_wrapped(self.default_graph_options) + return PreparedGraphStatement(self.session.prepare(statement), graph_statement) From c1e0b88c9a17214119bc4361316703788c17a064 Mon Sep 17 00:00:00 2001 From: Kevin Gallardo Date: Thu, 10 Dec 2015 16:00:38 +0000 Subject: [PATCH 0242/2005] BoundStatement copies graph_options from PreparedStatement. --- dse/graph.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/dse/graph.py b/dse/graph.py index e507a2889d..8bf3673c78 100644 --- a/dse/graph.py +++ b/dse/graph.py @@ -99,6 +99,8 @@ def __init__(self, prepared_statement, graph_statement): self._graph_statement = graph_statement self.values = None + self.graph_options.update(graph_statement.graph_options) + def bind(self, values_param=None): """ Add values that values will be added to the execution of the previously prepared statement. @@ -128,7 +130,7 @@ def _configure_and_get_wrapped(self, graph_options): self._wrapped = self._prepared_statement.bind(values_list) # We want to propagate the options from the original GraphStatement. - self._configure(self._graph_statement.graph_options) + self._configure(graph_options) return self._wrapped From 6a0a4371bcee7568dda13ebc543d7906d68421d1 Mon Sep 17 00:00:00 2001 From: Kevin Gallardo Date: Thu, 10 Dec 2015 16:56:06 +0000 Subject: [PATCH 0243/2005] Added file dse/changes.md, summary of the API for Graph. --- dse/changes.md | 106 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) create mode 100644 dse/changes.md diff --git a/dse/changes.md b/dse/changes.md new file mode 100644 index 0000000000..0b6cec3f6c --- /dev/null +++ b/dse/changes.md @@ -0,0 +1,106 @@ +# New API for Python Driver's DSEGraph integration + +## GraphSession + +#### constructor +- parameters: `Session` mandatory, `default_graph_options` optional. + +#### public fields + - `default_graph_options`, dictionary initialised with the default graph options defined in the specs. Options can be overriten or other added. +- `session`, the wrapped `cassandra.cluster.Session`. + +#### exposed methods + - `execute()` takes arguments as either a `GraphStatement` or `BoundGraphStatement` or a query string, to execute it *synchronously* against the DSE cluster. Returns a `GraphResultSet` object containing the results of the execution. + - `prepare()` takes in argument a `GraphStatement` or a query string, and prepares it against the DSE instance. Returns a `PreparedGraphStatement`. + +##### private fields +_None_ + + + +## AbstractGraphStatement + +_Is not supposed to be directly used by users, is not part of the public API. Whereas child implementations will be exposed._ + +#### constructor +- parameters: _None_ + +#### public fields +- `graph_options`, dictionary containing the graph options to use and to override from the default ones defined in the session. + +#### exposed methods +_None_ + +##### private fields +- `_wrapped`, the wrapped driver's statement. It is not supposed to be modified by users but only used internally. + + + +## GraphStatement _[extends AbstractGraphStatement]_ + +#### constructor +- parameters: `query_string` mandatory, `graph_options` optional. + +#### public fields +- inherited from `AbstractGraphStatement` +- `graph_options`, used to override `GraphSession`'s default graph options, on a per-statement basis. + +#### exposed methods + +_None_ + +##### private fields +- inherited from `AbstractGraphStatement` + + + +## PreparedGraphStatement + +#### constructor +- not supposed to be used by users + +#### public fields +_None_ + +#### exposed methods +- `bind()`, binds the provided values, and creates a `BoundGraphStatement` instance. The values must be in a dictionary, meaning that all parameters must be named, an exception will be raised if it is not the case. + +##### private fields +- `_prepared_statement`, the `cassandra.query.PreparedStatement` instance wrapped. +- `_graph_statement`, the graph statement associated with this statement. Used to propagate the graph options to a `BoundGraphStatement` created from a `PreparedGraphStatement`. + + + +## BoundGraphStatement _[extends AbstractGraphStatement]_ + +#### constructor +- not supposed to be used by users. + +#### public fields +- inherited from `AbstractGraphStatement` +- `values`, the values to be bound with the statement and sent to the server. + +#### exposed methods +- `bind()`, bind the values parameters to this `BoundGraphStatement` instance. The values must be in a dictionary, meaning that all parameters must be named, an exception will be raised if it is not the case. + +##### private fields: +- inherited from `AbstractGraphStatement` +- `_prepared_statement`, the `cassandra.query.PreparedStatement` tied to this `BoundGraphStatement`. +- `_graph_statement`, the graph statement associated with this statement. Used to propagate the graph options to a `BoundGraphStatement` created from a `PreparedGraphStatement`. + + + +## GraphResultSet + +#### constructor +- not supposed to be used by users. + +#### public fields +_None_ + +#### exposed methods +- `__iter__()`/`next()`, allows iteration in the set, in `for ... in ...` loops. And manual iteration. Each iteration produces and returns one `GraphTraversalResult`. +- `__getitem__`, allows getting results by index as a array. Each indexed access will return a `GraphTraversalResult`. + +##### private fields + - `_wrapped_rs`, the wrapped `cassandra.cluster.ResultSet`. From a001257c55372571e133fbaa56f5585d9eddb345 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 11 Dec 2015 09:00:43 -0600 Subject: [PATCH 0244/2005] cqle: correct exception message for INE with counters PYTHON-453 --- cassandra/cqlengine/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 19b06552f9..fb7af3812b 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -868,7 +868,7 @@ def timestamp(self, timestamp): def if_not_exists(self): if self.model._has_counter: - raise IfNotExistsWithCounterColumn('if_not_exists cannot be used with tables containing columns') + raise IfNotExistsWithCounterColumn('if_not_exists cannot be used with tables containing counter columns') clone = copy.deepcopy(self) clone._if_not_exists = True return clone From de1273af07423732cc509fff2f4db39490ce25c2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 14 Dec 2015 11:25:32 -0600 Subject: [PATCH 0245/2005] doc: remove timeout parameter from ResponseFuture.result --- docs/api/cassandra/cluster.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index ac2cbcd399..fbe8fe3f1b 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -123,7 +123,7 @@ .. autoattribute:: query - .. automethod:: result([timeout]) + .. automethod:: result() .. automethod:: get_query_trace() From 379f6a3aa740f7830fc486bc5835cd725819e3df Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 3 Dec 2015 13:53:19 -0600 Subject: [PATCH 0246/2005] Fixing timing, dependency and CL issues in tests. --- build.yaml | 1 - test-requirements.txt | 1 - .../integration/cqlengine/query/test_named.py | 27 +++++++++++++------ .../standard/test_prepared_statements.py | 3 +++ tests/integration/standard/test_query.py | 3 ++- 5 files changed, 24 insertions(+), 11 deletions(-) diff --git a/build.yaml b/build.yaml index 2f1e2f6d5e..49909596ca 100644 --- a/build.yaml +++ b/build.yaml @@ -19,7 +19,6 @@ build: export JAVA_HOME=$CCM_JAVA_HOME export PATH=$JAVA_HOME/bin:$PATH - sudo python /home/jenkins/ccm/setup.py install # Install dependencies if [[ $EVENT_LOOP_MANAGER == 'libev' ]]; then sudo apt-get install -y libev4 libev-dev diff --git a/test-requirements.txt b/test-requirements.txt index f3cc411c24..50875b6660 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,5 +1,4 @@ -r requirements.txt -blist scales nose mock<=1.0.1 diff --git a/tests/integration/cqlengine/query/test_named.py b/tests/integration/cqlengine/query/test_named.py index 12b02003c1..dbe664b3f3 100644 --- a/tests/integration/cqlengine/query/test_named.py +++ b/tests/integration/cqlengine/query/test_named.py @@ -17,7 +17,8 @@ except ImportError: import unittest # noqa -from cassandra.cqlengine import operators, connection +from cassandra import ConsistencyLevel +from cassandra.cqlengine import operators from cassandra.cqlengine.named import NamedKeyspace from cassandra.cqlengine.operators import EqualsOperator, GreaterThanOrEqualOperator from cassandra.cqlengine.query import ResultObject @@ -343,20 +344,17 @@ def test_named_table_with_mv(self): ('jbellis', 'Checkers', 2015, 6, 20, 1200), ('jbellis', 'Chess', 2015, 6, 21, 3500), ('pcmanus', 'Chess', 2015, 1, 25, 3200)} + prepared_insert.consistency_level = ConsistencyLevel.ALL execute_concurrent_with_args(self.session, prepared_insert, parameters) # Attempt to query the data using Named Table interface # Also test filtering on mv's key_space = NamedKeyspace(ks) - table = key_space.table("scores") mv_monthly = key_space.table("monthlyhigh") - table_objects = table.objects.all() - mv_monthly_objects = mv_monthly.objects.all() mv_all_time = key_space.table("alltimehigh") - mv_all_objects = mv_all_time.objects.all() - self.assertEqual(len(table_objects), len(parameters)) - self.assertEqual(len(mv_monthly_objects), len(parameters)) - self.assertEqual(len(mv_all_objects), len(parameters)) + self.assertTrue(self.check_table_size("scores", key_space, len(parameters))) + self.assertTrue(self.check_table_size("monthlyhigh", key_space, len(parameters))) + self.assertTrue(self.check_table_size("alltimehigh", key_space, len(parameters))) filtered_mv_monthly_objects = mv_monthly.objects.filter(game='Chess', year=2015, month=6) self.assertEqual(len(filtered_mv_monthly_objects), 1) @@ -365,3 +363,16 @@ def test_named_table_with_mv(self): filtered_mv_alltime_objects = mv_all_time.objects.filter(game='Chess') self.assertEqual(len(filtered_mv_alltime_objects), 2) self.assertEqual(filtered_mv_alltime_objects[0]['score'], 3500) + + def check_table_size(self, table_name, key_space, expected_size): + table = key_space.table(table_name) + attempts = 0 + while attempts < 10: + attempts += 1 + table_size = len(table.objects.all()) + if(table_size is not expected_size): + print("Table {0} size was {1} and was expected to be {2}".format(table_name, table_size, expected_size)) + else: + return True + + return False diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index 854caee9cd..4a14961344 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -21,6 +21,7 @@ import unittest # noqa from cassandra import InvalidRequest +from cassandra import ConsistencyLevel from cassandra.cluster import Cluster from cassandra.query import PreparedStatement, UNSET_VALUE from tests.integration import get_server_versions @@ -275,6 +276,7 @@ def test_no_meta(self): self.assertIsInstance(prepared, PreparedStatement) bound = prepared.bind(None) + bound.consistency_level = ConsistencyLevel.ALL self.session.execute(bound) prepared = self.session.prepare( @@ -284,6 +286,7 @@ def test_no_meta(self): self.assertIsInstance(prepared, PreparedStatement) bound = prepared.bind(None) + bound.consistency_level = ConsistencyLevel.ALL results = self.session.execute(bound) self.assertEqual(results[0].v, 0) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 1693f171d7..b087031e04 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -162,7 +162,8 @@ def test_incomplete_query_trace(self): trace = response_future._query_traces[0] # Delete trace duration from the session (this is what the driver polls for "complete") - self.session.execute("DELETE duration FROM system_traces.sessions WHERE session_id = %s", (trace.trace_id,)) + delete_statement = SimpleStatement("DELETE duration FROM system_traces.sessions WHERE session_id = {}".format(trace.trace_id), consistency_level=ConsistencyLevel.ALL) + self.session.execute(delete_statement) # should raise because duration is not set self.assertRaises(TraceUnavailable, trace.populate, max_wait=0.2, wait_for_complete=True) From 2be8840bffe467d0dfdb37943438d9770aea496d Mon Sep 17 00:00:00 2001 From: Mike Adamson Date: Tue, 15 Dec 2015 12:48:57 +0000 Subject: [PATCH 0247/2005] PYTHON-454 Pass authenticator class name to Authenticator implementations --- cassandra/auth.py | 3 ++- cassandra/connection.py | 3 +-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/auth.py b/cassandra/auth.py index f07885203d..8aa49ba3ca 100644 --- a/cassandra/auth.py +++ b/cassandra/auth.py @@ -15,7 +15,6 @@ except ImportError: SASLClient = None - class AuthProvider(object): """ An abstract class that defines the interface that will be used for @@ -62,6 +61,8 @@ class Authenticator(object): .. versionadded:: 2.0.0 """ + def set_authenticator_class(self, authenticator_class): + self.authenticator_class = authenticator_class def initial_response(self): """ diff --git a/cassandra/connection.py b/cassandra/connection.py index 29edaaca1c..59ca39e937 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -694,8 +694,6 @@ def _handle_startup_response(self, startup_response, did_authenticate=False): if self.authenticator is None: raise AuthenticationFailed('Remote end requires authentication.') - self.authenticator_class = startup_response.authenticator - if isinstance(self.authenticator, dict): log.debug("Sending credentials-based auth response on %s", self) cm = CredentialsMessage(creds=self.authenticator) @@ -703,6 +701,7 @@ def _handle_startup_response(self, startup_response, did_authenticate=False): self.send_msg(cm, self.get_request_id(), cb=callback) else: log.debug("Sending SASL-based auth response on %s", self) + self.authenticator.set_authenticator_class(startup_response.authenticator) initial_response = self.authenticator.initial_response() initial_response = "" if initial_response is None else initial_response self.send_msg(AuthResponseMessage(initial_response), self.get_request_id(), self._handle_auth_response) From 3891595b2eecac71d6db33c470d19c2a20ff4146 Mon Sep 17 00:00:00 2001 From: Kevin Gallardo Date: Tue, 15 Dec 2015 15:16:22 +0000 Subject: [PATCH 0248/2005] Send simple statements parameters through the protocol. --- cassandra/cluster.py | 4 +--- cassandra/protocol.py | 12 +++++++++++- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 8b8749172d..6b09657918 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1645,11 +1645,9 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time if isinstance(query, SimpleStatement): query_string = query.query_string - if parameters: - query_string = bind_params(query_string, parameters, self.encoder) message = QueryMessage( query_string, cl, query.serial_consistency_level, - fetch_size, timestamp=timestamp) + fetch_size, timestamp=timestamp, query_params=parameters) elif isinstance(query, BoundStatement): message = ExecuteMessage( query.prepared_statement.query_id, query.values, cl, diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 53a9a55bdf..b48bc891f1 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -484,13 +484,14 @@ class QueryMessage(_MessageType): name = 'QUERY' def __init__(self, query, consistency_level, serial_consistency_level=None, - fetch_size=None, paging_state=None, timestamp=None): + fetch_size=None, paging_state=None, timestamp=None, query_params=None): self.query = query self.consistency_level = consistency_level self.serial_consistency_level = serial_consistency_level self.fetch_size = fetch_size self.paging_state = paging_state self.timestamp = timestamp + self.query_params = query_params def send_body(self, f, protocol_version): write_longstring(f, self.query) @@ -524,7 +525,16 @@ def send_body(self, f, protocol_version): if self.timestamp is not None: flags |= _PROTOCOL_TIMESTAMP + if self.query_params is not None: + flags |= _VALUES_FLAG + write_byte(f, flags) + + if self.query_params is not None: + write_short(f, len(self.query_params)) + for param in self.query_params: + write_value(f, param) + if self.fetch_size: write_int(f, self.fetch_size) if self.paging_state: From 6c1329eb98e97fbba429a3c74f1a59b245daf347 Mon Sep 17 00:00:00 2001 From: Kevin Gallardo Date: Tue, 15 Dec 2015 15:25:27 +0000 Subject: [PATCH 0249/2005] Added simple Statements with parameters. --- dse/graph.py | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/dse/graph.py b/dse/graph.py index 8bf3673c78..f42dfcb008 100644 --- a/dse/graph.py +++ b/dse/graph.py @@ -183,20 +183,23 @@ def __init__(self, session, default_graph_options=None): if default_graph_options: self.default_graph_options.update(default_graph_options) - def execute(self, query): + def execute(self, query, parameters=None): """ Executes a Gremlin query string, a GraphStatement, or a BoundGraphStatement synchronously, and returns a GraphResultSet from this execution. """ if isinstance(query, AbstractGraphStatement): - return self._execute(query) + return self._execute(query, parameters) else: graph_statement = GraphStatement(query) - return self._execute(graph_statement) + return self._execute(graph_statement, parameters) - def _execute(self, graph_statement): + def _execute(self, graph_statement, parameters): statement = graph_statement._configure_and_get_wrapped(self.default_graph_options) - return GraphResultSet(self.session.execute(statement)) + graph_parameters = None + if parameters: + graph_parameters = self._transform_params(parameters) + return GraphResultSet(self.session.execute(statement, parameters=graph_parameters)) def prepare(self, query): """ @@ -214,3 +217,15 @@ def _prepare(self, graph_statement): statement = graph_statement._configure_and_get_wrapped(self.default_graph_options) return PreparedGraphStatement(self.session.prepare(statement), graph_statement) + def _transform_params(self, parameters): + if not isinstance(parameters, dict): + raise Exception('The values parameter can only be a dictionary, unnamed parameters are not authorized in Gremlin queries.') + + values_list = [] + for key, value in parameters.iteritems(): + json_param = json.dumps({'name':key, 'value':value}) + values_list.append(json_param) + + return values_list + + From 1eb9a8456a2bb8ecc66a694904224ad98700f80b Mon Sep 17 00:00:00 2001 From: Kevin Gallardo Date: Wed, 16 Dec 2015 17:15:46 +0000 Subject: [PATCH 0250/2005] Forgot to add the graph options from the GraphStatement constructor to the statement. --- dse/graph.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/dse/graph.py b/dse/graph.py index f42dfcb008..3162b63031 100644 --- a/dse/graph.py +++ b/dse/graph.py @@ -50,6 +50,8 @@ class GraphStatement(AbstractGraphStatement): def __init__(self, query_string, graph_options=None): AbstractGraphStatement.__init__(self) self.query_string = query_string + if graph_options: + self.graph_options = graph_options def _configure_and_get_wrapped(self, session_graph_options): self._wrapped = SimpleStatement(self.query_string) From 3355bf9a1243b908a9459809b70ee71fb8ce2b02 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 16 Dec 2015 13:58:34 -0600 Subject: [PATCH 0251/2005] Make server auth string a class attribute, add test PYTHON-454 --- cassandra/auth.py | 5 ++-- cassandra/connection.py | 2 +- .../standard/test_authentication.py | 25 ++++++++++++------- 3 files changed, 20 insertions(+), 12 deletions(-) diff --git a/cassandra/auth.py b/cassandra/auth.py index 8aa49ba3ca..7ac064fa6f 100644 --- a/cassandra/auth.py +++ b/cassandra/auth.py @@ -61,8 +61,9 @@ class Authenticator(object): .. versionadded:: 2.0.0 """ - def set_authenticator_class(self, authenticator_class): - self.authenticator_class = authenticator_class + + server_authenticator_class = None + """ Set during the connection AUTHENTICATE phase """ def initial_response(self): """ diff --git a/cassandra/connection.py b/cassandra/connection.py index 59ca39e937..b54f4516eb 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -701,7 +701,7 @@ def _handle_startup_response(self, startup_response, did_authenticate=False): self.send_msg(cm, self.get_request_id(), cb=callback) else: log.debug("Sending SASL-based auth response on %s", self) - self.authenticator.set_authenticator_class(startup_response.authenticator) + self.authenticator.server_authenticator_class = startup_response.authenticator initial_response = self.authenticator.initial_response() initial_response = "" if initial_response is None else initial_response self.send_msg(AuthResponseMessage(initial_response), self.get_request_id(), self._handle_auth_response) diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index 80c99ede32..82f81cd885 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -80,15 +80,22 @@ def test_auth_connect(self): root_session = self.cluster_as('cassandra', 'cassandra').connect() root_session.execute('CREATE USER %s WITH PASSWORD %s', (user, passwd)) - cluster = self.cluster_as(user, passwd) - session = cluster.connect() - self.assertTrue(session.execute('SELECT release_version FROM system.local')) - assert_quiescent_pool_state(self, cluster) - cluster.shutdown() - - root_session.execute('DROP USER %s', user) - assert_quiescent_pool_state(self, root_session.cluster) - root_session.cluster.shutdown() + try: + cluster = self.cluster_as(user, passwd) + session = cluster.connect() + try: + self.assertTrue(session.execute('SELECT release_version FROM system.local')) + assert_quiescent_pool_state(self, cluster) + for pool in session.get_pools(): + connection, _ = pool.borrow_connection(timeout=0) + self.assertEqual(connection.authenticator.server_authenticator_class, 'org.apache.cassandra.auth.PasswordAuthenticator') + pool.return_connection(connection) + finally: + cluster.shutdown() + finally: + root_session.execute('DROP USER %s', user) + assert_quiescent_pool_state(self, root_session.cluster) + root_session.cluster.shutdown() def test_connect_wrong_pwd(self): cluster = self.cluster_as('cassandra', 'wrong_pass') From 45fdb57dcc96c99d5d8810a4b5853ecfb0eebac8 Mon Sep 17 00:00:00 2001 From: Mike Adamson Date: Wed, 16 Dec 2015 16:48:11 -0600 Subject: [PATCH 0252/2005] PYTHON-454 Pass authenticator class name to Authenticator implementations --- cassandra/dseauth.py | 66 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100644 cassandra/dseauth.py diff --git a/cassandra/dseauth.py b/cassandra/dseauth.py new file mode 100644 index 0000000000..af0aedd4ff --- /dev/null +++ b/cassandra/dseauth.py @@ -0,0 +1,66 @@ +from cassandra.auth import AuthProvider, Authenticator + +try: + from puresasl.client import SASLClient +except ImportError: + SASLClient = None + +class DseAuthProvider(AuthProvider): + def __init__(self, username=None, password=None, service=None, qops=None): + self.username = username + self.password = password + if username is None: + if SASLClient is None: + raise ImportError('The puresasl library has not been installed') + self.service = service + self.qops = qops + + def new_authenticator(self, host): + if self.username: + return PlainTextAuthenticator(self.username, self.password) + return GSSAPIAuthenticator(host, self.service, self.qops) + +class BaseDseAuthenticator(Authenticator): + def get_mechanism(self): + raise NotImplementedError("get_mechanism not implemented") + + def get_initial_challenge(self): + raise NotImplementedError("get_initial_challenge not implemented") + + def initial_response(self): + if (self.authenticator_class == "com.datastax.bdp.cassandra.auth.DseAuthenticator"): + return self.get_mechanism() + else: + return self.evaluate_challenge(self.get_initial_challenge()) + +class PlainTextAuthenticator(BaseDseAuthenticator): + def __init__(self, username, password): + self.username = username + self.password = password + + def get_mechanism(self): + return "PLAIN" + + def get_initial_challenge(self): + return "PLAIN-START" + + def evaluate_challenge(self, challenge): + if challenge == 'PLAIN-START': + return "\x00%s\x00%s" % (self.username, self.password) + raise Exception('Did not receive a valid challenge response from server') + +class GSSAPIAuthenticator(BaseDseAuthenticator): + def __init__(self, host, service, qops): + self.sasl = SASLClient(host, service, 'GSSAPI', authorization_id=None, callback=None, qops=qops) + + def get_mechanism(self): + return "GSSAPI" + + def get_initial_challenge(self): + return "GSSAPI-START" + + def evaluate_challenge(self, challenge): + if challenge == 'GSSAPI-START': + return self.sasl.process() + else: + return self.sasl.process(challenge) From 693463e5ab1eb1f7a26eb9ded2ba15ca76d3efa8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 16 Dec 2015 16:50:30 -0600 Subject: [PATCH 0253/2005] dseauth to separate package --- dse/__init__.py | 0 cassandra/dseauth.py => dse/auth.py | 0 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 dse/__init__.py rename cassandra/dseauth.py => dse/auth.py (100%) diff --git a/dse/__init__.py b/dse/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cassandra/dseauth.py b/dse/auth.py similarity index 100% rename from cassandra/dseauth.py rename to dse/auth.py From 5fe962b6075a355812ecceb8afd59c2e5ef6f1bb Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 16 Dec 2015 16:51:22 -0600 Subject: [PATCH 0254/2005] pep8 --- dse/auth.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/dse/auth.py b/dse/auth.py index af0aedd4ff..469c3e4036 100644 --- a/dse/auth.py +++ b/dse/auth.py @@ -5,7 +5,8 @@ except ImportError: SASLClient = None -class DseAuthProvider(AuthProvider): + +class DSEAuthProvider(AuthProvider): def __init__(self, username=None, password=None, service=None, qops=None): self.username = username self.password = password @@ -20,7 +21,8 @@ def new_authenticator(self, host): return PlainTextAuthenticator(self.username, self.password) return GSSAPIAuthenticator(host, self.service, self.qops) -class BaseDseAuthenticator(Authenticator): + +class BaseDSEAuthenticator(Authenticator): def get_mechanism(self): raise NotImplementedError("get_mechanism not implemented") @@ -28,12 +30,13 @@ def get_initial_challenge(self): raise NotImplementedError("get_initial_challenge not implemented") def initial_response(self): - if (self.authenticator_class == "com.datastax.bdp.cassandra.auth.DseAuthenticator"): + if self.server_authenticator_class == "com.datastax.bdp.cassandra.auth.DseAuthenticator": return self.get_mechanism() else: return self.evaluate_challenge(self.get_initial_challenge()) -class PlainTextAuthenticator(BaseDseAuthenticator): + +class PlainTextAuthenticator(BaseDSEAuthenticator): def __init__(self, username, password): self.username = username self.password = password @@ -49,7 +52,8 @@ def evaluate_challenge(self, challenge): return "\x00%s\x00%s" % (self.username, self.password) raise Exception('Did not receive a valid challenge response from server') -class GSSAPIAuthenticator(BaseDseAuthenticator): + +class GSSAPIAuthenticator(BaseDSEAuthenticator): def __init__(self, host, service, qops): self.sasl = SASLClient(host, service, 'GSSAPI', authorization_id=None, callback=None, qops=qops) From 7d555d6e9e6c4a5688f504d914fadff1c361779f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 16 Dec 2015 16:52:50 -0600 Subject: [PATCH 0255/2005] Test for DSE Authenticator PYTHON-455 --- .../integration/standard/test_authentication.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index 82f81cd885..310e636740 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -17,6 +17,7 @@ from cassandra.cluster import Cluster, NoHostAvailable from cassandra.auth import PlainTextAuthProvider, SASLClient, SaslAuthProvider +from dse.auth import DSEAuthProvider from tests.integration import use_singledc, get_cluster, remove_cluster, PROTOCOL_VERSION from tests.integration.util import assert_quiescent_pool_state @@ -164,3 +165,18 @@ def test_host_passthrough(self): def test_host_rejected(self): sasl_kwargs = {'host': 'something'} self.assertRaises(ValueError, SaslAuthProvider, **sasl_kwargs) + + +class DSEAuthenticatorTests(AuthenticationTests): + """ + Test SaslAuthProvider as PlainText + """ + + def setUp(self): + if PROTOCOL_VERSION < 2: + raise unittest.SkipTest('Sasl authentication not available for protocol v1') + if SASLClient is None: + raise unittest.SkipTest('pure-sasl is not installed') + + def get_authentication_provider(self, username, password): + return DSEAuthProvider(username, password) From 2937df853c8bf908ccdb449360676c2ad9d3ad7e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 18 Dec 2015 15:20:47 -0600 Subject: [PATCH 0256/2005] Make parameter transform more pythonic --- dse/graph.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/dse/graph.py b/dse/graph.py index 3162b63031..1c8a70ff3a 100644 --- a/dse/graph.py +++ b/dse/graph.py @@ -1,7 +1,8 @@ from cassandra.query import SimpleStatement -from collections import namedtuple +from collections import namedtuple import json +import six class AbstractGraphStatement: """ @@ -219,15 +220,8 @@ def _prepare(self, graph_statement): statement = graph_statement._configure_and_get_wrapped(self.default_graph_options) return PreparedGraphStatement(self.session.prepare(statement), graph_statement) + # this may go away if we change parameter encoding def _transform_params(self, parameters): if not isinstance(parameters, dict): raise Exception('The values parameter can only be a dictionary, unnamed parameters are not authorized in Gremlin queries.') - - values_list = [] - for key, value in parameters.iteritems(): - json_param = json.dumps({'name':key, 'value':value}) - values_list.append(json_param) - - return values_list - - + return [json.dumps({'name': name, 'value': value}) for name, value in six.iteritems(parameters)] From e7561d1b0a8ee0768519959ace71bdda5ff97638 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 21 Dec 2015 11:21:56 -0600 Subject: [PATCH 0257/2005] Add common graph options to GraphStatement Also abandoned prepared graph statement until the simple API is settled --- dse/graph.py | 188 +++++++++++---------------------------------------- 1 file changed, 41 insertions(+), 147 deletions(-) diff --git a/dse/graph.py b/dse/graph.py index 1c8a70ff3a..f4bd884c63 100644 --- a/dse/graph.py +++ b/dse/graph.py @@ -4,138 +4,47 @@ import json import six -class AbstractGraphStatement: - """ - Parent implementation of the Graph specific statements. It encapsulate the necessary work - to produce a statement recognizable to the DSEQueryHandler. - Child implementations will contain the graph_options field and will have to call self.configure() - before being executed. They also need to implement the _configure_and_get_wrapped() method that - will be called by the GraphSession to transparently execute the wrapped statement. - """ - _wrapped = None - - """ - The graph specific options that will be included in the execution of the statement. - The 4 available graph options are : - - graph-keyspace : define the keyspace the graph is defined with. - - graph-source : choose the graph traversal source, configured on the server side. - - graph-language : the language used in the queries. - - graph-rebinding : rename the traversal source. - """ - graph_options = {} - - def __init__(self): - pass - - def _configure(self, session_graph_options): - # Handles the general tranformations to apply on the wrapped statement. - merged_options = session_graph_options.copy() - merged_options.update(self.graph_options) - self._wrapped.custom_payload = merged_options - - def _configure_and_get_wrapped(self, graph_options): - raise NotImplementedError() - - -class GraphStatement(AbstractGraphStatement): - """ - A simple, un-prepared graph query. - """ - - """ - The query string used in this statement. - """ - query_string = None - - - def __init__(self, query_string, graph_options=None): - AbstractGraphStatement.__init__(self) - self.query_string = query_string - if graph_options: - self.graph_options = graph_options - - def _configure_and_get_wrapped(self, session_graph_options): - self._wrapped = SimpleStatement(self.query_string) - self._configure(session_graph_options) - return self._wrapped - - -class PreparedGraphStatement(object): - """ - A prepared graph statement returned from the execution of a GraphSession.prepare() - method. This query corresponding to this statement is supposed to contain Gremlin - parameters that will eventually be bound. - """ - - _prepared_statement = None - _graph_statement = None - - def __init__(self, prepared_statement, graph_statement): - self._prepared_statement = prepared_statement - self._graph_statement = graph_statement - - def bind(self, values=None): - """ - Add parameters values to this prepared statement. This will create a BoundGraphStatement - instance. It is possible to not provide values for now, but call bind() later on the - BoundGraphStatement. - """ - return BoundGraphStatement(self._prepared_statement, self._graph_statement).bind(values) - - -class BoundGraphStatement(AbstractGraphStatement): - """ - A statement bound with values resulting from the preparation of a simple graph statement. - """ - - _prepared_statement = None - _graph_statement = None +# (attr, server option, description) +_graph_options = ( +('graph_keyspace', 'define the keyspace the graph is defined with.', 'graph-keyspace'), +('graph_source', 'choose the graph traversal source, configured on the server side.', 'graph-source'), +('graph_language', 'the language used in the queries (default "gremlin-groovy"', 'graph-language'), +('graph_rebinding', 'name of the graph in the query (default "g")', 'graph-rebinding') +) + + +class GraphStatement(SimpleStatement): + + def __init__(self, *args, **kwargs): + super(GraphStatement, self).__init__(*args, **kwargs) + self._graph_options = {} + + def get_options_map(self, base_options): + if self._graph_options: + options = base_options.copy() + options.update(self._graph_options) + return options + else: + return base_options - """ - The values of the parameters to be sent with the statement. - """ - values = None +for opt in _graph_options: - def __init__(self, prepared_statement, graph_statement): - self._prepared_statement = prepared_statement - self._graph_statement = graph_statement - self.values = None + key = opt[2] - self.graph_options.update(graph_statement.graph_options) + def get(self): + return self._graph_options.get(key) - def bind(self, values_param=None): - """ - Add values that values will be added to the execution of the previously prepared statement. - The values bound can only be in the form of a dictionary, meaning that each Gremlin parameter - must be named, and bound with its name as well. - """ - values = None - if values_param is None: - values = {} + def set(self, value): + if value: + self._graph_options[key] = value else: - values = values_param + self._graph_options.pop(key) - if not isinstance(values, dict): - # TODO: change the exception class - raise Exception('The values parameter can only be a dictionary, unnamed parameters are not authorized in Gremlin queries.') - - self.values = values - return self - - def _configure_and_get_wrapped(self, graph_options): - # Transform the dict of parameters in a [{'name':name, 'value':value}] as imposed in the specs - values_list = [] - for key, value in self.values.iteritems(): - json_param = json.dumps({'name':key, 'value':value}) - values_list.append(json_param) + def delete(self): + self._graph_options.pop(key) - self._wrapped = self._prepared_statement.bind(values_list) - - # We want to propagate the options from the original GraphStatement. - self._configure(graph_options) - - return self._wrapped + setattr(GraphStatement, opt[0], property(get, set, delete, opt[1])) class GraphResultSet(object): @@ -164,6 +73,7 @@ def _generate_traversal_result_tuple(self, result): GraphTraversalResult = namedtuple('GraphTraversalResult', json_result.keys()) return GraphTraversalResult(*json_result.values()) + class GraphSession(object): """ A session object allowing to execute Gremlin queries against a DSEGraph cluster. @@ -176,13 +86,14 @@ class GraphSession(object): These options can be over redefined, or removed. """ - default_graph_options = {'graph-source':b'default', 'graph-language':b'gremlin-groovy'} + default_graph_options = None # Expose the session or private? session = None def __init__(self, session, default_graph_options=None): self.session = session + self.default_graph_options = {'graph-source': b'default', 'graph-language': b'gremlin-groovy'} if default_graph_options: self.default_graph_options.update(default_graph_options) @@ -191,34 +102,17 @@ def execute(self, query, parameters=None): Executes a Gremlin query string, a GraphStatement, or a BoundGraphStatement synchronously, and returns a GraphResultSet from this execution. """ - if isinstance(query, AbstractGraphStatement): - return self._execute(query, parameters) + if isinstance(query, GraphStatement): + return self._execute(query, query.get_options_map(self.default_graph_options), parameters) else: graph_statement = GraphStatement(query) - return self._execute(graph_statement, parameters) + return self._execute(graph_statement, self.default_graph_options, parameters) - def _execute(self, graph_statement, parameters): - statement = graph_statement._configure_and_get_wrapped(self.default_graph_options) + def _execute(self, statement, options, parameters): graph_parameters = None if parameters: graph_parameters = self._transform_params(parameters) - return GraphResultSet(self.session.execute(statement, parameters=graph_parameters)) - - def prepare(self, query): - """ - Prepares a Gremlin query string, or a GraphStatement. This returns a PreparedGraphStatement - resulting of the Cassandra prepare phase. - """ - # Should maybe only accept GraphStatement instances instead of AbstractGraphStatement. - if isinstance(query, AbstractGraphStatement): - return self._prepare(query) - else: - graph_statement = GraphStatement(query) - return self._prepare(graph_statement) - - def _prepare(self, graph_statement): - statement = graph_statement._configure_and_get_wrapped(self.default_graph_options) - return PreparedGraphStatement(self.session.prepare(statement), graph_statement) + return GraphResultSet(self.session.execute(statement, graph_parameters, custom_payload=options)) # this may go away if we change parameter encoding def _transform_params(self, parameters): From 2515b705442b62f0385e69b21fc6d3bbcc24d1d6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 21 Dec 2015 13:51:07 -0600 Subject: [PATCH 0258/2005] Use core ResultSet, introduce some graph result row factories --- dse/graph.py | 79 ++++++++++++++++++++++++++++++++++------------------ 1 file changed, 52 insertions(+), 27 deletions(-) diff --git a/dse/graph.py b/dse/graph.py index f4bd884c63..6be97121a8 100644 --- a/dse/graph.py +++ b/dse/graph.py @@ -47,31 +47,45 @@ def delete(self): setattr(GraphStatement, opt[0], property(get, set, delete, opt[1])) -class GraphResultSet(object): - """ - A results set containing multiple GraphTraversalResult objects. - """ +def single_object_row_factory(column_names, rows): + # returns the string value of graph results + return [row[0] for row in rows] + + +def graph_result_row_factory(column_names, rows): + # Returns an object that can load graph results and produce specific types + return [Result(row[0]) for row in rows] + + +class Result(object): - def __init__(self, result_set): - self._wrapped_rs = result_set + _value = None - def __iter__(self): - # Not great but doing iter(self._wrapped_rs); return self; Does not work- - # -because of the paging and __getitem__ on the wrapped result set. - return GraphResultSet(iter(self._wrapped_rs)) + def __init__(self, json_value): + self._value = json.loads(json_value)['result'] - def __getitem__(self, i): - return self._generate_traversal_result_tuple(self._wrapped_rs[i].gremlin) + def __getattr__(self, attr): + if not isinstance(self._value, dict): + raise ValueError("Value cannot be accessed as a dict") - def next(self): - return self._generate_traversal_result_tuple(self._wrapped_rs.next().gremlin) + if attr in self._value: + return self._value[attr] - __next__ = next + raise AttributeError("Result has no top-level attribute %r" % (attr,)) - def _generate_traversal_result_tuple(self, result): - json_result = json.loads(result)['result'] - GraphTraversalResult = namedtuple('GraphTraversalResult', json_result.keys()) - return GraphTraversalResult(*json_result.values()) + def __getitem__(self, item): + if isinstance(self._value, dict) and isinstance(key, six.string_types): + return self._value[item] + elif isinstance(self._value, list) and isinstance(key, int): + raise TypeError("Key must be a string") + else: + raise ValueError("Result cannot be indexed by %r" %(item,)) + + def __str__(self): + return self._value + + def __repr__(self): + return "%s(%r)" % (Result.__name__, json.dumps({'result': self._value})) class GraphSession(object): @@ -84,10 +98,12 @@ class GraphSession(object): - graph-source = 'default' - graph-language = 'gremlin-groovy' - These options can be over redefined, or removed. + These options can be over redefined, or removed. """ default_graph_options = None + default_graph_row_factory = staticmethod(graph_result_row_factory) + # Expose the session or private? session = None @@ -97,22 +113,31 @@ def __init__(self, session, default_graph_options=None): if default_graph_options: self.default_graph_options.update(default_graph_options) - def execute(self, query, parameters=None): + def execute(self, query, parameters=None, row_factory=None): """ - Executes a Gremlin query string, a GraphStatement, or a BoundGraphStatement synchronously, + Executes a Gremlin query string, a GraphStatement, or a BoundGraphStatement synchronously, and returns a GraphResultSet from this execution. """ if isinstance(query, GraphStatement): - return self._execute(query, query.get_options_map(self.default_graph_options), parameters) + options = query.get_options_map(self.default_graph_options) else: - graph_statement = GraphStatement(query) - return self._execute(graph_statement, self.default_graph_options, parameters) + query = GraphStatement(query) + options = self.default_graph_options - def _execute(self, statement, options, parameters): graph_parameters = None if parameters: graph_parameters = self._transform_params(parameters) - return GraphResultSet(self.session.execute(statement, graph_parameters, custom_payload=options)) + + row_factory = row_factory or self.default_graph_row_factory + + # TODO: pass down trace, timeout parameters + # this is basically Session.execute_async, repeated here to customize the row factory. May want to add that + # parameter to the session method + future = self.session._create_response_future(query, graph_parameters, trace=False, custom_payload=options, timeout=self.session.default_timeout) + future._protocol_handler = self.session.client_protocol_handler + future.row_factory = row_factory + future.send_request() + return future.result() # this may go away if we change parameter encoding def _transform_params(self, parameters): From 717736cd27c010bda629916f1370f312b4b979b9 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 21 Dec 2015 14:43:48 -0600 Subject: [PATCH 0259/2005] Restore simple statement CQL binding, inject params for graph --- cassandra/cluster.py | 5 ++++- cassandra/protocol.py | 16 ++++++++-------- dse/graph.py | 23 ++++++++++++----------- 3 files changed, 24 insertions(+), 20 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 6b09657918..402a4d3822 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1645,9 +1645,11 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time if isinstance(query, SimpleStatement): query_string = query.query_string + if parameters: + query_string = bind_params(query_string, parameters, self.encoder) message = QueryMessage( query_string, cl, query.serial_consistency_level, - fetch_size, timestamp=timestamp, query_params=parameters) + fetch_size, timestamp=timestamp) elif isinstance(query, BoundStatement): message = ExecuteMessage( query.prepared_statement.query_id, query.values, cl, @@ -1932,6 +1934,7 @@ def get_pool_state(self): def get_pools(self): return self._pools.values() + class UserTypeDoesNotExist(Exception): """ An attempt was made to use a user-defined type that does not exist. diff --git a/cassandra/protocol.py b/cassandra/protocol.py index b48bc891f1..8a405a1c1a 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -484,19 +484,22 @@ class QueryMessage(_MessageType): name = 'QUERY' def __init__(self, query, consistency_level, serial_consistency_level=None, - fetch_size=None, paging_state=None, timestamp=None, query_params=None): + fetch_size=None, paging_state=None, timestamp=None): self.query = query self.consistency_level = consistency_level self.serial_consistency_level = serial_consistency_level self.fetch_size = fetch_size self.paging_state = paging_state self.timestamp = timestamp - self.query_params = query_params + self._query_params = None # only used internally. May be set to a list of native-encoded values to have them sent with the request. def send_body(self, f, protocol_version): write_longstring(f, self.query) write_consistency_level(f, self.consistency_level) flags = 0x00 + if self._query_params is not None: + flags |= _VALUES_FLAG # also v2+, but we're only setting params internally right now + if self.serial_consistency_level: if protocol_version >= 2: flags |= _WITH_SERIAL_CONSISTENCY_FLAG @@ -525,14 +528,11 @@ def send_body(self, f, protocol_version): if self.timestamp is not None: flags |= _PROTOCOL_TIMESTAMP - if self.query_params is not None: - flags |= _VALUES_FLAG - write_byte(f, flags) - if self.query_params is not None: - write_short(f, len(self.query_params)) - for param in self.query_params: + if self._query_params is not None: + write_short(f, len(self._query_params)) + for param in self._query_params: write_value(f, param) if self.fetch_size: diff --git a/dse/graph.py b/dse/graph.py index 6be97121a8..23cc2ca0fd 100644 --- a/dse/graph.py +++ b/dse/graph.py @@ -59,33 +59,33 @@ def graph_result_row_factory(column_names, rows): class Result(object): - _value = None + value = None def __init__(self, json_value): - self._value = json.loads(json_value)['result'] + self.value = json.loads(json_value)['result'] def __getattr__(self, attr): - if not isinstance(self._value, dict): + if not isinstance(self.value, dict): raise ValueError("Value cannot be accessed as a dict") - if attr in self._value: - return self._value[attr] + if attr in self.value: + return self.value[attr] raise AttributeError("Result has no top-level attribute %r" % (attr,)) def __getitem__(self, item): - if isinstance(self._value, dict) and isinstance(key, six.string_types): - return self._value[item] - elif isinstance(self._value, list) and isinstance(key, int): + if isinstance(self.value, dict) and isinstance(key, six.string_types): + return self.value[item] + elif isinstance(self.value, list) and isinstance(key, int): raise TypeError("Key must be a string") else: raise ValueError("Result cannot be indexed by %r" %(item,)) def __str__(self): - return self._value + return self.value def __repr__(self): - return "%s(%r)" % (Result.__name__, json.dumps({'result': self._value})) + return "%s(%r)" % (Result.__name__, json.dumps({'result': self.value})) class GraphSession(object): @@ -133,7 +133,8 @@ def execute(self, query, parameters=None, row_factory=None): # TODO: pass down trace, timeout parameters # this is basically Session.execute_async, repeated here to customize the row factory. May want to add that # parameter to the session method - future = self.session._create_response_future(query, graph_parameters, trace=False, custom_payload=options, timeout=self.session.default_timeout) + future = self.session._create_response_future(query, parameters=None, trace=False, custom_payload=options, timeout=self.session.default_timeout) + future.message._query_params = graph_parameters future._protocol_handler = self.session.client_protocol_handler future.row_factory = row_factory future.send_request() From 18a7214406b79748676e1d9b1c2ea3c34c0c51a2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 21 Dec 2015 15:36:41 -0600 Subject: [PATCH 0260/2005] Additional parameters for GraphSession.execute --- dse/graph.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/dse/graph.py b/dse/graph.py index 23cc2ca0fd..6067340c72 100644 --- a/dse/graph.py +++ b/dse/graph.py @@ -6,7 +6,7 @@ # (attr, server option, description) _graph_options = ( -('graph_keyspace', 'define the keyspace the graph is defined with.', 'graph-keyspace'), +('graph_namespace', 'define the namespace the graph is defined with.', 'graph-keyspace'), ('graph_source', 'choose the graph traversal source, configured on the server side.', 'graph-source'), ('graph_language', 'the language used in the queries (default "gremlin-groovy"', 'graph-language'), ('graph_rebinding', 'name of the graph in the query (default "g")', 'graph-rebinding') @@ -88,6 +88,9 @@ def __repr__(self): return "%s(%r)" % (Result.__name__, json.dumps({'result': self.value})) +_NOT_SET = object() + + class GraphSession(object): """ A session object allowing to execute Gremlin queries against a DSEGraph cluster. @@ -113,7 +116,7 @@ def __init__(self, session, default_graph_options=None): if default_graph_options: self.default_graph_options.update(default_graph_options) - def execute(self, query, parameters=None, row_factory=None): + def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, row_factory=None): """ Executes a Gremlin query string, a GraphStatement, or a BoundGraphStatement synchronously, and returns a GraphResultSet from this execution. @@ -128,15 +131,15 @@ def execute(self, query, parameters=None, row_factory=None): if parameters: graph_parameters = self._transform_params(parameters) - row_factory = row_factory or self.default_graph_row_factory - # TODO: pass down trace, timeout parameters # this is basically Session.execute_async, repeated here to customize the row factory. May want to add that # parameter to the session method - future = self.session._create_response_future(query, parameters=None, trace=False, custom_payload=options, timeout=self.session.default_timeout) + if timeout is _NOT_SET: + timeout = self.session.default_timeout + future = self.session._create_response_future(query, parameters=None, trace=trace, custom_payload=options, timeout=timeout) future.message._query_params = graph_parameters future._protocol_handler = self.session.client_protocol_handler - future.row_factory = row_factory + future.row_factory = row_factory or self.default_graph_row_factory future.send_request() return future.result() @@ -145,3 +148,4 @@ def _transform_params(self, parameters): if not isinstance(parameters, dict): raise Exception('The values parameter can only be a dictionary, unnamed parameters are not authorized in Gremlin queries.') return [json.dumps({'name': name, 'value': value}) for name, value in six.iteritems(parameters)] + From da81b887dbd4d4539d43539c36cbf3949c639fb1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 21 Dec 2015 15:41:20 -0600 Subject: [PATCH 0261/2005] cleanup --- dse/graph.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/dse/graph.py b/dse/graph.py index 6067340c72..a02183a0e8 100644 --- a/dse/graph.py +++ b/dse/graph.py @@ -1,15 +1,14 @@ from cassandra.query import SimpleStatement -from collections import namedtuple import json import six # (attr, server option, description) _graph_options = ( -('graph_namespace', 'define the namespace the graph is defined with.', 'graph-keyspace'), -('graph_source', 'choose the graph traversal source, configured on the server side.', 'graph-source'), -('graph_language', 'the language used in the queries (default "gremlin-groovy"', 'graph-language'), -('graph_rebinding', 'name of the graph in the query (default "g")', 'graph-rebinding') + ('graph_namespace', 'define the namespace the graph is defined with.', 'graph-keyspace'), + ('graph_source', 'choose the graph traversal source, configured on the server side.', 'graph-source'), + ('graph_language', 'the language used in the queries (default "gremlin-groovy"', 'graph-language'), + ('graph_rebinding', 'name of the graph in the query (default "g")', 'graph-rebinding') ) @@ -79,7 +78,7 @@ def __getitem__(self, item): elif isinstance(self.value, list) and isinstance(key, int): raise TypeError("Key must be a string") else: - raise ValueError("Result cannot be indexed by %r" %(item,)) + raise ValueError("Result cannot be indexed by %r" % (item,)) def __str__(self): return self.value @@ -148,4 +147,3 @@ def _transform_params(self, parameters): if not isinstance(parameters, dict): raise Exception('The values parameter can only be a dictionary, unnamed parameters are not authorized in Gremlin queries.') return [json.dumps({'name': name, 'value': value}) for name, value in six.iteritems(parameters)] - From 0bbd64b8373648332f1531cace2b8d396e7e49c7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 21 Dec 2015 15:42:08 -0600 Subject: [PATCH 0262/2005] Remove change notes --- dse/changes.md | 106 ------------------------------------------------- 1 file changed, 106 deletions(-) delete mode 100644 dse/changes.md diff --git a/dse/changes.md b/dse/changes.md deleted file mode 100644 index 0b6cec3f6c..0000000000 --- a/dse/changes.md +++ /dev/null @@ -1,106 +0,0 @@ -# New API for Python Driver's DSEGraph integration - -## GraphSession - -#### constructor -- parameters: `Session` mandatory, `default_graph_options` optional. - -#### public fields - - `default_graph_options`, dictionary initialised with the default graph options defined in the specs. Options can be overriten or other added. -- `session`, the wrapped `cassandra.cluster.Session`. - -#### exposed methods - - `execute()` takes arguments as either a `GraphStatement` or `BoundGraphStatement` or a query string, to execute it *synchronously* against the DSE cluster. Returns a `GraphResultSet` object containing the results of the execution. - - `prepare()` takes in argument a `GraphStatement` or a query string, and prepares it against the DSE instance. Returns a `PreparedGraphStatement`. - -##### private fields -_None_ - - - -## AbstractGraphStatement - -_Is not supposed to be directly used by users, is not part of the public API. Whereas child implementations will be exposed._ - -#### constructor -- parameters: _None_ - -#### public fields -- `graph_options`, dictionary containing the graph options to use and to override from the default ones defined in the session. - -#### exposed methods -_None_ - -##### private fields -- `_wrapped`, the wrapped driver's statement. It is not supposed to be modified by users but only used internally. - - - -## GraphStatement _[extends AbstractGraphStatement]_ - -#### constructor -- parameters: `query_string` mandatory, `graph_options` optional. - -#### public fields -- inherited from `AbstractGraphStatement` -- `graph_options`, used to override `GraphSession`'s default graph options, on a per-statement basis. - -#### exposed methods - -_None_ - -##### private fields -- inherited from `AbstractGraphStatement` - - - -## PreparedGraphStatement - -#### constructor -- not supposed to be used by users - -#### public fields -_None_ - -#### exposed methods -- `bind()`, binds the provided values, and creates a `BoundGraphStatement` instance. The values must be in a dictionary, meaning that all parameters must be named, an exception will be raised if it is not the case. - -##### private fields -- `_prepared_statement`, the `cassandra.query.PreparedStatement` instance wrapped. -- `_graph_statement`, the graph statement associated with this statement. Used to propagate the graph options to a `BoundGraphStatement` created from a `PreparedGraphStatement`. - - - -## BoundGraphStatement _[extends AbstractGraphStatement]_ - -#### constructor -- not supposed to be used by users. - -#### public fields -- inherited from `AbstractGraphStatement` -- `values`, the values to be bound with the statement and sent to the server. - -#### exposed methods -- `bind()`, bind the values parameters to this `BoundGraphStatement` instance. The values must be in a dictionary, meaning that all parameters must be named, an exception will be raised if it is not the case. - -##### private fields: -- inherited from `AbstractGraphStatement` -- `_prepared_statement`, the `cassandra.query.PreparedStatement` tied to this `BoundGraphStatement`. -- `_graph_statement`, the graph statement associated with this statement. Used to propagate the graph options to a `BoundGraphStatement` created from a `PreparedGraphStatement`. - - - -## GraphResultSet - -#### constructor -- not supposed to be used by users. - -#### public fields -_None_ - -#### exposed methods -- `__iter__()`/`next()`, allows iteration in the set, in `for ... in ...` loops. And manual iteration. Each iteration produces and returns one `GraphTraversalResult`. -- `__getitem__`, allows getting results by index as a array. Each indexed access will return a `GraphTraversalResult`. - -##### private fields - - `_wrapped_rs`, the wrapped `cassandra.cluster.ResultSet`. From 2c35335c64c996a1336f74497078466f2499f0dd Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 22 Dec 2015 13:47:32 -0600 Subject: [PATCH 0263/2005] Fix graph Result str to handle varying value types. --- dse/graph.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dse/graph.py b/dse/graph.py index a02183a0e8..4425aa9275 100644 --- a/dse/graph.py +++ b/dse/graph.py @@ -81,7 +81,7 @@ def __getitem__(self, item): raise ValueError("Result cannot be indexed by %r" % (item,)) def __str__(self): - return self.value + return str(self.value) def __repr__(self): return "%s(%r)" % (Result.__name__, json.dumps({'result': self.value})) From 82a690965dda24b8ad64c5e555c93b310588baaa Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 5 Jan 2016 09:26:43 -0600 Subject: [PATCH 0264/2005] PYTHON-460: correct docs around default paging behavior --- docs/query_paging.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/query_paging.rst b/docs/query_paging.rst index 5e5391ff25..52366116e8 100644 --- a/docs/query_paging.rst +++ b/docs/query_paging.rst @@ -3,8 +3,8 @@ Paging Large Queries ==================== Cassandra 2.0+ offers support for automatic query paging. Starting with -version 2.0 of the driver, if :attr:`~.Cluster.protocol_version` is set to -:const:`4` (it is by default), queries returning large result sets will be +version 2.0 of the driver, if :attr:`~.Cluster.protocol_version` is greater than +:const:`2` (it is by default), queries returning large result sets will be automatically paged. Controlling the Page Size From f77173522692a00ee7756a4d27e996a43b5e14c3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 6 Jan 2016 12:05:58 -0600 Subject: [PATCH 0265/2005] cython: fix offset overflow in large collections PYTHON-459 --- cassandra/deserializers.pyx | 44 +++++++++++++++---------------------- 1 file changed, 18 insertions(+), 26 deletions(-) diff --git a/cassandra/deserializers.pyx b/cassandra/deserializers.pyx index 061d340a3f..cfc4326d27 100644 --- a/cassandra/deserializers.pyx +++ b/cassandra/deserializers.pyx @@ -228,54 +228,47 @@ cdef list _deserialize_list_or_set(itemlen_t dummy_version, The 'dummy' parameter is needed to make fused types work, so that we can specialize on the protocol version. """ - cdef itemlen_t itemlen cdef Buffer itemlen_buf cdef Buffer elem_buf cdef itemlen_t numelements - cdef itemlen_t idx + cdef int offset cdef list result = [] - _unpack_len[itemlen_t](0, &numelements, buf) - idx = sizeof(itemlen_t) + _unpack_len[itemlen_t](buf, 0, &numelements) + offset = sizeof(itemlen_t) protocol_version = max(3, protocol_version) for _ in range(numelements): - subelem(buf, &elem_buf, &idx) + subelem[itemlen_t](buf, &elem_buf, &offset, dummy_version) result.append(from_binary(deserializer, &elem_buf, protocol_version)) return result cdef inline int subelem( - Buffer *buf, Buffer *elem_buf, itemlen_t *idx_p) except -1: + Buffer *buf, Buffer *elem_buf, int* offset, itemlen_t dummy) except -1: """ Read the next element from the buffer: first read the size (in bytes) of the element, then fill elem_buf with a newly sliced buffer of this size (and the right offset). - - NOTE: The handling of 'idx' is somewhat atrocious, as there is a Cython - bug with the combination fused types + 'except' clause. - So instead, we pass in a pointer to 'idx', namely 'idx_p', and write - to this instead. """ cdef itemlen_t elemlen - _unpack_len[itemlen_t](idx_p[0], &elemlen, buf) - idx_p[0] += sizeof(itemlen_t) - slice_buffer(buf, elem_buf, idx_p[0], elemlen) - idx_p[0] += elemlen + _unpack_len[itemlen_t](buf, offset[0], &elemlen) + offset[0] += sizeof(itemlen_t) + slice_buffer(buf, elem_buf, offset[0], elemlen) + offset[0] += elemlen return 0 -cdef int _unpack_len(itemlen_t idx, itemlen_t *elemlen, Buffer *buf) except -1: - cdef itemlen_t result +cdef int _unpack_len(Buffer *buf, int offset, itemlen_t *output) except -1: cdef Buffer itemlen_buf - slice_buffer(buf, &itemlen_buf, idx, sizeof(itemlen_t)) + slice_buffer(buf, &itemlen_buf, offset, sizeof(itemlen_t)) if itemlen_t is uint16_t: - elemlen[0] = uint16_unpack(&itemlen_buf) + output[0] = uint16_unpack(&itemlen_buf) else: - elemlen[0] = int32_unpack(&itemlen_buf) + output[0] = int32_unpack(&itemlen_buf) return 0 @@ -314,21 +307,20 @@ cdef _deserialize_map(itemlen_t dummy_version, Buffer *buf, int protocol_version, Deserializer key_deserializer, Deserializer val_deserializer, key_type, val_type): - cdef itemlen_t itemlen, val_len, key_len cdef Buffer key_buf, val_buf cdef Buffer itemlen_buf cdef itemlen_t numelements - cdef itemlen_t idx = sizeof(itemlen_t) + cdef int offset cdef list result = [] - _unpack_len[itemlen_t](0, &numelements, buf) - idx = sizeof(itemlen_t) + _unpack_len[itemlen_t](buf, 0, &numelements) + offset = sizeof(itemlen_t) themap = util.OrderedMapSerializedKey(key_type, protocol_version) protocol_version = max(3, protocol_version) for _ in range(numelements): - subelem(buf, &key_buf, &idx) - subelem(buf, &val_buf, &idx) + subelem[itemlen_t](buf, &key_buf, &offset, dummy_version) + subelem[itemlen_t](buf, &val_buf, &offset, numelements) key = from_binary(key_deserializer, &key_buf, protocol_version) val = from_binary(val_deserializer, &val_buf, protocol_version) themap._insert_unchecked(key, to_bytes(&key_buf), val) From f2c3e240cf04446025a9a94fbd277ea3fb83f3e6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 6 Jan 2016 13:33:07 -0600 Subject: [PATCH 0266/2005] geo types with deserialization PYTHON-456 --- dse/cqltypes.py | 61 +++++++++++++++++++++++++++++++++++++++++++++++++ dse/marshal.py | 8 +++++++ dse/util.py | 50 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 119 insertions(+) create mode 100644 dse/cqltypes.py create mode 100644 dse/marshal.py create mode 100644 dse/util.py diff --git a/dse/cqltypes.py b/dse/cqltypes.py new file mode 100644 index 0000000000..a4dc56eec6 --- /dev/null +++ b/dse/cqltypes.py @@ -0,0 +1,61 @@ +from six.moves import range +import struct +from cassandra.cqltypes import CassandraType +from dse.marshal import point_be, point_le, circle_be, circle_le +from dse.util import Point, Circle, LineString, Polygon + + +class PointType(CassandraType): + typename = 'org.apache.cassandra.db.marshal.PointType' + + @staticmethod + def deserialize(byts, protocol_version): + is_little_endian = bool(byts[0]) + point = point_le if is_little_endian else point_be + return Point(*point.unpack_from(byts, 5)) # ofs = endian byte + int type + + +class CircleType(CassandraType): + typename = 'org.apache.cassandra.db.marshal.CircleType' + + @staticmethod + def deserialize(byts, protocol_version): + is_little_endian = bool(byts[0]) + circle = circle_le if is_little_endian else circle_be + return Circle(*circle.unpack_from(byts, 5)) + + +class LineStringType(CassandraType): + typename = 'org.apache.cassandra.db.marshal.LineStringType' + + @staticmethod + def deserialize(byts, protocol_version): + is_little_endian = bool(byts[0]) + point = point_le if is_little_endian else point_be + points = ((point.unpack_from(byts, offset) for offset in range(1 + 4 + 4, len(byts), point.size))) # start = endian + int type + int count + return LineString(points) + + +class PolygonType(CassandraType): + typename = 'org.apache.cassandra.db.marshal.PolygonType' + + @staticmethod + def deserialize(byts, protocol_version): + is_little_endian = bool(byts[0]) + if is_little_endian: + int_fmt = 'dd') +point_le = Struct('ddd') +circle_le = Struct(' Date: Wed, 6 Jan 2016 16:35:39 -0600 Subject: [PATCH 0267/2005] geo type binary serialization PYTHON-456 --- dse/cqltypes.py | 55 +++++++++++++++++++++++++++++++++++++++++++++---- dse/marshal.py | 1 - 2 files changed, 51 insertions(+), 5 deletions(-) diff --git a/dse/cqltypes.py b/dse/cqltypes.py index a4dc56eec6..1aa5173c62 100644 --- a/dse/cqltypes.py +++ b/dse/cqltypes.py @@ -1,12 +1,30 @@ +import io from six.moves import range import struct from cassandra.cqltypes import CassandraType +from cassandra.util import is_little_endian as _platform_is_le from dse.marshal import point_be, point_le, circle_be, circle_le from dse.util import Point, Circle, LineString, Polygon +_endian_flag = 1 if _platform_is_le else 0 + + +class WKBGeometryType(object): + POINT = 1 + LINESTRING = 2 + POLYGON = 3 + CIRCLE = 101 # DSE custom + class PointType(CassandraType): - typename = 'org.apache.cassandra.db.marshal.PointType' + typename = 'PointType' + + _platform_point = point_le if _platform_is_le else point_be + _type = struct.pack('=BI', _endian_flag, WKBGeometryType.POINT) + + @staticmethod + def serialize(val, protocol_version): + return PointType._type + PointType._platform_point.pack(val.x, val.y) @staticmethod def deserialize(byts, protocol_version): @@ -16,7 +34,14 @@ def deserialize(byts, protocol_version): class CircleType(CassandraType): - typename = 'org.apache.cassandra.db.marshal.CircleType' + typename = 'CircleType' + + _platform_circle = circle_le if _platform_is_le else circle_be + _type = struct.pack('=BI', _endian_flag, WKBGeometryType.CIRCLE) + + @staticmethod + def serialize(val, protocol_version): + return CircleType._type + CircleType._platform_circle.pack(val.x, val.y, val.r) @staticmethod def deserialize(byts, protocol_version): @@ -26,7 +51,14 @@ def deserialize(byts, protocol_version): class LineStringType(CassandraType): - typename = 'org.apache.cassandra.db.marshal.LineStringType' + typename = 'LineStringType' + + _type = struct.pack('=BI', _endian_flag, WKBGeometryType.LINESTRING) + + @staticmethod + def serialize(val, protocol_version): + num_points = len(val.points) + return LineStringType._type + struct.pack('=I' + 'dd' * num_points, num_points, *(d for point in val.points for d in point)) @staticmethod def deserialize(byts, protocol_version): @@ -37,7 +69,22 @@ def deserialize(byts, protocol_version): class PolygonType(CassandraType): - typename = 'org.apache.cassandra.db.marshal.PolygonType' + typename = 'PolygonType' + + _type = struct.pack('=BI', _endian_flag, WKBGeometryType.POLYGON) + _platform_ring_count = struct.Struct('=I').pack + + @staticmethod + def serialize(val, protocol_version): + buf = io.BytesIO(PolygonType._type) + buf.seek(0, 2) + + num_rings = len(val.rings) + buf.write(PolygonType._platform_ring_count(num_rings)) + for ring in val.rings: + num_points = len(ring) + buf.write(struct.pack('=I' + 'dd' * num_points, num_points, *(d for point in ring for d in point))) + return buf.getvalue() @staticmethod def deserialize(byts, protocol_version): diff --git a/dse/marshal.py b/dse/marshal.py index e2f0bc7c3d..5c1a7e0ff5 100644 --- a/dse/marshal.py +++ b/dse/marshal.py @@ -5,4 +5,3 @@ circle_be = Struct('>ddd') circle_le = Struct(' Date: Fri, 1 Jan 2016 13:19:53 +0800 Subject: [PATCH 0268/2005] improve ExponentialReconnectionPolicy,now can custom max attempts time --- cassandra/policies.py | 15 +++++++++++++-- tests/unit/test_policies.py | 11 ++++++++++- 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/cassandra/policies.py b/cassandra/policies.py index 8132f8ab8e..3d4b4a4b65 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -528,10 +528,14 @@ class ExponentialReconnectionPolicy(ReconnectionPolicy): a set maximum delay. """ - def __init__(self, base_delay, max_delay): + def __init__(self, base_delay, max_delay, max_attempts=64): """ `base_delay` and `max_delay` should be in floating point units of seconds. + + `max_attempts` should be a total number of attempts to be made before + giving up, or :const:`None` to continue reconnection attempts forever. + The default is 64. """ if base_delay < 0 or max_delay < 0: raise ValueError("Delays may not be negative") @@ -539,11 +543,18 @@ def __init__(self, base_delay, max_delay): if max_delay < base_delay: raise ValueError("Max delay must be greater than base delay") + if max_attempts is not None and max_attempts < 0: + raise ValueError("max_attempts must not be negative") + self.base_delay = base_delay self.max_delay = max_delay + self.max_attempts = max_attempts def new_schedule(self): - return (min(self.base_delay * (2 ** i), self.max_delay) for i in range(64)) + i=0 + while self.max_attempts == None or i < self.max_attempts: + yield min(self.base_delay * (2 ** i), self.max_delay) + i += 1 class WriteType(object): diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 97e24455aa..2ca63491eb 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -814,9 +814,18 @@ def test_bad_vals(self): self.assertRaises(ValueError, ExponentialReconnectionPolicy, -1, 0) self.assertRaises(ValueError, ExponentialReconnectionPolicy, 0, -1) self.assertRaises(ValueError, ExponentialReconnectionPolicy, 9000, 1) + self.assertRaises(ValueError, ExponentialReconnectionPolicy, 1, 2,-1) def test_schedule(self): - policy = ExponentialReconnectionPolicy(base_delay=2, max_delay=100) + policy = ExponentialReconnectionPolicy(base_delay=2, max_delay=100, max_attempts=None) + i=0; + for delay in policy.new_schedule(): + i += 1 + if i > 10000: + break; + self.assertEqual(i, 10001) + + policy = ExponentialReconnectionPolicy(base_delay=2, max_delay=100, max_attempts=64) schedule = list(policy.new_schedule()) self.assertEqual(len(schedule), 64) for i, delay in enumerate(schedule): From a6eba0ff037256fd28ff89ed0ae0de821ac8aad6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 7 Jan 2016 11:46:05 -0600 Subject: [PATCH 0269/2005] add dse.cluster for implicit type registration PYTHON-456 --- cassandra/cluster.py | 7 +++++-- dse/cluster.py | 24 ++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 2 deletions(-) create mode 100644 dse/cluster.py diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 402a4d3822..7f86d09456 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -895,11 +895,14 @@ def shutdown(self): def _new_session(self): session = Session(self, self.metadata.all_hosts()) + self._session_register_user_types(session) + self.sessions.add(session) + return session + + def _session_register_user_types(self, session): for keyspace, type_map in six.iteritems(self._user_types): for udt_name, klass in six.iteritems(type_map): session.user_type_registered(keyspace, udt_name, klass) - self.sessions.add(session) - return session def _cleanup_failed_on_up_handling(self, host): self.load_balancing_policy.on_down(host) diff --git a/dse/cluster.py b/dse/cluster.py new file mode 100644 index 0000000000..650a792c38 --- /dev/null +++ b/dse/cluster.py @@ -0,0 +1,24 @@ +from cassandra.cluster import Cluster, Session +import dse.cqltypes # unsued here, imported to cause type registration +from dse.util import Point, Circle, LineString, Polygon + + +class Cluster(Cluster): + + def _new_session(self): + session = Session(self, self.metadata.all_hosts()) + self._session_register_user_types(session) + self.sessions.add(session) + return session + + +class Session(Session): + + def __init__(self, cluster, hosts): + super(Session, self).__init__(cluster, hosts) + + def cql_encode_str_quoted(val): + return "'%s'" % val + + for typ in (Point, Circle, LineString, Polygon): + self.encoder.mapping[typ] = cql_encode_str_quoted From b1beb4f41ebe62db58cf056c0add2b1e43638549 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 7 Jan 2016 16:29:45 -0600 Subject: [PATCH 0270/2005] GraphOptions as an object, update docs --- dse/graph.py | 83 ++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 57 insertions(+), 26 deletions(-) diff --git a/dse/graph.py b/dse/graph.py index 4425aa9275..d13676e9f0 100644 --- a/dse/graph.py +++ b/dse/graph.py @@ -3,7 +3,7 @@ import json import six -# (attr, server option, description) +# (attr, description, server option) _graph_options = ( ('graph_namespace', 'define the namespace the graph is defined with.', 'graph-keyspace'), ('graph_source', 'choose the graph traversal source, configured on the server side.', 'graph-source'), @@ -12,53 +12,79 @@ ) -class GraphStatement(SimpleStatement): +class GraphOptions(object): - def __init__(self, *args, **kwargs): - super(GraphStatement, self).__init__(*args, **kwargs) + def __init__(self, **kwargs): self._graph_options = {} + for attr, value in six.iteritems(kwargs): + setattr(self, attr, value) + + def update(self, options): + self._graph_options.update(options._graph_options) def get_options_map(self, base_options): + """ + Returns a map for base_options updated with options set on this object, or + base_options map if none were set. + """ if self._graph_options: - options = base_options.copy() + options = base_options._graph_options.copy() options.update(self._graph_options) return options else: - return base_options + return base_options._graph_options for opt in _graph_options: - key = opt[2] - - def get(self): + def get(self, key=opt[2]): return self._graph_options.get(key) - def set(self, value): + def set(self, value, key=opt[2]): if value: self._graph_options[key] = value else: self._graph_options.pop(key) - def delete(self): + def delete(self, key=opt[2]): self._graph_options.pop(key) - setattr(GraphStatement, opt[0], property(get, set, delete, opt[1])) + setattr(GraphOptions, opt[0], property(get, set, delete, opt[1])) + + +class GraphStatement(SimpleStatement): + + options = None + """ + GraphOptions for this statement. + Any attributes set here override the GraphSession defaults. + """ + + def __init__(self, *args, **kwargs): + super(GraphStatement, self).__init__(*args, **kwargs) + self.options = GraphOptions() def single_object_row_factory(column_names, rows): - # returns the string value of graph results + """ + returns the JSON string value of graph results + """ return [row[0] for row in rows] def graph_result_row_factory(column_names, rows): - # Returns an object that can load graph results and produce specific types + """ + Returns an object that can load graph results and produce specific types + """ return [Result(row[0]) for row in rows] class Result(object): value = None + """ + Deserialized value from the result + """ def __init__(self, json_value): self.value = json.loads(json_value)['result'] @@ -92,17 +118,16 @@ def __repr__(self): class GraphSession(object): """ - A session object allowing to execute Gremlin queries against a DSEGraph cluster. + A session wrapper for executing Gremlin queries against a DSE cluster """ + default_graph_options = None """ - The default options to be used for all graph statements executions. Is initialised with : - - graph-source = 'default' - - graph-language = 'gremlin-groovy' - - These options can be over redefined, or removed. + Default options, initialized as follows if no additional options are provided + in init: + GraphOptions(graph_source=b'default', + graph_language=b'gremlin-groovy') """ - default_graph_options = None default_graph_row_factory = staticmethod(graph_result_row_factory) @@ -110,8 +135,14 @@ class GraphSession(object): session = None def __init__(self, session, default_graph_options=None): + """ + :param session: Session to use for graph queries + :param default_graph_options: a GraphOptions object; options are merged with built-in defaults + """ self.session = session - self.default_graph_options = {'graph-source': b'default', 'graph-language': b'gremlin-groovy'} + # TODO: python 3 + self.default_graph_options = GraphOptions(graph_source=b'default', + graph_language=b'gremlin-groovy') if default_graph_options: self.default_graph_options.update(default_graph_options) @@ -121,16 +152,16 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, row_fac and returns a GraphResultSet from this execution. """ if isinstance(query, GraphStatement): - options = query.get_options_map(self.default_graph_options) + options = query.options.get_options_map(self.default_graph_options) else: query = GraphStatement(query) - options = self.default_graph_options + options = self.default_graph_options._graph_options graph_parameters = None if parameters: graph_parameters = self._transform_params(parameters) - # TODO: pass down trace, timeout parameters + # TODO: # this is basically Session.execute_async, repeated here to customize the row factory. May want to add that # parameter to the session method if timeout is _NOT_SET: @@ -145,5 +176,5 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, row_fac # this may go away if we change parameter encoding def _transform_params(self, parameters): if not isinstance(parameters, dict): - raise Exception('The values parameter can only be a dictionary, unnamed parameters are not authorized in Gremlin queries.') + raise ValueError('The parameters must be a dictionary. Unnamed parameters are not allowed.') return [json.dumps({'name': name, 'value': value}) for name, value in six.iteritems(parameters)] From 006e7e460c3df5bfc2debe5290ea0433bc22de49 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 7 Jan 2016 16:59:36 -0600 Subject: [PATCH 0271/2005] graph tweaks for Python3 --- dse/graph.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/dse/graph.py b/dse/graph.py index d13676e9f0..bb2d30ca3d 100644 --- a/dse/graph.py +++ b/dse/graph.py @@ -42,6 +42,8 @@ def get(self, key=opt[2]): def set(self, value, key=opt[2]): if value: + if not isinstance(value, six.binary_type): + value = six.b(value) self._graph_options[key] = value else: self._graph_options.pop(key) @@ -140,7 +142,6 @@ def __init__(self, session, default_graph_options=None): :param default_graph_options: a GraphOptions object; options are merged with built-in defaults """ self.session = session - # TODO: python 3 self.default_graph_options = GraphOptions(graph_source=b'default', graph_language=b'gremlin-groovy') if default_graph_options: @@ -177,4 +178,4 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, row_fac def _transform_params(self, parameters): if not isinstance(parameters, dict): raise ValueError('The parameters must be a dictionary. Unnamed parameters are not allowed.') - return [json.dumps({'name': name, 'value': value}) for name, value in six.iteritems(parameters)] + return [json.dumps({'name': name, 'value': value}).encode('utf-8') for name, value in six.iteritems(parameters)] From ccae3183ea69b4e8cd8beebe91e88393970a6ee2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 8 Jan 2016 09:33:31 -0600 Subject: [PATCH 0272/2005] Fix graph Result getitem and make it work for lists --- dse/graph.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dse/graph.py b/dse/graph.py index bb2d30ca3d..aa8f26f258 100644 --- a/dse/graph.py +++ b/dse/graph.py @@ -101,10 +101,10 @@ def __getattr__(self, attr): raise AttributeError("Result has no top-level attribute %r" % (attr,)) def __getitem__(self, item): - if isinstance(self.value, dict) and isinstance(key, six.string_types): + if isinstance(self.value, dict) and isinstance(item, six.string_types): + return self.value[item] + elif isinstance(self.value, list) and isinstance(item, int): return self.value[item] - elif isinstance(self.value, list) and isinstance(key, int): - raise TypeError("Key must be a string") else: raise ValueError("Result cannot be indexed by %r" % (item,)) From 3bddef6185f2691e1713dfe51d1fa26d1555724c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 8 Jan 2016 10:48:07 -0600 Subject: [PATCH 0273/2005] add TODO and tweak test #455 --- cassandra/policies.py | 3 +++ tests/unit/test_policies.py | 31 ++++++++++++++++++------------- 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/cassandra/policies.py b/cassandra/policies.py index 3d4b4a4b65..95c7829cdc 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -528,6 +528,9 @@ class ExponentialReconnectionPolicy(ReconnectionPolicy): a set maximum delay. """ + # TODO: max_attempts is 64 to preserve legacy default behavior + # consider changing to None in major release to prevent the policy + # giving up forever def __init__(self, base_delay, max_delay, max_attempts=64): """ `base_delay` and `max_delay` should be in floating point units of diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 2ca63491eb..74daab99bc 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -816,25 +816,30 @@ def test_bad_vals(self): self.assertRaises(ValueError, ExponentialReconnectionPolicy, 9000, 1) self.assertRaises(ValueError, ExponentialReconnectionPolicy, 1, 2,-1) - def test_schedule(self): - policy = ExponentialReconnectionPolicy(base_delay=2, max_delay=100, max_attempts=None) - i=0; - for delay in policy.new_schedule(): - i += 1 - if i > 10000: - break; - self.assertEqual(i, 10001) - - policy = ExponentialReconnectionPolicy(base_delay=2, max_delay=100, max_attempts=64) + def test_schedule_no_max(self): + base_delay = 2 + max_delay = 100 + test_iter = 10000 + policy = ExponentialReconnectionPolicy(base_delay=base_delay, max_delay=max_delay, max_attempts=None) + sched_slice = list(islice(policy.new_schedule(), 0, test_iter)) + self.assertEqual(sched_slice[0], base_delay) + self.assertEqual(sched_slice[-1], max_delay) + self.assertEqual(len(sched_slice), test_iter) + + def test_schedule_with_max(self): + base_delay = 2 + max_delay = 100 + max_attempts = 64 + policy = ExponentialReconnectionPolicy(base_delay=base_delay, max_delay=max_delay, max_attempts=max_attempts) schedule = list(policy.new_schedule()) - self.assertEqual(len(schedule), 64) + self.assertEqual(len(schedule), max_attempts) for i, delay in enumerate(schedule): if i == 0: - self.assertEqual(delay, 2) + self.assertEqual(delay, base_delay) elif i < 6: self.assertEqual(delay, schedule[i - 1] * 2) else: - self.assertEqual(delay, 100) + self.assertEqual(delay, max_delay) ONE = ConsistencyLevel.ONE From a9d1e4515860c7e0852d3cf58ff43618c6d19b93 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 8 Jan 2016 13:16:50 -0600 Subject: [PATCH 0274/2005] cython: stack allocation and memcpy in marshalling to avoid alignment issues PYTHON-450 --- cassandra/cython_marshal.pyx | 80 ++++++++++++++++++++++-------------- 1 file changed, 49 insertions(+), 31 deletions(-) diff --git a/cassandra/cython_marshal.pyx b/cassandra/cython_marshal.pyx index cdf1bc7c88..e59cfb7078 100644 --- a/cassandra/cython_marshal.pyx +++ b/cassandra/cython_marshal.pyx @@ -18,6 +18,7 @@ import six from libc.stdint cimport (int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t) +from libc.string cimport memcpy from cassandra.buffer cimport Buffer, buf_read, to_bytes cdef bint is_little_endian @@ -32,7 +33,7 @@ cdef inline void swap_order(char *buf, Py_ssize_t size): (reverse all the bytes). There are functions ntohl etc, but these may be POSIX-dependent. """ - cdef Py_ssize_t start, end, i + cdef Py_ssize_t end, i cdef char c if is_little_endian: @@ -42,59 +43,76 @@ cdef inline void swap_order(char *buf, Py_ssize_t size): buf[i] = buf[end] buf[end] = c +cdef inline void copy_maybe_swap(char *buf, void* dst, Py_ssize_t size): + """ + Copy to aligned destination, conditionally swapping to native byte order + """ + cdef Py_ssize_t start, end, i + cdef char* out = dst + + if is_little_endian: + for i in range(size): + out[size - i - 1] = buf[i] + else: + memcpy(dst, buf, size) + cdef inline Py_ssize_t div2(Py_ssize_t x): return x >> 1 -### Unpacking of signed integers +# TODO: is there a way to make these from a macro? +# tried fused types, but there are compiler errors requiring strange games +# type as an arg instead of just templating based on return type +# return unpack_x[int64_t](buf_read(buf, sizeof(int64_t)), 0) cdef inline int64_t int64_unpack(Buffer *buf) except ?0xDEAD: - cdef int64_t x = ( buf_read(buf, 8))[0] - cdef char *p = &x - swap_order( &x, 8) - return x + cdef int64_t out = 0 + copy_maybe_swap(buf_read(buf, sizeof(out)), &out, sizeof(out)) + return out cdef inline int32_t int32_unpack(Buffer *buf) except ?0xDEAD: - cdef int32_t x = ( buf_read(buf, 4))[0] - cdef char *p = &x - swap_order( &x, 4) - return x + cdef int32_t out = 0 + copy_maybe_swap(buf_read(buf, sizeof(out)), &out, sizeof(out)) + return out cdef inline int16_t int16_unpack(Buffer *buf) except ?0xDED: - cdef int16_t x = ( buf_read(buf, 2))[0] - swap_order( &x, 2) - return x + cdef int16_t out = 0 + copy_maybe_swap(buf_read(buf, sizeof(out)), &out, sizeof(out)) + return out cdef inline int8_t int8_unpack(Buffer *buf) except ?80: - return ( buf_read(buf, 1))[0] + cdef int8_t out = 0 + copy_maybe_swap(buf_read(buf, sizeof(out)), &out, sizeof(out)) + return out cdef inline uint64_t uint64_unpack(Buffer *buf) except ?0xDEAD: - cdef uint64_t x = ( buf_read(buf, 8))[0] - swap_order( &x, 8) - return x + cdef uint64_t out = 0 + copy_maybe_swap(buf_read(buf, sizeof(out)), &out, sizeof(out)) + return out cdef inline uint32_t uint32_unpack(Buffer *buf) except ?0xDEAD: - cdef uint32_t x = ( buf_read(buf, 4))[0] - swap_order( &x, 4) - return x + cdef uint32_t out = 0 + copy_maybe_swap(buf_read(buf, sizeof(out)), &out, sizeof(out)) + return out cdef inline uint16_t uint16_unpack(Buffer *buf) except ?0xDEAD: - cdef uint16_t x = ( buf_read(buf, 2))[0] - swap_order( &x, 2) - return x + cdef uint16_t out = 0 + copy_maybe_swap(buf_read(buf, sizeof(out)), &out, sizeof(out)) + return out cdef inline uint8_t uint8_unpack(Buffer *buf) except ?0xff: - return ( buf_read(buf, 1))[0] + cdef uint8_t out = 0 + copy_maybe_swap(buf_read(buf, sizeof(out)), &out, sizeof(out)) + return out cdef inline double double_unpack(Buffer *buf) except ?1.74: - cdef double x = ( buf_read(buf, 8))[0] - swap_order( &x, 8) - return x + cdef double out = 0 + copy_maybe_swap(buf_read(buf, sizeof(out)), &out, sizeof(out)) + return out cdef inline float float_unpack(Buffer *buf) except ?1.74: - cdef float x = ( buf_read(buf, 4))[0] - swap_order( &x, 4) - return x - + cdef float out = 0 + copy_maybe_swap(buf_read(buf, sizeof(out)), &out, sizeof(out)) + return out cdef varint_unpack(Buffer *term): """Unpack a variable-sized integer""" From ea72b6d25806154ec08ab2b8d87a09457d6fd406 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 8 Jan 2016 13:36:58 -0600 Subject: [PATCH 0275/2005] remove unused swap_order function --- cassandra/cython_marshal.pyx | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/cassandra/cython_marshal.pyx b/cassandra/cython_marshal.pyx index e59cfb7078..a79e9ef22a 100644 --- a/cassandra/cython_marshal.pyx +++ b/cassandra/cython_marshal.pyx @@ -26,23 +26,6 @@ from cassandra.util import is_little_endian cdef bint PY3 = six.PY3 - -cdef inline void swap_order(char *buf, Py_ssize_t size): - """ - Swap the byteorder of `buf` in-place on little-endian platforms - (reverse all the bytes). - There are functions ntohl etc, but these may be POSIX-dependent. - """ - cdef Py_ssize_t end, i - cdef char c - - if is_little_endian: - for i in range(div2(size)): - end = size - i - 1 - c = buf[i] - buf[i] = buf[end] - buf[end] = c - cdef inline void copy_maybe_swap(char *buf, void* dst, Py_ssize_t size): """ Copy to aligned destination, conditionally swapping to native byte order From 5ef6078852cce25bb55caa3f25770529040716da Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 8 Jan 2016 14:16:04 -0600 Subject: [PATCH 0276/2005] fused types for number unpacking --- cassandra/cython_marshal.pyx | 71 ++++++++++++++++-------------------- 1 file changed, 31 insertions(+), 40 deletions(-) diff --git a/cassandra/cython_marshal.pyx b/cassandra/cython_marshal.pyx index a79e9ef22a..118e0610be 100644 --- a/cassandra/cython_marshal.pyx +++ b/cassandra/cython_marshal.pyx @@ -26,76 +26,67 @@ from cassandra.util import is_little_endian cdef bint PY3 = six.PY3 -cdef inline void copy_maybe_swap(char *buf, void* dst, Py_ssize_t size): +ctypedef fused num_t: + int64_t + int32_t + int16_t + int8_t + uint64_t + uint32_t + uint16_t + uint8_t + double + float + +cdef inline num_t copy_maybe_swap(char *buf, num_t *dummy=NULL): # dummy pointer because cython wants the fused type as an arg """ Copy to aligned destination, conditionally swapping to native byte order """ + cdef num_t ret = 0 cdef Py_ssize_t start, end, i - cdef char* out = dst + cdef char* out = &ret if is_little_endian: - for i in range(size): - out[size - i - 1] = buf[i] + for i in range(sizeof(num_t)): + out[sizeof(num_t) - i - 1] = buf[i] else: - memcpy(dst, buf, size) + # TODO: use inline function as in numpy_parser + memcpy(out, buf, sizeof(num_t)) + + return ret cdef inline Py_ssize_t div2(Py_ssize_t x): return x >> 1 -# TODO: is there a way to make these from a macro? -# tried fused types, but there are compiler errors requiring strange games -# type as an arg instead of just templating based on return type -# return unpack_x[int64_t](buf_read(buf, sizeof(int64_t)), 0) - cdef inline int64_t int64_unpack(Buffer *buf) except ?0xDEAD: - cdef int64_t out = 0 - copy_maybe_swap(buf_read(buf, sizeof(out)), &out, sizeof(out)) - return out + return copy_maybe_swap[int64_t](buf_read(buf, sizeof(int64_t))) cdef inline int32_t int32_unpack(Buffer *buf) except ?0xDEAD: - cdef int32_t out = 0 - copy_maybe_swap(buf_read(buf, sizeof(out)), &out, sizeof(out)) - return out + return copy_maybe_swap[int32_t](buf_read(buf, sizeof(int32_t))) cdef inline int16_t int16_unpack(Buffer *buf) except ?0xDED: - cdef int16_t out = 0 - copy_maybe_swap(buf_read(buf, sizeof(out)), &out, sizeof(out)) - return out + return copy_maybe_swap[int16_t](buf_read(buf, sizeof(int16_t))) cdef inline int8_t int8_unpack(Buffer *buf) except ?80: - cdef int8_t out = 0 - copy_maybe_swap(buf_read(buf, sizeof(out)), &out, sizeof(out)) - return out + return copy_maybe_swap[int8_t](buf_read(buf, sizeof(int8_t))) cdef inline uint64_t uint64_unpack(Buffer *buf) except ?0xDEAD: - cdef uint64_t out = 0 - copy_maybe_swap(buf_read(buf, sizeof(out)), &out, sizeof(out)) - return out + return copy_maybe_swap[uint64_t](buf_read(buf, sizeof(uint64_t))) cdef inline uint32_t uint32_unpack(Buffer *buf) except ?0xDEAD: - cdef uint32_t out = 0 - copy_maybe_swap(buf_read(buf, sizeof(out)), &out, sizeof(out)) - return out + return copy_maybe_swap[uint32_t](buf_read(buf, sizeof(uint32_t))) cdef inline uint16_t uint16_unpack(Buffer *buf) except ?0xDEAD: - cdef uint16_t out = 0 - copy_maybe_swap(buf_read(buf, sizeof(out)), &out, sizeof(out)) - return out + return copy_maybe_swap[uint16_t](buf_read(buf, sizeof(uint16_t))) cdef inline uint8_t uint8_unpack(Buffer *buf) except ?0xff: - cdef uint8_t out = 0 - copy_maybe_swap(buf_read(buf, sizeof(out)), &out, sizeof(out)) - return out + return copy_maybe_swap[uint8_t](buf_read(buf, sizeof(uint8_t))) cdef inline double double_unpack(Buffer *buf) except ?1.74: - cdef double out = 0 - copy_maybe_swap(buf_read(buf, sizeof(out)), &out, sizeof(out)) - return out + return copy_maybe_swap[double](buf_read(buf, sizeof(double))) cdef inline float float_unpack(Buffer *buf) except ?1.74: - cdef float out = 0 - copy_maybe_swap(buf_read(buf, sizeof(out)), &out, sizeof(out)) - return out + return copy_maybe_swap[float](buf_read(buf, sizeof(float))) cdef varint_unpack(Buffer *term): """Unpack a variable-sized integer""" From d7c10689096c22c8b7e159aa2af6a95ca7d4bb0d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 8 Jan 2016 14:42:13 -0600 Subject: [PATCH 0277/2005] Remove cython marshalling pass-through functions --- cassandra/cython_marshal.pyx | 45 ++++++------------------------------ cassandra/deserializers.pyx | 30 ++++++++++++------------ cassandra/ioutils.pyx | 2 +- 3 files changed, 23 insertions(+), 54 deletions(-) diff --git a/cassandra/cython_marshal.pyx b/cassandra/cython_marshal.pyx index 118e0610be..523775cf52 100644 --- a/cassandra/cython_marshal.pyx +++ b/cassandra/cython_marshal.pyx @@ -26,6 +26,7 @@ from cassandra.util import is_little_endian cdef bint PY3 = six.PY3 + ctypedef fused num_t: int64_t int32_t @@ -38,56 +39,24 @@ ctypedef fused num_t: double float -cdef inline num_t copy_maybe_swap(char *buf, num_t *dummy=NULL): # dummy pointer because cython wants the fused type as an arg +cdef inline num_t unpack_num(Buffer *buf, num_t *dummy=NULL): # dummy pointer because cython wants the fused type as an arg """ Copy to aligned destination, conditionally swapping to native byte order """ - cdef num_t ret = 0 cdef Py_ssize_t start, end, i - cdef char* out = &ret + cdef char *src = buf_read(buf, sizeof(num_t)) + cdef num_t ret = 0 + cdef char *out = &ret if is_little_endian: for i in range(sizeof(num_t)): - out[sizeof(num_t) - i - 1] = buf[i] + out[sizeof(num_t) - i - 1] = src[i] else: # TODO: use inline function as in numpy_parser - memcpy(out, buf, sizeof(num_t)) + memcpy(out, src, sizeof(num_t)) return ret -cdef inline Py_ssize_t div2(Py_ssize_t x): - return x >> 1 - -cdef inline int64_t int64_unpack(Buffer *buf) except ?0xDEAD: - return copy_maybe_swap[int64_t](buf_read(buf, sizeof(int64_t))) - -cdef inline int32_t int32_unpack(Buffer *buf) except ?0xDEAD: - return copy_maybe_swap[int32_t](buf_read(buf, sizeof(int32_t))) - -cdef inline int16_t int16_unpack(Buffer *buf) except ?0xDED: - return copy_maybe_swap[int16_t](buf_read(buf, sizeof(int16_t))) - -cdef inline int8_t int8_unpack(Buffer *buf) except ?80: - return copy_maybe_swap[int8_t](buf_read(buf, sizeof(int8_t))) - -cdef inline uint64_t uint64_unpack(Buffer *buf) except ?0xDEAD: - return copy_maybe_swap[uint64_t](buf_read(buf, sizeof(uint64_t))) - -cdef inline uint32_t uint32_unpack(Buffer *buf) except ?0xDEAD: - return copy_maybe_swap[uint32_t](buf_read(buf, sizeof(uint32_t))) - -cdef inline uint16_t uint16_unpack(Buffer *buf) except ?0xDEAD: - return copy_maybe_swap[uint16_t](buf_read(buf, sizeof(uint16_t))) - -cdef inline uint8_t uint8_unpack(Buffer *buf) except ?0xff: - return copy_maybe_swap[uint8_t](buf_read(buf, sizeof(uint8_t))) - -cdef inline double double_unpack(Buffer *buf) except ?1.74: - return copy_maybe_swap[double](buf_read(buf, sizeof(double))) - -cdef inline float float_unpack(Buffer *buf) except ?1.74: - return copy_maybe_swap[float](buf_read(buf, sizeof(float))) - cdef varint_unpack(Buffer *term): """Unpack a variable-sized integer""" if PY3: diff --git a/cassandra/deserializers.pyx b/cassandra/deserializers.pyx index 061d340a3f..db97003883 100644 --- a/cassandra/deserializers.pyx +++ b/cassandra/deserializers.pyx @@ -53,7 +53,7 @@ cdef class DesDecimalType(Deserializer): cdef Buffer varint_buf slice_buffer(buf, &varint_buf, 4, buf.size - 4) - scale = int32_unpack(buf) + scale = unpack_num[int32_t](buf) unscaled = varint_unpack(&varint_buf) return Decimal('%de%d' % (unscaled, -scale)) @@ -66,14 +66,14 @@ cdef class DesUUIDType(Deserializer): cdef class DesBooleanType(Deserializer): cdef deserialize(self, Buffer *buf, int protocol_version): - if int8_unpack(buf): + if unpack_num[int8_t](buf): return True return False cdef class DesByteType(Deserializer): cdef deserialize(self, Buffer *buf, int protocol_version): - return int8_unpack(buf) + return unpack_num[int8_t](buf) cdef class DesAsciiType(Deserializer): @@ -85,22 +85,22 @@ cdef class DesAsciiType(Deserializer): cdef class DesFloatType(Deserializer): cdef deserialize(self, Buffer *buf, int protocol_version): - return float_unpack(buf) + return unpack_num[float](buf) cdef class DesDoubleType(Deserializer): cdef deserialize(self, Buffer *buf, int protocol_version): - return double_unpack(buf) + return unpack_num[double](buf) cdef class DesLongType(Deserializer): cdef deserialize(self, Buffer *buf, int protocol_version): - return int64_unpack(buf) + return unpack_num[int64_t](buf) cdef class DesInt32Type(Deserializer): cdef deserialize(self, Buffer *buf, int protocol_version): - return int32_unpack(buf) + return unpack_num[int32_t](buf) cdef class DesIntegerType(Deserializer): @@ -127,7 +127,7 @@ cdef class DesCounterColumnType(DesLongType): cdef class DesDateType(Deserializer): cdef deserialize(self, Buffer *buf, int protocol_version): - cdef double timestamp = int64_unpack(buf) / 1000.0 + cdef double timestamp = unpack_num[int64_t](buf) / 1000.0 return datetime_from_timestamp(timestamp) @@ -147,18 +147,18 @@ EPOCH_OFFSET_DAYS = 2 ** 31 cdef class DesSimpleDateType(Deserializer): cdef deserialize(self, Buffer *buf, int protocol_version): - days = uint32_unpack(buf) - EPOCH_OFFSET_DAYS + days = unpack_num[uint32_t](buf) - EPOCH_OFFSET_DAYS return util.Date(days) cdef class DesShortType(Deserializer): cdef deserialize(self, Buffer *buf, int protocol_version): - return int16_unpack(buf) + return unpack_num[int16_t](buf) cdef class DesTimeType(Deserializer): cdef deserialize(self, Buffer *buf, int protocol_version): - return util.Time(int64_unpack(buf)) + return util.Time(unpack_num[int64_t](buf)) cdef class DesUTF8Type(Deserializer): @@ -273,9 +273,9 @@ cdef int _unpack_len(itemlen_t idx, itemlen_t *elemlen, Buffer *buf) except -1: slice_buffer(buf, &itemlen_buf, idx, sizeof(itemlen_t)) if itemlen_t is uint16_t: - elemlen[0] = uint16_unpack(&itemlen_buf) + elemlen[0] = unpack_num[uint16_t](&itemlen_buf) else: - elemlen[0] = int32_unpack(&itemlen_buf) + elemlen[0] = unpack_num[int32_t](&itemlen_buf) return 0 @@ -359,7 +359,7 @@ cdef class DesTupleType(_DesParameterizedType): item = None if p < buf.size: slice_buffer(buf, &itemlen_buf, p, 4) - itemlen = int32_unpack(&itemlen_buf) + itemlen = unpack_num[int32_t](&itemlen_buf) p += 4 if itemlen >= 0: slice_buffer(buf, &item_buf, p, itemlen) @@ -407,7 +407,7 @@ cdef class DesCompositeType(_DesParameterizedType): res = res[:i] break - element_length = uint16_unpack(buf) + element_length = unpack_num[uint16_t](buf) slice_buffer(buf, &elem_buf, 2, element_length) deserializer = self.deserializers[i] diff --git a/cassandra/ioutils.pyx b/cassandra/ioutils.pyx index c38b311a34..720c79bbb6 100644 --- a/cassandra/ioutils.pyx +++ b/cassandra/ioutils.pyx @@ -44,4 +44,4 @@ cdef inline int32_t read_int(BytesIOReader reader) except ?0xDEAD: cdef Buffer buf buf.ptr = reader.read(4) buf.size = 4 - return int32_unpack(&buf) + return unpack_num[int32_t](&buf) From 827f4604ee8c5a5aa25b0417e41776a096201e01 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 8 Jan 2016 15:34:10 -0600 Subject: [PATCH 0278/2005] inline memcopy from numpy_parser to cython_marshal --- cassandra/cython_marshal.pyx | 12 +++++++++--- cassandra/numpy_parser.pyx | 12 +----------- 2 files changed, 10 insertions(+), 14 deletions(-) diff --git a/cassandra/cython_marshal.pyx b/cassandra/cython_marshal.pyx index 523775cf52..08028bbbb6 100644 --- a/cassandra/cython_marshal.pyx +++ b/cassandra/cython_marshal.pyx @@ -18,7 +18,6 @@ import six from libc.stdint cimport (int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t) -from libc.string cimport memcpy from cassandra.buffer cimport Buffer, buf_read, to_bytes cdef bint is_little_endian @@ -26,6 +25,14 @@ from cassandra.util import is_little_endian cdef bint PY3 = six.PY3 +cdef inline void memcopy(char *dst, char *src, Py_ssize_t size): + """ + Our own simple memcopy which can be inlined. This is useful because our data types + are only a few bytes. + """ + cdef Py_ssize_t i + for i in range(size): + dst[i] = src[i] ctypedef fused num_t: int64_t @@ -52,8 +59,7 @@ cdef inline num_t unpack_num(Buffer *buf, num_t *dummy=NULL): # dummy pointer be for i in range(sizeof(num_t)): out[sizeof(num_t) - i - 1] = src[i] else: - # TODO: use inline function as in numpy_parser - memcpy(out, src, sizeof(num_t)) + memcopy(out, src, sizeof(num_t)) return ret diff --git a/cassandra/numpy_parser.pyx b/cassandra/numpy_parser.pyx index 6702cfcce9..c0c2eb38e5 100644 --- a/cassandra/numpy_parser.pyx +++ b/cassandra/numpy_parser.pyx @@ -153,7 +153,7 @@ cdef inline int unpack_row( Py_INCREF(val) ( arr.buf_ptr)[0] = val else: - memcopy(buf.ptr, arr.buf_ptr, buf.size) + memcopy( arr.buf_ptr, buf.ptr, buf.size) # Update the pointer into the array for the next time arrays[i].buf_ptr += arr.stride @@ -161,16 +161,6 @@ cdef inline int unpack_row( return 0 -cdef inline void memcopy(char *src, char *dst, Py_ssize_t size): - """ - Our own simple memcopy which can be inlined. This is useful because our data types - are only a few bytes. - """ - cdef Py_ssize_t i - for i in range(size): - dst[i] = src[i] - - def make_native_byteorder(arr): """ Make sure all values have a native endian in the NumPy arrays. From 554211200f8afa94198e95c76e996f5b0300b0a1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 12 Jan 2016 13:50:48 -0600 Subject: [PATCH 0279/2005] doc: add discussion about LWT results --- cassandra/query.py | 3 ++ docs/index.rst | 4 ++ docs/lwt.rst | 91 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 98 insertions(+) create mode 100644 docs/lwt.rst diff --git a/cassandra/query.py b/cassandra/query.py index e66f875c56..a683248843 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -305,6 +305,9 @@ def _del_serial_consistency_level(self): Serial consistency levels may only be used against Cassandra 2.0+ and the :attr:`~.Cluster.protocol_version` must be set to 2 or higher. + See :doc:`/lwt` for a discussion on how to work with results returned from + conditional statements. + .. versionadded:: 2.0.0 """) diff --git a/docs/index.rst b/docs/index.rst index 7775dc62b4..903a750666 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -33,6 +33,9 @@ Contents :doc:`query_paging` Notes on paging large query results. +:doc:`lwt` + Working with results of conditional requests + :doc:`user_defined_types` Working with Cassandra 2.1's user-defined types. @@ -51,6 +54,7 @@ Contents upgrading performance query_paging + lwt security user_defined_types object_mapper diff --git a/docs/lwt.rst b/docs/lwt.rst new file mode 100644 index 0000000000..128aaca6ca --- /dev/null +++ b/docs/lwt.rst @@ -0,0 +1,91 @@ +Lightweight Transactions (Compare-and-set) +========================================== + +Lightweight Transactions (LWTs) are mostly pass-through CQL for the driver. However, +the server returns some specialized results indicating the outcome and optional state +preceding the transaction. + +For pertinent execution parameters, see :attr:`.Statement.serial_consistency_level`. + +This section discusses working with specialized result sets returned by the server for LWTs, +and how to work with them using the driver. + + +Specialized Results +------------------- +The result returned from a LWT request is always a single row result. It will always have +prepended a special column named ``[applied]``. How this value appears in your results depends +on the row factory in use. See below for examples. + +The value of this ``[applied]`` column is boolean value indicating whether or not the transaction was applied. +If ``True``, it is the only column in the result. If ``False``, the additional columns depend on the LWT operation being +executed: + +- When using a ``UPDATE ... IF "col" = ...`` clause, the result will contain the ``[applied]`` column, plus the existing columns + and values for any columns in the ``IF`` clause (and thus the value that caused the transaction to fail). + +- When using ``INSERT ... IF NOT EXISTS``, the result will contain the ``[applied]`` column, plus all columns and values + of the existing row that rejected the transaction. + +- ``UPDATE .. IF EXISTS`` never has additional columns, regardless of ``[applied]`` status. + +How the ``[applied]]`` column manifests depends on the row factory in use. Considering the following (initially empty) table:: + + CREATE TABLE test.t ( + k int PRIMARY KEY, + v int, + x int + ) + +... the following sections show the expected result for a number of example statements, using the three base row factories. + +named_tuple_factory (default) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The name ``[applied]`` is not a valid Python identifier, so the square brackets are actually removed +from the attribute for the resulting ``namedtuple``. The row always has a boolean column ``applied`` in position 0:: + + >>> session.execute("INSERT INTO t (k,v) VALUES (0,0) IF NOT EXISTS") + Row(applied=True) + + >>> session.execute("INSERT INTO t (k,v) VALUES (0,0) IF NOT EXISTS") + Row(applied=False, k=0, v=0, x=None) + + >>> session.execute("UPDATE t SET v = 1, x = 2 WHERE k = 0 IF v =0") + Row(applied=True) + + >>> session.execute("UPDATE t SET v = 1, x = 2 WHERE k = 0 IF v =0 AND x = 1") + Row(applied=False, v=1, x=2) + +tuple_factory +~~~~~~~~~~~~~ +This return type does not refer to names, but the boolean value ``applied`` is always present in position 0:: + + >>> session.execute("INSERT INTO t (k,v) VALUES (0,0) IF NOT EXISTS") + (True,) + + >>> session.execute("INSERT INTO t (k,v) VALUES (0,0) IF NOT EXISTS") + (False, 0, 0, None) + + >>> session.execute("UPDATE t SET v = 1, x = 2 WHERE k = 0 IF v =0") + (True,) + + >>> session.execute("UPDATE t SET v = 1, x = 2 WHERE k = 0 IF v =0 AND x = 1") + (False, 1, 2) + +dict_factory +~~~~~~~~~~~~ +The retuned ``dict`` contains the ``[applied]`` key:: + + >>> session.execute("INSERT INTO t (k,v) VALUES (0,0) IF NOT EXISTS") + {u'[applied]': True} + + >>> session.execute("INSERT INTO t (k,v) VALUES (0,0) IF NOT EXISTS") + {u'x': 2, u'[applied]': False, u'v': 1} + + >>> session.execute("UPDATE t SET v = 1, x = 2 WHERE k = 0 IF v =0") + {u'x': None, u'[applied]': False, u'k': 0, u'v': 0} + + >>> session.execute("UPDATE t SET v = 1, x = 2 WHERE k = 0 IF v =0 AND x = 1") + {u'[applied]': True} + + From 676d874577485e93d3ac4a1e6b525010daa4fbfe Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 12 Jan 2016 16:01:42 -0600 Subject: [PATCH 0280/2005] Decimal serlz accepts primitive numeric types and tuple PYTHON-468 --- cassandra/cqltypes.py | 2 +- tests/unit/test_marshalling.py | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 82cf09e7ce..6fd98ae434 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -369,7 +369,7 @@ def serialize(dec, protocol_version): try: sign, digits, exponent = dec.as_tuple() except AttributeError: - raise TypeError("Non-Decimal type received for Decimal value") + sign, digits, exponent = Decimal(dec).as_tuple() unscaled = int(''.join([str(digit) for digit in digits])) if sign: unscaled *= -1 diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index 3b98280c7c..f097a7cb11 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from cassandra.marshal import bitlength +from cassandra.protocol import MAX_SUPPORTED_VERSION try: import unittest2 as unittest @@ -139,3 +140,11 @@ def test_bitlength(self): def test_date(self): # separate test because it will deserialize as datetime self.assertEqual(DateType.from_binary(DateType.to_binary(date(2015, 11, 2), 1), 1), datetime(2015, 11, 2)) + + def test_decimal(self): + # testing implicit numeric conversion + # int, float, tuple(sign, digits, exp) + for proto_ver in range(1, MAX_SUPPORTED_VERSION + 1): + for n in (10001, 100.1, (0, (1, 0, 0, 0, 0, 1), -3)): + expected = Decimal(n) + self.assertEqual(DecimalType.from_binary(DecimalType.to_binary(n, proto_ver), proto_ver), expected) From df809b37f451cb4e20b0e41b73e25d8fc0011189 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 12 Jan 2016 17:00:18 -0600 Subject: [PATCH 0281/2005] Don't test Decimal(float) for python 2.6 --- tests/unit/test_marshalling.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index f097a7cb11..c7aea35505 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -11,6 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import sys + from cassandra.marshal import bitlength from cassandra.protocol import MAX_SUPPORTED_VERSION @@ -25,7 +27,7 @@ from uuid import UUID from cassandra.cqltypes import lookup_casstype, DecimalType, UTF8Type, DateType -from cassandra.util import OrderedMap, OrderedMapSerializedKey, sortedset, Time, Date +from cassandra.util import OrderedMapSerializedKey, sortedset, Time, Date marshalled_value_pairs = ( # binary form, type, python native type @@ -143,8 +145,18 @@ def test_date(self): def test_decimal(self): # testing implicit numeric conversion - # int, float, tuple(sign, digits, exp) + # int, tuple(sign, digits, exp), float + converted_types = (10001, (0, (1, 0, 0, 0, 0, 1), -3), 100.1) + + if sys.version_info < (2, 7): + # Decimal in Python 2.6 does not accept floats for lossless initialization + # Just verifying expected exception here + f = converted_types[-1] + self.assertIsInstance(f, float) + self.assertRaises(TypeError, DecimalType.to_binary, f, MAX_SUPPORTED_VERSION) + converted_types = converted_types[:-1] + for proto_ver in range(1, MAX_SUPPORTED_VERSION + 1): - for n in (10001, 100.1, (0, (1, 0, 0, 0, 0, 1), -3)): + for n in converted_types: expected = Decimal(n) self.assertEqual(DecimalType.from_binary(DecimalType.to_binary(n, proto_ver), proto_ver), expected) From 5d97c1e3236a54cfd4f15fb28624f9b9e88038be Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 12 Jan 2016 17:27:38 -0600 Subject: [PATCH 0282/2005] is_schema_agreed attribute for insight following DDL requests PYTHON-458 --- cassandra/cluster.py | 10 +++++++++- docs/api/cassandra/cluster.rst | 2 ++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index dfeb70263d..9d94410330 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2614,7 +2614,7 @@ def refresh_schema_and_set_result(control_conn, response_future, **kwargs): if control_conn._meta_refresh_enabled: log.debug("Refreshing schema in response to schema change. " "%s", kwargs) - control_conn._refresh_schema(response_future._connection, **kwargs) + response_future.is_schema_agreed = control_conn._refresh_schema(response_future._connection, **kwargs) else: log.debug("Skipping schema refresh in response to schema change because meta refresh is disabled; " "%s", kwargs) @@ -2643,6 +2643,13 @@ class ResponseFuture(object): :class:`.ResponseFuture`. """ + is_schema_agreed = True + """ + For DDL requests, this may be set ``False`` if the schema agreement poll after the response fails. + + Always ``True`` for non-DDL requests. + """ + session = None row_factory = None message = None @@ -2866,6 +2873,7 @@ def _set_result(self, response): elif response.kind == RESULT_KIND_SCHEMA_CHANGE: # refresh the schema before responding, but do it in another # thread instead of the event loop thread + self.is_schema_agreed = False self.session.submit( refresh_schema_and_set_result, self.session.cluster.control_connection, diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index fbe8fe3f1b..7fe878ca1c 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -131,6 +131,8 @@ .. autoattribute:: custom_payload() + .. autoattribute:: is_schema_agreed + .. autoattribute:: has_more_pages .. autoattribute:: warnings From 173f7804cd5b04c0ae0c8ee8b6daf2e095e0703b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 13 Jan 2016 07:11:28 -0600 Subject: [PATCH 0283/2005] doc typo --- docs/lwt.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/lwt.rst b/docs/lwt.rst index 128aaca6ca..2cc272f350 100644 --- a/docs/lwt.rst +++ b/docs/lwt.rst @@ -29,7 +29,7 @@ executed: - ``UPDATE .. IF EXISTS`` never has additional columns, regardless of ``[applied]`` status. -How the ``[applied]]`` column manifests depends on the row factory in use. Considering the following (initially empty) table:: +How the ``[applied]`` column manifests depends on the row factory in use. Considering the following (initially empty) table:: CREATE TABLE test.t ( k int PRIMARY KEY, From 5d5f1b1865cead8d7d5ffd42443caffabcf1b8d3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 15 Jan 2016 10:24:02 -0600 Subject: [PATCH 0284/2005] Revert to intrinsic memcpy instead of inline PYTHON-450 intrinsic was 25% faster in benchmarks --- cassandra/cython_marshal.pyx | 12 ++---------- cassandra/numpy_parser.pyx | 2 +- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/cassandra/cython_marshal.pyx b/cassandra/cython_marshal.pyx index 08028bbbb6..672c6e82c4 100644 --- a/cassandra/cython_marshal.pyx +++ b/cassandra/cython_marshal.pyx @@ -18,6 +18,7 @@ import six from libc.stdint cimport (int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t) +from libc.string cimport memcpy from cassandra.buffer cimport Buffer, buf_read, to_bytes cdef bint is_little_endian @@ -25,15 +26,6 @@ from cassandra.util import is_little_endian cdef bint PY3 = six.PY3 -cdef inline void memcopy(char *dst, char *src, Py_ssize_t size): - """ - Our own simple memcopy which can be inlined. This is useful because our data types - are only a few bytes. - """ - cdef Py_ssize_t i - for i in range(size): - dst[i] = src[i] - ctypedef fused num_t: int64_t int32_t @@ -59,7 +51,7 @@ cdef inline num_t unpack_num(Buffer *buf, num_t *dummy=NULL): # dummy pointer be for i in range(sizeof(num_t)): out[sizeof(num_t) - i - 1] = src[i] else: - memcopy(out, src, sizeof(num_t)) + memcpy(out, src, sizeof(num_t)) return ret diff --git a/cassandra/numpy_parser.pyx b/cassandra/numpy_parser.pyx index c0c2eb38e5..9ced45effa 100644 --- a/cassandra/numpy_parser.pyx +++ b/cassandra/numpy_parser.pyx @@ -153,7 +153,7 @@ cdef inline int unpack_row( Py_INCREF(val) ( arr.buf_ptr)[0] = val else: - memcopy( arr.buf_ptr, buf.ptr, buf.size) + memcpy( arr.buf_ptr, buf.ptr, buf.size) # Update the pointer into the array for the next time arrays[i].buf_ptr += arr.stride From 53b08c8906169f85611347a7eea3da145c75f602 Mon Sep 17 00:00:00 2001 From: Mike Adamson Date: Fri, 15 Jan 2016 16:59:19 +0000 Subject: [PATCH 0285/2005] PYTHON-455 Provide separate auth providers for plain text and GSSAPI --- dse/auth.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/dse/auth.py b/dse/auth.py index 469c3e4036..a842920f87 100644 --- a/dse/auth.py +++ b/dse/auth.py @@ -6,19 +6,23 @@ SASLClient = None -class DSEAuthProvider(AuthProvider): - def __init__(self, username=None, password=None, service=None, qops=None): +class DSEPlainTextAuthProvider(AuthProvider): + def __init__(self, username=None, password=None): self.username = username self.password = password - if username is None: - if SASLClient is None: - raise ImportError('The puresasl library has not been installed') - self.service = service - self.qops = qops def new_authenticator(self, host): - if self.username: - return PlainTextAuthenticator(self.username, self.password) + return PlainTextAuthenticator(self.username, self.password) + + +class DSEGSSAPIAuthProvider(AuthProvider): + def __init__(self, service=None, qops=None): + if SASLClient is None: + raise ImportError('The puresasl library has not been installed') + self.service = service + self.qops = qops + + def new_authenticator(self, host): return GSSAPIAuthenticator(host, self.service, self.qops) From 79a6a2d214a78f8a65194cf01a9cdeb2fca21f1d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 15 Jan 2016 11:40:46 -0600 Subject: [PATCH 0286/2005] Update dse auth test following #463 --- tests/integration/standard/test_authentication.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index 310e636740..cf8fbb3df5 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -17,7 +17,7 @@ from cassandra.cluster import Cluster, NoHostAvailable from cassandra.auth import PlainTextAuthProvider, SASLClient, SaslAuthProvider -from dse.auth import DSEAuthProvider +from dse.auth import DSEPlainTextAuthProvider from tests.integration import use_singledc, get_cluster, remove_cluster, PROTOCOL_VERSION from tests.integration.util import assert_quiescent_pool_state @@ -169,7 +169,7 @@ def test_host_rejected(self): class DSEAuthenticatorTests(AuthenticationTests): """ - Test SaslAuthProvider as PlainText + Test DSEPlainTextAuthProvider """ def setUp(self): @@ -179,4 +179,4 @@ def setUp(self): raise unittest.SkipTest('pure-sasl is not installed') def get_authentication_provider(self, username, password): - return DSEAuthProvider(username, password) + return DSEPlainTextAuthProvider(username, password) From 52cc79475d070a35793a501eec85fecddfe3cf3d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 15 Jan 2016 11:51:34 -0600 Subject: [PATCH 0287/2005] correct outdated comment in graph session --- dse/graph.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dse/graph.py b/dse/graph.py index aa8f26f258..e099022e37 100644 --- a/dse/graph.py +++ b/dse/graph.py @@ -149,7 +149,7 @@ def __init__(self, session, default_graph_options=None): def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, row_factory=None): """ - Executes a Gremlin query string, a GraphStatement, or a BoundGraphStatement synchronously, + Executes a Gremlin query string, a GraphStatement synchronously, and returns a GraphResultSet from this execution. """ if isinstance(query, GraphStatement): From dbe98116b3906d5d30b9e1a05e72731ac17a9e03 Mon Sep 17 00:00:00 2001 From: Mike Adamson Date: Mon, 18 Jan 2016 11:57:43 +0000 Subject: [PATCH 0288/2005] Check for kerberos library before attempting GSSAPI --- dse/auth.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/dse/auth.py b/dse/auth.py index a842920f87..48959acfce 100644 --- a/dse/auth.py +++ b/dse/auth.py @@ -1,9 +1,16 @@ from cassandra.auth import AuthProvider, Authenticator +try: + import kerberos + _have_kerberos = True +except ImportError: + _have_kerberos = False + try: from puresasl.client import SASLClient + _have_puresasl = True except ImportError: - SASLClient = None + _have_puresasl = False class DSEPlainTextAuthProvider(AuthProvider): @@ -17,8 +24,10 @@ def new_authenticator(self, host): class DSEGSSAPIAuthProvider(AuthProvider): def __init__(self, service=None, qops=None): - if SASLClient is None: + if not _have_puresasl: raise ImportError('The puresasl library has not been installed') + if not _have_kerberos: + raise ImportError('The kerberos library has not been installed') self.service = service self.qops = qops From d9580101127806a25cdc955317520beda4d8573d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 19 Jan 2016 16:18:09 -0600 Subject: [PATCH 0289/2005] mimic shapely.geometry type attributes in dse.util geo types Changed DSE util types to have similar naming and composition to the shapely library. This allows shapely types to be used interchangably when serializing for binding. It also makes it more intuitive to init one from the other. --- dse/cqltypes.py | 19 ++++++++++--------- dse/util.py | 33 +++++++++++++++++++++++++-------- 2 files changed, 35 insertions(+), 17 deletions(-) diff --git a/dse/cqltypes.py b/dse/cqltypes.py index 1aa5173c62..77ae104be1 100644 --- a/dse/cqltypes.py +++ b/dse/cqltypes.py @@ -1,4 +1,5 @@ import io +from itertools import chain from six.moves import range import struct from cassandra.cqltypes import CassandraType @@ -57,15 +58,15 @@ class LineStringType(CassandraType): @staticmethod def serialize(val, protocol_version): - num_points = len(val.points) - return LineStringType._type + struct.pack('=I' + 'dd' * num_points, num_points, *(d for point in val.points for d in point)) + num_points = len(val.coords) + return LineStringType._type + struct.pack('=I' + 'dd' * num_points, num_points, *(d for coords in val.coords for d in coords)) @staticmethod def deserialize(byts, protocol_version): is_little_endian = bool(byts[0]) point = point_le if is_little_endian else point_be - points = ((point.unpack_from(byts, offset) for offset in range(1 + 4 + 4, len(byts), point.size))) # start = endian + int type + int count - return LineString(points) + coords = ((point.unpack_from(byts, offset) for offset in range(1 + 4 + 4, len(byts), point.size))) # start = endian + int type + int count + return LineString(coords) class PolygonType(CassandraType): @@ -79,11 +80,11 @@ def serialize(val, protocol_version): buf = io.BytesIO(PolygonType._type) buf.seek(0, 2) - num_rings = len(val.rings) + num_rings = 1 + len(val.interiors) buf.write(PolygonType._platform_ring_count(num_rings)) - for ring in val.rings: - num_points = len(ring) - buf.write(struct.pack('=I' + 'dd' * num_points, num_points, *(d for point in ring for d in point))) + for ring in chain((val.exterior,), val.interiors): + num_points = len(ring.coords) + buf.write(struct.pack('=I' + 'dd' * num_points, num_points, *(d for coord in ring.coords for d in coord))) return buf.getvalue() @staticmethod @@ -105,4 +106,4 @@ def deserialize(byts, protocol_version): end = p + point_count * point.size rings.append([point.unpack_from(byts, offset) for offset in range(p, end, point.size)]) p = end - return Polygon(rings) + return Polygon(exterior=rings[0], interiors=rings[1:]) diff --git a/dse/util.py b/dse/util.py index db17773c12..bcf59ebd2c 100644 --- a/dse/util.py +++ b/dse/util.py @@ -1,3 +1,5 @@ +from itertools import chain + class Point(object): def __init__(self, x, y): @@ -27,24 +29,39 @@ def __repr__(self): class LineString(object): - def __init__(self, points): - self.points = list(points) + def __init__(self, coords): + self.coords = list(coords) + + def __str__(self): + return "LINESTRING(%s)" % ', '.join("%f %f" % (x, y) for x, y in self.coords) + + def __repr__(self): + return "%s(%r)" % (self.__class__.__name__, self.coords) + + +class _LinearRing(object): + # no validation, no implicit closing; just used for poly composition, to + # mimic that of shapely.geometry.Polygon + def __init__(self, coords): + self.coords = list(coords) def __str__(self): - return "LINESTRING(%s)" % ', '.join("%f %f" % (x, y) for x, y in self.points) + return "LINEARRING(%s)" % ', '.join("%f %f" % (x, y) for x, y in self.coords) def __repr__(self): - return "%s(%r)" % (self.__class__.__name__, self.points) + return "%s(%r)" % (self.__class__.__name__, self.coords) class Polygon(object): - def __init__(self, rings): - self.rings = list(rings) + def __init__(self, exterior, interiors=None): + self.exterior = _LinearRing(exterior) + self.interiors = [_LinearRing(e) for e in interiors] if interiors else [] def __str__(self): - rings = ("(%s)" % ', '.join("%s %s" % (x, y) for x, y in ring) for ring in self.rings) + rings = (ring.coords for ring in chain((self.exterior,), self.interiors)) + rings = ("(%s)" % ', '.join("%s %s" % (x, y) for x, y in ring) for ring in rings) return "POLYGON(%s)" % ', '.join(rings) def __repr__(self): - return "%s(%r)" % (self.__class__.__name__, self.rings) + return "%s(%r, %r)" % (self.__class__.__name__, self.exterior.coords, [ring.coords for ring in self.interiors]) From 3e635e61ffd8f94a74761022493dbc3def200a7c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 20 Jan 2016 14:59:54 -0600 Subject: [PATCH 0290/2005] dse extension is split to separate repo --- cassandra/auth.py | 2 +- dse/__init__.py | 0 dse/auth.py | 83 --------------------- dse/cluster.py | 24 ------ dse/cqltypes.py | 109 ---------------------------- dse/graph.py | 181 ---------------------------------------------- dse/marshal.py | 7 -- dse/util.py | 67 ----------------- 8 files changed, 1 insertion(+), 472 deletions(-) delete mode 100644 dse/__init__.py delete mode 100644 dse/auth.py delete mode 100644 dse/cluster.py delete mode 100644 dse/cqltypes.py delete mode 100644 dse/graph.py delete mode 100644 dse/marshal.py delete mode 100644 dse/util.py diff --git a/cassandra/auth.py b/cassandra/auth.py index 7ac064fa6f..bb12b5d418 100644 --- a/cassandra/auth.py +++ b/cassandra/auth.py @@ -142,7 +142,7 @@ class SaslAuthProvider(AuthProvider): from cassandra.cluster import Cluster from cassandra.auth import SaslAuthProvider - sasl_kwargs = {'service': 'dse', + sasl_kwargs = {'service': 'something', 'mechanism': 'GSSAPI', 'qops': 'auth'.split(',')} auth_provider = SaslAuthProvider(**sasl_kwargs) diff --git a/dse/__init__.py b/dse/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/dse/auth.py b/dse/auth.py deleted file mode 100644 index 48959acfce..0000000000 --- a/dse/auth.py +++ /dev/null @@ -1,83 +0,0 @@ -from cassandra.auth import AuthProvider, Authenticator - -try: - import kerberos - _have_kerberos = True -except ImportError: - _have_kerberos = False - -try: - from puresasl.client import SASLClient - _have_puresasl = True -except ImportError: - _have_puresasl = False - - -class DSEPlainTextAuthProvider(AuthProvider): - def __init__(self, username=None, password=None): - self.username = username - self.password = password - - def new_authenticator(self, host): - return PlainTextAuthenticator(self.username, self.password) - - -class DSEGSSAPIAuthProvider(AuthProvider): - def __init__(self, service=None, qops=None): - if not _have_puresasl: - raise ImportError('The puresasl library has not been installed') - if not _have_kerberos: - raise ImportError('The kerberos library has not been installed') - self.service = service - self.qops = qops - - def new_authenticator(self, host): - return GSSAPIAuthenticator(host, self.service, self.qops) - - -class BaseDSEAuthenticator(Authenticator): - def get_mechanism(self): - raise NotImplementedError("get_mechanism not implemented") - - def get_initial_challenge(self): - raise NotImplementedError("get_initial_challenge not implemented") - - def initial_response(self): - if self.server_authenticator_class == "com.datastax.bdp.cassandra.auth.DseAuthenticator": - return self.get_mechanism() - else: - return self.evaluate_challenge(self.get_initial_challenge()) - - -class PlainTextAuthenticator(BaseDSEAuthenticator): - def __init__(self, username, password): - self.username = username - self.password = password - - def get_mechanism(self): - return "PLAIN" - - def get_initial_challenge(self): - return "PLAIN-START" - - def evaluate_challenge(self, challenge): - if challenge == 'PLAIN-START': - return "\x00%s\x00%s" % (self.username, self.password) - raise Exception('Did not receive a valid challenge response from server') - - -class GSSAPIAuthenticator(BaseDSEAuthenticator): - def __init__(self, host, service, qops): - self.sasl = SASLClient(host, service, 'GSSAPI', authorization_id=None, callback=None, qops=qops) - - def get_mechanism(self): - return "GSSAPI" - - def get_initial_challenge(self): - return "GSSAPI-START" - - def evaluate_challenge(self, challenge): - if challenge == 'GSSAPI-START': - return self.sasl.process() - else: - return self.sasl.process(challenge) diff --git a/dse/cluster.py b/dse/cluster.py deleted file mode 100644 index 650a792c38..0000000000 --- a/dse/cluster.py +++ /dev/null @@ -1,24 +0,0 @@ -from cassandra.cluster import Cluster, Session -import dse.cqltypes # unsued here, imported to cause type registration -from dse.util import Point, Circle, LineString, Polygon - - -class Cluster(Cluster): - - def _new_session(self): - session = Session(self, self.metadata.all_hosts()) - self._session_register_user_types(session) - self.sessions.add(session) - return session - - -class Session(Session): - - def __init__(self, cluster, hosts): - super(Session, self).__init__(cluster, hosts) - - def cql_encode_str_quoted(val): - return "'%s'" % val - - for typ in (Point, Circle, LineString, Polygon): - self.encoder.mapping[typ] = cql_encode_str_quoted diff --git a/dse/cqltypes.py b/dse/cqltypes.py deleted file mode 100644 index 77ae104be1..0000000000 --- a/dse/cqltypes.py +++ /dev/null @@ -1,109 +0,0 @@ -import io -from itertools import chain -from six.moves import range -import struct -from cassandra.cqltypes import CassandraType -from cassandra.util import is_little_endian as _platform_is_le -from dse.marshal import point_be, point_le, circle_be, circle_le -from dse.util import Point, Circle, LineString, Polygon - -_endian_flag = 1 if _platform_is_le else 0 - - -class WKBGeometryType(object): - POINT = 1 - LINESTRING = 2 - POLYGON = 3 - CIRCLE = 101 # DSE custom - - -class PointType(CassandraType): - typename = 'PointType' - - _platform_point = point_le if _platform_is_le else point_be - _type = struct.pack('=BI', _endian_flag, WKBGeometryType.POINT) - - @staticmethod - def serialize(val, protocol_version): - return PointType._type + PointType._platform_point.pack(val.x, val.y) - - @staticmethod - def deserialize(byts, protocol_version): - is_little_endian = bool(byts[0]) - point = point_le if is_little_endian else point_be - return Point(*point.unpack_from(byts, 5)) # ofs = endian byte + int type - - -class CircleType(CassandraType): - typename = 'CircleType' - - _platform_circle = circle_le if _platform_is_le else circle_be - _type = struct.pack('=BI', _endian_flag, WKBGeometryType.CIRCLE) - - @staticmethod - def serialize(val, protocol_version): - return CircleType._type + CircleType._platform_circle.pack(val.x, val.y, val.r) - - @staticmethod - def deserialize(byts, protocol_version): - is_little_endian = bool(byts[0]) - circle = circle_le if is_little_endian else circle_be - return Circle(*circle.unpack_from(byts, 5)) - - -class LineStringType(CassandraType): - typename = 'LineStringType' - - _type = struct.pack('=BI', _endian_flag, WKBGeometryType.LINESTRING) - - @staticmethod - def serialize(val, protocol_version): - num_points = len(val.coords) - return LineStringType._type + struct.pack('=I' + 'dd' * num_points, num_points, *(d for coords in val.coords for d in coords)) - - @staticmethod - def deserialize(byts, protocol_version): - is_little_endian = bool(byts[0]) - point = point_le if is_little_endian else point_be - coords = ((point.unpack_from(byts, offset) for offset in range(1 + 4 + 4, len(byts), point.size))) # start = endian + int type + int count - return LineString(coords) - - -class PolygonType(CassandraType): - typename = 'PolygonType' - - _type = struct.pack('=BI', _endian_flag, WKBGeometryType.POLYGON) - _platform_ring_count = struct.Struct('=I').pack - - @staticmethod - def serialize(val, protocol_version): - buf = io.BytesIO(PolygonType._type) - buf.seek(0, 2) - - num_rings = 1 + len(val.interiors) - buf.write(PolygonType._platform_ring_count(num_rings)) - for ring in chain((val.exterior,), val.interiors): - num_points = len(ring.coords) - buf.write(struct.pack('=I' + 'dd' * num_points, num_points, *(d for coord in ring.coords for d in coord))) - return buf.getvalue() - - @staticmethod - def deserialize(byts, protocol_version): - is_little_endian = bool(byts[0]) - if is_little_endian: - int_fmt = 'dd') -point_le = Struct('ddd') -circle_le = Struct(' Date: Wed, 20 Jan 2016 15:02:07 -0600 Subject: [PATCH 0291/2005] spacing --- cassandra/query.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cassandra/query.py b/cassandra/query.py index 3a6706bbf8..a683248843 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -338,6 +338,7 @@ def __str__(self): (self.query_string, consistency)) __repr__ = __str__ + class PreparedStatement(object): """ A statement that has been prepared against at least one Cassandra node. From 070df4ae83286acb32c604b17917b0552553cbeb Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 21 Jan 2016 12:42:37 -0600 Subject: [PATCH 0292/2005] Remove DSE auth integ test after splitting repo --- .../integration/standard/test_authentication.py | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index cf8fbb3df5..82f81cd885 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -17,7 +17,6 @@ from cassandra.cluster import Cluster, NoHostAvailable from cassandra.auth import PlainTextAuthProvider, SASLClient, SaslAuthProvider -from dse.auth import DSEPlainTextAuthProvider from tests.integration import use_singledc, get_cluster, remove_cluster, PROTOCOL_VERSION from tests.integration.util import assert_quiescent_pool_state @@ -165,18 +164,3 @@ def test_host_passthrough(self): def test_host_rejected(self): sasl_kwargs = {'host': 'something'} self.assertRaises(ValueError, SaslAuthProvider, **sasl_kwargs) - - -class DSEAuthenticatorTests(AuthenticationTests): - """ - Test DSEPlainTextAuthProvider - """ - - def setUp(self): - if PROTOCOL_VERSION < 2: - raise unittest.SkipTest('Sasl authentication not available for protocol v1') - if SASLClient is None: - raise unittest.SkipTest('pure-sasl is not installed') - - def get_authentication_provider(self, username, password): - return DSEPlainTextAuthProvider(username, password) From 5e80f6c8d8782674390dff4eaec422a8ca4d4e2c Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 21 Jan 2016 14:40:35 -0600 Subject: [PATCH 0293/2005] Enable python driver to run tests against dse clusters --- tests/integration/__init__.py | 38 ++++++++++++++++++++++++++++++----- 1 file changed, 33 insertions(+), 5 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 34606f6360..de341a8036 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -28,6 +28,7 @@ try: from ccmlib.cluster import Cluster as CCMCluster + from ccmlib.dse_cluster import DseCluster from ccmlib.cluster_factory import ClusterFactory as CCMClusterFactory from ccmlib import common except ImportError as e: @@ -82,6 +83,18 @@ def _tuple_version(version_string): default_cassandra_version = '2.2.0' + +def _get_cass_version_from_dse(dse_version): + if dse_version.startswith('4.6') or dse_version.startswith('4.5'): + cass_ver = "2.0" + elif dse_version.startswith('4.7') or dse_version.startswith('4.8'): + cass_ver = "2.1" + else: + log.error("Uknown dse version found {0}, defaulting to 2.1".format(dse_version)) + cass_ver = "2.1" + + return cass_ver + if USE_CASS_EXTERNAL: if CCMClusterFactory: # see if the external instance is running in ccm @@ -105,15 +118,26 @@ def _tuple_version(version_string): CASSANDRA_DIR = os.getenv('CASSANDRA_DIR', None) -CASSANDRA_VERSION = os.getenv('CASSANDRA_VERSION', default_cassandra_version) +DSE_VERSION = os.getenv('DSE_VERSION', None) +DSE_CRED = os.getenv('DSE_CREDS', None) +if DSE_VERSION: + CASSANDRA_VERSION = _get_cass_version_from_dse(DSE_VERSION) +else: + CASSANDRA_VERSION = os.getenv('CASSANDRA_VERSION', default_cassandra_version) CCM_KWARGS = {} if CASSANDRA_DIR: log.info("Using Cassandra dir: %s", CASSANDRA_DIR) CCM_KWARGS['install_dir'] = CASSANDRA_DIR -else: - log.info('Using Cassandra version: %s', CASSANDRA_VERSION) - CCM_KWARGS['version'] = CASSANDRA_VERSION + +if DSE_VERSION: + log.info('Using DSE version: %s', DSE_VERSION) + if not CASSANDRA_DIR: + CCM_KWARGS['version'] = DSE_VERSION + if DSE_CRED: + log.info("Using DSE credentials file located at {0}".format(DSE_CRED)) + CCM_KWARGS['dse_credentials_file'] = DSE_CRED + if CASSANDRA_VERSION >= '2.2': default_protocol_version = 4 @@ -214,7 +238,11 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True): del tb log.debug("Creating new CCM cluster, {0}, with args {1}".format(cluster_name, CCM_KWARGS)) - CCM_CLUSTER = CCMCluster(path, cluster_name, **CCM_KWARGS) + if DSE_VERSION: + log.error("creating dse cluster") + CCM_CLUSTER = DseCluster(path, cluster_name, **CCM_KWARGS) + else: + CCM_CLUSTER = CCMCluster(path, cluster_name, **CCM_KWARGS) CCM_CLUSTER.set_configuration_options({'start_native_transport': True}) if CASSANDRA_VERSION >= '2.2': CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True}) From 26a7094ad8c69217db76a00aa1b66eca0508ef91 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 22 Jan 2016 16:26:27 -0600 Subject: [PATCH 0294/2005] don't bother trying to setup ccm for external cluster --- tests/integration/__init__.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index de341a8036..5ff93af1cd 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -96,12 +96,6 @@ def _get_cass_version_from_dse(dse_version): return cass_ver if USE_CASS_EXTERNAL: - if CCMClusterFactory: - # see if the external instance is running in ccm - path = common.get_default_path() - name = common.current_cluster_name(path) - CCM_CLUSTER = CCMClusterFactory.load(common.get_default_path(), name) - CCM_CLUSTER.start(wait_for_binary_proto=True, wait_other_notice=True) # Not sure what's going on, but the server version query # hangs in python3. This appears to be related to running inside of From 4d6465aec0ebdb36c39f03993d89efa85607d229 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 22 Jan 2016 16:27:59 -0600 Subject: [PATCH 0295/2005] no version attempt for external cluster testing --- tests/integration/__init__.py | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 5ff93af1cd..7ee7a38fe8 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -95,22 +95,6 @@ def _get_cass_version_from_dse(dse_version): return cass_ver -if USE_CASS_EXTERNAL: - - # Not sure what's going on, but the server version query - # hangs in python3. This appears to be related to running inside of - # nosetests, and only for this query that would run while loading the - # module. - # This is a hack to make it run with default cassandra version for PY3. - # Not happy with it, but need to move on for now. - if not six.PY3: - cass_ver, _ = get_server_versions() - default_cassandra_version = '.'.join('%d' % i for i in cass_ver) - else: - if not os.getenv('CASSANDRA_VERSION'): - log.warning("Using default C* version %s because external server cannot be queried" % default_cassandra_version) - - CASSANDRA_DIR = os.getenv('CASSANDRA_DIR', None) DSE_VERSION = os.getenv('DSE_VERSION', None) DSE_CRED = os.getenv('DSE_CREDS', None) From 8b00f59fd4f2e9d4b48bc21fc298e6cb99600a1f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 26 Jan 2016 09:51:00 -0600 Subject: [PATCH 0296/2005] discard instead of remove from scheduled tasks avoids key error when multiple of the same tasks are scheduled --- cassandra/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 127aee7caa..a518fefd39 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2591,7 +2591,7 @@ def run(self): log.debug("Not executing scheduled task due to Scheduler shutdown") return if run_at <= time.time(): - self._scheduled_tasks.remove(task) + self._scheduled_tasks.discard(task) fn, args, kwargs = task kwargs = dict(kwargs) future = self._executor.submit(fn, *args, **kwargs) From aed79a0db2418112b86594f085b1cfe002c5c026 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 26 Jan 2016 15:24:03 -0600 Subject: [PATCH 0297/2005] Fix issue with version not being set for ccm when not using external clusters --- tests/integration/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 7ee7a38fe8..9a3b9af900 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -108,6 +108,10 @@ def _get_cass_version_from_dse(dse_version): log.info("Using Cassandra dir: %s", CASSANDRA_DIR) CCM_KWARGS['install_dir'] = CASSANDRA_DIR +else: + log.info('Using Cassandra version: %s', CASSANDRA_VERSION) + CCM_KWARGS['version'] = CASSANDRA_VERSION + if DSE_VERSION: log.info('Using DSE version: %s', DSE_VERSION) if not CASSANDRA_DIR: From 15f91558de56cd5729124999a4bed4f9b54fff15 Mon Sep 17 00:00:00 2001 From: jonmcevoy Date: Thu, 28 Jan 2016 11:07:18 -0800 Subject: [PATCH 0298/2005] Fix Python3 bug with Timers and heaps Add comparison operator to allow Timers with the same end time to be pushed into heaps. It's poorly documented, but the warning can be found here: https://docs.python.org/2/library/heapq.html#priority-queue-implementation-notes "In the future with Python 3, tuple comparison breaks for (priority, task) pairs if the priorities are equal and the tasks do not have a default comparison order." --- cassandra/connection.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cassandra/connection.py b/cassandra/connection.py index b54f4516eb..31e340c5a5 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -1020,6 +1020,9 @@ def __init__(self, timeout, callback): if timeout < 0: self.callback() + def __lt__(self, other): + return self.end < other.end + def cancel(self): self.canceled = True From 5b054bf1604c4cf76ec644f3e3023f1f033d8a85 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 29 Jan 2016 13:28:26 -0600 Subject: [PATCH 0299/2005] [ci skip] Update copyright dates --- README.rst | 2 +- benchmarks/base.py | 2 +- benchmarks/callback_full_pipeline.py | 2 +- benchmarks/future_batches.py | 2 +- benchmarks/future_full_pipeline.py | 2 +- benchmarks/future_full_throttle.py | 2 +- benchmarks/sync.py | 2 +- build.yaml | 2 +- cassandra/__init__.py | 2 +- cassandra/auth.py | 2 +- cassandra/buffer.pxd | 2 +- cassandra/bytesio.pxd | 2 +- cassandra/bytesio.pyx | 2 +- cassandra/cluster.py | 2 +- cassandra/cmurmur3.c | 2 +- cassandra/concurrent.py | 2 +- cassandra/connection.py | 2 +- cassandra/cqlengine/__init__.py | 2 +- cassandra/cqlengine/columns.py | 2 +- cassandra/cqlengine/connection.py | 2 +- cassandra/cqlengine/functions.py | 2 +- cassandra/cqlengine/management.py | 2 +- cassandra/cqlengine/models.py | 2 +- cassandra/cqlengine/named.py | 2 +- cassandra/cqlengine/operators.py | 2 +- cassandra/cqlengine/query.py | 2 +- cassandra/cqlengine/statements.py | 2 +- cassandra/cqltypes.py | 2 +- cassandra/cython_marshal.pyx | 2 +- cassandra/cython_utils.pxd | 2 +- cassandra/cython_utils.pyx | 2 +- cassandra/deserializers.pxd | 2 +- cassandra/deserializers.pyx | 2 +- cassandra/encoder.py | 2 +- cassandra/io/__init__.py | 2 +- cassandra/io/asyncorereactor.py | 2 +- cassandra/io/eventletreactor.py | 2 +- cassandra/io/geventreactor.py | 2 +- cassandra/io/libevreactor.py | 2 +- cassandra/io/twistedreactor.py | 2 +- cassandra/ioutils.pyx | 2 +- cassandra/marshal.py | 2 +- cassandra/metadata.py | 2 +- cassandra/metrics.py | 2 +- cassandra/numpyFlags.h | 2 +- cassandra/numpy_parser.pyx | 2 +- cassandra/obj_parser.pyx | 2 +- cassandra/parsing.pxd | 2 +- cassandra/parsing.pyx | 2 +- cassandra/policies.py | 2 +- cassandra/pool.py | 2 +- cassandra/protocol.py | 2 +- cassandra/query.py | 2 +- cassandra/row_parser.pyx | 2 +- cassandra/tuple.pxd | 2 +- cassandra/type_codes.pxd | 2 +- cassandra/util.py | 2 +- docs/conf.py | 2 +- example_core.py | 2 +- example_mapper.py | 2 +- setup.py | 2 +- test-requirements.txt | 2 +- tests/__init__.py | 2 +- tests/integration/__init__.py | 2 +- tests/integration/cqlengine/__init__.py | 4 ++-- tests/integration/cqlengine/base.py | 2 +- tests/integration/cqlengine/columns/__init__.py | 2 +- tests/integration/cqlengine/columns/test_container_columns.py | 2 +- tests/integration/cqlengine/columns/test_counter_column.py | 2 +- tests/integration/cqlengine/columns/test_static_column.py | 2 +- tests/integration/cqlengine/columns/test_validation.py | 2 +- tests/integration/cqlengine/columns/test_value_io.py | 4 ++-- tests/integration/cqlengine/management/__init__.py | 2 +- .../cqlengine/management/test_compaction_settings.py | 2 +- tests/integration/cqlengine/management/test_management.py | 2 +- tests/integration/cqlengine/model/__init__.py | 2 +- tests/integration/cqlengine/model/test_class_construction.py | 2 +- tests/integration/cqlengine/model/test_equality_operations.py | 2 +- tests/integration/cqlengine/model/test_model.py | 2 +- tests/integration/cqlengine/model/test_model_io.py | 2 +- tests/integration/cqlengine/model/test_polymorphism.py | 2 +- tests/integration/cqlengine/model/test_udts.py | 2 +- tests/integration/cqlengine/model/test_updates.py | 2 +- tests/integration/cqlengine/model/test_value_lists.py | 2 +- tests/integration/cqlengine/operators/__init__.py | 2 +- tests/integration/cqlengine/operators/test_base_operator.py | 2 +- tests/integration/cqlengine/operators/test_where_operators.py | 2 +- tests/integration/cqlengine/query/__init__.py | 2 +- tests/integration/cqlengine/query/test_batch_query.py | 2 +- tests/integration/cqlengine/query/test_datetime_queries.py | 2 +- tests/integration/cqlengine/query/test_named.py | 2 +- tests/integration/cqlengine/query/test_queryoperators.py | 2 +- tests/integration/cqlengine/query/test_queryset.py | 2 +- tests/integration/cqlengine/query/test_updates.py | 2 +- tests/integration/cqlengine/statements/__init__.py | 2 +- .../cqlengine/statements/test_assignment_clauses.py | 2 +- .../cqlengine/statements/test_assignment_statement.py | 2 +- tests/integration/cqlengine/statements/test_base_clause.py | 2 +- tests/integration/cqlengine/statements/test_base_statement.py | 2 +- .../integration/cqlengine/statements/test_delete_statement.py | 2 +- .../integration/cqlengine/statements/test_insert_statement.py | 2 +- .../integration/cqlengine/statements/test_select_statement.py | 2 +- .../integration/cqlengine/statements/test_update_statement.py | 2 +- tests/integration/cqlengine/statements/test_where_clause.py | 2 +- tests/integration/cqlengine/test_batch_query.py | 2 +- tests/integration/cqlengine/test_consistency.py | 2 +- tests/integration/cqlengine/test_ifnotexists.py | 2 +- tests/integration/cqlengine/test_timestamp.py | 2 +- tests/integration/cqlengine/test_transaction.py | 2 +- tests/integration/cqlengine/test_ttl.py | 2 +- tests/integration/datatype_utils.py | 2 +- tests/integration/long/__init__.py | 2 +- tests/integration/long/test_consistency.py | 2 +- tests/integration/long/test_failure_types.py | 2 +- tests/integration/long/test_ipv6.py | 4 ++-- tests/integration/long/test_large_data.py | 2 +- tests/integration/long/test_loadbalancingpolicies.py | 2 +- tests/integration/long/test_schema.py | 2 +- tests/integration/long/test_ssl.py | 4 ++-- tests/integration/long/utils.py | 4 ++-- tests/integration/standard/__init__.py | 2 +- tests/integration/standard/test_authentication.py | 2 +- tests/integration/standard/test_client_warnings.py | 2 +- tests/integration/standard/test_cluster.py | 2 +- tests/integration/standard/test_concurrent.py | 2 +- tests/integration/standard/test_connection.py | 2 +- tests/integration/standard/test_control_connection.py | 4 ++-- tests/integration/standard/test_custom_payload.py | 4 ++-- tests/integration/standard/test_custom_protocol_handler.py | 2 +- tests/integration/standard/test_metadata.py | 2 +- tests/integration/standard/test_metrics.py | 2 +- tests/integration/standard/test_prepared_statements.py | 2 +- tests/integration/standard/test_query.py | 2 +- tests/integration/standard/test_query_paging.py | 2 +- tests/integration/standard/test_routing.py | 2 +- tests/integration/standard/test_row_factories.py | 2 +- tests/integration/standard/test_types.py | 2 +- tests/integration/standard/test_udts.py | 2 +- tests/integration/standard/utils.py | 2 +- tests/integration/util.py | 2 +- tests/stress_tests/test_load.py | 2 +- tests/stress_tests/test_multi_inserts.py | 2 +- tests/unit/__init__.py | 2 +- tests/unit/cython/__init__.py | 2 +- tests/unit/cython/bytesio_testhelper.pyx | 2 +- tests/unit/cython/test_bytesio.py | 2 +- tests/unit/cython/utils.py | 2 +- tests/unit/io/__init__.py | 2 +- tests/unit/io/eventlet_utils.py | 2 +- tests/unit/io/gevent_utils.py | 2 +- tests/unit/io/test_asyncorereactor.py | 2 +- tests/unit/io/test_eventletreactor.py | 4 ++-- tests/unit/io/test_geventreactor.py | 2 +- tests/unit/io/test_libevreactor.py | 4 ++-- tests/unit/io/test_libevtimer.py | 2 +- tests/unit/io/test_twistedreactor.py | 2 +- tests/unit/io/utils.py | 2 +- tests/unit/test_concurrent.py | 2 +- tests/unit/test_connection.py | 2 +- tests/unit/test_control_connection.py | 2 +- tests/unit/test_exception.py | 2 +- tests/unit/test_host_connection_pool.py | 2 +- tests/unit/test_marshalling.py | 2 +- tests/unit/test_metadata.py | 2 +- tests/unit/test_orderedmap.py | 2 +- tests/unit/test_parameter_binding.py | 2 +- tests/unit/test_policies.py | 2 +- tests/unit/test_response_future.py | 2 +- tests/unit/test_resultset.py | 2 +- tests/unit/test_sortedset.py | 2 +- tests/unit/test_time_util.py | 2 +- tests/unit/test_types.py | 2 +- tests/unit/test_util_types.py | 2 +- 173 files changed, 182 insertions(+), 182 deletions(-) diff --git a/README.rst b/README.rst index d1feb292cf..a09e1ef548 100644 --- a/README.rst +++ b/README.rst @@ -69,7 +69,7 @@ Features to be Added License ------- -Copyright 2013-2015 DataStax +Copyright 2013-2016 DataStax Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/benchmarks/base.py b/benchmarks/base.py index 65df52864f..812db42aaa 100644 --- a/benchmarks/base.py +++ b/benchmarks/base.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmarks/callback_full_pipeline.py b/benchmarks/callback_full_pipeline.py index 7baa919494..3736991b4e 100644 --- a/benchmarks/callback_full_pipeline.py +++ b/benchmarks/callback_full_pipeline.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmarks/future_batches.py b/benchmarks/future_batches.py index a816ff2686..91c250bcf9 100644 --- a/benchmarks/future_batches.py +++ b/benchmarks/future_batches.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmarks/future_full_pipeline.py b/benchmarks/future_full_pipeline.py index 5f3bcebe2f..40682e0418 100644 --- a/benchmarks/future_full_pipeline.py +++ b/benchmarks/future_full_pipeline.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmarks/future_full_throttle.py b/benchmarks/future_full_throttle.py index 1c2059f34c..27d87442bb 100644 --- a/benchmarks/future_full_throttle.py +++ b/benchmarks/future_full_throttle.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmarks/sync.py b/benchmarks/sync.py index d756a58bfe..531e41fbe8 100644 --- a/benchmarks/sync.py +++ b/benchmarks/sync.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/build.yaml b/build.yaml index 49909596ca..f7ece90ff3 100644 --- a/build.yaml +++ b/build.yaml @@ -45,4 +45,4 @@ build: echo "==========RUNNING LONG INTEGRATION TESTS==========" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true - xunit: - - "*_results.xml" \ No newline at end of file + - "*_results.xml" diff --git a/cassandra/__init__.py b/cassandra/__init__.py index cdcd34550b..20e9cd2889 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/auth.py b/cassandra/auth.py index bb12b5d418..b562728a24 100644 --- a/cassandra/auth.py +++ b/cassandra/auth.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/buffer.pxd b/cassandra/buffer.pxd index 2f40ced04a..f9976f09aa 100644 --- a/cassandra/buffer.pxd +++ b/cassandra/buffer.pxd @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/bytesio.pxd b/cassandra/bytesio.pxd index 2bcda361a0..a0bb083fac 100644 --- a/cassandra/bytesio.pxd +++ b/cassandra/bytesio.pxd @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/bytesio.pyx b/cassandra/bytesio.pyx index 68a15baf0d..3334697023 100644 --- a/cassandra/bytesio.pyx +++ b/cassandra/bytesio.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cluster.py b/cassandra/cluster.py index dfa82ec997..ae7a3ece4b 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cmurmur3.c b/cassandra/cmurmur3.c index 0ffc3ad53a..2f4cfa0fd6 100644 --- a/cassandra/cmurmur3.c +++ b/cassandra/cmurmur3.c @@ -6,7 +6,7 @@ * * Copyright (c) 2011 Austin Appleby (Murmur3 routine) * Copyright (c) 2011 Patrick Hensley (Python wrapper, packaging) - * Copyright 2013 DataStax (Minor modifications to match Cassandra's MM3 hashes) + * Copyright 2013-2016 DataStax (Minor modifications to match Cassandra's MM3 hashes) * */ diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index ffc0f9081e..75c2604db8 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/connection.py b/cassandra/connection.py index b54f4516eb..ec559b0913 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/__init__.py b/cassandra/cqlengine/__init__.py index aeec9be086..48b9da7bfb 100644 --- a/cassandra/cqlengine/__init__.py +++ b/cassandra/cqlengine/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 45851f27ba..41ec98dd15 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index cc9e63d4b3..09c7582d6e 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/functions.py b/cassandra/cqlengine/functions.py index bc210d69ce..0200f1feb1 100644 --- a/cassandra/cqlengine/functions.py +++ b/cassandra/cqlengine/functions.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index d7b6504c4d..f1b66d9527 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index f615068001..8210fc2873 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/named.py b/cassandra/cqlengine/named.py index 7bc229311d..f2f26aedc2 100644 --- a/cassandra/cqlengine/named.py +++ b/cassandra/cqlengine/named.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/operators.py b/cassandra/cqlengine/operators.py index 5ddbfed83b..09c6e1ddd8 100644 --- a/cassandra/cqlengine/operators.py +++ b/cassandra/cqlengine/operators.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index fb7af3812b..1f4a1c3b3e 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 4c2f728bc9..ff5384a828 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 6fd98ae434..6e80ed298e 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cython_marshal.pyx b/cassandra/cython_marshal.pyx index 672c6e82c4..61b6daccc1 100644 --- a/cassandra/cython_marshal.pyx +++ b/cassandra/cython_marshal.pyx @@ -1,6 +1,6 @@ # -- cython: profile=True # -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cython_utils.pxd b/cassandra/cython_utils.pxd index d2bf7d203a..4a1e71dba5 100644 --- a/cassandra/cython_utils.pxd +++ b/cassandra/cython_utils.pxd @@ -1,2 +1,2 @@ from libc.stdint cimport int64_t -cdef datetime_from_timestamp(double timestamp) \ No newline at end of file +cdef datetime_from_timestamp(double timestamp) diff --git a/cassandra/cython_utils.pyx b/cassandra/cython_utils.pyx index 1d16d47d22..ad72f68910 100644 --- a/cassandra/cython_utils.pyx +++ b/cassandra/cython_utils.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/deserializers.pxd b/cassandra/deserializers.pxd index 26b4429a25..0846417054 100644 --- a/cassandra/deserializers.pxd +++ b/cassandra/deserializers.pxd @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/deserializers.pyx b/cassandra/deserializers.pyx index 35fb403668..ac3a009caf 100644 --- a/cassandra/deserializers.pyx +++ b/cassandra/deserializers.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/encoder.py b/cassandra/encoder.py index 07c38772a4..d22540736d 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/io/__init__.py b/cassandra/io/__init__.py index e4b89e5f46..87fc3685e0 100644 --- a/cassandra/io/__init__.py +++ b/cassandra/io/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index dc9d26c6c9..2a83996d17 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index 5706f9cb25..c811e15c48 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -1,5 +1,5 @@ # Copyright 2014 Symantec Corporation -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index 1dd013aa7a..6e62a38b0e 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 88678cb635..a3e96a9a03 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/io/twistedreactor.py b/cassandra/io/twistedreactor.py index cc74bc6cca..ccd976bd2d 100644 --- a/cassandra/io/twistedreactor.py +++ b/cassandra/io/twistedreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/ioutils.pyx b/cassandra/ioutils.pyx index 720c79bbb6..c59a6a0cf4 100644 --- a/cassandra/ioutils.pyx +++ b/cassandra/ioutils.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/marshal.py b/cassandra/marshal.py index fba5b78944..5a523d6381 100644 --- a/cassandra/marshal.py +++ b/cassandra/marshal.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 19bf5feac1..1d04b4c964 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/metrics.py b/cassandra/metrics.py index 77ed8961e0..cf1f25c15d 100644 --- a/cassandra/metrics.py +++ b/cassandra/metrics.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/numpyFlags.h b/cassandra/numpyFlags.h index 6793b7a8fa..db464ed9ac 100644 --- a/cassandra/numpyFlags.h +++ b/cassandra/numpyFlags.h @@ -1 +1 @@ -#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION \ No newline at end of file +#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION diff --git a/cassandra/numpy_parser.pyx b/cassandra/numpy_parser.pyx index 9ced45effa..920a3efdd1 100644 --- a/cassandra/numpy_parser.pyx +++ b/cassandra/numpy_parser.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/obj_parser.pyx b/cassandra/obj_parser.pyx index 8aa5b3940f..2ec889ebc6 100644 --- a/cassandra/obj_parser.pyx +++ b/cassandra/obj_parser.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/parsing.pxd b/cassandra/parsing.pxd index 278c6e71a9..abfc74d12d 100644 --- a/cassandra/parsing.pxd +++ b/cassandra/parsing.pxd @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/parsing.pyx b/cassandra/parsing.pyx index c44d7f5a54..06cfe0bb8f 100644 --- a/cassandra/parsing.pyx +++ b/cassandra/parsing.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/policies.py b/cassandra/policies.py index 95c7829cdc..595717ca5a 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/pool.py b/cassandra/pool.py index a810d603c1..08cfd37271 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 8a405a1c1a..633c9734db 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/query.py b/cassandra/query.py index a683248843..01bf726979 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/row_parser.pyx b/cassandra/row_parser.pyx index fc7bce15d8..ec2b83bed7 100644 --- a/cassandra/row_parser.pyx +++ b/cassandra/row_parser.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/tuple.pxd b/cassandra/tuple.pxd index 746205e231..840cb7eb0b 100644 --- a/cassandra/tuple.pxd +++ b/cassandra/tuple.pxd @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/type_codes.pxd b/cassandra/type_codes.pxd index 90f29bc94f..b7e491f095 100644 --- a/cassandra/type_codes.pxd +++ b/cassandra/type_codes.pxd @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/util.py b/cassandra/util.py index 224ee878d0..6e22bceee6 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/docs/conf.py b/docs/conf.py index 782044abe2..8174f13a6c 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -43,7 +43,7 @@ # General information about the project. project = u'Cassandra Driver' -copyright = u'2014, DataStax' +copyright 2013-2016 DataStax' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/example_core.py b/example_core.py index c7221880b7..3235c79bad 100644 --- a/example_core.py +++ b/example_core.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/example_mapper.py b/example_mapper.py index 31a39eec6d..4d5ebca361 100755 --- a/example_mapper.py +++ b/example_mapper.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/setup.py b/setup.py index 2ece7e6db8..f5070fdb3b 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/test-requirements.txt b/test-requirements.txt index 50875b6660..4c917da6c6 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -11,4 +11,4 @@ pure-sasl twisted gevent>=1.0 eventlet -cython>=0.21 \ No newline at end of file +cython>=0.21 diff --git a/tests/__init__.py b/tests/__init__.py index 150e600c91..abfb8bf792 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 9a3b9af900..e022626b4d 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/__init__.py b/tests/integration/cqlengine/__init__.py index e89c47789b..2950984f58 100644 --- a/tests/integration/cqlengine/__init__.py +++ b/tests/integration/cqlengine/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -42,4 +42,4 @@ def setup_connection(keyspace_name): connection.setup(['127.0.0.1'], consistency=ConsistencyLevel.ONE, protocol_version=PROTOCOL_VERSION, - default_keyspace=keyspace_name) \ No newline at end of file + default_keyspace=keyspace_name) diff --git a/tests/integration/cqlengine/base.py b/tests/integration/cqlengine/base.py index 856f2bac4f..5536efb3d2 100644 --- a/tests/integration/cqlengine/base.py +++ b/tests/integration/cqlengine/base.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/columns/__init__.py b/tests/integration/cqlengine/columns/__init__.py index 55181d93fe..87fc3685e0 100644 --- a/tests/integration/cqlengine/columns/__init__.py +++ b/tests/integration/cqlengine/columns/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/columns/test_container_columns.py b/tests/integration/cqlengine/columns/test_container_columns.py index ad67419c3e..8aa5491a9a 100644 --- a/tests/integration/cqlengine/columns/test_container_columns.py +++ b/tests/integration/cqlengine/columns/test_container_columns.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/columns/test_counter_column.py b/tests/integration/cqlengine/columns/test_counter_column.py index c9579ba7b9..11e5c1bd91 100644 --- a/tests/integration/cqlengine/columns/test_counter_column.py +++ b/tests/integration/cqlengine/columns/test_counter_column.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/columns/test_static_column.py b/tests/integration/cqlengine/columns/test_static_column.py index a4864050d9..543dc84732 100644 --- a/tests/integration/cqlengine/columns/test_static_column.py +++ b/tests/integration/cqlengine/columns/test_static_column.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 8732375446..4609cca1af 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/columns/test_value_io.py b/tests/integration/cqlengine/columns/test_value_io.py index cd9f63f2af..455f80c1e4 100644 --- a/tests/integration/cqlengine/columns/test_value_io.py +++ b/tests/integration/cqlengine/columns/test_value_io.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -268,4 +268,4 @@ def setUp(self): column = columns.TinyInt pkey_val = 1 - data_val = 123 \ No newline at end of file + data_val = 123 diff --git a/tests/integration/cqlengine/management/__init__.py b/tests/integration/cqlengine/management/__init__.py index 55181d93fe..87fc3685e0 100644 --- a/tests/integration/cqlengine/management/__init__.py +++ b/tests/integration/cqlengine/management/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/management/test_compaction_settings.py b/tests/integration/cqlengine/management/test_compaction_settings.py index a6717654e1..4fe349e069 100644 --- a/tests/integration/cqlengine/management/test_compaction_settings.py +++ b/tests/integration/cqlengine/management/test_compaction_settings.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 69e335c702..93aa8dc11b 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/__init__.py b/tests/integration/cqlengine/model/__init__.py index 55181d93fe..87fc3685e0 100644 --- a/tests/integration/cqlengine/model/__init__.py +++ b/tests/integration/cqlengine/model/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/test_class_construction.py b/tests/integration/cqlengine/model/test_class_construction.py index 18cda48f44..7b333a58b2 100644 --- a/tests/integration/cqlengine/model/test_class_construction.py +++ b/tests/integration/cqlengine/model/test_class_construction.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/test_equality_operations.py b/tests/integration/cqlengine/model/test_equality_operations.py index 8ed38327ef..9391ce6a79 100644 --- a/tests/integration/cqlengine/model/test_equality_operations.py +++ b/tests/integration/cqlengine/model/test_equality_operations.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/test_model.py b/tests/integration/cqlengine/model/test_model.py index b66e284fae..a22080faa7 100644 --- a/tests/integration/cqlengine/model/test_model.py +++ b/tests/integration/cqlengine/model/test_model.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index 96c1f63b00..f513c2e191 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/test_polymorphism.py b/tests/integration/cqlengine/model/test_polymorphism.py index f8969e32e6..18feb653c5 100644 --- a/tests/integration/cqlengine/model/test_polymorphism.py +++ b/tests/integration/cqlengine/model/test_polymorphism.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index e39fc2190e..859842b10c 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/test_updates.py b/tests/integration/cqlengine/model/test_updates.py index ff8d8ecf42..242bffe12f 100644 --- a/tests/integration/cqlengine/model/test_updates.py +++ b/tests/integration/cqlengine/model/test_updates.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/test_value_lists.py b/tests/integration/cqlengine/model/test_value_lists.py index 28b5a0d808..8bd9b218f5 100644 --- a/tests/integration/cqlengine/model/test_value_lists.py +++ b/tests/integration/cqlengine/model/test_value_lists.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/operators/__init__.py b/tests/integration/cqlengine/operators/__init__.py index 33f8baac19..a6c81ff121 100644 --- a/tests/integration/cqlengine/operators/__init__.py +++ b/tests/integration/cqlengine/operators/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/operators/test_base_operator.py b/tests/integration/cqlengine/operators/test_base_operator.py index 8df0da7f02..d4521783d6 100644 --- a/tests/integration/cqlengine/operators/test_base_operator.py +++ b/tests/integration/cqlengine/operators/test_base_operator.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/operators/test_where_operators.py b/tests/integration/cqlengine/operators/test_where_operators.py index aa59f1390b..d3885c276a 100644 --- a/tests/integration/cqlengine/operators/test_where_operators.py +++ b/tests/integration/cqlengine/operators/test_where_operators.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/query/__init__.py b/tests/integration/cqlengine/query/__init__.py index 55181d93fe..87fc3685e0 100644 --- a/tests/integration/cqlengine/query/__init__.py +++ b/tests/integration/cqlengine/query/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/query/test_batch_query.py b/tests/integration/cqlengine/query/test_batch_query.py index 4a3e21318c..5aea7e9c90 100644 --- a/tests/integration/cqlengine/query/test_batch_query.py +++ b/tests/integration/cqlengine/query/test_batch_query.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/query/test_datetime_queries.py b/tests/integration/cqlengine/query/test_datetime_queries.py index ebce19f449..118d74ddcf 100644 --- a/tests/integration/cqlengine/query/test_datetime_queries.py +++ b/tests/integration/cqlengine/query/test_datetime_queries.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/query/test_named.py b/tests/integration/cqlengine/query/test_named.py index dbe664b3f3..3b51d1d216 100644 --- a/tests/integration/cqlengine/query/test_named.py +++ b/tests/integration/cqlengine/query/test_named.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/query/test_queryoperators.py b/tests/integration/cqlengine/query/test_queryoperators.py index f3154de90b..af10435ca4 100644 --- a/tests/integration/cqlengine/query/test_queryoperators.py +++ b/tests/integration/cqlengine/query/test_queryoperators.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index db7c0c2306..1dc037ce53 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/query/test_updates.py b/tests/integration/cqlengine/query/test_updates.py index 6c53901240..49e6f7f145 100644 --- a/tests/integration/cqlengine/query/test_updates.py +++ b/tests/integration/cqlengine/query/test_updates.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/__init__.py b/tests/integration/cqlengine/statements/__init__.py index 33f8baac19..a6c81ff121 100644 --- a/tests/integration/cqlengine/statements/__init__.py +++ b/tests/integration/cqlengine/statements/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/test_assignment_clauses.py b/tests/integration/cqlengine/statements/test_assignment_clauses.py index 9874fddcf0..4fc3a28374 100644 --- a/tests/integration/cqlengine/statements/test_assignment_clauses.py +++ b/tests/integration/cqlengine/statements/test_assignment_clauses.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/test_assignment_statement.py b/tests/integration/cqlengine/statements/test_assignment_statement.py index 18f08dd2e8..9d5481d98c 100644 --- a/tests/integration/cqlengine/statements/test_assignment_statement.py +++ b/tests/integration/cqlengine/statements/test_assignment_statement.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/test_base_clause.py b/tests/integration/cqlengine/statements/test_base_clause.py index 2cb1bc2f03..14d98782ea 100644 --- a/tests/integration/cqlengine/statements/test_base_clause.py +++ b/tests/integration/cqlengine/statements/test_base_clause.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/test_base_statement.py b/tests/integration/cqlengine/statements/test_base_statement.py index 2d0f30e4c0..2a9bb80fae 100644 --- a/tests/integration/cqlengine/statements/test_base_statement.py +++ b/tests/integration/cqlengine/statements/test_base_statement.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/test_delete_statement.py b/tests/integration/cqlengine/statements/test_delete_statement.py index c8283e2bc5..701991f4fb 100644 --- a/tests/integration/cqlengine/statements/test_delete_statement.py +++ b/tests/integration/cqlengine/statements/test_delete_statement.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/test_insert_statement.py b/tests/integration/cqlengine/statements/test_insert_statement.py index 75a2e71b86..dc6465e247 100644 --- a/tests/integration/cqlengine/statements/test_insert_statement.py +++ b/tests/integration/cqlengine/statements/test_insert_statement.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/test_select_statement.py b/tests/integration/cqlengine/statements/test_select_statement.py index 4f920ce042..0df2803d03 100644 --- a/tests/integration/cqlengine/statements/test_select_statement.py +++ b/tests/integration/cqlengine/statements/test_select_statement.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/test_update_statement.py b/tests/integration/cqlengine/statements/test_update_statement.py index 6c7bea6dc5..a8ff865e25 100644 --- a/tests/integration/cqlengine/statements/test_update_statement.py +++ b/tests/integration/cqlengine/statements/test_update_statement.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/test_where_clause.py b/tests/integration/cqlengine/statements/test_where_clause.py index 542cb1f9f2..e3d95d4fa4 100644 --- a/tests/integration/cqlengine/statements/test_where_clause.py +++ b/tests/integration/cqlengine/statements/test_where_clause.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/test_batch_query.py b/tests/integration/cqlengine/test_batch_query.py index a83dd2e942..b28b533a64 100644 --- a/tests/integration/cqlengine/test_batch_query.py +++ b/tests/integration/cqlengine/test_batch_query.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/test_consistency.py b/tests/integration/cqlengine/test_consistency.py index 6d8e038847..61cf3f0d50 100644 --- a/tests/integration/cqlengine/test_consistency.py +++ b/tests/integration/cqlengine/test_consistency.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/test_ifnotexists.py b/tests/integration/cqlengine/test_ifnotexists.py index 6a4387351c..3fc95d2dd0 100644 --- a/tests/integration/cqlengine/test_ifnotexists.py +++ b/tests/integration/cqlengine/test_ifnotexists.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/test_timestamp.py b/tests/integration/cqlengine/test_timestamp.py index dde4334ad6..9b3ffbeafa 100644 --- a/tests/integration/cqlengine/test_timestamp.py +++ b/tests/integration/cqlengine/test_timestamp.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/test_transaction.py b/tests/integration/cqlengine/test_transaction.py index eec6c631ef..b3d7ea0fb1 100644 --- a/tests/integration/cqlengine/test_transaction.py +++ b/tests/integration/cqlengine/test_transaction.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/test_ttl.py b/tests/integration/cqlengine/test_ttl.py index 811fdbadab..ba2c1e0935 100644 --- a/tests/integration/cqlengine/test_ttl.py +++ b/tests/integration/cqlengine/test_ttl.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/datatype_utils.py b/tests/integration/datatype_utils.py index 80087b67f1..ee9695c289 100644 --- a/tests/integration/datatype_utils.py +++ b/tests/integration/datatype_utils.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/__init__.py b/tests/integration/long/__init__.py index 794d75bff4..caa7e71667 100644 --- a/tests/integration/long/__init__.py +++ b/tests/integration/long/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/test_consistency.py b/tests/integration/long/test_consistency.py index f46fedce53..43734c7dc8 100644 --- a/tests/integration/long/test_consistency.py +++ b/tests/integration/long/test_consistency.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py index e611adca35..7654fbcca7 100644 --- a/tests/integration/long/test_failure_types.py +++ b/tests/integration/long/test_failure_types.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/test_ipv6.py b/tests/integration/long/test_ipv6.py index e17f22aa07..618050fe42 100644 --- a/tests/integration/long/test_ipv6.py +++ b/tests/integration/long/test_ipv6.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -121,4 +121,4 @@ def setUp(self): raise unittest.SkipTest("IPv6 is currently not supported under Windows") if AsyncoreConnection == -1: - raise unittest.SkipTest("Can't test asyncore with monkey patching") \ No newline at end of file + raise unittest.SkipTest("Can't test asyncore with monkey patching") diff --git a/tests/integration/long/test_large_data.py b/tests/integration/long/test_large_data.py index 4316a7b9af..acac09893d 100644 --- a/tests/integration/long/test_large_data.py +++ b/tests/integration/long/test_large_data.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index 155115d1b1..f05253175b 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/test_schema.py b/tests/integration/long/test_schema.py index 51a2c72d56..ba480a19f9 100644 --- a/tests/integration/long/test_schema.py +++ b/tests/integration/long/test_schema.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index 0df25016e6..fc6a7066cd 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -238,4 +238,4 @@ def test_cannot_connect_with_bad_client_auth(self): 'keyfile': abs_driver_keyfile, 'certfile': abs_driver_certfile}) with self.assertRaises(NoHostAvailable) as context: - cluster.connect() \ No newline at end of file + cluster.connect() diff --git a/tests/integration/long/utils.py b/tests/integration/long/utils.py index 08957e4d89..f850d6a1a8 100644 --- a/tests/integration/long/utils.py +++ b/tests/integration/long/utils.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -155,4 +155,4 @@ def wait_for_down(cluster, node, wait=True): tries += 1 time.sleep(1) - raise RuntimeError("Host {0} is not down after 100 attempts".format(IP_FORMAT.format(node))) \ No newline at end of file + raise RuntimeError("Host {0} is not down after 100 attempts".format(IP_FORMAT.format(node))) diff --git a/tests/integration/standard/__init__.py b/tests/integration/standard/__init__.py index 484ed2379b..ba5c6e26fd 100644 --- a/tests/integration/standard/__init__.py +++ b/tests/integration/standard/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index 82f81cd885..473b398a12 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_client_warnings.py b/tests/integration/standard/test_client_warnings.py index 90f224825a..a463578f4e 100644 --- a/tests/integration/standard/test_client_warnings.py +++ b/tests/integration/standard/test_client_warnings.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 9baaf9fb78..003c78a913 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_concurrent.py b/tests/integration/standard/test_concurrent.py index a0cf79fab8..60243bb654 100644 --- a/tests/integration/standard/test_concurrent.py +++ b/tests/integration/standard/test_concurrent.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index eabf2a4aa5..d8c455114d 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_control_connection.py b/tests/integration/standard/test_control_connection.py index 527232437c..07c5bac992 100644 --- a/tests/integration/standard/test_control_connection.py +++ b/tests/integration/standard/test_control_connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -75,4 +75,4 @@ def test_drop_keyspace(self): cc_id_pre_drop = id(self.cluster.control_connection._connection) self.session.execute("DROP KEYSPACE keyspacetodrop") cc_id_post_drop = id(self.cluster.control_connection._connection) - self.assertEqual(cc_id_post_drop, cc_id_pre_drop) \ No newline at end of file + self.assertEqual(cc_id_post_drop, cc_id_pre_drop) diff --git a/tests/integration/standard/test_custom_payload.py b/tests/integration/standard/test_custom_payload.py index e6a80d036e..3d4b849661 100644 --- a/tests/integration/standard/test_custom_payload.py +++ b/tests/integration/standard/test_custom_payload.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -164,4 +164,4 @@ def execute_async_validate_custom_payload(self, statement, custom_payload): response_future = self.session.execute_async(statement, custom_payload=custom_payload) response_future.result() returned_custom_payload = response_future.custom_payload - self.assertEqual(custom_payload, returned_custom_payload) \ No newline at end of file + self.assertEqual(custom_payload, returned_custom_payload) diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index 1427b3b09b..63a8380902 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 78589d5b3a..583943bcc3 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 70da6710fb..48627b3f01 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index 4a14961344..63f62f238e 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index b087031e04..4ae636063e 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_query_paging.py b/tests/integration/standard/test_query_paging.py index f9e943f93d..fdad1bc3ee 100644 --- a/tests/integration/standard/test_query_paging.py +++ b/tests/integration/standard/test_query_paging.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_routing.py b/tests/integration/standard/test_routing.py index 8172f8fc2d..1ad0a2fa5d 100644 --- a/tests/integration/standard/test_routing.py +++ b/tests/integration/standard/test_routing.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_row_factories.py b/tests/integration/standard/test_row_factories.py index e6d6184a40..a18646489b 100644 --- a/tests/integration/standard/test_row_factories.py +++ b/tests/integration/standard/test_row_factories.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index e296286cc8..acac7b817b 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index 5c6b0d663c..8d4411a17e 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/utils.py b/tests/integration/standard/utils.py index fe54f04db2..8749c5e3a2 100644 --- a/tests/integration/standard/utils.py +++ b/tests/integration/standard/utils.py @@ -50,4 +50,4 @@ def get_all_primitive_params(key): def get_primitive_datatypes(): - return ['int'] + list(PRIMITIVE_DATATYPES) \ No newline at end of file + return ['int'] + list(PRIMITIVE_DATATYPES) diff --git a/tests/integration/util.py b/tests/integration/util.py index aab617843d..ff71185a4a 100644 --- a/tests/integration/util.py +++ b/tests/integration/util.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/stress_tests/test_load.py b/tests/stress_tests/test_load.py index 6283d2e413..523ee2004b 100644 --- a/tests/stress_tests/test_load.py +++ b/tests/stress_tests/test_load.py @@ -1,4 +1,4 @@ -# Copyright 2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/stress_tests/test_multi_inserts.py b/tests/stress_tests/test_multi_inserts.py index 12b5b70e7c..e39e73e8b7 100644 --- a/tests/stress_tests/test_multi_inserts.py +++ b/tests/stress_tests/test_multi_inserts.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index e4b89e5f46..87fc3685e0 100644 --- a/tests/unit/__init__.py +++ b/tests/unit/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cython/__init__.py b/tests/unit/cython/__init__.py index e4b89e5f46..87fc3685e0 100644 --- a/tests/unit/cython/__init__.py +++ b/tests/unit/cython/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cython/bytesio_testhelper.pyx b/tests/unit/cython/bytesio_testhelper.pyx index d557c037c2..e86fdb73c2 100644 --- a/tests/unit/cython/bytesio_testhelper.pyx +++ b/tests/unit/cython/bytesio_testhelper.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cython/test_bytesio.py b/tests/unit/cython/test_bytesio.py index 2dbf1311dc..0c2ae7bfa8 100644 --- a/tests/unit/cython/test_bytesio.py +++ b/tests/unit/cython/test_bytesio.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cython/utils.py b/tests/unit/cython/utils.py index 0d9a4c2b77..6dbeb15c06 100644 --- a/tests/unit/cython/utils.py +++ b/tests/unit/cython/utils.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/__init__.py b/tests/unit/io/__init__.py index e4b89e5f46..87fc3685e0 100644 --- a/tests/unit/io/__init__.py +++ b/tests/unit/io/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/eventlet_utils.py b/tests/unit/io/eventlet_utils.py index 8aee030f27..e06d3f777f 100644 --- a/tests/unit/io/eventlet_utils.py +++ b/tests/unit/io/eventlet_utils.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/gevent_utils.py b/tests/unit/io/gevent_utils.py index 1c6e27adde..12aab8d2f1 100644 --- a/tests/unit/io/gevent_utils.py +++ b/tests/unit/io/gevent_utils.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index 16fbf2fb72..ab5bd64091 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/test_eventletreactor.py b/tests/unit/io/test_eventletreactor.py index 9f071d3bec..6aa76fa790 100644 --- a/tests/unit/io/test_eventletreactor.py +++ b/tests/unit/io/test_eventletreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -64,4 +64,4 @@ def test_timer_cancellation(self): # Assert that the cancellation was honored self.assertFalse(timer_manager._queue) self.assertFalse(timer_manager._new_timers) - self.assertFalse(callback.was_invoked()) \ No newline at end of file + self.assertFalse(callback.was_invoked()) diff --git a/tests/unit/io/test_geventreactor.py b/tests/unit/io/test_geventreactor.py index c90be37d24..cf28b93d68 100644 --- a/tests/unit/io/test_geventreactor.py +++ b/tests/unit/io/test_geventreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index 2b26343a63..309134c940 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -295,4 +295,4 @@ def test_partial_message_read(self, *args): c.handle_read(None, 0) self.assertTrue(c.connected_event.is_set()) - self.assertFalse(c.is_defunct) \ No newline at end of file + self.assertFalse(c.is_defunct) diff --git a/tests/unit/io/test_libevtimer.py b/tests/unit/io/test_libevtimer.py index 988282c2b1..4b21d03170 100644 --- a/tests/unit/io/test_libevtimer.py +++ b/tests/unit/io/test_libevtimer.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/test_twistedreactor.py b/tests/unit/io/test_twistedreactor.py index 5c6bb2d0c2..fd4181460a 100644 --- a/tests/unit/io/test_twistedreactor.py +++ b/tests/unit/io/test_twistedreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index de6a43fe17..58ed78ea26 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_concurrent.py b/tests/unit/test_concurrent.py index d120cd4a6a..a1f953220f 100644 --- a/tests/unit/test_concurrent.py +++ b/tests/unit/test_concurrent.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index 8055a5e766..b306708c9e 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index b609582231..9fac7e2f46 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_exception.py b/tests/unit/test_exception.py index f882c537db..a88b5260fa 100644 --- a/tests/unit/test_exception.py +++ b/tests/unit/test_exception.py @@ -1,4 +1,4 @@ -# Copyright 2014 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_host_connection_pool.py b/tests/unit/test_host_connection_pool.py index 79391a2a1d..fb0ca21711 100644 --- a/tests/unit/test_host_connection_pool.py +++ b/tests/unit/test_host_connection_pool.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index c7aea35505..626c38b22f 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index fc927a025a..c2e3513fd1 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_orderedmap.py b/tests/unit/test_orderedmap.py index a7fbef13cb..98f0ba9ade 100644 --- a/tests/unit/test_orderedmap.py +++ b/tests/unit/test_orderedmap.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_parameter_binding.py b/tests/unit/test_parameter_binding.py index 7da3ed939d..d48b5d9573 100644 --- a/tests/unit/test_parameter_binding.py +++ b/tests/unit/test_parameter_binding.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 74daab99bc..a9406cf790 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index 5c59259be3..8e047d91ae 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_resultset.py b/tests/unit/test_resultset.py index 2a68376767..252b6e0506 100644 --- a/tests/unit/test_resultset.py +++ b/tests/unit/test_resultset.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_sortedset.py b/tests/unit/test_sortedset.py index 99d8d365fc..a2b264710f 100644 --- a/tests/unit/test_sortedset.py +++ b/tests/unit/test_sortedset.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_time_util.py b/tests/unit/test_time_util.py index 1c3dc1b4d1..8e767acc48 100644 --- a/tests/unit/test_time_util.py +++ b/tests/unit/test_time_util.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index 18d40956c7..f6f618de66 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_util_types.py b/tests/unit/test_util_types.py index 7cf6c1f4ba..53ab9d0752 100644 --- a/tests/unit/test_util_types.py +++ b/tests/unit/test_util_types.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015 DataStax, Inc. +# Copyright 2013-2016 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. From a7c1b3d823f52f5bd543da50e22fcaa0a9eafc3f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 29 Jan 2016 13:44:03 -0600 Subject: [PATCH 0300/2005] [ci skip] add travis status image back to readme --- README.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.rst b/README.rst index a09e1ef548..5adc82e672 100644 --- a/README.rst +++ b/README.rst @@ -1,6 +1,9 @@ DataStax Python Driver for Apache Cassandra =========================================== +.. image:: https://travis-ci.org/datastax/python-driver.png?branch=master + :target: https://travis-ci.org/datastax/python-driver + A modern, `feature-rich `_ and highly-tunable Python client library for Apache Cassandra (1.2+) and DataStax Enterprise (3.1+) using exclusively Cassandra's binary protocol and Cassandra Query Language v3. The driver supports Python 2.6, 2.7, 3.3, and 3.4. From eb449ec848426c704203e2d1c39ba894d9f063e7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 29 Jan 2016 13:44:18 -0600 Subject: [PATCH 0301/2005] [ci skip] changelog update with 3.1 tickets --- CHANGELOG.rst | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 79ff1ff980..0c9451c382 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,17 @@ +3.1.0 +===== + +Features +-------- +* Pass name of server auth class to AuthProvider (PYTHON-454) +* Surface schema agreed flag for DDL statements (PYTHON-458) +* Automatically convert float and int to Decimal on serialization (PYTHON-468) + +Bug Fixes +--------- +* Bus error (alignment issues) when running cython on some ARM platforms (PYTHON-450) +* Overflow when decoding large collections (cython) (PYTHON-459) + 3.0.0 ===== November 24, 2015 From cf41e80dfd0019b5ccca0eace3df37a65b35991c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 29 Jan 2016 15:05:30 -0600 Subject: [PATCH 0302/2005] normalized Type exception when casting Decimal type PYTHON-468 --- cassandra/cqltypes.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 6e80ed298e..26d7ebc0ff 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -369,7 +369,10 @@ def serialize(dec, protocol_version): try: sign, digits, exponent = dec.as_tuple() except AttributeError: - sign, digits, exponent = Decimal(dec).as_tuple() + try: + sign, digits, exponent = Decimal(dec).as_tuple() + except Exception: + raise TypeError("Invalid type for Decimal value: %r", dec) unscaled = int(''.join([str(digit) for digit in digits])) if sign: unscaled *= -1 From a694d3b4fe2acfa430be8ad1b031abc4ae438c86 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 29 Jan 2016 15:08:15 -0600 Subject: [PATCH 0303/2005] [ci skip] 3.1a1 version update --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 20e9cd2889..abda1fbdb9 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 0, 0, 'post0') +__version_info__ = (3, 1, '0a1') __version__ = '.'.join(map(str, __version_info__)) From f0d473e3722c02636023bc0ccbda011fafb79d3c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 29 Jan 2016 15:45:24 -0600 Subject: [PATCH 0304/2005] add contributing doc --- CONTRIBUTING.rst | 26 ++++++++++++++++++++++++++ README.rst | 4 ++++ 2 files changed, 30 insertions(+) create mode 100644 CONTRIBUTING.rst diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst new file mode 100644 index 0000000000..09e307d63b --- /dev/null +++ b/CONTRIBUTING.rst @@ -0,0 +1,26 @@ +Contributing +============ + +Contributions are welcome in the form of bug reports or pull requests. + +Bug Reports +----------- +Quality bug reports are welcome at the `DataStax Python Drvier JIRA `_. + +There are plenty of `good resources `_ describing how to create +good bug reports. They will not be repeated in detail here, but in general, the bug report include where appropriate: + +* relevant software versions (Python runtime, driver version, cython version, server version) +* details for how to produce (e.g. a test script or written procedure) + * any effort to isolate the issue in reproduction is much-appreciated +* stack trace from a crashed runtime + +Pull Requests +------------- +If you're able to fix a bug yourself, you can [fork the repository](https://help.github.com/articles/fork-a-repo/) and submit a [Pull Request](https://help.github.com/articles/using-pull-requests/) with the fix. +Please include tests demonstrating the issue and fix. For examples of how to run the tests, consult the `dev README `_. + + +Contribution License Agreement +------------------------------ +To protect the community, all contributors are required to [sign the DataStax Contribution License Agreement](http://cla.datastax.com/). The process is completely electronic and should only take a few minutes. \ No newline at end of file diff --git a/README.rst b/README.rst index 5adc82e672..7068bcca6f 100644 --- a/README.rst +++ b/README.rst @@ -50,6 +50,10 @@ cqlengine (originally developed by Blake Eggleston and Jon Haddad, with contribu community) is now maintained as an integral part of this package. Refer to `documentation here `_. +Contributing +------------ +See `CONTRIBUTING.md `_. + Reporting Problems ------------------ Please report any bugs and make any feature requests on the From 6151693662ca07d178e433b8781e17bb14189a2a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 29 Jan 2016 15:48:13 -0600 Subject: [PATCH 0305/2005] fix sphinx conf file --- docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index 8174f13a6c..167c7bd89b 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -43,7 +43,7 @@ # General information about the project. project = u'Cassandra Driver' -copyright 2013-2016 DataStax' +copyright = u'2013-2016 DataStax' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the From fa431d6be4b3609fcd3d9471cbcd787c0186f646 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 29 Jan 2016 16:05:39 -0600 Subject: [PATCH 0306/2005] [ci skip] fix the contributing link --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 7068bcca6f..88d50dabdb 100644 --- a/README.rst +++ b/README.rst @@ -52,7 +52,7 @@ community) is now maintained as an integral part of this package. Refer to Contributing ------------ -See `CONTRIBUTING.md `_. +See `CONTRIBUTING.md `_. Reporting Problems ------------------ From dcca433b4ef8a153b6245bcacf636adb98a93737 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 29 Jan 2016 16:07:41 -0600 Subject: [PATCH 0307/2005] post-alpha version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index abda1fbdb9..18ba1394b8 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 1, '0a1') +__version_info__ = (3, 1, '0a1', 'post0') __version__ = '.'.join(map(str, __version_info__)) From 05361b4e7c3814455170a228e49f9fe1259564d5 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 29 Jan 2016 17:50:39 -0600 Subject: [PATCH 0308/2005] Adding mapping for dse 5.0 to Cassandra 3.0 --- tests/integration/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index e022626b4d..5b1532aa6e 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -89,6 +89,8 @@ def _get_cass_version_from_dse(dse_version): cass_ver = "2.0" elif dse_version.startswith('4.7') or dse_version.startswith('4.8'): cass_ver = "2.1" + elif dse_version.startswith('5.0'): + cass_ver = "3.0" else: log.error("Uknown dse version found {0}, defaulting to 2.1".format(dse_version)) cass_ver = "2.1" From 817b50d67f15f2e9c967c1810c66351688f07204 Mon Sep 17 00:00:00 2001 From: Reetesh Ranjan Date: Wed, 3 Feb 2016 04:03:42 +0530 Subject: [PATCH 0309/2005] Fix: crash while working with UDTs In some cases the 'value' attribute is None, and without checking we are accessing a method assuming it's a valid object. This fix helps achieving the desired functionality/outcome for such cases without breaking the intended logic. --- cassandra/cqlengine/columns.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 41ec98dd15..d9807f7424 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -790,10 +790,11 @@ def sub_columns(self): class UDTValueManager(BaseValueManager): @property def changed(self): - return self.value != self.previous_value or self.value.has_changed_fields() + return self.value != self.previous_value or (None != self.value and self.value.has_changed_fields()) def reset_previous_value(self): - self.value.reset_changed_fields() + if None != self.value: + self.value.reset_changed_fields() self.previous_value = copy(self.value) From 1abbbc0e1b722b47985a89c5da0925657eed624e Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 2 Feb 2016 17:05:48 -0600 Subject: [PATCH 0310/2005] Added code to wait for binary port on nodes become availible before using cluster --- tests/integration/__init__.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 5b1532aa6e..e48a383fcb 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -17,6 +17,7 @@ except ImportError: import unittest # noqa +import socket import os, six, time, sys, logging, traceback from threading import Event from subprocess import call @@ -143,6 +144,29 @@ def _get_cass_version_from_dse(dse_version): lessthancass30 = unittest.skipUnless(CASSANDRA_VERSION < '3.0', 'Cassandra version less then 3.0 required') +def wait_for_node_socket(node, timeout): + binary_itf = node.network_interfaces['binary'] + if not common.check_socket_listening(binary_itf, timeout=timeout): + print("Unable to connect to binary socket for node"+str(node)) + else: + print("Node is up and listening "+str(node)) + + +def check_socket_listening(itf, timeout=60): + end = time.time() + timeout + while time.time() <= end: + try: + sock = socket.socket() + sock.connect(itf) + sock.close() + return True + except socket.error: + # Try again in another 200ms                                          + time.sleep(.2) + continue + return False + + def get_cluster(): return CCM_CLUSTER @@ -243,6 +267,9 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True): if start: log.debug("Starting CCM cluster: {0}".format(cluster_name)) CCM_CLUSTER.start(wait_for_binary_proto=True, wait_other_notice=True, jvm_args=jvm_args) + #Added to wait for slow nodes to start up + for node in CCM_CLUSTER.nodes.values(): + wait_for_node_socket(node, 120) setup_keyspace(ipformat=ipformat) except Exception: log.exception("Failed to start CCM cluster; removing cluster.") @@ -561,3 +588,5 @@ def setUp(self): def tearDown(self): self.cluster.shutdown() + + From 645f7512f53a1e6b515e50c3576e906c69ec1c0f Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 2 Feb 2016 18:12:16 -0600 Subject: [PATCH 0311/2005] Updating test init to remove bad ascii characters --- tests/integration/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index e48a383fcb..43a86cdf2d 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -161,7 +161,7 @@ def check_socket_listening(itf, timeout=60): sock.close() return True except socket.error: - # Try again in another 200ms                                          + # Try again in another 200ms time.sleep(.2) continue return False From 81133a350296dd998ca05430613683ee7aba2380 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 3 Feb 2016 15:45:43 -0500 Subject: [PATCH 0312/2005] PYTHON-354 - DateType.deserialize being called with one argument vs two --- cassandra/cqlengine/columns.py | 8 +++----- .../integration/cqlengine/columns/test_validation.py | 11 +++++++++++ 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 41ec98dd15..feb93438c7 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -18,7 +18,7 @@ import six from cassandra import util -from cassandra.cqltypes import DateType, SimpleDateType +from cassandra.cqltypes import SimpleDateType from cassandra.cqlengine import ValidationError from cassandra.cqlengine.functions import get_total_seconds @@ -435,10 +435,8 @@ def to_python(self, value): return value elif isinstance(value, date): return datetime(*(value.timetuple()[:6])) - try: - return datetime.utcfromtimestamp(value) - except TypeError: - return datetime.utcfromtimestamp(DateType.deserialize(value)) + + return datetime.utcfromtimestamp(value) def to_database(self, value): value = super(DateTime, self).to_database(value) diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 4609cca1af..c23dc95e28 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -88,6 +88,17 @@ def test_datetime_none(self): dts = self.DatetimeTest.objects.filter(test_id=1).values_list('created_at') assert dts[0][0] is None + def test_datetime_invalid(self): + dt_value= 'INVALID' + with self.assertRaises(TypeError): + self.DatetimeTest.objects.create(test_id=2, created_at=dt_value) + + def test_datetime_timestamp(self): + dt_value = 1454520554 + self.DatetimeTest.objects.create(test_id=2, created_at=dt_value) + dt2 = self.DatetimeTest.objects(test_id=2).first() + assert dt2.created_at == datetime.utcfromtimestamp(dt_value) + class TestBoolDefault(BaseCassEngTestCase): class BoolDefaultValueTest(Model): From 523c045775e5c261933974bff9ae5d5017aa7026 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 4 Feb 2016 11:07:34 -0500 Subject: [PATCH 0313/2005] Add a test for updating UDT with nones --- CHANGELOG.rst | 1 + .../integration/cqlengine/model/test_udts.py | 26 ++++++++++++++++++- 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 0c9451c382..01000727bb 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -11,6 +11,7 @@ Bug Fixes --------- * Bus error (alignment issues) when running cython on some ARM platforms (PYTHON-450) * Overflow when decoding large collections (cython) (PYTHON-459) +* Crash when updating a UDT column with a None value (github #467) 3.0.0 ===== diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index 859842b10c..e5bbc065bf 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -109,12 +109,36 @@ class UserModel(Model): self.assertEqual("John", john_info.name) created_user.info = User(age=22, name="Mary") - created_user.save() + created_user.update() mary_info = UserModel.objects().first().info self.assertEqual(22, mary_info.age) self.assertEqual("Mary", mary_info.name) + def test_can_update_udts_with_nones(self): + class User(UserType): + age = columns.Integer() + name = columns.Text() + + class UserModel(Model): + id = columns.Integer(primary_key=True) + info = columns.UserDefinedType(User) + + sync_table(UserModel) + + user = User(age=42, name="John") + created_user = UserModel.create(id=0, info=user) + + john_info = UserModel.objects().first().info + self.assertEqual(42, john_info.age) + self.assertEqual("John", john_info.name) + + created_user.info = None + created_user.update() + + john_info = UserModel.objects().first().info + self.assertIsNone(john_info) + def test_can_create_same_udt_different_keyspaces(self): class User(UserType): age = columns.Integer() From 0b86785f7572ba62bc156e0eca70be226e8b618e Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 4 Feb 2016 13:01:03 -0600 Subject: [PATCH 0314/2005] Adding test case for python-458 --- tests/integration/long/test_schema.py | 41 +++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/tests/integration/long/test_schema.py b/tests/integration/long/test_schema.py index ba480a19f9..87ca5f3c8a 100644 --- a/tests/integration/long/test_schema.py +++ b/tests/integration/long/test_schema.py @@ -113,3 +113,44 @@ def test_for_schema_disagreements_same_keyspace(self): execute_until_pass(session, "INSERT INTO test.cf (key, value) VALUES ({0}, {0})".format(j)) execute_until_pass(session, "DROP KEYSPACE test") + + def test_for_schema_disagreement_attribute(self): + """ + Tests to ensure that schema disagreement is properly surfaced on the response future. + + Creates and destroys keyspaces/tables with various schema agreement timeouts set. + First part runs cql create/drop cmds with schema agreement set in such away were it will be impossible for agreement to occur during timeout. + It then validates that the correct value is set on the result. + Second part ensures that when schema agreement occurs, that the result set reflects that appropriately + + @since 3.1.0 + @jira_ticket PYTHON-458 + @expected_result is_schema_agreed is set appropriately on response thefuture + + @test_category schema + """ + # This should yield a schema disagreement + cluster = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=0.001) + session = cluster.connect() + + rs = session.execute("CREATE KEYSPACE test_schema_disagreement WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}") + self.check_and_wait_for_agreement(session, rs, False) + rs = session.execute("CREATE TABLE test_schema_disagreement.cf (key int PRIMARY KEY, value int)") + self.check_and_wait_for_agreement(session, rs, False) + rs = session.execute("DROP KEYSPACE test_schema_disagreement") + self.check_and_wait_for_agreement(session, rs, False) + + # These should have schema agreement + cluster = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=100) + session = cluster.connect() + rs = session.execute("CREATE KEYSPACE test_schema_disagreement WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}") + self.check_and_wait_for_agreement(session, rs, True) + rs = session.execute("CREATE TABLE test_schema_disagreement.cf (key int PRIMARY KEY, value int)") + self.check_and_wait_for_agreement(session, rs, True) + rs = session.execute("DROP KEYSPACE test_schema_disagreement") + self.check_and_wait_for_agreement(session, rs, True) + + def check_and_wait_for_agreement(self, session, rs, exepected): + self.assertEqual(rs.response_future.is_schema_agreed, exepected) + if not rs.response_future.is_schema_agreed: + session.cluster.control_connection.wait_for_schema_agreement(wait_time=1000) From 6cfd68fb8e6cf9796eea66102ea0ea14fb25032e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 4 Feb 2016 14:28:24 -0600 Subject: [PATCH 0315/2005] precheck compiler during setup PYTHON-471 avoids setup failure if we try to build Cython as a setup_requires and there are no build tools --- setup.py | 35 +++++++++++++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index f5070fdb3b..969f836e0f 100644 --- a/setup.py +++ b/setup.py @@ -171,7 +171,6 @@ def __init__(self, ext): ================================================================================= """ - is_windows = os.name == 'nt' is_pypy = "PyPy" in sys.version @@ -323,6 +322,34 @@ def _setup_extensions(self): sys.stderr.write("Failed to cythonize one or more modules. These will not be compiled as extensions (optional).\n") +def precheck_compiler(): + """ + Try to verify build tools + """ + try: + from distutils.ccompiler import new_compiler + from distutils.sysconfig import customize_compiler + from distutils.dist import Distribution + from distutils.spawn import find_executable + + # base build_ext just to emulate compiler option setup + be = build_ext(Distribution()) + be.initialize_options() + be.finalize_options() + + compiler = new_compiler(compiler=be.compiler) + customize_compiler(compiler) + if compiler.compiler_type in ('unix', 'cygwin'): + return all(find_executable(compiler.executables[exe][0]) for exe in ('compiler_so', 'linker_so')) + elif compiler.compiler_type == 'nt': + return all(find_executable(getattr(compiler, exe)) for exe in ('cc', 'linker')) + except Exception: + raise + # if we are unable to positively id the compiler type, or one of these assumptions fails, + # just proceed as we would have without the check + return True + + def run_setup(extensions): kw = {'cmdclass': {'doc': DocCommand}} @@ -335,7 +362,11 @@ def run_setup(extensions): kw['cmdclass']['build_ext'] = build_extensions kw['ext_modules'] = [Extension('DUMMY', [])] # dummy extension makes sure build_ext is called for install - if try_cython: + # precheck compiler before adding to setup_requires + # we don't actually negate try_cython because: + # 1.) build_ext eats errors at compile time, letting the install complete while producing useful feedback + # 2.) there could be a case where the python environment has cython installed but the system doesn't have build tools + if try_cython and precheck_compiler(): kw['setup_requires'] = ['Cython>=0.20'] dependencies = ['six >=1.6'] From e29c757a9da445ec8dacfea28e3677e5be1ec48b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 4 Feb 2016 15:35:55 -0600 Subject: [PATCH 0316/2005] Check for Python include before requiring Cython PYTHON-471 also adds build pre-check escape hatch --- setup.py | 47 +++++++++++++++++++++++++++++++++++------------ 1 file changed, 35 insertions(+), 12 deletions(-) diff --git a/setup.py b/setup.py index 969f836e0f..a6fcc7b4b2 100644 --- a/setup.py +++ b/setup.py @@ -322,29 +322,49 @@ def _setup_extensions(self): sys.stderr.write("Failed to cythonize one or more modules. These will not be compiled as extensions (optional).\n") -def precheck_compiler(): +def pre_build_check(): """ Try to verify build tools """ + if os.environ.get('CASS_DRIVER_NO_PRE_BUILD_CHECK'): + return True + try: from distutils.ccompiler import new_compiler from distutils.sysconfig import customize_compiler from distutils.dist import Distribution - from distutils.spawn import find_executable # base build_ext just to emulate compiler option setup be = build_ext(Distribution()) be.initialize_options() be.finalize_options() + # First, make sure we have a Python include directory + have_python_include = any(os.path.isfile(os.path.join(p, 'Python.h')) for p in be.include_dirs) + if not have_python_include: + sys.stderr.write("Did not find 'Python.h' in %s.\n" % (be.include_dirs,)) + return False + compiler = new_compiler(compiler=be.compiler) customize_compiler(compiler) + + executables = [] if compiler.compiler_type in ('unix', 'cygwin'): - return all(find_executable(compiler.executables[exe][0]) for exe in ('compiler_so', 'linker_so')) + executables = [compiler.executables[exe][0] for exe in ('compiler_so', 'linker_so')] elif compiler.compiler_type == 'nt': - return all(find_executable(getattr(compiler, exe)) for exe in ('cc', 'linker')) - except Exception: - raise + executables = [getattr(compiler, exe) for exe in ('cc', 'linker')] + + if executables: + from distutils.spawn import find_executable + for exe in executables: + if not find_executable(exe): + sys.stderr.write("Failed to find %s for compiler type %s.\n" % (exe, compiler.compiler_type)) + return False + + except Exception as exc: + sys.stderr.write('%s\n' % str(exc)) + sys.stderr.write("Failed pre-build check. Attempting anyway.\n") + # if we are unable to positively id the compiler type, or one of these assumptions fails, # just proceed as we would have without the check return True @@ -362,12 +382,15 @@ def run_setup(extensions): kw['cmdclass']['build_ext'] = build_extensions kw['ext_modules'] = [Extension('DUMMY', [])] # dummy extension makes sure build_ext is called for install - # precheck compiler before adding to setup_requires - # we don't actually negate try_cython because: - # 1.) build_ext eats errors at compile time, letting the install complete while producing useful feedback - # 2.) there could be a case where the python environment has cython installed but the system doesn't have build tools - if try_cython and precheck_compiler(): - kw['setup_requires'] = ['Cython>=0.20'] + if try_cython: + # precheck compiler before adding to setup_requires + # we don't actually negate try_cython because: + # 1.) build_ext eats errors at compile time, letting the install complete while producing useful feedback + # 2.) there could be a case where the python environment has cython installed but the system doesn't have build tools + if pre_build_check(): + kw['setup_requires'] = ['Cython>=0.20'] + else: + sys.stderr.write("Bypassing Cython setup requirement\n") dependencies = ['six >=1.6'] From 4fd4daa8e67d5a70573bc6fc40416f4c606afe9b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 4 Feb 2016 16:21:08 -0600 Subject: [PATCH 0317/2005] test: don't print node status to stdout --- tests/integration/__init__.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 43a86cdf2d..244c054c1a 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -17,8 +17,12 @@ except ImportError: import unittest # noqa +import logging +import os import socket -import os, six, time, sys, logging, traceback +import sys +import time +import traceback from threading import Event from subprocess import call from itertools import groupby @@ -147,9 +151,9 @@ def _get_cass_version_from_dse(dse_version): def wait_for_node_socket(node, timeout): binary_itf = node.network_interfaces['binary'] if not common.check_socket_listening(binary_itf, timeout=timeout): - print("Unable to connect to binary socket for node"+str(node)) + log.warn("Unable to connect to binary socket for node " + node.name) else: - print("Node is up and listening "+str(node)) + log.debug("Node %s is up and listening " % (node.name,)) def check_socket_listening(itf, timeout=60): @@ -267,13 +271,13 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True): if start: log.debug("Starting CCM cluster: {0}".format(cluster_name)) CCM_CLUSTER.start(wait_for_binary_proto=True, wait_other_notice=True, jvm_args=jvm_args) - #Added to wait for slow nodes to start up + # Added to wait for slow nodes to start up for node in CCM_CLUSTER.nodes.values(): wait_for_node_socket(node, 120) setup_keyspace(ipformat=ipformat) except Exception: log.exception("Failed to start CCM cluster; removing cluster.") - + if os.name == "nt": if CCM_CLUSTER: for node in CCM_CLUSTER.nodes.itervalues(): @@ -588,5 +592,3 @@ def setUp(self): def tearDown(self): self.cluster.shutdown() - - From 29d4f8b76053cd15961c1606d677a8c07d7b5806 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 4 Feb 2016 16:21:41 -0600 Subject: [PATCH 0318/2005] 3.1.0a2 release version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 18ba1394b8..401279d422 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 1, '0a1', 'post0') +__version_info__ = (3, 1, '0a2') __version__ = '.'.join(map(str, __version_info__)) From e805cd4707d591e418a72865bde5d1bac101e09d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 4 Feb 2016 16:26:07 -0600 Subject: [PATCH 0319/2005] post-release version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 401279d422..2123eec03b 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 1, '0a2') +__version_info__ = (3, 1, '0a2', 'post0') __version__ = '.'.join(map(str, __version_info__)) From eade89fed4902f19cd35a804a17c80ef4167944d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 8 Feb 2016 16:08:12 -0600 Subject: [PATCH 0320/2005] cqle: more pythonic handling of None in UDTValueMgr --- cassandra/cqlengine/columns.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 7b4be91b66..722a089255 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -788,10 +788,10 @@ def sub_columns(self): class UDTValueManager(BaseValueManager): @property def changed(self): - return self.value != self.previous_value or (None != self.value and self.value.has_changed_fields()) + return self.value != self.previous_value or (self.value is not None and self.value.has_changed_fields()) def reset_previous_value(self): - if None != self.value: + if self.value is not None: self.value.reset_changed_fields() self.previous_value = copy(self.value) From 9a9455c14c8b106df3653d80d5fbfa03a043102c Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 8 Feb 2016 16:48:49 -0600 Subject: [PATCH 0321/2005] Adding workload support to test harness --- tests/integration/__init__.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 43a86cdf2d..6482653192 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -175,16 +175,16 @@ def get_node(node_id): return CCM_CLUSTER.nodes['node%s' % node_id] -def use_multidc(dc_list): - use_cluster(MULTIDC_CLUSTER_NAME, dc_list, start=True) +def use_multidc(dc_list, workloads=[]): + use_cluster(MULTIDC_CLUSTER_NAME, dc_list, start=True, workloads=workloads) -def use_singledc(start=True): - use_cluster(CLUSTER_NAME, [3], start=start) +def use_singledc(start=True, workloads=[]): + use_cluster(CLUSTER_NAME, [3], start=start, workloads=workloads) -def use_single_node(start=True): - use_cluster(SINGLE_NODE_CLUSTER_NAME, [1], start=start) +def use_single_node(start=True, workloads=[]): + use_cluster(SINGLE_NODE_CLUSTER_NAME, [1], start=start, workloads=workloads) def remove_cluster(): @@ -219,7 +219,7 @@ def is_current_cluster(cluster_name, node_counts): return False -def use_cluster(cluster_name, nodes, ipformat=None, start=True): +def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[]): global CCM_CLUSTER if USE_CASS_EXTERNAL: if CCM_CLUSTER: @@ -265,6 +265,9 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True): jvm_args = [" -Dcassandra.custom_query_handler_class=org.apache.cassandra.cql3.CustomPayloadMirroringQueryHandler"] if start: + if(len(workloads) > 0): + for node in CCM_CLUSTER.nodes.values(): + node.set_workloads(workloads) log.debug("Starting CCM cluster: {0}".format(cluster_name)) CCM_CLUSTER.start(wait_for_binary_proto=True, wait_other_notice=True, jvm_args=jvm_args) #Added to wait for slow nodes to start up From 0262b2de7e4663934106e9184d966e3b3103074e Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 8 Feb 2016 17:41:03 -0600 Subject: [PATCH 0322/2005] Updating workload call to reflect ccm changes --- tests/integration/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 6482653192..81028f57a9 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -267,7 +267,8 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[]): if start: if(len(workloads) > 0): for node in CCM_CLUSTER.nodes.values(): - node.set_workloads(workloads) + for workload in workloads: + node.set_workload(workload) log.debug("Starting CCM cluster: {0}".format(cluster_name)) CCM_CLUSTER.start(wait_for_binary_proto=True, wait_other_notice=True, jvm_args=jvm_args) #Added to wait for slow nodes to start up From 85f32ec5328404b96ccc377082d483fcd6dc900d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 9 Feb 2016 09:54:37 -0600 Subject: [PATCH 0323/2005] cqle: guard against formatting an invalid keyspace in a query PYTHON-395 Resolves AttributeError when trying to query without a keyspace name, instead raising a CQLEngineException. --- cassandra/cqlengine/models.py | 5 +++- .../integration/cqlengine/model/test_model.py | 23 ++++++++++++++++++- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 8210fc2873..3eeef449b2 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -480,7 +480,10 @@ def column_family_name(cls, include_keyspace=True): """ cf_name = protect_name(cls._raw_column_family_name()) if include_keyspace: - return '{0}.{1}'.format(protect_name(cls._get_keyspace()), cf_name) + keyspace = cls._get_keyspace() + if not keyspace: + raise CQLEngineException("Model keyspace is not set and no default is available. Set model keyspace or setup connection before attempting to generate a query.") + return '{0}.{1}'.format(protect_name(keyspace), cf_name) return cf_name diff --git a/tests/integration/cqlengine/model/test_model.py b/tests/integration/cqlengine/model/test_model.py index a22080faa7..c552977e2c 100644 --- a/tests/integration/cqlengine/model/test_model.py +++ b/tests/integration/cqlengine/model/test_model.py @@ -16,7 +16,9 @@ except ImportError: import unittest # noqa -from cassandra.cqlengine import columns +from mock import patch + +from cassandra.cqlengine import columns, CQLEngineException from cassandra.cqlengine.management import sync_table, drop_table, create_keyspace_simple, drop_keyspace from cassandra.cqlengine.models import Model, ModelDefinitionException @@ -105,6 +107,25 @@ class table(Model): drop_keyspace('keyspace') + def test_column_family(self): + class TestModel(Model): + k = columns.Integer(primary_key=True) + + from cassandra.cqlengine import models + # no model keyspace uses default + self.assertEqual(TestModel.column_family_name(), "%s.test_model" % (models.DEFAULT_KEYSPACE,)) + + # model keyspace overrides + TestModel.__keyspace__ = "my_test_keyspace" + self.assertEqual(TestModel.column_family_name(), "%s.test_model" % (TestModel.__keyspace__,)) + + # neither set should raise CQLEngineException before failing or formatting an invalid name + del TestModel.__keyspace__ + with patch('cassandra.cqlengine.models.DEFAULT_KEYSPACE', None): + self.assertRaises(CQLEngineException, TestModel.column_family_name) + # .. but we can still get the bare CF name + self.assertEqual(TestModel.column_family_name(include_keyspace=False), "test_model") + class BuiltInAttributeConflictTest(unittest.TestCase): """tests Model definitions that conflict with built-in attributes/methods""" From bc0d314aad61fd252d4b85666fad5ea34706404f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 9 Feb 2016 10:38:22 -0600 Subject: [PATCH 0324/2005] cqle: remove unused operator classes simplifying module for PYTHON-476 --- cassandra/cqlengine/operators.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/cassandra/cqlengine/operators.py b/cassandra/cqlengine/operators.py index 09c6e1ddd8..8e67aabacc 100644 --- a/cassandra/cqlengine/operators.py +++ b/cassandra/cqlengine/operators.py @@ -85,15 +85,3 @@ class LessThanOperator(BaseWhereOperator): class LessThanOrEqualOperator(BaseWhereOperator): symbol = "LTE" cql_symbol = '<=' - - -class BaseAssignmentOperator(BaseQueryOperator): - """ base operator used for insert and delete statements """ - - -class AssignmentOperator(BaseAssignmentOperator): - cql_symbol = "=" - - -class AddSymbol(BaseAssignmentOperator): - cql_symbol = "+" From 631369b00e6bfab590b1a0a11fd898bd482e2e96 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 9 Feb 2016 11:58:52 -0600 Subject: [PATCH 0325/2005] cqle: remove opmap get_operator race opmap is built using meta class during module import Fixes issue where mapping is missing if first queries are concurrent. PYTHON-476 --- cassandra/cqlengine/operators.py | 28 +++++++++---------- .../cqlengine/operators/test_base_operator.py | 27 ------------------ 2 files changed, 13 insertions(+), 42 deletions(-) delete mode 100644 tests/integration/cqlengine/operators/test_base_operator.py diff --git a/cassandra/cqlengine/operators.py b/cassandra/cqlengine/operators.py index 8e67aabacc..655e810cd5 100644 --- a/cassandra/cqlengine/operators.py +++ b/cassandra/cqlengine/operators.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import six from cassandra.cqlengine import UnicodeMixin @@ -32,31 +33,28 @@ def __unicode__(self): raise QueryOperatorException("cql symbol is None") return self.cql_symbol - @classmethod - def get_operator(cls, symbol): - if cls == BaseQueryOperator: - raise QueryOperatorException("get_operator can only be called from a BaseQueryOperator subclass") + +class OpMapMeta(type): + + def __init__(cls, name, bases, dct): if not hasattr(cls, 'opmap'): cls.opmap = {} + else: + cls.opmap[cls.symbol] = cls + super(OpMapMeta, cls).__init__(name, bases, dct) - def _recurse(klass): - if klass.symbol: - cls.opmap[klass.symbol.upper()] = klass - for subklass in klass.__subclasses__(): - _recurse(subklass) - pass - _recurse(cls) +@six.add_metaclass(OpMapMeta) +class BaseWhereOperator(BaseQueryOperator): + """ base operator used for where clauses """ + @classmethod + def get_operator(cls, symbol): try: return cls.opmap[symbol.upper()] except KeyError: raise QueryOperatorException("{0} doesn't map to a QueryOperator".format(symbol)) -class BaseWhereOperator(BaseQueryOperator): - """ base operator used for where clauses """ - - class EqualsOperator(BaseWhereOperator): symbol = 'EQ' cql_symbol = '=' diff --git a/tests/integration/cqlengine/operators/test_base_operator.py b/tests/integration/cqlengine/operators/test_base_operator.py deleted file mode 100644 index d4521783d6..0000000000 --- a/tests/integration/cqlengine/operators/test_base_operator.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2013-2016 DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa - -from cassandra.cqlengine.operators import BaseQueryOperator, QueryOperatorException - - -class BaseOperatorTest(unittest.TestCase): - - def test_get_operator_cannot_be_called_from_base_class(self): - with self.assertRaises(QueryOperatorException): - BaseQueryOperator.get_operator('*') From 7adce710e19bc2703ca45d689d780b67f6ca50e1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 9 Feb 2016 12:00:30 -0600 Subject: [PATCH 0326/2005] remove __author__ in empty package files --- tests/integration/cqlengine/operators/__init__.py | 2 -- tests/integration/cqlengine/statements/__init__.py | 2 -- 2 files changed, 4 deletions(-) diff --git a/tests/integration/cqlengine/operators/__init__.py b/tests/integration/cqlengine/operators/__init__.py index a6c81ff121..1c7af46e71 100644 --- a/tests/integration/cqlengine/operators/__init__.py +++ b/tests/integration/cqlengine/operators/__init__.py @@ -11,5 +11,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -__author__ = 'bdeggleston' diff --git a/tests/integration/cqlengine/statements/__init__.py b/tests/integration/cqlengine/statements/__init__.py index a6c81ff121..1c7af46e71 100644 --- a/tests/integration/cqlengine/statements/__init__.py +++ b/tests/integration/cqlengine/statements/__init__.py @@ -11,5 +11,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -__author__ = 'bdeggleston' From 84c3e08cf2bdbecda72c93c37949c073d76453e4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 9 Feb 2016 14:37:53 -0600 Subject: [PATCH 0327/2005] register UDT with default keyspace if none specified PYTHON-347 --- cassandra/cqlengine/connection.py | 12 +++++------- tests/integration/cqlengine/model/test_udts.py | 18 ++++++++++++++++++ 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 09c7582d6e..9a74f8cfc8 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from collections import namedtuple +from collections import namedtuple, defaultdict import logging import six @@ -37,7 +37,7 @@ # Because type models may be registered before a connection is present, # and because sessions may be replaced, we must register UDTs here, in order # to have them registered when a new session is established. -udt_by_keyspace = {} +udt_by_keyspace = defaultdict(dict) class UndefinedKeyspaceException(CQLEngineException): @@ -191,10 +191,7 @@ def handle_lazy_connect(): def register_udt(keyspace, type_name, klass): - try: - udt_by_keyspace[keyspace][type_name] = klass - except KeyError: - udt_by_keyspace[keyspace] = {type_name: klass} + udt_by_keyspace[keyspace][type_name] = klass global cluster if cluster: @@ -205,9 +202,10 @@ def register_udt(keyspace, type_name, klass): def _register_known_types(cluster): + from cassandra.cqlengine import models for ks_name, name_type_map in udt_by_keyspace.items(): for type_name, klass in name_type_map.items(): try: - cluster.register_user_type(ks_name, type_name, klass) + cluster.register_user_type(ks_name or models.DEFAULT_KEYSPACE, type_name, klass) except UserTypeDoesNotExist: pass # new types are covered in management sync functions diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index e5bbc065bf..28d2091ba2 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -18,6 +18,7 @@ from datetime import datetime, date, time from decimal import Decimal +from mock import Mock from uuid import UUID, uuid4 from cassandra.cqlengine.models import Model @@ -426,4 +427,21 @@ class UserModelText(Model): UserModelText.create(id=unicode_name, info=user_template_ascii) UserModelText.create(id=unicode_name, info=user_template_unicode) + def test_register_default_keyspace(self): + class User(UserType): + age = columns.Integer() + name = columns.Text() + + from cassandra.cqlengine import models + from cassandra.cqlengine import connection + + # None emulating no model and no default keyspace before connecting + connection.udt_by_keyspace.clear() + User.register_for_keyspace(None) + self.assertEqual(len(connection.udt_by_keyspace), 1) + self.assertIn(None, connection.udt_by_keyspace) + # register should be with default keyspace, not None + cluster = Mock() + connection._register_known_types(cluster) + cluster.register_user_type.assert_called_with(models.DEFAULT_KEYSPACE, User.type_name(), User) From 713bfe42231a17cac6d1622c18301a31bf53b689 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 8 Feb 2016 11:12:12 -0500 Subject: [PATCH 0328/2005] PYTHON-452 - Fix Cython deserializer date overflow with large timestamp --- cassandra/cython_utils.pyx | 11 +++++-- .../cqlengine/columns/test_validation.py | 17 +++++++++++ tests/unit/cython/test_utils.py | 29 +++++++++++++++++++ tests/unit/cython/utils_testhelper.pyx | 23 +++++++++++++++ tests/unit/test_time_util.py | 2 ++ 5 files changed, 79 insertions(+), 3 deletions(-) create mode 100644 tests/unit/cython/test_utils.py create mode 100644 tests/unit/cython/utils_testhelper.pyx diff --git a/cassandra/cython_utils.pyx b/cassandra/cython_utils.pyx index ad72f68910..c9caf01f7a 100644 --- a/cassandra/cython_utils.pyx +++ b/cassandra/cython_utils.pyx @@ -35,10 +35,15 @@ from cassandra.util import is_little_endian import_datetime() +DEF DAY_IN_SECONDS = 86400 + DATETIME_EPOC = datetime.datetime(1970, 1, 1) cdef datetime_from_timestamp(double timestamp): - cdef int seconds = timestamp - cdef int microseconds = ( (timestamp * 1000000)) % 1000000 - return DATETIME_EPOC + timedelta_new(0, seconds, microseconds) + cdef int days = (timestamp / DAY_IN_SECONDS) + cdef int64_t days_in_seconds = ( days) * DAY_IN_SECONDS + cdef int seconds = (timestamp - days_in_seconds) + cdef int microseconds = ((timestamp - days_in_seconds - seconds) * 1000000) + + return DATETIME_EPOC + timedelta_new(days, seconds, microseconds) diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 4609cca1af..496bc42ee9 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -88,6 +88,23 @@ def test_datetime_none(self): dts = self.DatetimeTest.objects.filter(test_id=1).values_list('created_at') assert dts[0][0] is None + def test_datetime_invalid(self): + dt_value= 'INVALID' + with self.assertRaises(TypeError): + self.DatetimeTest.objects.create(test_id=2, created_at=dt_value) + + def test_datetime_timestamp(self): + dt_value = 1454520554 + self.DatetimeTest.objects.create(test_id=2, created_at=dt_value) + dt2 = self.DatetimeTest.objects(test_id=2).first() + assert dt2.created_at == datetime.utcfromtimestamp(dt_value) + + def test_datetime_large(self): + dt_value = datetime(2038, 12, 31, 10, 10, 10, 123000) + self.DatetimeTest.objects.create(test_id=2, created_at=dt_value) + dt2 = self.DatetimeTest.objects(test_id=2).first() + assert dt2.created_at == dt_value + class TestBoolDefault(BaseCassEngTestCase): class BoolDefaultValueTest(Model): diff --git a/tests/unit/cython/test_utils.py b/tests/unit/cython/test_utils.py new file mode 100644 index 0000000000..209056f645 --- /dev/null +++ b/tests/unit/cython/test_utils.py @@ -0,0 +1,29 @@ +# Copyright 2013-2016 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from tests.unit.cython.utils import cyimport, cythontest +utils_testhelper = cyimport('tests.unit.cython.utils_testhelper') + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + + +class UtilsTest(unittest.TestCase): + """Test Cython Utils functions""" + + @cythontest + def test_datetime_from_timestamp(self): + utils_testhelper.test_datetime_from_timestamp(self.assertEqual) \ No newline at end of file diff --git a/tests/unit/cython/utils_testhelper.pyx b/tests/unit/cython/utils_testhelper.pyx new file mode 100644 index 0000000000..32816d3a31 --- /dev/null +++ b/tests/unit/cython/utils_testhelper.pyx @@ -0,0 +1,23 @@ +# Copyright 2013-2016 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime + +from cassandra.cython_utils cimport datetime_from_timestamp + + +def test_datetime_from_timestamp(assert_equal): + assert_equal(datetime_from_timestamp(1454781157.123456), datetime.datetime(2016, 2, 6, 17, 52, 37, 123456)) + # PYTHON-452 + assert_equal(datetime_from_timestamp(2177403010.123456), datetime.datetime(2038, 12, 31, 10, 10, 10, 123456)) diff --git a/tests/unit/test_time_util.py b/tests/unit/test_time_util.py index 8e767acc48..4455e58887 100644 --- a/tests/unit/test_time_util.py +++ b/tests/unit/test_time_util.py @@ -36,6 +36,8 @@ def test_datetime_from_timestamp(self): self.assertEqual(util.datetime_from_timestamp(0.123456), datetime.datetime(1970, 1, 1, 0, 0, 0, 123456)) + self.assertEqual(util.datetime_from_timestamp(2177403010.123456), datetime.datetime(2038, 12, 31, 10, 10, 10, 123456)) + def test_times_from_uuid1(self): node = uuid.getnode() now = time.time() From d64bcd753e558802a366f3ed45dac527097fdce7 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 9 Feb 2016 17:33:17 -0500 Subject: [PATCH 0329/2005] Add tests for Cython DateType deserialization --- tests/unit/cython/test_types.py | 28 +++++++++++ tests/unit/cython/types_testhelper.pyx | 65 ++++++++++++++++++++++++++ tests/unit/test_types.py | 4 ++ 3 files changed, 97 insertions(+) create mode 100644 tests/unit/cython/test_types.py create mode 100644 tests/unit/cython/types_testhelper.pyx diff --git a/tests/unit/cython/test_types.py b/tests/unit/cython/test_types.py new file mode 100644 index 0000000000..0286450b57 --- /dev/null +++ b/tests/unit/cython/test_types.py @@ -0,0 +1,28 @@ +# Copyright 2013-2016 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from tests.unit.cython.utils import cyimport, cythontest +types_testhelper = cyimport('tests.unit.cython.types_testhelper') + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + + +class TypesTest(unittest.TestCase): + + @cythontest + def test_datetype(self): + types_testhelper.test_datetype(self.assertEqual) diff --git a/tests/unit/cython/types_testhelper.pyx b/tests/unit/cython/types_testhelper.pyx new file mode 100644 index 0000000000..b27a2f21d2 --- /dev/null +++ b/tests/unit/cython/types_testhelper.pyx @@ -0,0 +1,65 @@ +# Copyright 2013-2016 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import time +import datetime + +include '../../../cassandra/ioutils.pyx' + +from cassandra.cqltypes import DateType +from cassandra.deserializers import find_deserializer +from cassandra.bytesio cimport BytesIOReader +from cassandra.buffer cimport Buffer +from cassandra.deserializers cimport from_binary, Deserializer + + +def test_datetype(assert_equal): + + cdef Deserializer des = find_deserializer(DateType) + + def deserialize(timestamp): + """Serialize a datetime and deserialize it using the cython deserializer""" + + cdef BytesIOReader reader + cdef Buffer buf + + dt = datetime.datetime.utcfromtimestamp(timestamp) + reader = BytesIOReader(b'\x00\x00\x00\x08' + DateType.serialize(dt, 0)) + get_buf(reader, &buf) + deserialized_dt = from_binary(des, &buf, 0) + + return deserialized_dt + + # deserialize + # epoc + expected = 0 + assert_equal(deserialize(expected), datetime.datetime.utcfromtimestamp(expected)) + + # beyond 32b + expected = 2 ** 33 + assert_equal(deserialize(expected), datetime.datetime(2242, 3, 16, 12, 56, 32)) + + # less than epoc (PYTHON-119) + expected = -770172256 + assert_equal(deserialize(expected), datetime.datetime(1945, 8, 5, 23, 15, 44)) + + # work around rounding difference among Python versions (PYTHON-230) + # This wont pass with the cython extension until we fix the microseconds alignment with CPython + #expected = 1424817268.274 + #assert_equal(deserialize(expected), datetime.datetime(2015, 2, 24, 22, 34, 28, 274000)) + + # Large date overflow (PYTHON-452) + expected = 2177403010.123 + assert_equal(deserialize(expected), datetime.datetime(2038, 12, 31, 10, 10, 10, 123000)) diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index f6f618de66..d8774b0299 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -204,6 +204,10 @@ def test_datetype(self): expected = 1424817268.274 self.assertEqual(DateType.deserialize(int64_pack(int(1000 * expected)), 0), datetime.datetime(2015, 2, 24, 22, 34, 28, 274000)) + # Large date overflow (PYTHON-452) + expected = 2177403010.123 + self.assertEqual(DateType.deserialize(int64_pack(int(1000 * expected)), 0), datetime.datetime(2038, 12, 31, 10, 10, 10, 123000)) + def test_write_read_string(self): with tempfile.TemporaryFile() as f: value = u'test' From 59857a80636405cfcb78f12d0c67ff98ab3b54b7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 10 Feb 2016 08:31:48 -0600 Subject: [PATCH 0330/2005] test: move models import to module level --- tests/integration/cqlengine/model/test_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/cqlengine/model/test_model.py b/tests/integration/cqlengine/model/test_model.py index c552977e2c..e9f795d8b2 100644 --- a/tests/integration/cqlengine/model/test_model.py +++ b/tests/integration/cqlengine/model/test_model.py @@ -20,6 +20,7 @@ from cassandra.cqlengine import columns, CQLEngineException from cassandra.cqlengine.management import sync_table, drop_table, create_keyspace_simple, drop_keyspace +from cassandra.cqlengine import models from cassandra.cqlengine.models import Model, ModelDefinitionException @@ -111,7 +112,6 @@ def test_column_family(self): class TestModel(Model): k = columns.Integer(primary_key=True) - from cassandra.cqlengine import models # no model keyspace uses default self.assertEqual(TestModel.column_family_name(), "%s.test_model" % (models.DEFAULT_KEYSPACE,)) From 6bfa507f96a653f27277803414e1ece68e8096c9 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 10 Feb 2016 10:19:31 -0500 Subject: [PATCH 0331/2005] Fix all test_id values in TestDateTime --- .../cqlengine/columns/test_validation.py | 24 +++++++++---------- tests/unit/cython/types_testhelper.pyx | 9 ++++++- 2 files changed, 20 insertions(+), 13 deletions(-) diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 496bc42ee9..7b51080bff 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -70,39 +70,39 @@ def dst(self, date_time): return None now = datetime(1982, 1, 1, tzinfo=TZ()) - dt = self.DatetimeTest.objects.create(test_id=0, created_at=now) - dt2 = self.DatetimeTest.objects(test_id=0).first() + dt = self.DatetimeTest.objects.create(test_id=1, created_at=now) + dt2 = self.DatetimeTest.objects(test_id=1).first() assert dt2.created_at.timetuple()[:6] == (now + timedelta(hours=1)).timetuple()[:6] def test_datetime_date_support(self): today = date.today() - self.DatetimeTest.objects.create(test_id=0, created_at=today) - dt2 = self.DatetimeTest.objects(test_id=0).first() + self.DatetimeTest.objects.create(test_id=2, created_at=today) + dt2 = self.DatetimeTest.objects(test_id=2).first() assert dt2.created_at.isoformat() == datetime(today.year, today.month, today.day).isoformat() def test_datetime_none(self): - dt = self.DatetimeTest.objects.create(test_id=1, created_at=None) - dt2 = self.DatetimeTest.objects(test_id=1).first() + dt = self.DatetimeTest.objects.create(test_id=3, created_at=None) + dt2 = self.DatetimeTest.objects(test_id=3).first() assert dt2.created_at is None - dts = self.DatetimeTest.objects.filter(test_id=1).values_list('created_at') + dts = self.DatetimeTest.objects.filter(test_id=3).values_list('created_at') assert dts[0][0] is None def test_datetime_invalid(self): dt_value= 'INVALID' with self.assertRaises(TypeError): - self.DatetimeTest.objects.create(test_id=2, created_at=dt_value) + self.DatetimeTest.objects.create(test_id=4, created_at=dt_value) def test_datetime_timestamp(self): dt_value = 1454520554 - self.DatetimeTest.objects.create(test_id=2, created_at=dt_value) - dt2 = self.DatetimeTest.objects(test_id=2).first() + self.DatetimeTest.objects.create(test_id=5, created_at=dt_value) + dt2 = self.DatetimeTest.objects(test_id=5).first() assert dt2.created_at == datetime.utcfromtimestamp(dt_value) def test_datetime_large(self): dt_value = datetime(2038, 12, 31, 10, 10, 10, 123000) - self.DatetimeTest.objects.create(test_id=2, created_at=dt_value) - dt2 = self.DatetimeTest.objects(test_id=2).first() + self.DatetimeTest.objects.create(test_id=6, created_at=dt_value) + dt2 = self.DatetimeTest.objects(test_id=6).first() assert dt2.created_at == dt_value diff --git a/tests/unit/cython/types_testhelper.pyx b/tests/unit/cython/types_testhelper.pyx index b27a2f21d2..0574d406fc 100644 --- a/tests/unit/cython/types_testhelper.pyx +++ b/tests/unit/cython/types_testhelper.pyx @@ -18,7 +18,10 @@ import datetime include '../../../cassandra/ioutils.pyx' +import io + from cassandra.cqltypes import DateType +from cassandra.protocol import write_value from cassandra.deserializers import find_deserializer from cassandra.bytesio cimport BytesIOReader from cassandra.buffer cimport Buffer @@ -36,7 +39,11 @@ def test_datetype(assert_equal): cdef Buffer buf dt = datetime.datetime.utcfromtimestamp(timestamp) - reader = BytesIOReader(b'\x00\x00\x00\x08' + DateType.serialize(dt, 0)) + + bytes = io.BytesIO() + write_value(bytes, DateType.serialize(dt, 0)) + bytes.seek(0) + reader = BytesIOReader(bytes.read()) get_buf(reader, &buf) deserialized_dt = from_binary(des, &buf, 0) From b408c166c93a553fa04c163e0f980a7e093dc9c5 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 10 Feb 2016 11:20:57 -0600 Subject: [PATCH 0332/2005] Changing set_workload call to match latest changes in ccm --- tests/integration/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 81028f57a9..4463ea3ae3 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -267,8 +267,7 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[]): if start: if(len(workloads) > 0): for node in CCM_CLUSTER.nodes.values(): - for workload in workloads: - node.set_workload(workload) + node.set_workloads(workloads) log.debug("Starting CCM cluster: {0}".format(cluster_name)) CCM_CLUSTER.start(wait_for_binary_proto=True, wait_other_notice=True, jvm_args=jvm_args) #Added to wait for slow nodes to start up From be3a0de5069f419dd353e3fe279acc6edb9e27c7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 11 Feb 2016 10:01:55 -0600 Subject: [PATCH 0333/2005] Add Tuple column type PYTHON-306 --- cassandra/cqlengine/columns.py | 59 +++++++++++++++++++++++++++++++ cassandra/cqlengine/connection.py | 15 +++++--- cassandra/encoder.py | 2 +- 3 files changed, 70 insertions(+), 6 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 722a089255..abad425a7a 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -796,6 +796,65 @@ def reset_previous_value(self): self.previous_value = copy(self.value) +class Tuple(Column): + """ + Stores a fixed-length set of positional values + + http://docs.datastax.com/en/cql/3.1/cql/cql_reference/tupleType.html + """ + def __init__(self, *args, **kwargs): + """ + :param args: column types representing tuple composition + """ + + self.db_type = 'tuple<{0}>'.format(', '.join(typ.db_type for typ in args)) + + if not args: + raise ValueError("Tuple must specify at least one inner type") + + sub_types = [] + for arg in args: + inheritance_comparator = issubclass if isinstance(arg, type) else isinstance + if not inheritance_comparator(arg, Column): + raise ValidationError("%s is not a column class" % (arg,)) + if arg.db_type is None: + raise ValidationError("%s is an abstract type" % (arg,)) + + sub_types.append(arg() if isinstance(arg, type) else arg) + self.sub_types = sub_types + + super(Tuple, self).__init__(**kwargs) + + def validate(self, value): + val = super(Tuple, self).validate(value) + if val is None: + return + if len(val) > self.sub_types: + raise ValidationError("Value %r has more fields than tuple definition (%s)" % + (val, ', '.join(t for t in self.sub_types))) + return tuple(t.validate(v) for t, v in zip(self.sub_types, val)) + + def to_python(self, value): + if value is None: + return tuple() + return tuple(t.to_python(v) for t, v in zip(self.sub_types, value)) + + def to_database(self, value): + if value is None: + return + return tuple(t.to_database(v) for t, v in zip(self.sub_types, value)) + + @property + def sub_columns(self): + return self.sub_types + +# TODO: +# sub_columns --> sub_types +# test UDTs in tuples, vice versa +# refactor init validation to common base +# cqlsh None in tuple should display as null + + class UserDefinedType(Column): """ User Defined Type column diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 9a74f8cfc8..a2de26e08a 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -56,9 +56,8 @@ def default(): cluster = Cluster() session = cluster.connect() - session.row_factory = dict_factory - _register_known_types(cluster) + _setup_session(session) log.debug("cqlengine connection initialized with default session to localhost") @@ -85,7 +84,7 @@ def set_session(s): if not models.DEFAULT_KEYSPACE and session.keyspace: models.DEFAULT_KEYSPACE = session.keyspace - _register_known_types(cluster) + _setup_session(session) log.debug("cqlengine connection initialized with %s", s) @@ -138,9 +137,15 @@ def setup( raise if consistency is not None: session.default_consistency_level = consistency - session.row_factory = dict_factory - _register_known_types(cluster) + _setup_session(session) + + +def _setup_session(session): + session.row_factory = dict_factory + enc = session.encoder + enc.mapping[tuple] = enc.cql_encode_tuple + _register_known_types(session.cluster) def execute(query, params=None, consistency_level=None, timeout=NOT_SET): diff --git a/cassandra/encoder.py b/cassandra/encoder.py index d22540736d..6d8b6ce8a2 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -84,7 +84,7 @@ def __init__(self): OrderedMap: self.cql_encode_map_collection, OrderedMapSerializedKey: self.cql_encode_map_collection, list: self.cql_encode_list_collection, - tuple: self.cql_encode_list_collection, + tuple: self.cql_encode_list_collection, # TODO: change to tuple in next major set: self.cql_encode_set_collection, sortedset: self.cql_encode_set_collection, frozenset: self.cql_encode_set_collection, From 3d66a3124f9c4346c5096ad8a80ce8cc8b4e5598 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 11 Feb 2016 10:06:47 -0600 Subject: [PATCH 0334/2005] sub_columns --> sub_types better, more general name for the property --- cassandra/cqlengine/columns.py | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index abad425a7a..b3856c7eba 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -249,7 +249,7 @@ def _val_is_null(self, val): return val is None @property - def sub_columns(self): + def sub_types(self): return [] @@ -648,7 +648,7 @@ def _val_is_null(self, val): return not val @property - def sub_columns(self): + def sub_types(self): return [self.value_col] @@ -781,7 +781,7 @@ def to_database(self, value): return dict((self.key_col.to_database(k), self.value_col.to_database(v)) for k, v in value.items()) @property - def sub_columns(self): + def sub_types(self): return [self.key_col, self.value_col] @@ -812,7 +812,7 @@ def __init__(self, *args, **kwargs): if not args: raise ValueError("Tuple must specify at least one inner type") - sub_types = [] + types = [] for arg in args: inheritance_comparator = issubclass if isinstance(arg, type) else isinstance if not inheritance_comparator(arg, Column): @@ -820,8 +820,8 @@ def __init__(self, *args, **kwargs): if arg.db_type is None: raise ValidationError("%s is an abstract type" % (arg,)) - sub_types.append(arg() if isinstance(arg, type) else arg) - self.sub_types = sub_types + types.append(arg() if isinstance(arg, type) else arg) + self.types = types super(Tuple, self).__init__(**kwargs) @@ -845,11 +845,10 @@ def to_database(self, value): return tuple(t.to_database(v) for t, v in zip(self.sub_types, value)) @property - def sub_columns(self): - return self.sub_types + def sub_types(self): + return self.types # TODO: -# sub_columns --> sub_types # test UDTs in tuples, vice versa # refactor init validation to common base # cqlsh None in tuple should display as null @@ -877,12 +876,12 @@ def __init__(self, user_type, **kwargs): super(UserDefinedType, self).__init__(**kwargs) @property - def sub_columns(self): + def sub_types(self): return list(self.user_type._fields.values()) def resolve_udts(col_def, out_list): - for col in col_def.sub_columns: + for col in col_def.sub_types: resolve_udts(col, out_list) if isinstance(col_def, UserDefinedType): out_list.append(col_def.user_type) From f38c9125ed958c22d7e27f5a85aed7aabe1d0b8c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 11 Feb 2016 11:36:56 -0600 Subject: [PATCH 0335/2005] cqle: refactor container init to common method PYTHON-306 --- cassandra/cqlengine/columns.py | 104 +++++++++++---------------------- cassandra/cqlengine/models.py | 4 +- 2 files changed, 37 insertions(+), 71 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index b3856c7eba..2243be6bf8 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -16,6 +16,7 @@ from datetime import date, datetime import logging import six +from uuid import UUID as _UUID from cassandra import util from cassandra.cqltypes import SimpleDateType @@ -504,7 +505,6 @@ def validate(self, value): val = super(UUID, self).validate(value) if val is None: return - from uuid import UUID as _UUID if isinstance(val, _UUID): return val if isinstance(val, six.string_types): @@ -523,9 +523,6 @@ def to_database(self, value): return self.validate(value) -from uuid import UUID as pyUUID, getnode - - class TimeUUID(UUID): """ UUID containing timestamp @@ -615,24 +612,22 @@ class BaseContainerColumn(Column): https://cassandra.apache.org/doc/cql3/CQL.html#collections """ - def __init__(self, value_type, **kwargs): + def __init__(self, types, **kwargs): """ - :param value_type: a column class indicating the types of the value + :param types: a sequence of sub types in this collection """ - inheritance_comparator = issubclass if isinstance(value_type, type) else isinstance - if not inheritance_comparator(value_type, Column): - raise ValidationError('value_type must be a column class') - if inheritance_comparator(value_type, BaseContainerColumn): - raise ValidationError('container types cannot be nested') - if value_type.db_type is None: - raise ValidationError('value_type cannot be an abstract column type') - - if isinstance(value_type, type): - self.value_type = value_type - self.value_col = self.value_type() - else: - self.value_col = value_type - self.value_type = self.value_col.__class__ + instances = [] + for t in types: + inheritance_comparator = issubclass if isinstance(t, type) else isinstance + if not inheritance_comparator(t, Column): + raise ValidationError("%s is not a column class" % (t,)) + if inheritance_comparator(t, BaseContainerColumn): # should go away with PYTHON-478 + raise ValidationError('container types cannot be nested') + if t.db_type is None: + raise ValidationError("%s is an abstract type" % (t,)) + + instances.append(t() if isinstance(t, type) else t) + self.types = instances super(BaseContainerColumn, self).__init__(**kwargs) @@ -649,7 +644,7 @@ def _val_is_null(self, val): @property def sub_types(self): - return [self.value_col] + return self.types class Set(BaseContainerColumn): @@ -666,7 +661,10 @@ def __init__(self, value_type, strict=True, default=set, **kwargs): """ self.strict = strict self.db_type = 'set<{0}>'.format(value_type.db_type) - super(Set, self).__init__(value_type, default=default, **kwargs) + + super(Set, self).__init__((value_type,), default=default, **kwargs) + + self.value_col = self.types[0] def validate(self, value): val = super(Set, self).validate(value) @@ -706,7 +704,10 @@ def __init__(self, value_type, default=list, **kwargs): :param value_type: a column class indicating the types of the value """ self.db_type = 'list<{0}>'.format(value_type.db_type) - return super(List, self).__init__(value_type=value_type, default=default, **kwargs) + + super(List, self).__init__((value_type,), default=default, **kwargs) + + self.value_col = self.types[0] def validate(self, value): val = super(List, self).validate(value) @@ -743,21 +744,10 @@ def __init__(self, key_type, value_type, default=dict, **kwargs): self.db_type = 'map<{0}, {1}>'.format(key_type.db_type, value_type.db_type) - inheritance_comparator = issubclass if isinstance(key_type, type) else isinstance - if not inheritance_comparator(key_type, Column): - raise ValidationError('key_type must be a column class') - if inheritance_comparator(key_type, BaseContainerColumn): - raise ValidationError('container types cannot be nested') - if key_type.db_type is None: - raise ValidationError('key_type cannot be an abstract column type') - - if isinstance(key_type, type): - self.key_type = key_type - self.key_col = self.key_type() - else: - self.key_col = key_type - self.key_type = self.key_col.__class__ - super(Map, self).__init__(value_type, default=default, **kwargs) + super(Map, self).__init__((key_type, value_type), default=default, **kwargs) + + self.key_col = self.types[0] + self.value_col = self.types[1] def validate(self, value): val = super(Map, self).validate(value) @@ -780,10 +770,6 @@ def to_database(self, value): return None return dict((self.key_col.to_database(k), self.value_col.to_database(v)) for k, v in value.items()) - @property - def sub_types(self): - return [self.key_col, self.value_col] - class UDTValueManager(BaseValueManager): @property @@ -796,7 +782,7 @@ def reset_previous_value(self): self.previous_value = copy(self.value) -class Tuple(Column): +class Tuple(BaseContainerColumn): """ Stores a fixed-length set of positional values @@ -812,46 +798,26 @@ def __init__(self, *args, **kwargs): if not args: raise ValueError("Tuple must specify at least one inner type") - types = [] - for arg in args: - inheritance_comparator = issubclass if isinstance(arg, type) else isinstance - if not inheritance_comparator(arg, Column): - raise ValidationError("%s is not a column class" % (arg,)) - if arg.db_type is None: - raise ValidationError("%s is an abstract type" % (arg,)) - - types.append(arg() if isinstance(arg, type) else arg) - self.types = types - - super(Tuple, self).__init__(**kwargs) + super(Tuple, self).__init__(args, **kwargs) def validate(self, value): val = super(Tuple, self).validate(value) if val is None: return - if len(val) > self.sub_types: + if len(val) > len(self.types): raise ValidationError("Value %r has more fields than tuple definition (%s)" % - (val, ', '.join(t for t in self.sub_types))) - return tuple(t.validate(v) for t, v in zip(self.sub_types, val)) + (val, ', '.join(t for t in self.types))) + return tuple(t.validate(v) for t, v in zip(self.types, val)) def to_python(self, value): if value is None: return tuple() - return tuple(t.to_python(v) for t, v in zip(self.sub_types, value)) + return tuple(t.to_python(v) for t, v in zip(self.types, value)) def to_database(self, value): if value is None: return - return tuple(t.to_database(v) for t, v in zip(self.sub_types, value)) - - @property - def sub_types(self): - return self.types - -# TODO: -# test UDTs in tuples, vice versa -# refactor init validation to common base -# cqlsh None in tuple should display as null + return tuple(t.to_database(v) for t, v in zip(self.types, value)) class UserDefinedType(Column): diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 3eeef449b2..e80ed7daf9 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -814,8 +814,8 @@ def _get_polymorphic_base(bases): raise ModelDefinitionException("column '{0}' conflicts with built-in attribute/method".format(k)) # counter column primary keys are not allowed - if (v.primary_key or v.partition_key) and isinstance(v, (columns.Counter, columns.BaseContainerColumn)): - raise ModelDefinitionException('counter columns and container columns cannot be used as primary keys') + if (v.primary_key or v.partition_key) and isinstance(v, columns.Counter): + raise ModelDefinitionException('counter columns cannot be used as primary keys') # this will mark the first primary key column as a partition # key, if one hasn't been set already From 9421d36de79ede7fdb20873fe5b117ee2d05b2e9 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 12 Feb 2016 13:11:38 -0500 Subject: [PATCH 0336/2005] PYTHON-310: Add lazy_connect_lock to avoid race condition with multiple threads --- cassandra/cqlengine/connection.py | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index a2de26e08a..21f9f7e5b5 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -15,6 +15,8 @@ from collections import namedtuple, defaultdict import logging import six +from warnings import warn +import threading from cassandra.cluster import Cluster, _NOT_SET, NoHostAvailable, UserTypeDoesNotExist from cassandra.query import SimpleStatement, Statement, dict_factory @@ -32,6 +34,7 @@ cluster = None session = None lazy_connect_args = None +lazy_connect_lock = threading.RLock() # Because type models may be registered before a connection is present, @@ -102,9 +105,14 @@ def setup( :param list hosts: list of hosts, (``contact_points`` for :class:`cassandra.cluster.Cluster`) :param str default_keyspace: The default keyspace to use :param int consistency: The global default :class:`~.ConsistencyLevel` - default is the same as :attr:`.Session.default_consistency_level` - :param bool lazy_connect: True if should not connect until first use + :param bool lazy_connect: True if should not connect until first use (Deprecated) :param bool retry_connect: True if we should retry to connect even if there was a connection failure initially :param \*\*kwargs: Pass-through keyword arguments for :class:`cassandra.cluster.Cluster` + + .. warning :: + + lazy_connect is deprecated. It will be removed in 3.3. + """ global cluster, session, lazy_connect_args @@ -115,6 +123,9 @@ def setup( models.DEFAULT_KEYSPACE = default_keyspace if lazy_connect: + msg = "lazy_connect is deprecated. It will be removed in 3.3." + warn(msg, DeprecationWarning) + log.warning(msg) kwargs['default_keyspace'] = default_keyspace kwargs['consistency'] = consistency kwargs['lazy_connect'] = False @@ -188,11 +199,12 @@ def get_cluster(): def handle_lazy_connect(): global lazy_connect_args - if lazy_connect_args: - log.debug("lazy connect") - hosts, kwargs = lazy_connect_args - lazy_connect_args = None - setup(hosts, **kwargs) + with lazy_connect_lock: + if lazy_connect_args: + log.debug("lazy connect") + hosts, kwargs = lazy_connect_args + lazy_connect_args = None + setup(hosts, **kwargs) def register_udt(keyspace, type_name, klass): From 8ea293adb29f0bbf5e166180080529559425c5a9 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 12 Feb 2016 14:03:03 -0500 Subject: [PATCH 0337/2005] Avoid to acquire the lazy_connect lock if it is not required --- cassandra/cqlengine/connection.py | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 21f9f7e5b5..7d53af6917 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -15,7 +15,6 @@ from collections import namedtuple, defaultdict import logging import six -from warnings import warn import threading from cassandra.cluster import Cluster, _NOT_SET, NoHostAvailable, UserTypeDoesNotExist @@ -105,14 +104,9 @@ def setup( :param list hosts: list of hosts, (``contact_points`` for :class:`cassandra.cluster.Cluster`) :param str default_keyspace: The default keyspace to use :param int consistency: The global default :class:`~.ConsistencyLevel` - default is the same as :attr:`.Session.default_consistency_level` - :param bool lazy_connect: True if should not connect until first use (Deprecated) + :param bool lazy_connect: True if should not connect until first use :param bool retry_connect: True if we should retry to connect even if there was a connection failure initially :param \*\*kwargs: Pass-through keyword arguments for :class:`cassandra.cluster.Cluster` - - .. warning :: - - lazy_connect is deprecated. It will be removed in 3.3. - """ global cluster, session, lazy_connect_args @@ -123,9 +117,6 @@ def setup( models.DEFAULT_KEYSPACE = default_keyspace if lazy_connect: - msg = "lazy_connect is deprecated. It will be removed in 3.3." - warn(msg, DeprecationWarning) - log.warning(msg) kwargs['default_keyspace'] = default_keyspace kwargs['consistency'] = consistency kwargs['lazy_connect'] = False @@ -199,12 +190,20 @@ def get_cluster(): def handle_lazy_connect(): global lazy_connect_args + + # if lazy_connect_args is None, it means the cluster is setup and ready + # No need to acquire the lock + if not lazy_connect_args: + return + with lazy_connect_lock: + # lazy_connect_args might have been set to None by another thread while waiting the lock + # In this case, do nothing. if lazy_connect_args: log.debug("lazy connect") hosts, kwargs = lazy_connect_args - lazy_connect_args = None setup(hosts, **kwargs) + lazy_connect_args = None def register_udt(keyspace, type_name, klass): From 21b8fac563817918bf973437fc4f953ab8e7ec36 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 16 Feb 2016 20:24:08 -0500 Subject: [PATCH 0338/2005] PYTHON-258: Add CONTAINS operator for collections --- cassandra/cqlengine/operators.py | 5 ++ cassandra/cqlengine/query.py | 17 +++++-- .../operators/test_where_operators.py | 2 + .../cqlengine/query/test_queryset.py | 48 +++++++++++++++++++ 4 files changed, 69 insertions(+), 3 deletions(-) diff --git a/cassandra/cqlengine/operators.py b/cassandra/cqlengine/operators.py index 655e810cd5..0aa29d94ae 100644 --- a/cassandra/cqlengine/operators.py +++ b/cassandra/cqlengine/operators.py @@ -83,3 +83,8 @@ class LessThanOperator(BaseWhereOperator): class LessThanOrEqualOperator(BaseWhereOperator): symbol = "LTE" cql_symbol = '<=' + + +class ContainsOperator(EqualsOperator): + symbol = "CONTAINS" + cql_symbol = 'CONTAINS' diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 1f4a1c3b3e..dec4781e4f 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -22,7 +22,7 @@ from cassandra.cqlengine.functions import Token, BaseQueryFunction, QueryValue from cassandra.cqlengine.operators import (InOperator, EqualsOperator, GreaterThanOperator, GreaterThanOrEqualOperator, LessThanOperator, - LessThanOrEqualOperator, BaseWhereOperator) + LessThanOrEqualOperator, ContainsOperator, BaseWhereOperator) # import * ? from cassandra.cqlengine.statements import (WhereClause, SelectStatement, DeleteStatement, UpdateStatement, AssignmentClause, InsertStatement, @@ -87,6 +87,13 @@ def in_(self, item): """ return WhereClause(six.text_type(self), InOperator(), item) + def contains_(self, item): + """ + Returns a CONTAINS operator + """ + return WhereClause(six.text_type(self), ContainsOperator(), item) + + def __eq__(self, other): return WhereClause(six.text_type(self), EqualsOperator(), self._to_database(other)) @@ -550,6 +557,9 @@ def filter(self, *args, **kwargs): query_val = [column.to_database(v) for v in val] elif isinstance(val, BaseQueryFunction): query_val = val + elif isinstance(column, (columns.List, columns.Set, columns.Map)): + # For collections, we query using the value, not the container + query_val = val else: query_val = column.to_database(val) @@ -779,11 +789,12 @@ class ModelQuerySet(AbstractQuerySet): """ def _validate_select_where(self): """ Checks that a filterset will not create invalid select statement """ - # check that there's either a = or IN relationship with a primary key or indexed field + # check that there's either a =, a IN or a CONTAINS (collection) relationship with a primary key or indexed field equal_ops = [self.model._columns.get(w.field) for w in self._where if isinstance(w.operator, EqualsOperator)] token_comparison = any([w for w in self._where if isinstance(w.value, Token)]) if not any([w.primary_key or w.index for w in equal_ops]) and not token_comparison and not self._allow_filtering: - raise QueryException('Where clauses require either a "=" or "IN" comparison with either a primary key or indexed field') + raise QueryException(('Where clauses require either =, a IN or a CONTAINS (collection) ' + 'comparison with either a primary key or indexed field')) if not self._allow_filtering: # if the query is not on an indexed field diff --git a/tests/integration/cqlengine/operators/test_where_operators.py b/tests/integration/cqlengine/operators/test_where_operators.py index d3885c276a..72ff2f6263 100644 --- a/tests/integration/cqlengine/operators/test_where_operators.py +++ b/tests/integration/cqlengine/operators/test_where_operators.py @@ -32,6 +32,7 @@ def check_lookup(symbol, expected): check_lookup('GTE', GreaterThanOrEqualOperator) check_lookup('LT', LessThanOperator) check_lookup('LTE', LessThanOrEqualOperator) + check_lookup('CONTAINS', ContainsOperator) def test_operator_rendering(self): """ tests symbols are rendered properly """ @@ -41,5 +42,6 @@ def test_operator_rendering(self): self.assertEqual(">=", six.text_type(GreaterThanOrEqualOperator())) self.assertEqual("<", six.text_type(LessThanOperator())) self.assertEqual("<=", six.text_type(LessThanOrEqualOperator())) + self.assertEqual("CONTAINS", six.text_type(ContainsOperator())) diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 1dc037ce53..a0563d9aaf 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -77,6 +77,9 @@ class IndexedTestModel(Model): description = columns.Text() expected_result = columns.Integer() test_result = columns.Integer(index=True) + test_list = columns.List(columns.Integer, index=True) + test_set = columns.Set(columns.Integer, index=True) + test_map = columns.Map(columns.Text, columns.Integer, index=True) class TestMultiClusteringModel(Model): @@ -239,6 +242,16 @@ def setUpClass(cls): IndexedTestModel.objects.create(test_id=11, attempt_id=3, description='try12', expected_result=75, test_result=45) + IndexedTestModel.objects.create(test_id=12, attempt_id=3, description='list12', expected_result=75, + test_result=45, test_list=[1, 2, 42], test_set=set([1, 2, 3]), + test_map={'1': 1, '2': 2, '3': 3}) + IndexedTestModel.objects.create(test_id=13, attempt_id=3, description='list13', expected_result=75, + test_result=45, test_list=[3, 4, 5], test_set=set([4, 5, 42]), + test_map={'1': 5, '2': 6, '3': 7}) + IndexedTestModel.objects.create(test_id=14, attempt_id=3, description='list14', expected_result=75, + test_result=45, test_list=[1, 2, 3], test_set=set([1, 2, 3]), + test_map={'1': 1, '2': 2, '3': 42}) + @classmethod def tearDownClass(cls): super(BaseQuerySetUsage, cls).tearDownClass() @@ -509,6 +522,7 @@ def test_primary_key_or_index_must_have_equal_relation_filter(self): q = TestModel.objects(test_id__gt=0) list([i for i in q]) + def test_indexed_field_can_be_queried(self): """ Tests that queries on an indexed field will work without any primary key relations specified @@ -516,6 +530,15 @@ def test_indexed_field_can_be_queried(self): q = IndexedTestModel.objects(test_result=25) assert q.count() == 4 + q = IndexedTestModel.objects.filter(test_list__contains=42) + assert q.count() == 1 + + q = IndexedTestModel.objects.filter(test_set__contains=42) + assert q.count() == 1 + + q = IndexedTestModel.objects.filter(test_map__contains=42) + assert q.count() == 1 + class TestQuerySetDelete(BaseQuerySetUsage): def test_delete(self): @@ -665,6 +688,31 @@ def test_query_expression_success_case(self): assert q.count() == 8 +class TestContainsOperator(BaseQuerySetUsage): + def test_kwarg_success_case(self): + """ Tests the CONTAINS operator works with the kwarg query method """ + q = IndexedTestModel.filter(test_list__contains=1) + assert q.count() == 2 + + q = IndexedTestModel.filter(test_set__contains=3) + assert q.count() == 2 + + q = IndexedTestModel.filter(test_map__contains=42) + assert q.count() == 1 + + + def test_query_expression_success_case(self): + """ Tests the CONTAINS operator works with the query expression query method """ + q = IndexedTestModel.filter(IndexedTestModel.test_list.contains_(1)) + assert q.count() == 2 + + q = IndexedTestModel.filter(IndexedTestModel.test_set.contains_(3)) + assert q.count() == 2 + + q = IndexedTestModel.filter(IndexedTestModel.test_map.contains_(42)) + assert q.count() == 1 + + class TestValuesList(BaseQuerySetUsage): def test_values_list(self): q = TestModel.objects.filter(test_id=0, attempt_id=1) From 21081fb30673a52506de2ef2b3c38ae519afef99 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 19 Feb 2016 08:43:07 -0600 Subject: [PATCH 0339/2005] cqle: correct model multiple objects base exception PYTHON-489 --- cassandra/cqlengine/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index e80ed7daf9..6f9f61c6e8 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -895,7 +895,7 @@ def _get_polymorphic_base(bases): if MultipleObjectsReturnedBase is not None: break - MultipleObjectsReturnedBase = DoesNotExistBase or attrs.pop('MultipleObjectsReturned', BaseModel.MultipleObjectsReturned) + MultipleObjectsReturnedBase = MultipleObjectsReturnedBase or attrs.pop('MultipleObjectsReturned', BaseModel.MultipleObjectsReturned) attrs['MultipleObjectsReturned'] = type('MultipleObjectsReturned', (MultipleObjectsReturnedBase,), {}) # create the class and add a QuerySet to it From a89d0879e03a646285f92b2a4a3143c276c0c614 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 19 Feb 2016 10:18:13 -0600 Subject: [PATCH 0340/2005] setitem and delitem for util.OrderedMap --- cassandra/util.py | 13 ++++++++++++- tests/unit/test_orderedmap.py | 12 ++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/cassandra/util.py b/cassandra/util.py index 6e22bceee6..ab6968e035 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -746,7 +746,7 @@ class OrderedMap(Mapping): ... ) - This class dervies from the (immutable) Mapping API. Objects in these maps + This class derives from the (immutable) Mapping API. Objects in these maps are not intended be modified. \* Note: Because of the way Cassandra encodes nested types, when using the @@ -782,6 +782,8 @@ def _insert(self, key, value): self._items.append((key, value)) self._index[flat_key] = len(self._items) - 1 + __setitem__ = _insert + def __getitem__(self, key): try: index = self._index[self._serialize_key(key)] @@ -789,6 +791,15 @@ def __getitem__(self, key): except KeyError: raise KeyError(str(key)) + def __delitem__(self, key): + # not efficient -- for convenience only + try: + index = self._index.pop(self._serialize_key(key)) + self._index = dict((k, i if i < index else i - 1) for k, i in self._index.items()) + self._items.pop(index) + except KeyError: + raise KeyError(str(key)) + def __iter__(self): for i in self._items: yield i[0] diff --git a/tests/unit/test_orderedmap.py b/tests/unit/test_orderedmap.py index 98f0ba9ade..6fa43ccdfe 100644 --- a/tests/unit/test_orderedmap.py +++ b/tests/unit/test_orderedmap.py @@ -152,6 +152,18 @@ def test_popitem(self): self.assertEqual(om.popitem(), item) self.assertRaises(KeyError, om.popitem) + def test_delitem(self): + om = OrderedMap({1: 1, 2: 2}) + + self.assertRaises(KeyError, om.__delitem__, 3) + + del om[1] + self.assertEqual(om, {2: 2}) + del om[2] + self.assertFalse(om) + + self.assertRaises(KeyError, om.__delitem__, 1) + class OrderedMapSerializedKeyTest(unittest.TestCase): def test_init(self): From 694f50ddd0bc1f971790a5146a707765beef960b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 19 Feb 2016 10:21:39 -0600 Subject: [PATCH 0341/2005] cqle: allow nested collections PYTHON-478 --- cassandra/cqlengine/columns.py | 37 +++++++++++++++++----------------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 2243be6bf8..4670f1d87d 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -621,12 +621,13 @@ def __init__(self, types, **kwargs): inheritance_comparator = issubclass if isinstance(t, type) else isinstance if not inheritance_comparator(t, Column): raise ValidationError("%s is not a column class" % (t,)) - if inheritance_comparator(t, BaseContainerColumn): # should go away with PYTHON-478 - raise ValidationError('container types cannot be nested') if t.db_type is None: raise ValidationError("%s is an abstract type" % (t,)) + inst = t() if isinstance(t, type) else t + if isinstance(t, BaseContainerColumn): + inst._freeze_db_type() + instances.append(inst) - instances.append(t() if isinstance(t, type) else t) self.types = instances super(BaseContainerColumn, self).__init__(**kwargs) @@ -642,6 +643,10 @@ def validate(self, value): def _val_is_null(self, val): return not val + def _freeze_db_type(self): + if not self.db_type.startswith('frozen'): + self.db_type = "frozen<%s>" % (self.db_type,) + @property def sub_types(self): return self.types @@ -660,17 +665,16 @@ def __init__(self, value_type, strict=True, default=set, **kwargs): type on validation, or raise a validation error, defaults to True """ self.strict = strict - self.db_type = 'set<{0}>'.format(value_type.db_type) - super(Set, self).__init__((value_type,), default=default, **kwargs) - self.value_col = self.types[0] + self.db_type = 'set<{0}>'.format(self.value_col.db_type) + def validate(self, value): val = super(Set, self).validate(value) if val is None: return - types = (set,) if self.strict else (set, list, tuple) + types = (set, util.SortedSet) if self.strict else (set, util.SortedSet, list, tuple) if not isinstance(val, types): if self.strict: raise ValidationError('{0} {1} is not a set object'.format(self.column_name, val)) @@ -703,11 +707,10 @@ def __init__(self, value_type, default=list, **kwargs): """ :param value_type: a column class indicating the types of the value """ - self.db_type = 'list<{0}>'.format(value_type.db_type) - super(List, self).__init__((value_type,), default=default, **kwargs) - self.value_col = self.types[0] + self.db_type = 'list<{0}>'.format(self.value_col.db_type) + def validate(self, value): val = super(List, self).validate(value) @@ -741,22 +744,21 @@ def __init__(self, key_type, value_type, default=dict, **kwargs): :param key_type: a column class indicating the types of the key :param value_type: a column class indicating the types of the value """ - - self.db_type = 'map<{0}, {1}>'.format(key_type.db_type, value_type.db_type) - super(Map, self).__init__((key_type, value_type), default=default, **kwargs) - self.key_col = self.types[0] self.value_col = self.types[1] + self.db_type = 'map<{0}, {1}>'.format(self.key_col.db_type, self.value_col.db_type) def validate(self, value): val = super(Map, self).validate(value) if val is None: return - if not isinstance(val, dict): + if not isinstance(val, (dict, util.OrderedMap)): raise ValidationError('{0} {1} is not a dict object'.format(self.column_name, val)) if None in val: raise ValidationError("{0} None is not allowed in a map".format(self.column_name)) + # TODO: stop doing this conversion because it doesn't support non-hashable collections as keys (cassandra does) + # will need to start using the cassandra.util types in the next major rev return dict((self.key_col.validate(k), self.value_col.validate(v)) for k, v in val.items()) def to_python(self, value): @@ -792,13 +794,10 @@ def __init__(self, *args, **kwargs): """ :param args: column types representing tuple composition """ - - self.db_type = 'tuple<{0}>'.format(', '.join(typ.db_type for typ in args)) - if not args: raise ValueError("Tuple must specify at least one inner type") - super(Tuple, self).__init__(args, **kwargs) + self.db_type = 'tuple<{0}>'.format(', '.join(typ.db_type for typ in self.types)) def validate(self, value): val = super(Tuple, self).validate(value) From a09c7c41ec9df725234279ed9b0350380b3915c2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 19 Feb 2016 10:22:59 -0600 Subject: [PATCH 0342/2005] improve existing cqlengine container tests --- .../columns/test_container_columns.py | 79 +++++++++---------- 1 file changed, 38 insertions(+), 41 deletions(-) diff --git a/tests/integration/cqlengine/columns/test_container_columns.py b/tests/integration/cqlengine/columns/test_container_columns.py index 8aa5491a9a..6b23bcd41f 100644 --- a/tests/integration/cqlengine/columns/test_container_columns.py +++ b/tests/integration/cqlengine/columns/test_container_columns.py @@ -21,7 +21,6 @@ from uuid import uuid4 from cassandra import WriteTimeout - import cassandra.cqlengine.columns as columns from cassandra.cqlengine.functions import get_total_seconds from cassandra.cqlengine.models import Model, ValidationError @@ -109,14 +108,14 @@ def test_io_success(self): m1 = TestSetModel.create(int_set=set((1, 2)), text_set=set(('kai', 'andreas'))) m2 = TestSetModel.get(partition=m1.partition) - assert isinstance(m2.int_set, set) - assert isinstance(m2.text_set, set) + self.assertIsInstance(m2.int_set, set) + self.assertIsInstance(m2.text_set, set) - assert 1 in m2.int_set - assert 2 in m2.int_set + self.assertIn(1, m2.int_set) + self.assertIn(2, m2.int_set) - assert 'kai' in m2.text_set - assert 'andreas' in m2.text_set + self.assertIn('kai', m2.text_set) + self.assertIn('andreas', m2.text_set) def test_type_validation(self): """ @@ -144,12 +143,12 @@ def test_partial_updates(self): m1.int_set.add(5) m1.int_set.remove(1) - assert m1.int_set == set((2, 3, 4, 5)) + self.assertEqual(m1.int_set, set((2, 3, 4, 5))) m1.save() m2 = TestSetModel.get(partition=m1.partition) - assert m2.int_set == set((2, 3, 4, 5)) + self.assertEqual(m2.int_set, set((2, 3, 4, 5))) def test_instantiation_with_column_class(self): """ @@ -157,23 +156,23 @@ def test_instantiation_with_column_class(self): and that the class is instantiated in the constructor """ column = columns.Set(columns.Text) - assert isinstance(column.value_col, columns.Text) + self.assertIsInstance(column.value_col, columns.Text) def test_instantiation_with_column_instance(self): """ Tests that columns instantiated with a column instance work properly """ column = columns.Set(columns.Text(min_length=100)) - assert isinstance(column.value_col, columns.Text) + self.assertIsInstance(column.value_col, columns.Text) def test_to_python(self): """ Tests that to_python of value column is called """ column = columns.Set(JsonTestColumn) val = set((1, 2, 3)) db_val = column.to_database(val) - assert db_val == set(json.dumps(v) for v in val) + self.assertEqual(db_val, set(json.dumps(v) for v in val)) py_val = column.to_python(db_val) - assert py_val == val + self.assertEqual(py_val, val) def test_default_empty_container_saving(self): """ tests that the default empty container is not saved if it hasn't been updated """ @@ -219,17 +218,17 @@ def test_io_success(self): m1 = TestListModel.create(int_list=[1, 2], text_list=['kai', 'andreas']) m2 = TestListModel.get(partition=m1.partition) - assert isinstance(m2.int_list, list) - assert isinstance(m2.text_list, list) + self.assertIsInstance(m2.int_list, list) + self.assertIsInstance(m2.text_list, list) - assert len(m2.int_list) == 2 - assert len(m2.text_list) == 2 + self.assertEqual(len(m2.int_list), 2) + self.assertEqual(len(m2.text_list), 2) - assert m2.int_list[0] == 1 - assert m2.int_list[1] == 2 + self.assertEqual(m2.int_list[0], 1) + self.assertEqual(m2.int_list[1], 2) - assert m2.text_list[0] == 'kai' - assert m2.text_list[1] == 'andreas' + self.assertEqual(m2.text_list[0], 'kai') + self.assertEqual(m2.text_list[1], 'andreas') def test_type_validation(self): """ @@ -275,23 +274,23 @@ def test_instantiation_with_column_class(self): and that the class is instantiated in the constructor """ column = columns.List(columns.Text) - assert isinstance(column.value_col, columns.Text) + self.assertIsInstance(column.value_col, columns.Text) def test_instantiation_with_column_instance(self): """ Tests that columns instantiated with a column instance work properly """ column = columns.List(columns.Text(min_length=100)) - assert isinstance(column.value_col, columns.Text) + self.assertIsInstance(column.value_col, columns.Text) def test_to_python(self): """ Tests that to_python of value column is called """ column = columns.List(JsonTestColumn) val = [1, 2, 3] db_val = column.to_database(val) - assert db_val == [json.dumps(v) for v in val] + self.assertEqual(db_val, [json.dumps(v) for v in val]) py_val = column.to_python(db_val) - assert py_val == val + self.assertEqual(py_val, val) def test_default_empty_container_saving(self): """ tests that the default empty container is not saved if it hasn't been updated """ @@ -333,12 +332,12 @@ def test_blind_list_updates_from_none(self): m.save() m2 = TestListModel.get(partition=m.partition) - assert m2.int_list == expected + self.assertEqual(m2.int_list, expected) TestListModel.objects(partition=m.partition).update(int_list=[]) m3 = TestListModel.get(partition=m.partition) - assert m3.int_list == [] + self.assertEqual(m3.int_list, []) class TestMapModel(Model): @@ -398,8 +397,6 @@ def test_io_success(self): self.assertEqual(m2.int_map[1], k1) self.assertEqual(m2.int_map[2], k2) - self.assertTrue('now' in m2.text_map) - self.assertTrue('then' in m2.text_map) self.assertAlmostEqual(get_total_seconds(now - m2.text_map['now']), 0, 2) self.assertAlmostEqual(get_total_seconds(then - m2.text_map['then']), 0, 2) @@ -441,7 +438,7 @@ def test_partial_updates(self): m1.save() m2 = TestMapModel.get(partition=m1.partition) - assert m2.text_map == final + self.assertEqual(m2.text_map, final) def test_updates_from_none(self): """ Tests that updates from None work as expected """ @@ -451,12 +448,12 @@ def test_updates_from_none(self): m.save() m2 = TestMapModel.get(partition=m.partition) - assert m2.int_map == expected + self.assertEquals(m2.int_map, expected) m2.int_map = None m2.save() m3 = TestMapModel.get(partition=m.partition) - assert m3.int_map != expected + self.assertNotEqual(m3.int_map, expected) def test_blind_updates_from_none(self): """ Tests that updates from None work as expected """ @@ -466,12 +463,12 @@ def test_blind_updates_from_none(self): m.save() m2 = TestMapModel.get(partition=m.partition) - assert m2.int_map == expected + self.assertEqual(m2.int_map, expected) TestMapModel.objects(partition=m.partition).update(int_map={}) m3 = TestMapModel.get(partition=m.partition) - assert m3.int_map != expected + self.assertNotEqual(m3.int_map, expected) def test_updates_to_none(self): """ Tests that setting the field to None works as expected """ @@ -480,7 +477,7 @@ def test_updates_to_none(self): m.save() m2 = TestMapModel.get(partition=m.partition) - assert m2.int_map == {} + self.assertEqual(m2.int_map, {}) def test_instantiation_with_column_class(self): """ @@ -488,25 +485,25 @@ def test_instantiation_with_column_class(self): and that the class is instantiated in the constructor """ column = columns.Map(columns.Text, columns.Integer) - assert isinstance(column.key_col, columns.Text) - assert isinstance(column.value_col, columns.Integer) + self.assertIsInstance(column.key_col, columns.Text) + self.assertIsInstance(column.value_col, columns.Integer) def test_instantiation_with_column_instance(self): """ Tests that columns instantiated with a column instance work properly """ column = columns.Map(columns.Text(min_length=100), columns.Integer()) - assert isinstance(column.key_col, columns.Text) - assert isinstance(column.value_col, columns.Integer) + self.assertIsInstance(column.key_col, columns.Text) + self.assertIsInstance(column.value_col, columns.Integer) def test_to_python(self): """ Tests that to_python of value column is called """ column = columns.Map(JsonTestColumn, JsonTestColumn) val = {1: 2, 3: 4, 5: 6} db_val = column.to_database(val) - assert db_val == dict((json.dumps(k), json.dumps(v)) for k, v in val.items()) + self.assertEqual(db_val, dict((json.dumps(k), json.dumps(v)) for k, v in val.items())) py_val = column.to_python(db_val) - assert py_val == val + self.assertEqual(py_val, val) def test_default_empty_container_saving(self): """ tests that the default empty container is not saved if it hasn't been updated """ From 4dfe8944b36e69f85b79731ffde5af06324ee1c1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 19 Feb 2016 11:34:51 -0600 Subject: [PATCH 0343/2005] cqle: validate collection subtypes for PYTHON-478, in lieu of PYTHON-494 --- cassandra/cqlengine/columns.py | 48 ++++++++++++++++++++++------------ 1 file changed, 31 insertions(+), 17 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 4670f1d87d..45da2fce18 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -80,6 +80,8 @@ class Column(object): instance_counter = 0 + _python_type_hashable = True + primary_key = False """ bool flag, indicates this column is a primary key. The first primary key defined @@ -611,7 +613,6 @@ class BaseContainerColumn(Column): https://cassandra.apache.org/doc/cql3/CQL.html#collections """ - def __init__(self, types, **kwargs): """ :param types: a sequence of sub types in this collection @@ -629,7 +630,6 @@ def __init__(self, types, **kwargs): instances.append(inst) self.types = instances - super(BaseContainerColumn, self).__init__(**kwargs) def validate(self, value): @@ -658,6 +658,9 @@ class Set(BaseContainerColumn): http://www.datastax.com/documentation/cql/3.1/cql/cql_using/use_set_t.html """ + + _python_type_hashable = False + def __init__(self, value_type, strict=True, default=set, **kwargs): """ :param value_type: a column class indicating the types of the value @@ -667,9 +670,10 @@ def __init__(self, value_type, strict=True, default=set, **kwargs): self.strict = strict super(Set, self).__init__((value_type,), default=default, **kwargs) self.value_col = self.types[0] + if not self.value_col._python_type_hashable: + raise ValidationError("Cannot create a Set with unhashable value type (see PYTHON-494)") self.db_type = 'set<{0}>'.format(self.value_col.db_type) - def validate(self, value): val = super(Set, self).validate(value) if val is None: @@ -683,7 +687,8 @@ def validate(self, value): if None in val: raise ValidationError("{0} None not allowed in a set".format(self.column_name)) - + # TODO: stop doing this conversion because it doesn't support non-hashable collections as keys (cassandra does) + # will need to start using the cassandra.util types in the next major rev (PYTHON-494) return set(self.value_col.validate(v) for v in val) def to_python(self, value): @@ -703,6 +708,9 @@ class List(BaseContainerColumn): http://www.datastax.com/documentation/cql/3.1/cql/cql_using/use_list_t.html """ + + _python_type_hashable = False + def __init__(self, value_type, default=list, **kwargs): """ :param value_type: a column class indicating the types of the value @@ -711,7 +719,6 @@ def __init__(self, value_type, default=list, **kwargs): self.value_col = self.types[0] self.db_type = 'list<{0}>'.format(self.value_col.db_type) - def validate(self, value): val = super(List, self).validate(value) if val is None: @@ -739,6 +746,9 @@ class Map(BaseContainerColumn): http://www.datastax.com/documentation/cql/3.1/cql/cql_using/use_map_t.html """ + + _python_type_hashable = False + def __init__(self, key_type, value_type, default=dict, **kwargs): """ :param key_type: a column class indicating the types of the key @@ -747,6 +757,10 @@ def __init__(self, key_type, value_type, default=dict, **kwargs): super(Map, self).__init__((key_type, value_type), default=default, **kwargs) self.key_col = self.types[0] self.value_col = self.types[1] + + if not self.key_col._python_type_hashable: + raise ValidationError("Cannot create a Map with unhashable key type (see PYTHON-494)") + self.db_type = 'map<{0}, {1}>'.format(self.key_col.db_type, self.value_col.db_type) def validate(self, value): @@ -758,7 +772,7 @@ def validate(self, value): if None in val: raise ValidationError("{0} None is not allowed in a map".format(self.column_name)) # TODO: stop doing this conversion because it doesn't support non-hashable collections as keys (cassandra does) - # will need to start using the cassandra.util types in the next major rev + # will need to start using the cassandra.util types in the next major rev (PYTHON-494) return dict((self.key_col.validate(k), self.value_col.validate(v)) for k, v in val.items()) def to_python(self, value): @@ -773,17 +787,6 @@ def to_database(self, value): return dict((self.key_col.to_database(k), self.value_col.to_database(v)) for k, v in value.items()) -class UDTValueManager(BaseValueManager): - @property - def changed(self): - return self.value != self.previous_value or (self.value is not None and self.value.has_changed_fields()) - - def reset_previous_value(self): - if self.value is not None: - self.value.reset_changed_fields() - self.previous_value = copy(self.value) - - class Tuple(BaseContainerColumn): """ Stores a fixed-length set of positional values @@ -819,6 +822,17 @@ def to_database(self, value): return tuple(t.to_database(v) for t, v in zip(self.types, value)) +class UDTValueManager(BaseValueManager): + @property + def changed(self): + return self.value != self.previous_value or (self.value is not None and self.value.has_changed_fields()) + + def reset_previous_value(self): + if self.value is not None: + self.value.reset_changed_fields() + self.previous_value = copy(self.value) + + class UserDefinedType(Column): """ User Defined Type column From 46ef40e33ed876363f3d548dbd95908615a15960 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 17 Feb 2016 15:25:41 -0500 Subject: [PATCH 0344/2005] Added doc for the CONTAINS operator --- cassandra/cqlengine/query.py | 5 +++-- docs/cqlengine/queryset.rst | 12 +++++++++++- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index dec4781e4f..24a548b1c1 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -557,8 +557,9 @@ def filter(self, *args, **kwargs): query_val = [column.to_database(v) for v in val] elif isinstance(val, BaseQueryFunction): query_val = val - elif isinstance(column, (columns.List, columns.Set, columns.Map)): - # For collections, we query using the value, not the container + elif (isinstance(operator, ContainsOperator) and + isinstance(column, (columns.List, columns.Set, columns.Map))): + # For ContainsOperator and collections, we query using the value, not the container query_val = val else: query_val = column.to_database(val) diff --git a/docs/cqlengine/queryset.rst b/docs/cqlengine/queryset.rst index 23c9226715..0732a24426 100644 --- a/docs/cqlengine/queryset.rst +++ b/docs/cqlengine/queryset.rst @@ -37,6 +37,7 @@ Retrieving objects with filters year = columns.Integer(primary_key=True) model = columns.Text() price = columns.Decimal() + options = columns.Set(columns.Text) ...and assuming the Automobile table contains a record of every car model manufactured in the last 20 years or so, we can retrieve only the cars made by a single manufacturer like this: @@ -172,6 +173,16 @@ Filtering Operators q.filter(Automobile.year <= 2012) + :attr:`CONTAINS (__contains) ` + + The CONTAINS operator is available for all collection types (List, Set, Map). + + .. code-block:: python + + q = Automobile.objects.filter(manufacturer='Tesla') + q.filter(options__contains='backup camera').allow_filtering() + + Note that we need to use allow_filtering() since the *options* column has no secondary index. TimeUUID Functions ================== @@ -341,4 +352,3 @@ Named tables are a way of querying a table without creating an class. They're u user.objects()[0] # {u'pk': 1, u't': datetime.datetime(2014, 6, 26, 17, 10, 31, 774000)} - From dc9ae8b5116bbb8f3e9f6b83f697e1f7e6bfed71 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 19 Feb 2016 13:38:53 -0500 Subject: [PATCH 0345/2005] minor assertions fix in tests --- .../cqlengine/query/test_queryset.py | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index a0563d9aaf..59320d534b 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -528,16 +528,16 @@ def test_indexed_field_can_be_queried(self): Tests that queries on an indexed field will work without any primary key relations specified """ q = IndexedTestModel.objects(test_result=25) - assert q.count() == 4 + self.assertEqual(q.count(), 4) q = IndexedTestModel.objects.filter(test_list__contains=42) - assert q.count() == 1 + self.assertEqual(q.count(), 1) q = IndexedTestModel.objects.filter(test_set__contains=42) - assert q.count() == 1 + self.assertEqual(q.count(), 1) q = IndexedTestModel.objects.filter(test_map__contains=42) - assert q.count() == 1 + self.assertEqual(q.count(), 1) class TestQuerySetDelete(BaseQuerySetUsage): @@ -692,25 +692,25 @@ class TestContainsOperator(BaseQuerySetUsage): def test_kwarg_success_case(self): """ Tests the CONTAINS operator works with the kwarg query method """ q = IndexedTestModel.filter(test_list__contains=1) - assert q.count() == 2 + self.assertEqual(q.count(), 2) q = IndexedTestModel.filter(test_set__contains=3) - assert q.count() == 2 + self.assertEqual(q.count(), 2) q = IndexedTestModel.filter(test_map__contains=42) - assert q.count() == 1 + self.assertEqual(q.count(), 1) def test_query_expression_success_case(self): """ Tests the CONTAINS operator works with the query expression query method """ q = IndexedTestModel.filter(IndexedTestModel.test_list.contains_(1)) - assert q.count() == 2 + self.assertEqual(q.count(), 2) q = IndexedTestModel.filter(IndexedTestModel.test_set.contains_(3)) - assert q.count() == 2 + self.assertEqual(q.count(), 2) q = IndexedTestModel.filter(IndexedTestModel.test_map.contains_(42)) - assert q.count() == 1 + self.assertEqual(q.count(), 1) class TestValuesList(BaseQuerySetUsage): From 9949f7cdb9aa7a8405f57be790b879dfab64a3b0 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 19 Feb 2016 09:43:30 -0500 Subject: [PATCH 0346/2005] PYTHON-266: Add DISTINCT support --- cassandra/cqlengine/query.py | 17 ++++++++++- cassandra/cqlengine/statements.py | 4 +++ .../cqlengine/query/test_queryset.py | 28 +++++++++++++++++++ .../statements/test_select_statement.py | 8 ++++++ 4 files changed, 56 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 24a548b1c1..9b42f42990 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -266,6 +266,7 @@ def __init__(self, model): self._result_cache = None self._result_idx = None + self._distinct_fields = [] self._batch = None self._ttl = getattr(model, '__default_ttl__', None) self._consistency = None @@ -335,6 +336,7 @@ def _select_query(self): return SelectStatement( self.column_family_name, fields=self._select_fields(), + distinct_fields=self._distinct_fields, where=self._where, order_by=self._order, limit=self._limit, @@ -660,6 +662,19 @@ def count(self): else: return len(self._result_cache) + def distinct(self, distinct_fields=None): + """ + Returns the DISTINCT rows by this query. Default to partition key if no distinct_fields specified. + """ + + clone = copy.deepcopy(self) + if distinct_fields: + clone._distinct_fields = distinct_fields + else: + clone._distinct_fields = [x.column_name for x in self.model._partition_keys.values()] + + return clone + def limit(self, v): """ Limits the number of results returned by Cassandra. @@ -694,7 +709,7 @@ def allow_filtering(self): def _only_or_defer(self, action, fields): clone = copy.deepcopy(self) if clone._defer_fields or clone._only_fields: - raise QueryException("QuerySet alread has only or defer fields defined") + raise QueryException("QuerySet already has only or defer fields defined") # check for strange fields missing_fields = [f for f in fields if f not in self.model._columns.keys()] diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index ff5384a828..434c61b1ad 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -550,6 +550,7 @@ class SelectStatement(BaseCQLStatement): def __init__(self, table, fields=None, + distinct_fields=[], count=False, consistency=None, where=None, @@ -568,6 +569,7 @@ def __init__(self, ) self.fields = [fields] if isinstance(fields, six.string_types) else (fields or []) + self.distinct_fields = distinct_fields self.count = count self.order_by = [order_by] if isinstance(order_by, six.string_types) else order_by self.limit = limit @@ -577,6 +579,8 @@ def __unicode__(self): qs = ['SELECT'] if self.count: qs += ['COUNT(*)'] + elif self.distinct_fields: + qs += ['DISTINCT {0}'.format(', '.join(['"{0}"'.format(f) for f in self.distinct_fields]))] else: qs += [', '.join(['"{0}"'.format(f) for f in self.fields]) if self.fields else '*'] qs += ['FROM', self.table] diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 59320d534b..43ba5ac30b 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -188,6 +188,20 @@ def test_the_all_method_duplicates_queryset(self): query3 = query2.all() assert query3 == query2 + def test_queryset_with_distinct(self): + """ + Tests that calling distinct on a queryset w/without parameter are evaluated properly. + """ + + query1 = TestModel.objects.distinct() + self.assertEqual(len(query1._distinct_fields), 1) + + query2 = TestModel.objects.distinct(['test_id']) + self.assertEqual(len(query2._distinct_fields), 1) + + query3 = TestModel.objects.distinct(['test_id', 'attempt_id']) + self.assertEqual(len(query3._distinct_fields), 2) + def test_defining_only_and_defer_fails(self): """ Tests that trying to add fields to either only or defer, or doing so more than once fails @@ -424,6 +438,20 @@ class NonEqualityFilteringModel(Model): assert num == 1, num +class TestQuerySetDistinct(BaseQuerySetUsage): + + def test_distinct_without_parameter(self): + q = TestModel.objects.distinct() + self.assertEqual(len(q), 3) + + def test_distinct_with_parameter(self): + q = TestModel.objects.distinct(['test_id']) + self.assertEqual(len(q), 3) + + def test_distinct_with_filter(self): + q = TestModel.objects.distinct(['test_id']).filter(test_id__in=[1,2]) + self.assertEqual(len(q), 2) + class TestQuerySetOrdering(BaseQuerySetUsage): diff --git a/tests/integration/cqlengine/statements/test_select_statement.py b/tests/integration/cqlengine/statements/test_select_statement.py index 0df2803d03..33b1ef07e9 100644 --- a/tests/integration/cqlengine/statements/test_select_statement.py +++ b/tests/integration/cqlengine/statements/test_select_statement.py @@ -56,6 +56,14 @@ def test_count(self): self.assertIn('LIMIT', six.text_type(ss)) self.assertNotIn('ORDER', six.text_type(ss)) + def test_distinct(self): + ss = SelectStatement('table', distinct_fields=['field2']) + ss.add_where_clause(WhereClause('field1', EqualsOperator(), 'b')) + self.assertEqual(six.text_type(ss), 'SELECT DISTINCT "field2" FROM table WHERE "field1" = %(0)s', six.text_type(ss)) + + ss = SelectStatement('table', distinct_fields=['field1', 'field2']) + self.assertEqual(six.text_type(ss), 'SELECT DISTINCT "field1", "field2" FROM table') + def test_context(self): ss = SelectStatement('table') ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) From 7bd74e5dd49eedd95cf9ec025fa67e4acde342a7 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 19 Feb 2016 14:37:23 -0500 Subject: [PATCH 0347/2005] Add doc for DISTINCT --- cassandra/cqlengine/query.py | 31 ++++++++++++++++++++++---- cassandra/cqlengine/statements.py | 4 ++-- docs/api/cassandra/cqlengine/query.rst | 2 ++ 3 files changed, 31 insertions(+), 6 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 9b42f42990..c1d07b4511 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -266,7 +266,7 @@ def __init__(self, model): self._result_cache = None self._result_idx = None - self._distinct_fields = [] + self._distinct_fields = None self._batch = None self._ttl = getattr(model, '__default_ttl__', None) self._consistency = None @@ -336,11 +336,11 @@ def _select_query(self): return SelectStatement( self.column_family_name, fields=self._select_fields(), - distinct_fields=self._distinct_fields, where=self._where, order_by=self._order, limit=self._limit, - allow_filtering=self._allow_filtering + allow_filtering=self._allow_filtering, + distinct_fields=self._distinct_fields ) # ----Reads------ @@ -664,7 +664,30 @@ def count(self): def distinct(self, distinct_fields=None): """ - Returns the DISTINCT rows by this query. Default to partition key if no distinct_fields specified. + Returns the DISTINCT rows matched by this query. + + distinct_fields default to the partition key fields if not specified. + + *Note: distinct_fields must be a partition key or a static column* + + .. code-block:: python + + class Automobile(Model): + manufacturer = columns.Text(partition_key=True) + year = columns.Integer(primary_key=True) + model = columns.Text(primary_key=True) + price = columns.Decimal() + + sync_table(Automobile) + + # create rows + + Automobile.objects.distinct() + + # or + + Automobile.objects.distinct(['manufacturer']) + """ clone = copy.deepcopy(self) diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 434c61b1ad..bd2eea2928 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -550,13 +550,13 @@ class SelectStatement(BaseCQLStatement): def __init__(self, table, fields=None, - distinct_fields=[], count=False, consistency=None, where=None, order_by=None, limit=None, - allow_filtering=False): + allow_filtering=False, + distinct_fields=None): """ :param where diff --git a/docs/api/cassandra/cqlengine/query.rst b/docs/api/cassandra/cqlengine/query.rst index df823704f3..d25428e4dc 100644 --- a/docs/api/cassandra/cqlengine/query.rst +++ b/docs/api/cassandra/cqlengine/query.rst @@ -18,6 +18,8 @@ The methods here are used to filter, order, and constrain results. .. automethod:: count + .. automethod:: distinct + .. automethod:: filter .. automethod:: get From 7c6bdb5e61583cbbd820bb6177ebe0c71081a26f Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 22 Feb 2016 10:02:49 -0500 Subject: [PATCH 0348/2005] Add 3.2.0 section in CHANGELOG --- CHANGELOG.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 01000727bb..44d5030272 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,13 @@ +3.2.0 +===== + +Features +-------- + +Bug Fixes +--------- +* Fix Python3 bug with Timers and heaps (github #466) + 3.1.0 ===== From 27ef23bb11c93be4ba5b73497a0262a831b700e3 Mon Sep 17 00:00:00 2001 From: jonmcevoy Date: Thu, 28 Jan 2016 11:07:18 -0800 Subject: [PATCH 0349/2005] Fix Python3 bug with Timers and heaps Add comparison operator to allow Timers with the same end time to be pushed into heaps. It's poorly documented, but the warning can be found here: https://docs.python.org/2/library/heapq.html#priority-queue-implementation-notes "In the future with Python 3, tuple comparison breaks for (priority, task) pairs if the priorities are equal and the tasks do not have a default comparison order." --- cassandra/connection.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cassandra/connection.py b/cassandra/connection.py index ec559b0913..0624ad1364 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -1020,6 +1020,9 @@ def __init__(self, timeout, callback): if timeout < 0: self.callback() + def __lt__(self, other): + return self.end < other.end + def cancel(self): self.canceled = True From 1299f4a7537f44ef2a9bcececb5b8ba7588f5030 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 22 Feb 2016 09:54:03 -0600 Subject: [PATCH 0350/2005] changelog for #466 in 3.1 --- CHANGELOG.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 0c9451c382..54355a40fb 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -11,6 +11,7 @@ Bug Fixes --------- * Bus error (alignment issues) when running cython on some ARM platforms (PYTHON-450) * Overflow when decoding large collections (cython) (PYTHON-459) +* Timer heap comparison issue with Python 3 (github #466) 3.0.0 ===== From 4c9dc3e5b7d684db311d5e4f508bb3dc1bdbb8e6 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 18 Feb 2016 12:10:11 -0600 Subject: [PATCH 0351/2005] First pass at cqlengine tuple testing --- .../columns/test_container_columns.py | 186 ++++++++++++++++++ 1 file changed, 186 insertions(+) diff --git a/tests/integration/cqlengine/columns/test_container_columns.py b/tests/integration/cqlengine/columns/test_container_columns.py index 6b23bcd41f..bf1884ea9b 100644 --- a/tests/integration/cqlengine/columns/test_container_columns.py +++ b/tests/integration/cqlengine/columns/test_container_columns.py @@ -537,3 +537,189 @@ def tearDownClass(cls): def test_camelcase_column(self): TestCamelMapModel.create(camelMap={'blah': 1}) + + +class TestTupleModel(Model): + + partition = columns.UUID(primary_key=True, default=uuid4) + int_tuple = columns.Tuple(columns.Integer, columns.Integer, columns.Integer, required=False) + text_tuple = columns.Tuple(columns.Text, columns.Text, required=False) + mixed_tuple = columns.Tuple(columns.Text, columns.Integer, columns.Text, required=False) + + +class TestTupleColumn(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + drop_table(TestTupleModel) + sync_table(TestTupleModel) + + @classmethod + def tearDownClass(cls): + drop_table(TestTupleModel) + + def test_initial(self): + tmp = TestTupleModel.create() + tmp.int_tuple = tmp.int_tuple + (1, 2, 3) + + def test_initial_retrieve(self): + tmp = TestTupleModel.create() + tmp2 = tmp.get(partition=tmp.partition) + tmp2.int_tuple = tmp2.int_tuple + (1, 2, 3) + + def test_io_success(self): + """ Tests that a basic usage works as expected """ + m1 = TestTupleModel.create(int_tuple=(1, 2, 3, 5, 6), text_tuple=('kai', 'andreas'), mixed_tuple=('first', 2, 'Third')) + m2 = TestTupleModel.get(partition=m1.partition) + + assert isinstance(m2.int_tuple, tuple) + assert isinstance(m2.text_tuple, tuple) + assert isinstance(m2.mixed_tuple, tuple) + + + assert len(m2.int_tuple) == 3 + assert len(m2.text_tuple) == 2 + assert len(m2.mixed_tuple) == 3 + + assert m2.int_tuple[0] == 1 + assert m2.int_tuple[1] == 2 + assert m2.int_tuple[2] == 3 + + assert m2.text_tuple[0] == 'kai' + assert m2.text_tuple[1] == 'andreas' + + assert m2.mixed_tuple[0] == 'first' + assert m2.mixed_tuple[1] == 2 + assert m2.mixed_tuple[2] == 'Third' + + def test_type_validation(self): + """ + Tests that attempting to use the wrong types will raise an exception + """ + self.assertRaises(ValidationError, TestTupleModel.create, **{'int_tuple': ('string', True), 'text_tuple': ('test', 'test'), 'mixed_tuple': ('one', 2, 'three')}) + self.assertRaises(ValidationError, TestTupleModel.create, **{'int_tuple': ('string', 'string'), 'text_tuple': (1, 3.0), 'mixed_tuple': ('one', 2, 'three')}) + self.assertRaises(ValidationError, TestTupleModel.create, **{'int_tuple': ('string', 'string'), 'text_tuple': ('test', 'test'), 'mixed_tuple': (1, "two", 3)}) + + def test_instantiation_with_column_class(self): + """ + Tests that columns instantiated with a column class work properly + and that the class is instantiated in the constructor + """ + mixed_tuple = columns.Tuple(columns.Text, columns.Integer, columns.Text, required=False) + assert isinstance(mixed_tuple.types[0], columns.Text) + assert isinstance(mixed_tuple.types[1], columns.Integer) + assert isinstance(mixed_tuple.types[2], columns.Text) + self.assertEqual(len(mixed_tuple.types), 3) + + def test_default_empty_container_saving(self): + """ tests that the default empty container is not saved if it hasn't been updated """ + pkey = uuid4() + # create a row with tuple data + TestTupleModel.create(partition=pkey, int_tuple=(1, 2, 3)) + # create another with no tuple data + TestTupleModel.create(partition=pkey) + + m = TestTupleModel.get(partition=pkey) + self.assertEqual(m.int_tuple, (1, 2, 3)) + + def test_partial_updates(self): + """Truncates on create but not on save""" + """ broken""" + full = tuple(range(10)) + initial = full[3:7] + + m1 = TestTupleModel.create(int_tuple=initial) + m1.int_tuple = (1, 2, 3) + m1.save() + + if is_prepend_reversed(): + expected = full[2::-1] + full[3:] + else: + expected = full + + m2 = TestTupleModel.get(partition=m1.partition) + self.assertEqual(tuple(m2.int_tuple), expected) + + def test_to_python(self): + """ Tests that to_python of value column is called """ + column = columns.Tuple(JsonTestColumn) + val = (1, 2, 3) + db_val = column.to_database(val) + assert db_val == [json.dumps(v) for v in val] + py_val = column.to_python(db_val) + assert py_val == val + + def test_update_from_non_empty_to_empty(self): + # Can up date a touple raises a Runtime Error + pkey = uuid4() + tmp = TestTupleModel.create(partition=pkey, int_tuple=(1, 2)) + tmp.int_tuple = (1, 3) + tmp.update() + + tmp = TestTupleModel.get(partition=pkey) + self.assertEqual(tmp.int_tuple, (1, 3)) + + def test_insert_none(self): + pkey = uuid4() + self.assertRaises(ValidationError, TestTupleModel.create, **{'partition': pkey, 'int_tuple': (None)}) + + def test_blind_tuple_updates_from_none(self): + """ Tests that updates from None work as expected """ + m = TestTupleModel.create(int_tuple=None) + expected = (1, 2, 3) + m.int_tuple = expected + m.save() + + m2 = TestTupleModel.get(partition=m.partition) + assert m2.int_tuple == expected + + TestTupleModel.objects(partition=m.partition).update(int_tuple=None) + + m3 = TestTupleModel.get(partition=m.partition) + assert m3.int_tuple == () + + +class TestTupleModelVeryLarge(Model): + + partition = columns.UUID(primary_key=True, default=uuid4) + args = [] + for i in range(32767): + args.append(columns.Integer) + int_tuple = columns.Tuple(*args, required=False) + + +class TestTupleModelOverLarge(Model): + + partition = columns.UUID(primary_key=True, default=uuid4) + args = [] + for i in range(32769): + args.append(columns.Integer) + int_tuple = columns.Tuple(*args, required=False) + + +class TestTupleColumnLarge(BaseCassEngTestCase): + + def test_element_count_validation(self): + drop_table(TestTupleModelVeryLarge) + sync_table(TestTupleModelVeryLarge) + """ + Tests that big collections are supported + """ + test_tuple = (i for i in range(32767)) + m1 = TestTupleModelVeryLarge.create(int_tuple=test_tuple) + m2 = TestTupleModelVeryLarge.get(partition=m1.partition) + print(len(m2.int_tuple)) + drop_table(TestTupleModelVeryLarge) + + def test_element_count_validation_too_big(self): + drop_table(TestTupleModelOverLarge) + sync_table(TestTupleModelOverLarge) + """ + Tests that big collections are detected and raise an exception. + """ + test_tuple = (i for i in range(32769)) + m1 = TestTupleModelOverLarge.create(int_tuple=test_tuple) + m2 = TestTupleModelOverLarge.get(partition=m1.partition) + print(len(m2.int_tuple)) + # this will need to be wrapped in an assertion + drop_table(TestTupleModelOverLarge) From 6560bfc24ce516e9fe568ca4012d91aefa4a8bb3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 19 Feb 2016 14:03:08 -0600 Subject: [PATCH 0352/2005] cqle: make the distinction btw collections and containers Makes the DML update query treat tuples as simple values, rather than special container types. --- cassandra/cqlengine/columns.py | 80 ++++++++++++++++++---------------- cassandra/cqlengine/query.py | 3 -- 2 files changed, 42 insertions(+), 41 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 45da2fce18..623e086a2b 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -607,7 +607,7 @@ def to_database(self, value): return self.validate(value) -class BaseContainerColumn(Column): +class BaseCollectionColumn(Column): """ Base Container type for collection-like columns. @@ -630,10 +630,10 @@ def __init__(self, types, **kwargs): instances.append(inst) self.types = instances - super(BaseContainerColumn, self).__init__(**kwargs) + super(BaseCollectionColumn, self).__init__(**kwargs) def validate(self, value): - value = super(BaseContainerColumn, self).validate(value) + value = super(BaseCollectionColumn, self).validate(value) # It is dangerous to let collections have more than 65535. # See: https://issues.apache.org/jira/browse/CASSANDRA-5428 if value is not None and len(value) > 65535: @@ -652,6 +652,45 @@ def sub_types(self): return self.types +class Tuple(BaseCollectionColumn): + """ + Stores a fixed-length set of positional values + + http://docs.datastax.com/en/cql/3.1/cql/cql_reference/tupleType.html + """ + def __init__(self, *args, **kwargs): + """ + :param args: column types representing tuple composition + """ + if not args: + raise ValueError("Tuple must specify at least one inner type") + super(Tuple, self).__init__(args, **kwargs) + self.db_type = 'tuple<{0}>'.format(', '.join(typ.db_type for typ in self.types)) + + def validate(self, value): + val = super(Tuple, self).validate(value) + if val is None: + return + if len(val) > len(self.types): + raise ValidationError("Value %r has more fields than tuple definition (%s)" % + (val, ', '.join(t for t in self.types))) + return tuple(t.validate(v) for t, v in zip(self.types, val)) + + def to_python(self, value): + if value is None: + return tuple() + return tuple(t.to_python(v) for t, v in zip(self.types, value)) + + def to_database(self, value): + if value is None: + return + return tuple(t.to_database(v) for t, v in zip(self.types, value)) + + +class BaseContainerColumn(BaseCollectionColumn): + pass + + class Set(BaseContainerColumn): """ Stores a set of unordered, unique values @@ -787,41 +826,6 @@ def to_database(self, value): return dict((self.key_col.to_database(k), self.value_col.to_database(v)) for k, v in value.items()) -class Tuple(BaseContainerColumn): - """ - Stores a fixed-length set of positional values - - http://docs.datastax.com/en/cql/3.1/cql/cql_reference/tupleType.html - """ - def __init__(self, *args, **kwargs): - """ - :param args: column types representing tuple composition - """ - if not args: - raise ValueError("Tuple must specify at least one inner type") - super(Tuple, self).__init__(args, **kwargs) - self.db_type = 'tuple<{0}>'.format(', '.join(typ.db_type for typ in self.types)) - - def validate(self, value): - val = super(Tuple, self).validate(value) - if val is None: - return - if len(val) > len(self.types): - raise ValidationError("Value %r has more fields than tuple definition (%s)" % - (val, ', '.join(t for t in self.types))) - return tuple(t.validate(v) for t, v in zip(self.types, val)) - - def to_python(self, value): - if value is None: - return tuple() - return tuple(t.to_python(v) for t, v in zip(self.types, value)) - - def to_database(self, value): - if value is None: - return - return tuple(t.to_database(v) for t, v in zip(self.types, value)) - - class UDTValueManager(BaseValueManager): @property def changed(self): diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index c1d07b4511..4d03344281 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -1151,11 +1151,9 @@ def update(self): val = getattr(self.instance, name, None) val_mgr = self.instance._values[name] - # don't update something that is null if val is None: continue - # don't update something if it hasn't changed if not val_mgr.changed and not isinstance(col, columns.Counter): continue @@ -1173,7 +1171,6 @@ def update(self): else: raise RuntimeError - # do the stuff clause = klass(col.db_field_name, val, previous=val_mgr.previous_value, column=col) if clause.get_context_size() > 0: From e1170bfd8c709d8d7ddda0942765063b05cb38c7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 19 Feb 2016 14:05:35 -0600 Subject: [PATCH 0353/2005] cqle: tuple test tweaks (some are still broken) --- .../columns/test_container_columns.py | 49 +++++++++---------- 1 file changed, 24 insertions(+), 25 deletions(-) diff --git a/tests/integration/cqlengine/columns/test_container_columns.py b/tests/integration/cqlengine/columns/test_container_columns.py index bf1884ea9b..1d4e770c43 100644 --- a/tests/integration/cqlengine/columns/test_container_columns.py +++ b/tests/integration/cqlengine/columns/test_container_columns.py @@ -560,37 +560,36 @@ def tearDownClass(cls): def test_initial(self): tmp = TestTupleModel.create() - tmp.int_tuple = tmp.int_tuple + (1, 2, 3) + tmp.int_tuple = (1, 2, 3) def test_initial_retrieve(self): tmp = TestTupleModel.create() tmp2 = tmp.get(partition=tmp.partition) - tmp2.int_tuple = tmp2.int_tuple + (1, 2, 3) + tmp2.int_tuple = (1, 2, 3) def test_io_success(self): """ Tests that a basic usage works as expected """ m1 = TestTupleModel.create(int_tuple=(1, 2, 3, 5, 6), text_tuple=('kai', 'andreas'), mixed_tuple=('first', 2, 'Third')) m2 = TestTupleModel.get(partition=m1.partition) - assert isinstance(m2.int_tuple, tuple) - assert isinstance(m2.text_tuple, tuple) - assert isinstance(m2.mixed_tuple, tuple) + self.assertIsInstance(m2.int_tuple, tuple) + self.assertIsInstance(m2.text_tuple, tuple) + self.assertIsInstance(m2.mixed_tuple, tuple) + self.assertEqual(len(m2.int_tuple), 3) + self.assertEqual(len(m2.text_tuple), 2) + self.assertEqual(len(m2.mixed_tuple), 3) - assert len(m2.int_tuple) == 3 - assert len(m2.text_tuple) == 2 - assert len(m2.mixed_tuple) == 3 + self.assertEqual(m2.int_tuple[0], 1) + self.assertEqual(m2.int_tuple[1], 2) + self.assertEqual(m2.int_tuple[2], 3) - assert m2.int_tuple[0] == 1 - assert m2.int_tuple[1] == 2 - assert m2.int_tuple[2] == 3 + self.assertEqual(m2.text_tuple[0], 'kai') + self.assertEqual(m2.text_tuple[1], 'andreas') - assert m2.text_tuple[0] == 'kai' - assert m2.text_tuple[1] == 'andreas' - - assert m2.mixed_tuple[0] == 'first' - assert m2.mixed_tuple[1] == 2 - assert m2.mixed_tuple[2] == 'Third' + self.assertEqual(m2.mixed_tuple[0], 'first') + self.assertEqual(m2.mixed_tuple[1], 2) + self.assertEqual(m2.mixed_tuple[2], 'Third') def test_type_validation(self): """ @@ -606,9 +605,9 @@ def test_instantiation_with_column_class(self): and that the class is instantiated in the constructor """ mixed_tuple = columns.Tuple(columns.Text, columns.Integer, columns.Text, required=False) - assert isinstance(mixed_tuple.types[0], columns.Text) - assert isinstance(mixed_tuple.types[1], columns.Integer) - assert isinstance(mixed_tuple.types[2], columns.Text) + self.assertIsInstance(mixed_tuple.types[0], columns.Text) + self.assertIsInstance(mixed_tuple.types[1], columns.Integer) + self.assertIsInstance(mixed_tuple.types[2], columns.Text) self.assertEqual(len(mixed_tuple.types), 3) def test_default_empty_container_saving(self): @@ -645,9 +644,9 @@ def test_to_python(self): column = columns.Tuple(JsonTestColumn) val = (1, 2, 3) db_val = column.to_database(val) - assert db_val == [json.dumps(v) for v in val] + self.assertEqual(db_val, tuple(json.dumps(v) for v in val)) py_val = column.to_python(db_val) - assert py_val == val + self.assertEqual(py_val, val) def test_update_from_non_empty_to_empty(self): # Can up date a touple raises a Runtime Error @@ -657,7 +656,7 @@ def test_update_from_non_empty_to_empty(self): tmp.update() tmp = TestTupleModel.get(partition=pkey) - self.assertEqual(tmp.int_tuple, (1, 3)) + self.assertEqual(tmp.int_tuple, (1, 3, None)) def test_insert_none(self): pkey = uuid4() @@ -671,12 +670,12 @@ def test_blind_tuple_updates_from_none(self): m.save() m2 = TestTupleModel.get(partition=m.partition) - assert m2.int_tuple == expected + self.assertEqual(m2.int_tuple, expected) TestTupleModel.objects(partition=m.partition).update(int_tuple=None) m3 = TestTupleModel.get(partition=m.partition) - assert m3.int_tuple == () + self.assertEqual(m3.int_tuple, None) class TestTupleModelVeryLarge(Model): From ecdbdb3eff7f09df4ea9be260dce6b0e93b220fc Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 22 Feb 2016 10:23:35 -0600 Subject: [PATCH 0354/2005] tuple test updates PYTHON-306 --- .../columns/test_container_columns.py | 191 ++++++++++-------- 1 file changed, 102 insertions(+), 89 deletions(-) diff --git a/tests/integration/cqlengine/columns/test_container_columns.py b/tests/integration/cqlengine/columns/test_container_columns.py index 1d4e770c43..38e187849e 100644 --- a/tests/integration/cqlengine/columns/test_container_columns.py +++ b/tests/integration/cqlengine/columns/test_container_columns.py @@ -559,16 +559,45 @@ def tearDownClass(cls): drop_table(TestTupleModel) def test_initial(self): + """ + Tests creation and insertion of tuple types with models + + @since 3.2 + @jira_ticket PYTHON-306 + @expected_result Model is successfully crated + + @test_category object_mapper + """ tmp = TestTupleModel.create() tmp.int_tuple = (1, 2, 3) def test_initial_retrieve(self): + """ + Tests creation and insertion of tuple types with models, + and their retrieval. + + @since 3.2 + @jira_ticket PYTHON-306 + @expected_result Model is successfully crated + + @test_category object_mapper + """ + tmp = TestTupleModel.create() tmp2 = tmp.get(partition=tmp.partition) tmp2.int_tuple = (1, 2, 3) def test_io_success(self): - """ Tests that a basic usage works as expected """ + """ + Tests creation and insertion of various types with models, + and their retrieval. + + @since 3.2 + @jira_ticket PYTHON-306 + @expected_result Model is successfully created and fetched correctly + + @test_category object_mapper + """ m1 = TestTupleModel.create(int_tuple=(1, 2, 3, 5, 6), text_tuple=('kai', 'andreas'), mixed_tuple=('first', 2, 'Third')) m2 = TestTupleModel.get(partition=m1.partition) @@ -576,24 +605,19 @@ def test_io_success(self): self.assertIsInstance(m2.text_tuple, tuple) self.assertIsInstance(m2.mixed_tuple, tuple) - self.assertEqual(len(m2.int_tuple), 3) - self.assertEqual(len(m2.text_tuple), 2) - self.assertEqual(len(m2.mixed_tuple), 3) - - self.assertEqual(m2.int_tuple[0], 1) - self.assertEqual(m2.int_tuple[1], 2) - self.assertEqual(m2.int_tuple[2], 3) - - self.assertEqual(m2.text_tuple[0], 'kai') - self.assertEqual(m2.text_tuple[1], 'andreas') - - self.assertEqual(m2.mixed_tuple[0], 'first') - self.assertEqual(m2.mixed_tuple[1], 2) - self.assertEqual(m2.mixed_tuple[2], 'Third') + self.assertEqual((1, 2, 3), m2.int_tuple) + self.assertEqual(('kai', 'andreas'), m2.text_tuple) + self.assertEqual(('first', 2, 'Third'), m2.mixed_tuple) def test_type_validation(self): """ - Tests that attempting to use the wrong types will raise an exception + Tests to make sure tuple type validation occurs + + @since 3.2 + @jira_ticket PYTHON-306 + @expected_result validation errors are raised + + @test_category object_mapper """ self.assertRaises(ValidationError, TestTupleModel.create, **{'int_tuple': ('string', True), 'text_tuple': ('test', 'test'), 'mixed_tuple': ('one', 2, 'three')}) self.assertRaises(ValidationError, TestTupleModel.create, **{'int_tuple': ('string', 'string'), 'text_tuple': (1, 3.0), 'mixed_tuple': ('one', 2, 'three')}) @@ -603,6 +627,12 @@ def test_instantiation_with_column_class(self): """ Tests that columns instantiated with a column class work properly and that the class is instantiated in the constructor + + @since 3.2 + @jira_ticket PYTHON-306 + @expected_result types are instantiated correctly + + @test_category object_mapper """ mixed_tuple = columns.Tuple(columns.Text, columns.Integer, columns.Text, required=False) self.assertIsInstance(mixed_tuple.types[0], columns.Text) @@ -611,7 +641,15 @@ def test_instantiation_with_column_class(self): self.assertEqual(len(mixed_tuple.types), 3) def test_default_empty_container_saving(self): - """ tests that the default empty container is not saved if it hasn't been updated """ + """ + Tests that the default empty container is not saved if it hasn't been updated + + @since 3.2 + @jira_ticket PYTHON-306 + @expected_result empty tuple is not upserted + + @test_category object_mapper + """ pkey = uuid4() # create a row with tuple data TestTupleModel.create(partition=pkey, int_tuple=(1, 2, 3)) @@ -621,49 +659,69 @@ def test_default_empty_container_saving(self): m = TestTupleModel.get(partition=pkey) self.assertEqual(m.int_tuple, (1, 2, 3)) - def test_partial_updates(self): - """Truncates on create but not on save""" - """ broken""" - full = tuple(range(10)) - initial = full[3:7] + def test_updates(self): + """ + Tests that updates can be preformed on tuple containers + + @since 3.2 + @jira_ticket PYTHON-306 + @expected_result tuple is replaced + + @test_category object_mapper + """ + initial = (1, 2) + replacement = (1, 2, 3) m1 = TestTupleModel.create(int_tuple=initial) - m1.int_tuple = (1, 2, 3) + m1.int_tuple = replacement m1.save() - if is_prepend_reversed(): - expected = full[2::-1] + full[3:] - else: - expected = full - m2 = TestTupleModel.get(partition=m1.partition) - self.assertEqual(tuple(m2.int_tuple), expected) - - def test_to_python(self): - """ Tests that to_python of value column is called """ - column = columns.Tuple(JsonTestColumn) - val = (1, 2, 3) - db_val = column.to_database(val) - self.assertEqual(db_val, tuple(json.dumps(v) for v in val)) - py_val = column.to_python(db_val) - self.assertEqual(py_val, val) + self.assertEqual(tuple(m2.int_tuple), replacement) def test_update_from_non_empty_to_empty(self): - # Can up date a touple raises a Runtime Error + """ + Tests that explcit none updates are processed correctly on tuples + + @since 3.2 + @jira_ticket PYTHON-306 + @expected_result tuple is replaced with none + + @test_category object_mapper + """ pkey = uuid4() - tmp = TestTupleModel.create(partition=pkey, int_tuple=(1, 2)) - tmp.int_tuple = (1, 3) + tmp = TestTupleModel.create(partition=pkey, int_tuple=(1, 2, 3)) + tmp.int_tuple = (None) tmp.update() tmp = TestTupleModel.get(partition=pkey) - self.assertEqual(tmp.int_tuple, (1, 3, None)) + self.assertEqual(tmp.int_tuple, (None)) def test_insert_none(self): + """ + Tests that Tuples can be created as none + + @since 3.2 + @jira_ticket PYTHON-306 + @expected_result tuple is created as none + + @test_category object_mapper + """ pkey = uuid4() - self.assertRaises(ValidationError, TestTupleModel.create, **{'partition': pkey, 'int_tuple': (None)}) + tmp = TestTupleModel.create(partition=pkey, int_tuple=(None)) + self.assertEqual((None), tmp.int_tuple) def test_blind_tuple_updates_from_none(self): - """ Tests that updates from None work as expected """ + """ + Tests that Tuples can be updated from none + + @since 3.2 + @jira_ticket PYTHON-306 + @expected_result tuple is created as none, but upserted to contain values + + @test_category object_mapper + """ + m = TestTupleModel.create(int_tuple=None) expected = (1, 2, 3) m.int_tuple = expected @@ -677,48 +735,3 @@ def test_blind_tuple_updates_from_none(self): m3 = TestTupleModel.get(partition=m.partition) self.assertEqual(m3.int_tuple, None) - -class TestTupleModelVeryLarge(Model): - - partition = columns.UUID(primary_key=True, default=uuid4) - args = [] - for i in range(32767): - args.append(columns.Integer) - int_tuple = columns.Tuple(*args, required=False) - - -class TestTupleModelOverLarge(Model): - - partition = columns.UUID(primary_key=True, default=uuid4) - args = [] - for i in range(32769): - args.append(columns.Integer) - int_tuple = columns.Tuple(*args, required=False) - - -class TestTupleColumnLarge(BaseCassEngTestCase): - - def test_element_count_validation(self): - drop_table(TestTupleModelVeryLarge) - sync_table(TestTupleModelVeryLarge) - """ - Tests that big collections are supported - """ - test_tuple = (i for i in range(32767)) - m1 = TestTupleModelVeryLarge.create(int_tuple=test_tuple) - m2 = TestTupleModelVeryLarge.get(partition=m1.partition) - print(len(m2.int_tuple)) - drop_table(TestTupleModelVeryLarge) - - def test_element_count_validation_too_big(self): - drop_table(TestTupleModelOverLarge) - sync_table(TestTupleModelOverLarge) - """ - Tests that big collections are detected and raise an exception. - """ - test_tuple = (i for i in range(32769)) - m1 = TestTupleModelOverLarge.create(int_tuple=test_tuple) - m2 = TestTupleModelOverLarge.get(partition=m1.partition) - print(len(m2.int_tuple)) - # this will need to be wrapped in an assertion - drop_table(TestTupleModelOverLarge) From a98078b5ad7eb7bfef2711078edfe0b2cf40ed1b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 22 Feb 2016 11:57:22 -0600 Subject: [PATCH 0355/2005] add a test for #466 --- tests/unit/test_connection.py | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index b306708c9e..ed03b9a1a2 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -24,7 +24,7 @@ from cassandra.cluster import Cluster, Session from cassandra.connection import (Connection, HEADER_DIRECTION_TO_CLIENT, ProtocolError, - locally_supported_compressions, ConnectionHeartbeat, _Frame) + locally_supported_compressions, ConnectionHeartbeat, _Frame, Timer, TimerManager) from cassandra.marshal import uint8_pack, uint32_pack, int32_pack from cassandra.protocol import (write_stringmultimap, write_int, write_string, SupportedMessage, ProtocolHandler) @@ -413,3 +413,25 @@ def send_msg(msg, req_id, msg_callback): self.assertIsInstance(exc, Exception) self.assertEqual(exc.args, Exception('Connection heartbeat failure').args) holder.return_connection.assert_has_calls([call(connection)] * get_holders.call_count) + + +class TimerTest(unittest.TestCase): + + def test_timer_collision(self): + # simple test demonstrating #466 + def f1(): + pass + + def f2(): + pass + + # same timeout, comparison will defer to the Timer object itself + t1 = Timer(0, f1) + t2 = Timer(0, f2) + t2.end = t1.end + + tm = TimerManager() + tm.add_timer(t1) + tm.add_timer(t2) + # Prior to $466: "TypeError: unorderable types: Timer() < Timer()" + tm.service_timeouts() From 635f554055abb4f5c815615867be4903ab8b904b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 22 Feb 2016 13:38:05 -0600 Subject: [PATCH 0356/2005] Add a task counter to cluster task scheduler PYTHON-473 Fixes a TypeError when tasks collide in time and function comparison is attempted (Python 3 only). https://docs.python.org/2/library/heapq.html#priority-queue-implementation-notes --- cassandra/cluster.py | 11 ++++++----- tests/unit/test_connection.py | 12 +++--------- 2 files changed, 9 insertions(+), 14 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index ae7a3ece4b..1954a13934 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -41,7 +41,7 @@ from cassandra.util import WeakSet # NOQA from functools import partial, wraps -from itertools import groupby +from itertools import groupby, count from cassandra import (ConsistencyLevel, AuthenticationFailed, OperationTimedOut, UnsupportedOperation, @@ -2542,6 +2542,7 @@ class _Scheduler(object): def __init__(self, executor): self._queue = Queue.PriorityQueue() self._scheduled_tasks = set() + self._count = count() self._executor = executor t = Thread(target=self.run, name="Task Scheduler") @@ -2559,7 +2560,7 @@ def shutdown(self): # this can happen on interpreter shutdown pass self.is_shutdown = True - self._queue.put_nowait((0, None)) + self._queue.put_nowait((0, 0, None)) def schedule(self, delay, fn, *args, **kwargs): self._insert_task(delay, (fn, args, tuple(kwargs.items()))) @@ -2575,7 +2576,7 @@ def _insert_task(self, delay, task): if not self.is_shutdown: run_at = time.time() + delay self._scheduled_tasks.add(task) - self._queue.put_nowait((run_at, task)) + self._queue.put_nowait((run_at, next(self._count), task)) else: log.debug("Ignoring scheduled task after shutdown: %r", task) @@ -2586,7 +2587,7 @@ def run(self): try: while True: - run_at, task = self._queue.get(block=True, timeout=None) + run_at, i, task = self._queue.get(block=True, timeout=None) if self.is_shutdown: log.debug("Not executing scheduled task due to Scheduler shutdown") return @@ -2597,7 +2598,7 @@ def run(self): future = self._executor.submit(fn, *args, **kwargs) future.add_done_callback(self._log_if_failed) else: - self._queue.put_nowait((run_at, task)) + self._queue.put_nowait((run_at, i, task)) break except Queue.Empty: pass diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index ed03b9a1a2..15fa6e722b 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -419,19 +419,13 @@ class TimerTest(unittest.TestCase): def test_timer_collision(self): # simple test demonstrating #466 - def f1(): - pass - - def f2(): - pass - # same timeout, comparison will defer to the Timer object itself - t1 = Timer(0, f1) - t2 = Timer(0, f2) + t1 = Timer(0, lambda: None) + t2 = Timer(0, lambda: None) t2.end = t1.end tm = TimerManager() tm.add_timer(t1) tm.add_timer(t2) - # Prior to $466: "TypeError: unorderable types: Timer() < Timer()" + # Prior to #466: "TypeError: unorderable types: Timer() < Timer()" tm.service_timeouts() From 10598f958edb45ab40fcaf3a43a934b9dbb5a70c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 22 Feb 2016 18:54:15 -0600 Subject: [PATCH 0357/2005] fix inner collection class comparison --- cassandra/cqlengine/columns.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 623e086a2b..1303c5d59f 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -625,7 +625,7 @@ def __init__(self, types, **kwargs): if t.db_type is None: raise ValidationError("%s is an abstract type" % (t,)) inst = t() if isinstance(t, type) else t - if isinstance(t, BaseContainerColumn): + if isinstance(t, BaseCollectionColumn): inst._freeze_db_type() instances.append(inst) From a0569f166938dcdc4af496db237aaa3038ba62a5 Mon Sep 17 00:00:00 2001 From: Kai Lautaportti Date: Wed, 3 Jun 2015 09:26:18 -0700 Subject: [PATCH 0358/2005] Expose conflicting data in case of LWTException Adds a custom attribute, `existing`, to the LWTException instance which allows the application layer to inspect the values read from Cassandra which conflicted with the attempted conditional write. The documentation already referred to the `existing` attribute but it was not implemented. --- cassandra/cqlengine/query.py | 14 +++++++-- docs/api/cassandra/cqlengine/models.rst | 8 ++--- docs/api/cassandra/cqlengine/query.rst | 1 + .../integration/cqlengine/test_ifnotexists.py | 19 ++++++++++-- .../integration/cqlengine/test_transaction.py | 29 ++++++++++++++++--- 5 files changed, 59 insertions(+), 12 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 1f4a1c3b3e..dc64a0059a 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -40,7 +40,17 @@ class IfNotExistsWithCounterColumn(CQLEngineException): class LWTException(CQLEngineException): - pass + """Lightweight transaction exception. + + This exception will be raised when a write using an `IF` clause could not be + applied due to existing data violating the condition. The existing data is + available through the ``existing`` attribute. + + :param existing: The current state of the data which prevented the write. + """ + def __init__(self, existing): + super(LWTException, self).__init__(self) + self.existing = existing class DoesNotExist(QueryException): @@ -58,7 +68,7 @@ def check_applied(result): applied to database. """ if result and '[applied]' in result[0] and not result[0]['[applied]']: - raise LWTException('') + raise LWTException(result[0]) class AbstractQueryableColumn(UnicodeMixin): diff --git a/docs/api/cassandra/cqlengine/models.rst b/docs/api/cassandra/cqlengine/models.rst index b8d8a71a0c..aad91461c9 100644 --- a/docs/api/cassandra/cqlengine/models.rst +++ b/docs/api/cassandra/cqlengine/models.rst @@ -48,7 +48,6 @@ Model See the `list of supported table properties for more information `_. - .. attribute:: __options__ For example: @@ -89,7 +88,7 @@ Model object is determined by its primary key(s). And please note using this flag would incur performance cost. - if the insertion didn't applied, a LWTException exception would be raised. + If the insertion isn't applied, a :class:`~cassandra.cqlengine.query.LWTException` is raised. .. code-block:: python @@ -111,7 +110,7 @@ Model Simply specify the column(s) and the expected value(s). As with if_not_exists, this incurs a performance cost. - If the insertion isn't applied, a LWTException is raised + If the insertion isn't applied, a :class:`~cassandra.cqlengine.query.LWTException` is raised. .. code-block:: python @@ -119,7 +118,8 @@ Model try: t.iff(count=5).update('other text') except LWTException as e: - # handle failure + # handle failure case + print e.existing # existing object .. automethod:: get diff --git a/docs/api/cassandra/cqlengine/query.rst b/docs/api/cassandra/cqlengine/query.rst index df823704f3..e67df5df1d 100644 --- a/docs/api/cassandra/cqlengine/query.rst +++ b/docs/api/cassandra/cqlengine/query.rst @@ -40,3 +40,4 @@ The methods here are used to filter, order, and constrain results. .. autoclass:: MultipleObjectsReturned +.. autoclass:: LWTException diff --git a/tests/integration/cqlengine/test_ifnotexists.py b/tests/integration/cqlengine/test_ifnotexists.py index 3fc95d2dd0..42df821dd7 100644 --- a/tests/integration/cqlengine/test_ifnotexists.py +++ b/tests/integration/cqlengine/test_ifnotexists.py @@ -81,9 +81,17 @@ def test_insert_if_not_exists_success(self): id = uuid4() TestIfNotExistsModel.create(id=id, count=8, text='123456789') - with self.assertRaises(LWTException): + + with self.assertRaises(LWTException) as assertion: TestIfNotExistsModel.if_not_exists().create(id=id, count=9, text='111111111111') + self.assertEquals(assertion.exception.existing, { + 'count': 8, + 'id': id, + 'text': '123456789', + '[applied]': False, + }) + q = TestIfNotExistsModel.objects(id=id) self.assertEqual(len(q), 1) @@ -117,9 +125,16 @@ def test_batch_insert_if_not_exists_success(self): b = BatchQuery() TestIfNotExistsModel.batch(b).if_not_exists().create(id=id, count=9, text='111111111111') - with self.assertRaises(LWTException): + with self.assertRaises(LWTException) as assertion: b.execute() + self.assertEquals(assertion.exception.existing, { + 'count': 8, + 'id': id, + 'text': '123456789', + '[applied]': False, + }) + q = TestIfNotExistsModel.objects(id=id) self.assertEqual(len(q), 1) diff --git a/tests/integration/cqlengine/test_transaction.py b/tests/integration/cqlengine/test_transaction.py index b3d7ea0fb1..3f7a19ee86 100644 --- a/tests/integration/cqlengine/test_transaction.py +++ b/tests/integration/cqlengine/test_transaction.py @@ -29,8 +29,9 @@ from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration import CASSANDRA_VERSION + class TestTransactionModel(Model): - id = columns.UUID(primary_key=True, default=lambda:uuid4()) + id = columns.UUID(primary_key=True, default=uuid4) count = columns.Integer() text = columns.Text(required=False) @@ -71,7 +72,14 @@ def test_update_failure(self): t = TestTransactionModel.create(text='blah blah') t.text = 'new blah' t = t.iff(text='something wrong') - self.assertRaises(LWTException, t.save) + + with self.assertRaises(LWTException) as assertion: + t.save() + + self.assertEqual(assertion.exception.existing, { + 'text': 'blah blah', + '[applied]': False, + }) def test_blind_update(self): t = TestTransactionModel.create(text='blah blah') @@ -89,7 +97,13 @@ def test_blind_update_fail(self): t.text = 'something else' uid = t.id qs = TestTransactionModel.objects(id=uid).iff(text='Not dis!') - self.assertRaises(LWTException, qs.update, text='this will never work') + with self.assertRaises(LWTException) as assertion: + qs.update(text='this will never work') + + self.assertEqual(assertion.exception.existing, { + 'text': 'blah blah', + '[applied]': False, + }) def test_transaction_clause(self): tc = TransactionClause('some_value', 23) @@ -109,7 +123,14 @@ def test_batch_update_transaction(self): b = BatchQuery() updated.batch(b).iff(count=6).update(text='and another thing') - self.assertRaises(LWTException, b.execute) + with self.assertRaises(LWTException) as assertion: + b.execute() + + self.assertEqual(assertion.exception.existing, { + 'id': id, + 'count': 5, + '[applied]': False, + }) updated = TestTransactionModel.objects(id=id).first() self.assertEqual(updated.text, 'something else') From f1e49230ebd74ee14771a36c9b7bca7eba5fee15 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 23 Feb 2016 12:36:25 -0600 Subject: [PATCH 0359/2005] actually at the new test --- tests/unit/test_cluster_task_scheduler.py | 38 +++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 tests/unit/test_cluster_task_scheduler.py diff --git a/tests/unit/test_cluster_task_scheduler.py b/tests/unit/test_cluster_task_scheduler.py new file mode 100644 index 0000000000..8aaf02f9cd --- /dev/null +++ b/tests/unit/test_cluster_task_scheduler.py @@ -0,0 +1,38 @@ +# Copyright 2013-2016 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +from mock import patch + +from cassandra.cluster import _Scheduler + + +class SchedulerTest(unittest.TestCase): + # TODO: this suite could be expanded; for now just adding a test covering a ticket + + @patch('time.time', return_value=3) # always queue at same time + @patch('cassandra.cluster._Scheduler.run') # don't actually run the thread + def test_event_delay_timing(self, *args): + """ + Schedule something with a time collision to make sure the heap comparison works + + PYTHON-473 + """ + sched = _Scheduler(None) + sched.schedule(0, lambda: None) + sched.schedule(0, lambda: None) # pre-473: "TypeError: unorderable types: function() < function()"t From 94fe1fce992fc0541c60578f4df78e571077a7a7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 23 Feb 2016 13:30:35 -0600 Subject: [PATCH 0360/2005] ResultSet.was_applied property for LWT results. --- cassandra/cluster.py | 25 +++++++++++++++++++++++-- cassandra/cqlengine/query.py | 10 ++++++---- tests/unit/test_resultset.py | 27 +++++++++++++++++++++++++++ 3 files changed, 56 insertions(+), 6 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 1954a13934..13afecd245 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -70,8 +70,8 @@ HostConnectionPool, HostConnection, NoConnectionsAvailable) from cassandra.query import (SimpleStatement, PreparedStatement, BoundStatement, - BatchStatement, bind_params, QueryTrace, Statement, - named_tuple_factory, dict_factory, FETCH_SIZE_UNSET) + BatchStatement, bind_params, QueryTrace, + named_tuple_factory, dict_factory, tuple_factory, FETCH_SIZE_UNSET) def _is_eventlet_monkey_patched(): @@ -3409,3 +3409,24 @@ def get_all_query_traces(self, max_wait_sec_per=None): See :meth:`.ResponseFuture.get_all_query_traces` for details. """ return self.response_future.get_all_query_traces(max_wait_sec_per) + + @property + def was_applied(self): + """ + For LWT results, returns whether the transaction was applied. + + Result is indeterminate if called on a result that was not an LWT request. + + Only valid when one of tne of the internal row factories is in use. + """ + if self.response_future.row_factory not in (named_tuple_factory, dict_factory, tuple_factory): + raise RuntimeError("Cannot determine LWT result with row factory %s" % (self.response_future.row_factsory,)) + if len(self.current_rows) != 1: + raise RuntimeError("LWT result should have exactly one row. This has %d." % (len(self.current_rows))) + + row = self.current_rows[0] + if isinstance(row, tuple): + return row[0] + else: + return row['[applied]'] + diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index dc64a0059a..d356bf5e8b 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -63,11 +63,13 @@ class MultipleObjectsReturned(QueryException): def check_applied(result): """ - check if result contains some column '[applied]' with false value, - if that value is false, it means our light-weight transaction didn't - applied to database. + Raises LWTException if it looks like a failed LWT request. """ - if result and '[applied]' in result[0] and not result[0]['[applied]']: + try: + applied = result.was_applied + except Exception: + applied = True # result was not LWT form + if not applied: raise LWTException(result[0]) diff --git a/tests/unit/test_resultset.py b/tests/unit/test_resultset.py index 252b6e0506..2deeb30f75 100644 --- a/tests/unit/test_resultset.py +++ b/tests/unit/test_resultset.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from cassandra.query import named_tuple_factory, dict_factory, tuple_factory try: import unittest2 as unittest @@ -161,3 +162,29 @@ def test_eq(self): def test_bool(self): self.assertFalse(ResultSet(Mock(has_more_pages=False), [])) self.assertTrue(ResultSet(Mock(has_more_pages=False), [1])) + + def test_was_applied(self): + # unknown row factory raises + with self.assertRaises(RuntimeError): + ResultSet(Mock(), []).was_applied + + response_future = Mock(row_factory=named_tuple_factory) + + # no row + with self.assertRaises(RuntimeError): + ResultSet(response_future, []).was_applied + + # too many rows + with self.assertRaises(RuntimeError): + ResultSet(response_future, [tuple(), tuple()]).was_applied + + # various internal row factories + for row_factory in (named_tuple_factory, tuple_factory): + for applied in (True, False): + rs = ResultSet(Mock(row_factory=row_factory), [(applied,)]) + self.assertEqual(rs.was_applied, applied) + + row_factory = dict_factory + for applied in (True, False): + rs = ResultSet(Mock(row_factory=row_factory), [{'[applied]': applied}]) + self.assertEqual(rs.was_applied, applied) From 5e102605cdff5afd987c463e27b442b94a4f755f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 23 Feb 2016 13:32:37 -0600 Subject: [PATCH 0361/2005] PYTHON-336 test tweaks and changelog --- CHANGELOG.rst | 1 + docs/api/cassandra/cqlengine/models.rst | 2 +- tests/integration/cqlengine/test_ifnotexists.py | 4 ++-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 54355a40fb..53c84afe5b 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -6,6 +6,7 @@ Features * Pass name of server auth class to AuthProvider (PYTHON-454) * Surface schema agreed flag for DDL statements (PYTHON-458) * Automatically convert float and int to Decimal on serialization (PYTHON-468) +* Expose prior state information via cqlengine LWTException (github #343) Bug Fixes --------- diff --git a/docs/api/cassandra/cqlengine/models.rst b/docs/api/cassandra/cqlengine/models.rst index aad91461c9..ff198a4924 100644 --- a/docs/api/cassandra/cqlengine/models.rst +++ b/docs/api/cassandra/cqlengine/models.rst @@ -96,7 +96,7 @@ Model TestIfNotExistsModel.if_not_exists().create(id=id, count=9, text='111111111111') except LWTException as e: # handle failure case - print e.existing # existing object + print e.existing # dict containing LWT result fields This method is supported on Cassandra 2.0 or later. diff --git a/tests/integration/cqlengine/test_ifnotexists.py b/tests/integration/cqlengine/test_ifnotexists.py index 42df821dd7..43d53319a8 100644 --- a/tests/integration/cqlengine/test_ifnotexists.py +++ b/tests/integration/cqlengine/test_ifnotexists.py @@ -85,7 +85,7 @@ def test_insert_if_not_exists_success(self): with self.assertRaises(LWTException) as assertion: TestIfNotExistsModel.if_not_exists().create(id=id, count=9, text='111111111111') - self.assertEquals(assertion.exception.existing, { + self.assertEqual(assertion.exception.existing, { 'count': 8, 'id': id, 'text': '123456789', @@ -128,7 +128,7 @@ def test_batch_insert_if_not_exists_success(self): with self.assertRaises(LWTException) as assertion: b.execute() - self.assertEquals(assertion.exception.existing, { + self.assertEqual(assertion.exception.existing, { 'count': 8, 'id': id, 'text': '123456789', From a51b4cbe0cba55b3cc3ca1d58a052ea356376b6b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 23 Feb 2016 14:02:18 -0600 Subject: [PATCH 0362/2005] warn against creating or saving counter models Confusing semantics via PYTHON-333 --- cassandra/cqlengine/query.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 4d03344281..8963463584 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -16,6 +16,7 @@ from datetime import datetime, timedelta import time import six +from warnings import warn from cassandra.cqlengine import columns, CQLEngineException, ValidationError, UnicodeMixin from cassandra.cqlengine import connection @@ -23,7 +24,6 @@ from cassandra.cqlengine.operators import (InOperator, EqualsOperator, GreaterThanOperator, GreaterThanOrEqualOperator, LessThanOperator, LessThanOrEqualOperator, ContainsOperator, BaseWhereOperator) -# import * ? from cassandra.cqlengine.statements import (WhereClause, SelectStatement, DeleteStatement, UpdateStatement, AssignmentClause, InsertStatement, BaseCQLStatement, MapUpdateClause, MapDeleteClause, @@ -1209,6 +1209,8 @@ def save(self): nulled_fields = set() if self.instance._has_counter or self.instance._can_update(): + if self.instance._has_counter: + warn("'create' and 'save' actions on Counters are deprecated. A future version will disallow this. Use the 'update' mechanism instead.") return self.update() else: insert = InsertStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp, if_not_exists=self._if_not_exists) From 252b52c27ff4c16ae0825fe173984e9b90173f4e Mon Sep 17 00:00:00 2001 From: Sigmund Augdal Date: Mon, 22 Feb 2016 09:43:37 +0100 Subject: [PATCH 0363/2005] Fix hang when using eventlet and ssl The eventlet read loop runs select before each recv call. This will fail because recv() on an ssl socket might read more data than requested from the underlying socket in order to have enough context for decryption. These extra bytes would be returned by the next recv on the ssl socket, but are not known to underlying socket so select will hang. It is my understanding that eventlet implements blocking IO by using poll under the hood and yielding, so the select should not be necessary at all --- cassandra/io/eventletreactor.py | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index c811e15c48..8fda42fb4e 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -18,9 +18,9 @@ from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, EINVAL import eventlet -from eventlet.green import select, socket +from eventlet.green import socket +import ssl from eventlet.queue import Queue -from functools import partial import logging import os from threading import Event @@ -37,7 +37,8 @@ def is_timeout(err): return ( err in (EINPROGRESS, EALREADY, EWOULDBLOCK) or - (err == EINVAL and os.name in ('nt', 'ce')) + (err == EINVAL and os.name in ('nt', 'ce')) or + (isinstance(err, ssl.SSLError) and err.args[0] == 'timed out') ) @@ -138,26 +139,17 @@ def handle_write(self): return # Leave the write loop def handle_read(self): - run_select = partial(select.select, (self._socket,), (), ()) while True: - try: - run_select() - except Exception as exc: - if not self.is_closed: - log.debug("Exception during read select() for %s: %s", - self, exc) - self.defunct(exc) - return - try: buf = self._socket.recv(self.in_buffer_size) self._iobuf.write(buf) except socket.error as err: - if not is_timeout(err): - log.debug("Exception during socket recv for %s: %s", - self, err) - self.defunct(err) - return # leave the read loop + if is_timeout(err): + continue + log.debug("Exception during socket recv for %s: %s", + self, err) + self.defunct(err) + return # leave the read loop if self._iobuf.tell(): self.process_io_buffer() From 603e87b9178cc63d862f9a2203d34d6ee16e8dca Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 24 Feb 2016 11:12:27 -0600 Subject: [PATCH 0364/2005] test: avoid dynamic gevent patching when eventlet is active --- tests/unit/io/test_geventreactor.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/tests/unit/io/test_geventreactor.py b/tests/unit/io/test_geventreactor.py index cf28b93d68..4a8ba30748 100644 --- a/tests/unit/io/test_geventreactor.py +++ b/tests/unit/io/test_geventreactor.py @@ -19,7 +19,7 @@ import time from tests.unit.io.utils import submit_and_wait_for_completion, TimerCallback -from tests import is_gevent_monkey_patched +from tests import is_gevent_monkey_patched, is_eventlet_monkey_patched try: from cassandra.io.geventreactor import GeventConnection @@ -31,15 +31,20 @@ class GeventTimerTest(unittest.TestCase): + need_unpatch = False + @classmethod def setUpClass(cls): + if is_eventlet_monkey_patched(): + return # no dynamic patching if we have eventlet applied if GeventConnection is not None: if not is_gevent_monkey_patched(): + cls.need_unpatch = True gevent.monkey.patch_all() @classmethod def tearDownClass(cls): - if is_gevent_monkey_patched(): + if cls.need_unpatch: gevent_un_patch_all() def setUp(self): @@ -47,7 +52,7 @@ def setUp(self): raise unittest.SkipTest("Can't test gevent without monkey patching") GeventConnection.initialize_reactor() - def test_multi_timer_validation(self, *args): + def test_multi_timer_validation(self): """ Verify that timer timeouts are honored appropriately """ From 29f3597180adff4ffbf317764409aa20174442ae Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 22 Feb 2016 19:00:55 -0600 Subject: [PATCH 0365/2005] Added tests for nested collections --- .../columns/test_container_columns.py | 221 ++++++++++++++++++ 1 file changed, 221 insertions(+) diff --git a/tests/integration/cqlengine/columns/test_container_columns.py b/tests/integration/cqlengine/columns/test_container_columns.py index 38e187849e..cba1ced44f 100644 --- a/tests/integration/cqlengine/columns/test_container_columns.py +++ b/tests/integration/cqlengine/columns/test_container_columns.py @@ -735,3 +735,224 @@ def test_blind_tuple_updates_from_none(self): m3 = TestTupleModel.get(partition=m.partition) self.assertEqual(m3.int_tuple, None) + +class TestNestedModel(Model): + + partition = columns.UUID(primary_key=True, default=uuid4) + list_list = columns.List(columns.List(columns.Integer), required=False) + map_list = columns.Map(columns.Text, columns.List(columns.Text), required=False) + set_tuple = columns.Set(columns.Tuple(columns.Integer, columns.Integer), required=False) + + +class TestNestedType(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + drop_table(TestNestedModel) + sync_table(TestNestedModel) + + @classmethod + def tearDownClass(cls): + drop_table(TestNestedModel) + + def test_initial(self): + """ + Tests creation and insertion of nested collection types with models + + @since 3.2 + @jira_ticket PYTHON-478 + @expected_result Model is successfully created + + @test_category object_mapper + """ + tmp = TestNestedModel.create() + tmp.list_list = [[1, 2, 3], [2, 3, 4], [3, 4, 5]] + + def test_initial_retrieve(self): + """ + Tests creation and insertion of nested collection types with models, + and their retrieval. + + @since 3.2 + @jira_ticket PYTHON-478 + @expected_result Model is successfully crated + + @test_category object_mapper + """ + + tmp = TestNestedModel.create() + tmp2 = tmp.get(partition=tmp.partition) + tmp2.list_list = [[1, 2, 3], [2, 3, 4], [3, 4, 5]] + tmp2.map_list = {'key1': ["text1", "text2", "text3"], "key2": ["text1", "text2", "text3"], "key3": ["text1", "text2", "text3"]} + tmp2.set_tuple = set(((1, 2), (3, 5), (4, 5))) + + def test_io_success(self): + """ + Tests creation and insertion of various nested collection types with models, + and their retrieval. + + @since 3.2 + @jira_ticket PYTHON-378 + @expected_result Model is successfully created and fetched correctly + + @test_category object_mapper + """ + list_list_master = [[1, 2, 3], [2, 3, 4], [3, 4, 5]] + map_list_master = {'key1': ["text1", "text2", "text3"], "key2": ["text1", "text2", "text3"], "key3": ["text1", "text2", "text3"]} + set_tuple_master = set(((1, 2), (3, 5), (4, 5))) + + m1 = TestNestedModel.create(list_list=list_list_master, map_list=map_list_master, set_tuple=set_tuple_master) + m2 = TestNestedModel.get(partition=m1.partition) + + self.assertIsInstance(m2.list_list, list) + self.assertIsInstance(m2.list_list[0], list) + self.assertIsInstance(m2.map_list, dict) + self.assertIsInstance(m2.map_list.get("key2"), list) + + self.assertEqual(list_list_master, m2.list_list) + self.assertEqual(map_list_master, m2.map_list) + self.assertEqual(set_tuple_master, m2.set_tuple) + self.assertIsInstance(m2.set_tuple.pop(), tuple) + + def test_type_validation(self): + """ + Tests to make sure nested collection type validation occurs + + @since 3.2 + @jira_ticket PYTHON-478 + @expected_result validation errors are raised + + @test_category object_mapper + """ + list_list_bad_list_context = [['text', "text", "text"], ["text", "text", "text"], ["text", "text", "text"]] + list_list_no_list = ['text', "text", "text"] + + map_list_bad_value = {'key1': [1, 2, 3], "key2": [1, 2, 3], "key3": [1, 2, 3]} + map_list_bad_key = {1: ["text1", "text2", "text3"], 2: ["text1", "text2", "text3"], 3: ["text1", "text2", "text3"]} + + set_tuple_bad_tuple_value = set((("text", "text"), ("text", "text"), ("text", "text"))) + set_tuple_not_set = ['This', 'is', 'not', 'a', 'set'] + + self.assertRaises(ValidationError, TestNestedModel.create, **{'list_list': list_list_bad_list_context}) + self.assertRaises(ValidationError, TestNestedModel.create, **{'list_list': list_list_no_list}) + self.assertRaises(ValidationError, TestNestedModel.create, **{'map_list': map_list_bad_value}) + self.assertRaises(ValidationError, TestNestedModel.create, **{'map_list': map_list_bad_key}) + self.assertRaises(ValidationError, TestNestedModel.create, **{'set_tuple': set_tuple_bad_tuple_value}) + self.assertRaises(ValidationError, TestNestedModel.create, **{'set_tuple': set_tuple_not_set}) + + def test_instantiation_with_column_class(self): + """ + Tests that columns instantiated with a column class work properly + and that the class is instantiated in the constructor + + @since 3.2 + @jira_ticket PYTHON-478 + @expected_result types are instantiated correctly + + @test_category object_mapper + """ + list_list = columns.List(columns.List(columns.Integer), required=False) + map_list = columns.Map(columns.Text, columns.List(columns.Text), required=False) + set_tuple = columns.Set(columns.Tuple(columns.Integer, columns.Integer), required=False) + + self.assertIsInstance(list_list, columns.List) + self.assertIsInstance(list_list.types[0], columns.List) + self.assertIsInstance(map_list.types[0], columns.Text) + self.assertIsInstance(map_list.types[1], columns.List) + self.assertIsInstance(set_tuple.types[0], columns.Tuple) + + def test_default_empty_container_saving(self): + """ + Tests that the default empty nested collection container is not saved if it hasn't been updated + + @since 3.2 + @jira_ticket PYTHON-478 + @expected_result empty nested collection is not upserted + + @test_category object_mapper + """ + pkey = uuid4() + # create a row with tuple data + list_list_master = [[1, 2, 3], [2, 3, 4], [3, 4, 5]] + map_list_master = {'key1': ["text1", "text2", "text3"], "key2": ["text1", "text2", "text3"], "key3": ["text1", "text2", "text3"]} + set_tuple_master = set(((1, 2), (3, 5), (4, 5))) + + TestNestedModel.create(partition=pkey, list_list=list_list_master, map_list=map_list_master, set_tuple=set_tuple_master) + # create another with no tuple data + TestNestedModel.create(partition=pkey) + + m = TestNestedModel.get(partition=pkey) + self.assertEqual(m.list_list, list_list_master) + self.assertEqual(m.map_list, map_list_master) + self.assertEqual(m.set_tuple, set_tuple_master) + + def test_updates(self): + """ + Tests that updates can be preformed on nested collections + + @since 3.2 + @jira_ticket PYTHON-478 + @expected_result nested collection is replaced + + @test_category object_mapper + """ + list_list_initial = [[1, 2, 3], [2, 3, 4], [3, 4, 5]] + list_list_replacement = [[1, 2, 3], [3, 4, 5]] + set_tuple_initial = set(((1, 2), (3, 5), (4, 5))) + + map_list_initial = {'key1': ["text1", "text2", "text3"], "key2": ["text1", "text2", "text3"], "key3": ["text1", "text2", "text3"]} + map_list_replacement = {'key1': ["text1", "text2", "text3"], "key3": ["text1", "text2", "text3"]} + set_tuple_replacement = set(((7, 7), (7, 7), (4, 5))) + + m1 = TestNestedModel.create(list_list=list_list_initial, map_list=map_list_initial, set_tuple=set_tuple_initial) + m1.list_list = list_list_replacement + m1.map_list = map_list_replacement + m1.set_tuple = set_tuple_replacement + m1.save() + + m2 = TestNestedModel.get(partition=m1.partition) + self.assertEqual(m2.list_list, list_list_replacement) + self.assertEqual(m2.map_list, map_list_replacement) + self.assertEqual(m2.set_tuple, set_tuple_replacement) + + def test_update_from_non_empty_to_empty(self): + """ + Tests that explcit none updates are processed correctly on tuples + + @since 3.2 + @jira_ticket PYTHON-478 + @expected_result nested collection is replaced with none + + @test_category object_mapper + """ + list_list_initial = [[1, 2, 3], [2, 3, 4], [3, 4, 5]] + map_list_initial = {'key1': ["text1", "text2", "text3"], "key2": ["text1", "text2", "text3"], "key3": ["text1", "text2", "text3"]} + set_tuple_initial = set(((1, 2), (3, 5), (4, 5))) + tmp = TestNestedModel.create(list_list=list_list_initial, map_list=map_list_initial, set_tuple=set_tuple_initial) + tmp.list_list = [] + tmp.map_list = {} + tmp.set_tuple = set() + tmp.update() + + tmp = TestNestedModel.get(partition=tmp.partition) + self.assertEqual(tmp.list_list, []) + self.assertEqual(tmp.map_list, {}) + self.assertEqual(tmp.set_tuple, set()) + + def test_insert_none(self): + """ + Tests that Tuples can be created as none + + @since 3.2 + @jira_ticket PYTHON-478 + @expected_result nested collection is created as none + + @test_category object_mapper + """ + pkey = uuid4() + tmp = TestNestedModel.create(partition=pkey, list_list=(None), map_list=(None), set_tuple=(None)) + self.assertEqual([], tmp.list_list) + self.assertEqual({}, tmp.map_list) + self.assertEqual(set(), tmp.set_tuple) + + From 49079ddc51da5a2ec0b040c246f07a7edca97f09 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 24 Feb 2016 13:36:26 -0600 Subject: [PATCH 0366/2005] PYTHON-258 tests error and negative scenarios --- .../cqlengine/query/test_queryset.py | 67 +++++++++++++++++-- 1 file changed, 61 insertions(+), 6 deletions(-) diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 43ba5ac30b..5f43d80b9d 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -32,6 +32,7 @@ from cassandra.cqlengine.models import Model from cassandra.cqlengine import columns from cassandra.cqlengine import query +from cassandra.cqlengine.query import QueryException from datetime import timedelta from datetime import tzinfo @@ -81,6 +82,10 @@ class IndexedTestModel(Model): test_set = columns.Set(columns.Integer, index=True) test_map = columns.Map(columns.Text, columns.Integer, index=True) + test_list_no_index = columns.List(columns.Integer, index=False) + test_set_no_index = columns.Set(columns.Integer, index=False) + test_map_no_index = columns.Map(columns.Text, columns.Integer, index=False) + class TestMultiClusteringModel(Model): @@ -266,6 +271,10 @@ def setUpClass(cls): test_result=45, test_list=[1, 2, 3], test_set=set([1, 2, 3]), test_map={'1': 1, '2': 2, '3': 42}) + IndexedTestModel.objects.create(test_id=15, attempt_id=4, description='list14', expected_result=75, + test_result=45, test_list_no_index=[1, 2, 3], test_set_no_index=set([1, 2, 3]), + test_map_no_index={'1': 1, '2': 2, '3': 42}) + @classmethod def tearDownClass(cls): super(BaseQuerySetUsage, cls).tearDownClass() @@ -292,7 +301,7 @@ def test_query_expression_count(self): def test_iteration(self): """ Tests that iterating over a query set pulls back all of the expected results """ q = TestModel.objects(test_id=0) - #tuple of expected attempt_id, expected_result values + # tuple of expected attempt_id, expected_result values compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)]) for t in q: val = t.attempt_id, t.expected_result @@ -303,7 +312,7 @@ def test_iteration(self): # test with regular filtering q = TestModel.objects(attempt_id=3).allow_filtering() assert len(q) == 3 - #tuple of expected test_id, expected_result values + # tuple of expected test_id, expected_result values compare_set = set([(0, 20), (1, 20), (2, 75)]) for t in q: val = t.test_id, t.expected_result @@ -314,7 +323,7 @@ def test_iteration(self): # test with query method q = TestModel.objects(TestModel.attempt_id == 3).allow_filtering() assert len(q) == 3 - #tuple of expected test_id, expected_result values + # tuple of expected test_id, expected_result values compare_set = set([(0, 20), (1, 20), (2, 75)]) for t in q: val = t.test_id, t.expected_result @@ -326,7 +335,7 @@ def test_multiple_iterations_work_properly(self): """ Tests that iterating over a query set more than once works """ # test with both the filtering method and the query method for q in (TestModel.objects(test_id=0), TestModel.objects(TestModel.test_id == 0)): - #tuple of expected attempt_id, expected_result values + # tuple of expected attempt_id, expected_result values compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)]) for t in q: val = t.attempt_id, t.expected_result @@ -334,7 +343,7 @@ def test_multiple_iterations_work_properly(self): compare_set.remove(val) assert len(compare_set) == 0 - #try it again + # try it again compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)]) for t in q: val = t.attempt_id, t.expected_result @@ -550,7 +559,6 @@ def test_primary_key_or_index_must_have_equal_relation_filter(self): q = TestModel.objects(test_id__gt=0) list([i for i in q]) - def test_indexed_field_can_be_queried(self): """ Tests that queries on an indexed field will work without any primary key relations specified @@ -561,12 +569,21 @@ def test_indexed_field_can_be_queried(self): q = IndexedTestModel.objects.filter(test_list__contains=42) self.assertEqual(q.count(), 1) + q = IndexedTestModel.objects.filter(test_list__contains=13) + self.assertEqual(q.count(), 0) + q = IndexedTestModel.objects.filter(test_set__contains=42) self.assertEqual(q.count(), 1) + q = IndexedTestModel.objects.filter(test_set__contains=13) + self.assertEqual(q.count(), 0) + q = IndexedTestModel.objects.filter(test_map__contains=42) self.assertEqual(q.count(), 1) + q = IndexedTestModel.objects.filter(test_map__contains=13) + self.assertEqual(q.count(), 0) + class TestQuerySetDelete(BaseQuerySetUsage): def test_delete(self): @@ -722,24 +739,61 @@ def test_kwarg_success_case(self): q = IndexedTestModel.filter(test_list__contains=1) self.assertEqual(q.count(), 2) + q = IndexedTestModel.filter(test_list__contains=13) + self.assertEqual(q.count(), 0) + q = IndexedTestModel.filter(test_set__contains=3) self.assertEqual(q.count(), 2) + q = IndexedTestModel.filter(test_set__contains=13) + self.assertEqual(q.count(), 0) + q = IndexedTestModel.filter(test_map__contains=42) self.assertEqual(q.count(), 1) + q = IndexedTestModel.filter(test_map__contains=13) + self.assertEqual(q.count(), 0) + + with self.assertRaises(QueryException): + q = IndexedTestModel.filter(test_list_no_index__contains=1) + self.assertEqual(q.count(), 0) + with self.assertRaises(QueryException): + q = IndexedTestModel.filter(test_set_no_index__contains=1) + self.assertEqual(q.count(), 0) + with self.assertRaises(QueryException): + q = IndexedTestModel.filter(test_map_no_index__contains=1) + self.assertEqual(q.count(), 0) def test_query_expression_success_case(self): """ Tests the CONTAINS operator works with the query expression query method """ q = IndexedTestModel.filter(IndexedTestModel.test_list.contains_(1)) self.assertEqual(q.count(), 2) + q = IndexedTestModel.filter(IndexedTestModel.test_list.contains_(13)) + self.assertEqual(q.count(), 0) + q = IndexedTestModel.filter(IndexedTestModel.test_set.contains_(3)) self.assertEqual(q.count(), 2) + q = IndexedTestModel.filter(IndexedTestModel.test_set.contains_(13)) + self.assertEqual(q.count(), 0) + q = IndexedTestModel.filter(IndexedTestModel.test_map.contains_(42)) self.assertEqual(q.count(), 1) + q = IndexedTestModel.filter(IndexedTestModel.test_map.contains_(13)) + self.assertEqual(q.count(), 0) + + with self.assertRaises(QueryException): + q = IndexedTestModel.filter(IndexedTestModel.test_map_no_index.contains_(1)) + self.assertEqual(q.count(), 0) + with self.assertRaises(QueryException): + q = IndexedTestModel.filter(IndexedTestModel.test_map_no_index.contains_(1)) + self.assertEqual(q.count(), 0) + with self.assertRaises(QueryException): + q = IndexedTestModel.filter(IndexedTestModel.test_map_no_index.contains_(1)) + self.assertEqual(q.count(), 0) + class TestValuesList(BaseQuerySetUsage): def test_values_list(self): @@ -757,6 +811,7 @@ def test_objects_property_returns_fresh_queryset(self): len(TestModel.objects) # evaluate queryset assert TestModel.objects._result_cache is None + class PageQueryTests(BaseCassEngTestCase): def test_paged_result_handling(self): if PROTOCOL_VERSION < 2: From 9b4e915965341588e74e1fff422dfa4b1732ec4e Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 24 Feb 2016 17:17:50 -0600 Subject: [PATCH 0367/2005] Adding checks for empty, and invalid distinct queries --- .../cqlengine/query/test_queryset.py | 102 +++++++++++------- 1 file changed, 64 insertions(+), 38 deletions(-) diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 5f43d80b9d..13290967c1 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -22,8 +22,10 @@ import time from uuid import uuid1, uuid4 import uuid +import sys from cassandra.cluster import Session +from cassandra import InvalidRequest from tests.integration.cqlengine.base import BaseCassEngTestCase from cassandra.cqlengine.connection import NOT_SET import mock @@ -41,7 +43,7 @@ from cassandra.util import uuid_from_time from cassandra.cqlengine.connection import get_session -from tests.integration import PROTOCOL_VERSION +from tests.integration import PROTOCOL_VERSION, CASSANDRA_VERSION, greaterthancass20 class TzOffset(tzinfo): @@ -78,6 +80,15 @@ class IndexedTestModel(Model): description = columns.Text() expected_result = columns.Integer() test_result = columns.Integer(index=True) + + +class IndexedCollectionsTestModel(Model): + + test_id = columns.Integer(primary_key=True) + attempt_id = columns.Integer(index=True) + description = columns.Text() + expected_result = columns.Integer() + test_result = columns.Integer(index=True) test_list = columns.List(columns.Integer, index=True) test_set = columns.Set(columns.Integer, index=True) test_map = columns.Map(columns.Text, columns.Integer, index=True) @@ -224,6 +235,7 @@ def setUpClass(cls): super(BaseQuerySetUsage, cls).setUpClass() drop_table(TestModel) drop_table(IndexedTestModel) + sync_table(TestModel) sync_table(IndexedTestModel) sync_table(TestMultiClusteringModel) @@ -261,19 +273,22 @@ def setUpClass(cls): IndexedTestModel.objects.create(test_id=11, attempt_id=3, description='try12', expected_result=75, test_result=45) - IndexedTestModel.objects.create(test_id=12, attempt_id=3, description='list12', expected_result=75, - test_result=45, test_list=[1, 2, 42], test_set=set([1, 2, 3]), - test_map={'1': 1, '2': 2, '3': 3}) - IndexedTestModel.objects.create(test_id=13, attempt_id=3, description='list13', expected_result=75, - test_result=45, test_list=[3, 4, 5], test_set=set([4, 5, 42]), - test_map={'1': 5, '2': 6, '3': 7}) - IndexedTestModel.objects.create(test_id=14, attempt_id=3, description='list14', expected_result=75, - test_result=45, test_list=[1, 2, 3], test_set=set([1, 2, 3]), - test_map={'1': 1, '2': 2, '3': 42}) - - IndexedTestModel.objects.create(test_id=15, attempt_id=4, description='list14', expected_result=75, - test_result=45, test_list_no_index=[1, 2, 3], test_set_no_index=set([1, 2, 3]), - test_map_no_index={'1': 1, '2': 2, '3': 42}) + if(CASSANDRA_VERSION >= '2.1'): + drop_table(IndexedCollectionsTestModel) + sync_table(IndexedCollectionsTestModel) + IndexedCollectionsTestModel.objects.create(test_id=12, attempt_id=3, description='list12', expected_result=75, + test_result=45, test_list=[1, 2, 42], test_set=set([1, 2, 3]), + test_map={'1': 1, '2': 2, '3': 3}) + IndexedCollectionsTestModel.objects.create(test_id=13, attempt_id=3, description='list13', expected_result=75, + test_result=45, test_list=[3, 4, 5], test_set=set([4, 5, 42]), + test_map={'1': 5, '2': 6, '3': 7}) + IndexedCollectionsTestModel.objects.create(test_id=14, attempt_id=3, description='list14', expected_result=75, + test_result=45, test_list=[1, 2, 3], test_set=set([1, 2, 3]), + test_map={'1': 1, '2': 2, '3': 42}) + + IndexedCollectionsTestModel.objects.create(test_id=15, attempt_id=4, description='list14', expected_result=75, + test_result=45, test_list_no_index=[1, 2, 3], test_set_no_index=set([1, 2, 3]), + test_map_no_index={'1': 1, '2': 2, '3': 42}) @classmethod def tearDownClass(cls): @@ -461,6 +476,15 @@ def test_distinct_with_filter(self): q = TestModel.objects.distinct(['test_id']).filter(test_id__in=[1,2]) self.assertEqual(len(q), 2) + def test_distinct_with_non_partition(self): + with self.assertRaises(InvalidRequest): + q = TestModel.objects.distinct(['description']).filter(test_id__in=[1, 2]) + len(q) + + def test_zero_result(self): + q = TestModel.objects.distinct(['test_id']).filter(test_id__in=[52]) + self.assertEqual(len(q), 0) + class TestQuerySetOrdering(BaseQuerySetUsage): @@ -559,6 +583,7 @@ def test_primary_key_or_index_must_have_equal_relation_filter(self): q = TestModel.objects(test_id__gt=0) list([i for i in q]) + @greaterthancass20 def test_indexed_field_can_be_queried(self): """ Tests that queries on an indexed field will work without any primary key relations specified @@ -566,22 +591,22 @@ def test_indexed_field_can_be_queried(self): q = IndexedTestModel.objects(test_result=25) self.assertEqual(q.count(), 4) - q = IndexedTestModel.objects.filter(test_list__contains=42) + q = IndexedCollectionsTestModel.objects.filter(test_list__contains=42) self.assertEqual(q.count(), 1) - q = IndexedTestModel.objects.filter(test_list__contains=13) + q = IndexedCollectionsTestModel.objects.filter(test_list__contains=13) self.assertEqual(q.count(), 0) - q = IndexedTestModel.objects.filter(test_set__contains=42) + q = IndexedCollectionsTestModel.objects.filter(test_set__contains=42) self.assertEqual(q.count(), 1) - q = IndexedTestModel.objects.filter(test_set__contains=13) + q = IndexedCollectionsTestModel.objects.filter(test_set__contains=13) self.assertEqual(q.count(), 0) - q = IndexedTestModel.objects.filter(test_map__contains=42) + q = IndexedCollectionsTestModel.objects.filter(test_map__contains=42) self.assertEqual(q.count(), 1) - q = IndexedTestModel.objects.filter(test_map__contains=13) + q = IndexedCollectionsTestModel.objects.filter(test_map__contains=13) self.assertEqual(q.count(), 0) @@ -733,65 +758,66 @@ def test_query_expression_success_case(self): assert q.count() == 8 +@greaterthancass20 class TestContainsOperator(BaseQuerySetUsage): def test_kwarg_success_case(self): """ Tests the CONTAINS operator works with the kwarg query method """ - q = IndexedTestModel.filter(test_list__contains=1) + q = IndexedCollectionsTestModel.filter(test_list__contains=1) self.assertEqual(q.count(), 2) - q = IndexedTestModel.filter(test_list__contains=13) + q = IndexedCollectionsTestModel.filter(test_list__contains=13) self.assertEqual(q.count(), 0) - q = IndexedTestModel.filter(test_set__contains=3) + q = IndexedCollectionsTestModel.filter(test_set__contains=3) self.assertEqual(q.count(), 2) - q = IndexedTestModel.filter(test_set__contains=13) + q = IndexedCollectionsTestModel.filter(test_set__contains=13) self.assertEqual(q.count(), 0) - q = IndexedTestModel.filter(test_map__contains=42) + q = IndexedCollectionsTestModel.filter(test_map__contains=42) self.assertEqual(q.count(), 1) - q = IndexedTestModel.filter(test_map__contains=13) + q = IndexedCollectionsTestModel.filter(test_map__contains=13) self.assertEqual(q.count(), 0) with self.assertRaises(QueryException): - q = IndexedTestModel.filter(test_list_no_index__contains=1) + q = IndexedCollectionsTestModel.filter(test_list_no_index__contains=1) self.assertEqual(q.count(), 0) with self.assertRaises(QueryException): - q = IndexedTestModel.filter(test_set_no_index__contains=1) + q = IndexedCollectionsTestModel.filter(test_set_no_index__contains=1) self.assertEqual(q.count(), 0) with self.assertRaises(QueryException): - q = IndexedTestModel.filter(test_map_no_index__contains=1) + q = IndexedCollectionsTestModel.filter(test_map_no_index__contains=1) self.assertEqual(q.count(), 0) def test_query_expression_success_case(self): """ Tests the CONTAINS operator works with the query expression query method """ - q = IndexedTestModel.filter(IndexedTestModel.test_list.contains_(1)) + q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_list.contains_(1)) self.assertEqual(q.count(), 2) - q = IndexedTestModel.filter(IndexedTestModel.test_list.contains_(13)) + q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_list.contains_(13)) self.assertEqual(q.count(), 0) - q = IndexedTestModel.filter(IndexedTestModel.test_set.contains_(3)) + q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_set.contains_(3)) self.assertEqual(q.count(), 2) - q = IndexedTestModel.filter(IndexedTestModel.test_set.contains_(13)) + q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_set.contains_(13)) self.assertEqual(q.count(), 0) - q = IndexedTestModel.filter(IndexedTestModel.test_map.contains_(42)) + q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_map.contains_(42)) self.assertEqual(q.count(), 1) - q = IndexedTestModel.filter(IndexedTestModel.test_map.contains_(13)) + q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_map.contains_(13)) self.assertEqual(q.count(), 0) with self.assertRaises(QueryException): - q = IndexedTestModel.filter(IndexedTestModel.test_map_no_index.contains_(1)) + q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_map_no_index.contains_(1)) self.assertEqual(q.count(), 0) with self.assertRaises(QueryException): - q = IndexedTestModel.filter(IndexedTestModel.test_map_no_index.contains_(1)) + q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_map_no_index.contains_(1)) self.assertEqual(q.count(), 0) with self.assertRaises(QueryException): - q = IndexedTestModel.filter(IndexedTestModel.test_map_no_index.contains_(1)) + q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_map_no_index.contains_(1)) self.assertEqual(q.count(), 0) From 88a72f4586877bf82f00b5703f119f5a7c1507e7 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 25 Feb 2016 12:26:19 -0600 Subject: [PATCH 0368/2005] skipping tuple tests when C* version < 2.1 --- .../cqlengine/columns/test_container_columns.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/tests/integration/cqlengine/columns/test_container_columns.py b/tests/integration/cqlengine/columns/test_container_columns.py index cba1ced44f..23a9791e5c 100644 --- a/tests/integration/cqlengine/columns/test_container_columns.py +++ b/tests/integration/cqlengine/columns/test_container_columns.py @@ -27,6 +27,7 @@ from cassandra.cqlengine.management import sync_table, drop_table from tests.integration.cqlengine import is_prepend_reversed from tests.integration.cqlengine.base import BaseCassEngTestCase +from tests.integration import greaterthancass20, CASSANDRA_VERSION log = logging.getLogger(__name__) @@ -547,12 +548,15 @@ class TestTupleModel(Model): mixed_tuple = columns.Tuple(columns.Text, columns.Integer, columns.Text, required=False) +@greaterthancass20 class TestTupleColumn(BaseCassEngTestCase): @classmethod def setUpClass(cls): - drop_table(TestTupleModel) - sync_table(TestTupleModel) + # Skip annotations don't seem to skip class level teradown and setup methods + if(CASSANDRA_VERSION >= '2.1'): + drop_table(TestTupleModel) + sync_table(TestTupleModel) @classmethod def tearDownClass(cls): @@ -744,12 +748,15 @@ class TestNestedModel(Model): set_tuple = columns.Set(columns.Tuple(columns.Integer, columns.Integer), required=False) +@greaterthancass20 class TestNestedType(BaseCassEngTestCase): @classmethod def setUpClass(cls): - drop_table(TestNestedModel) - sync_table(TestNestedModel) + # Skip annotations don't seem to skip class level teradown and setup methods + if(CASSANDRA_VERSION >= '2.1'): + drop_table(TestNestedModel) + sync_table(TestNestedModel) @classmethod def tearDownClass(cls): From 3487a83202627f000dd6215e9c386a1346840055 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 25 Feb 2016 16:06:25 -0600 Subject: [PATCH 0369/2005] cqle: allow db_field for fields in UDTs PYTHON-346 Previously UDTs would not map properly, resulting in None for all fields inserted or queried. --- cassandra/cqlengine/models.py | 2 +- cassandra/cqlengine/usertype.py | 17 +++++--- cassandra/protocol.py | 2 + .../integration/cqlengine/model/test_udts.py | 40 ++++++++++++++++++- 4 files changed, 53 insertions(+), 8 deletions(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 6f9f61c6e8..4cde958d96 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -384,7 +384,7 @@ def _construct_instance(cls, values): # and translate that into our local fields # the db_map is a db_field -> model field map items = values.items() - field_dict = dict([(cls._db_map.get(k, k), v) for k, v in items]) + field_dict = dict((cls._db_map.get(k, k), v) for k, v in items) if cls._is_polymorphic: disc_key = field_dict.get(cls._discriminator_column_name) diff --git a/cassandra/cqlengine/usertype.py b/cassandra/cqlengine/usertype.py index dcc3fc7662..5b26f1f8c2 100644 --- a/cassandra/cqlengine/usertype.py +++ b/cassandra/cqlengine/usertype.py @@ -28,6 +28,9 @@ class BaseUserType(object): def __init__(self, **values): self._values = {} + if self._db_map: + values = dict((self._db_map.get(k, k), v) for k, v in values.items()) + for name, field in self._fields.items(): value = values.get(name, None) if value is not None or isinstance(field, columns.BaseContainerColumn): @@ -68,6 +71,13 @@ def __iter__(self): for field in self._fields.keys(): yield field + def __getattr__(self, attr): + # provides the mapping from db_field to fields + try: + return getattr(self, self._db_map[attr]) + except KeyError: + raise AttributeError(attr) + def __getitem__(self, key): if not isinstance(key, six.string_types): raise TypeError @@ -158,13 +168,8 @@ def _transform_column(field_name, field_obj): raise UserTypeDefinitionException("field '{0}' conflicts with built-in attribute/method".format(k)) _transform_column(k, v) - # create db_name -> model name map for loading - db_map = {} - for field_name, field in field_dict.items(): - db_map[field.db_field_name] = field_name - attrs['_fields'] = field_dict - attrs['_db_map'] = db_map + attrs['_db_map'] = dict((field.db_field_name, name) for name, field in field_dict.items() if field.db_field_name != name) klass = super(UserTypeMetaClass, cls).__new__(cls, name, bases, attrs) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 633c9734db..d71ab458eb 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -1048,6 +1048,8 @@ class CythonProtocolHandler(ProtocolHandler): my_opcodes[FastResultMessage.opcode] = FastResultMessage message_types_by_opcode = my_opcodes + col_parser = colparser + return CythonProtocolHandler diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index 28d2091ba2..1d3db48b93 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -23,7 +23,7 @@ from cassandra.cqlengine.models import Model from cassandra.cqlengine.usertype import UserType -from cassandra.cqlengine import columns +from cassandra.cqlengine import columns, connection from cassandra.cqlengine.management import sync_table, sync_type, create_keyspace_simple, drop_keyspace from cassandra.util import Date, Time @@ -445,3 +445,41 @@ class User(UserType): cluster = Mock() connection._register_known_types(cluster) cluster.register_user_type.assert_called_with(models.DEFAULT_KEYSPACE, User.type_name(), User) + + def test_db_field_override(self): + class db_field_different(UserType): + age = columns.Integer(db_field='a') + name = columns.Text(db_field='n') + + class TheModel(Model): + id = columns.Integer(primary_key=True) + info = columns.UserDefinedType(db_field_different) + + sync_table(TheModel) + + cluster = connection.get_cluster() + type_meta = cluster.metadata.keyspaces[TheModel._get_keyspace()].user_types[db_field_different.type_name()] + + type_fields = (db_field_different.age.column, db_field_different.name.column) + self.assertEqual(len(type_meta.field_names), len(type_fields)) + for f in type_fields: + self.assertIn(f.db_field_name, type_meta.field_names) + + id = 0 + age = 42 + name = 'John' + info = db_field_different(age=age, name=name) + TheModel.create(id=id, info=info) + + self.assertEqual(1, TheModel.objects.count()) + + john = TheModel.objects().first() + self.assertEqual(john.id, id) + info = john.info + self.assertIsInstance(info, db_field_different) + self.assertEqual(info.age, age) + self.assertEqual(info.name, name) + + # also excercise the db_Field mapping + self.assertEqual(info.a, age) + self.assertEqual(info.n, name) From baa5a72c5edbf056ff88b092458799d57e28c1fa Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 26 Feb 2016 09:38:38 -0600 Subject: [PATCH 0370/2005] cqle: reject user types with db_fields overloading field names PYTHON-346 --- cassandra/cqlengine/usertype.py | 11 +++++++++-- tests/integration/cqlengine/model/test_udts.py | 8 +++++++- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/cassandra/cqlengine/usertype.py b/cassandra/cqlengine/usertype.py index 5b26f1f8c2..edba123df9 100644 --- a/cassandra/cqlengine/usertype.py +++ b/cassandra/cqlengine/usertype.py @@ -27,7 +27,6 @@ class BaseUserType(object): def __init__(self, **values): self._values = {} - if self._db_map: values = dict((self._db_map.get(k, k), v) for k, v in values.items()) @@ -169,7 +168,15 @@ def _transform_column(field_name, field_obj): _transform_column(k, v) attrs['_fields'] = field_dict - attrs['_db_map'] = dict((field.db_field_name, name) for name, field in field_dict.items() if field.db_field_name != name) + + db_map = {} + for field_name, field in field_dict.items(): + db_field = field.db_field_name + if db_field != field_name: + if db_field in field_dict: + raise UserTypeDefinitionException("db_field '{0}' for field '{1}' conflicts with another attribute name".format(db_field, field_name)) + db_map[db_field] = field_name + attrs['_db_map'] = db_map klass = super(UserTypeMetaClass, cls).__new__(cls, name, bases, attrs) diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index 1d3db48b93..86e2970f93 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -22,7 +22,7 @@ from uuid import UUID, uuid4 from cassandra.cqlengine.models import Model -from cassandra.cqlengine.usertype import UserType +from cassandra.cqlengine.usertype import UserType, UserTypeDefinitionException from cassandra.cqlengine import columns, connection from cassandra.cqlengine.management import sync_table, sync_type, create_keyspace_simple, drop_keyspace from cassandra.util import Date, Time @@ -483,3 +483,9 @@ class TheModel(Model): # also excercise the db_Field mapping self.assertEqual(info.a, age) self.assertEqual(info.n, name) + + def test_db_field_overload(self): + with self.assertRaises(UserTypeDefinitionException): + class something_silly(UserType): + first_col = columns.Integer() + second_col = columns.Text(db_field='first_col') From 13b47a32c99acb6342c00ec521881bccec3e1fd3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 26 Feb 2016 10:47:01 -0600 Subject: [PATCH 0371/2005] cqle: don't pay for db_field mapping if there isn't any --- cassandra/cqlengine/models.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 4cde958d96..b5632d31ad 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -383,11 +383,11 @@ def _construct_instance(cls, values): # we're going to take the values, which is from the DB as a dict # and translate that into our local fields # the db_map is a db_field -> model field map - items = values.items() - field_dict = dict((cls._db_map.get(k, k), v) for k, v in items) + if cls._db_map: + values = dict((cls._db_map.get(k, k), v) for k, v in values.items()) if cls._is_polymorphic: - disc_key = field_dict.get(cls._discriminator_column_name) + disc_key = values.get(cls._discriminator_column_name) if disc_key is None: raise PolymorphicModelException('discriminator value was not found in values') @@ -408,12 +408,12 @@ def _construct_instance(cls, values): '{0} is not a subclass of {1}'.format(klass.__name__, cls.__name__) ) - field_dict = dict((k, v) for k, v in field_dict.items() if k in klass._columns.keys()) + values = dict((k, v) for k, v in values.items() if k in klass._columns.keys()) else: klass = cls - instance = klass(**field_dict) + instance = klass(**values) instance._set_persisted() return instance @@ -854,8 +854,12 @@ def _get_polymorphic_base(bases): # create db_name -> model name map for loading db_map = {} - for field_name, col in column_dict.items(): - db_map[col.db_field_name] = field_name + for col_name, field in column_dict.items(): + db_field = field.db_field_name + if db_field != col_name: + if db_field in column_dict: + raise ModelDefinitionException("db_field '{0}' for column '{1}' conflicts with another attribute name".format(db_field, col_name)) + db_map[db_field] = col_name # add management members to the class attrs['_columns'] = column_dict From 9e3222576474125975c643060db60290472752b4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 26 Feb 2016 12:04:08 -0600 Subject: [PATCH 0372/2005] cqle/test: correct test for duplicate column names --- tests/integration/cqlengine/model/test_class_construction.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/integration/cqlengine/model/test_class_construction.py b/tests/integration/cqlengine/model/test_class_construction.py index 7b333a58b2..036d50e55a 100644 --- a/tests/integration/cqlengine/model/test_class_construction.py +++ b/tests/integration/cqlengine/model/test_class_construction.py @@ -69,9 +69,9 @@ def test_attempting_to_make_duplicate_column_names_fails(self): Tests that trying to create conflicting db column names will fail """ - with self.assertRaises(ModelException): + with self.assertRaisesRegex(ModelException, r".*more than once$"): class BadNames(Model): - words = columns.Text() + words = columns.Text(primary_key=True) content = columns.Text(db_field='words') def test_column_ordering_is_preserved(self): @@ -95,7 +95,6 @@ class TestModel(Model): count = columns.Integer() text = columns.Text(required=False) - def test_value_managers_are_keeping_model_instances_isolated(self): """ Tests that instance value managers are isolated from other instances From 4a28faf38a80f9feea2a8ec6fccbf893a073a434 Mon Sep 17 00:00:00 2001 From: Marc Zbyszynski Date: Fri, 26 Feb 2016 13:09:57 -0500 Subject: [PATCH 0373/2005] Corrected incorrect reference to self._columns in UserType --- cassandra/cqlengine/usertype.py | 2 +- tests/integration/cqlengine/model/test_udts.py | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/usertype.py b/cassandra/cqlengine/usertype.py index edba123df9..7d753e898d 100644 --- a/cassandra/cqlengine/usertype.py +++ b/cassandra/cqlengine/usertype.py @@ -95,7 +95,7 @@ def __len__(self): try: return self._len except: - self._len = len(self._columns.keys()) + self._len = len(self._fields.keys()) return self._len def keys(self): diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index 86e2970f93..e5165a857a 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -489,3 +489,12 @@ def test_db_field_overload(self): class something_silly(UserType): first_col = columns.Integer() second_col = columns.Text(db_field='first_col') + + def test_set_udt_fields(self): + class User(UserType): + age = columns.Integer() + name = columns.Text() + + u = User() + u.age = 20 + self.assertEqual(20, u.age) From 062d536094554d6a5e88a433dc70d898d08d7c53 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 26 Feb 2016 12:10:18 -0600 Subject: [PATCH 0374/2005] cqle: better model exception formatting also remove conditional for alreay-covered duplicate columns --- cassandra/cqlengine/models.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index b5632d31ad..db48b9e30f 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -229,7 +229,6 @@ def __unicode__(self): return self.column.db_field_name def _get_column(self): - """ :rtype: ColumnQueryEvaluator """ return self.column @@ -845,11 +844,11 @@ def _get_polymorphic_base(bases): for v in column_dict.values(): # check for duplicate column names if v.db_field_name in col_names: - raise ModelException("{0} defines the column {1} more than once".format(name, v.db_field_name)) + raise ModelException("{0} defines the column '{1}' more than once".format(name, v.db_field_name)) if v.clustering_order and not (v.primary_key and not v.partition_key): raise ModelException("clustering_order may be specified only for clustering primary keys") if v.clustering_order and v.clustering_order.lower() not in ('asc', 'desc'): - raise ModelException("invalid clustering order {0} for column {1}".format(repr(v.clustering_order), v.db_field_name)) + raise ModelException("invalid clustering order '{0}' for column '{1}'".format(repr(v.clustering_order), v.db_field_name)) col_names.add(v.db_field_name) # create db_name -> model name map for loading @@ -857,8 +856,6 @@ def _get_polymorphic_base(bases): for col_name, field in column_dict.items(): db_field = field.db_field_name if db_field != col_name: - if db_field in column_dict: - raise ModelDefinitionException("db_field '{0}' for column '{1}' conflicts with another attribute name".format(db_field, col_name)) db_map[db_field] = col_name # add management members to the class From 237d322b7f293d77b2f008f08eb4c8d7bbe2fb60 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 26 Feb 2016 12:12:34 -0600 Subject: [PATCH 0375/2005] cqle: correct pk validation when db_field is in use PYTHON-351 --- cassandra/cqlengine/models.py | 7 +++++++ cassandra/cqlengine/query.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index db48b9e30f..6880d7c967 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -451,6 +451,13 @@ def _get_column(cls, name): """ return cls._columns[name] + @classmethod + def _get_column_by_db_name(cls, name): + """ + Returns the column, mapped by db_field name + """ + return cls._columns.get(cls._db_map.get(name, name)) + def __eq__(self, other): if self.__class__ != other.__class__: return False diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 62139cde1a..03fc253719 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -841,7 +841,7 @@ class ModelQuerySet(AbstractQuerySet): def _validate_select_where(self): """ Checks that a filterset will not create invalid select statement """ # check that there's either a =, a IN or a CONTAINS (collection) relationship with a primary key or indexed field - equal_ops = [self.model._columns.get(w.field) for w in self._where if isinstance(w.operator, EqualsOperator)] + equal_ops = [self.model._get_column_by_db_name(w.field) for w in self._where if isinstance(w.operator, EqualsOperator)] token_comparison = any([w for w in self._where if isinstance(w.value, Token)]) if not any([w.primary_key or w.index for w in equal_ops]) and not token_comparison and not self._allow_filtering: raise QueryException(('Where clauses require either =, a IN or a CONTAINS (collection) ' From 228d2c39dd60d91705461a19a636b1f7bc0f5e29 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 26 Feb 2016 12:18:34 -0600 Subject: [PATCH 0376/2005] cqle/test: clean up PendingDeprecation methods --- .../integration/cqlengine/columns/test_container_columns.py | 2 +- .../cqlengine/management/test_compaction_settings.py | 6 +++--- tests/integration/cqlengine/management/test_management.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/integration/cqlengine/columns/test_container_columns.py b/tests/integration/cqlengine/columns/test_container_columns.py index 23a9791e5c..cb8801e1db 100644 --- a/tests/integration/cqlengine/columns/test_container_columns.py +++ b/tests/integration/cqlengine/columns/test_container_columns.py @@ -449,7 +449,7 @@ def test_updates_from_none(self): m.save() m2 = TestMapModel.get(partition=m.partition) - self.assertEquals(m2.int_map, expected) + self.assertEqual(m2.int_map, expected) m2.int_map = None m2.save() diff --git a/tests/integration/cqlengine/management/test_compaction_settings.py b/tests/integration/cqlengine/management/test_compaction_settings.py index 4fe349e069..e128348bad 100644 --- a/tests/integration/cqlengine/management/test_compaction_settings.py +++ b/tests/integration/cqlengine/management/test_compaction_settings.py @@ -83,7 +83,7 @@ def test_alter_actually_alters(self): table_meta = _get_table_metadata(tmp) - self.assertRegexpMatches(table_meta.export_as_string(), '.*SizeTieredCompactionStrategy.*') + self.assertRegex(table_meta.export_as_string(), '.*SizeTieredCompactionStrategy.*') def test_alter_options(self): @@ -97,11 +97,11 @@ class AlterTable(Model): drop_table(AlterTable) sync_table(AlterTable) table_meta = _get_table_metadata(AlterTable) - self.assertRegexpMatches(table_meta.export_as_string(), ".*'sstable_size_in_mb': '64'.*") + self.assertRegex(table_meta.export_as_string(), ".*'sstable_size_in_mb': '64'.*") AlterTable.__options__['compaction']['sstable_size_in_mb'] = '128' sync_table(AlterTable) table_meta = _get_table_metadata(AlterTable) - self.assertRegexpMatches(table_meta.export_as_string(), ".*'sstable_size_in_mb': '128'.*") + self.assertRegex(table_meta.export_as_string(), ".*'sstable_size_in_mb': '128'.*") class OptionsTest(BaseCassEngTestCase): diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 93aa8dc11b..5b91d836da 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -209,7 +209,7 @@ def test_bogus_option_update(self): option = 'no way will this ever be an option' try: ModelWithTableProperties.__options__[option] = 'what was I thinking?' - self.assertRaisesRegexp(KeyError, "Invalid table option.*%s.*" % option, sync_table, ModelWithTableProperties) + self.assertRaisesRegex(KeyError, "Invalid table option.*%s.*" % option, sync_table, ModelWithTableProperties) finally: ModelWithTableProperties.__options__.pop(option, None) From 91f436c70cf18e8391f79f0480bb892411bb520b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 26 Feb 2016 14:18:56 -0600 Subject: [PATCH 0377/2005] cqle: fix delete query for db_field and composite parition keys --- cassandra/cqlengine/query.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 03fc253719..b294e896c1 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -780,8 +780,8 @@ def delete(self): Deletes the contents of a query """ # validate where clause - partition_key = [x for x in self.model._primary_keys.values()][0] - if not any([c.field == partition_key.column_name for c in self._where]): + partition_keys = set(x.db_field_name for x in self.model._partition_keys.values()) + if partition_keys - set(c.field for c in self._where): raise QueryException("The partition key must be defined on delete queries") dq = DeleteStatement( From 5defa9ab5012b0d3265e784200a14955b355ec65 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 26 Feb 2016 14:32:17 -0600 Subject: [PATCH 0378/2005] cqle/test: some coverage for query set using db_fields PYTHON-351 --- .../cqlengine/query/test_queryset.py | 79 +++++++++++++++++++ 1 file changed, 79 insertions(+) diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 13290967c1..9d8c7a17e2 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -907,3 +907,82 @@ def test_batch_then_timeout(self): m = self.model.batch(b) with self.assertRaises(AssertionError): m.timeout(0.5) + + +class DBFieldModel(Model): + k0 = columns.Integer(partition_key=True, db_field='a') + k1 = columns.Integer(partition_key=True, db_field='b') + c0 = columns.Integer(primary_key=True, db_field='c') + v0 = columns.Integer(db_field='d') + v1 = columns.Integer(db_field='e', index=True) + + +class TestModelQueryWithDBField(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(TestModelQueryWithDBField, cls).setUpClass() + sync_table(DBFieldModel) + + @classmethod + def tearDownClass(cls): + super(TestModelQueryWithDBField, cls).tearDownClass() + drop_table(DBFieldModel) + + def test_basic_crud(self): + values = {'k0': 1, 'k1': 2, 'c0': 3, 'v0': 4, 'v1': 5} + + # create + i = DBFieldModel.create(**values) + i = DBFieldModel.objects(k0=i.k0, k1=i.k1).first() + self.assertEqual(i, DBFieldModel(**values)) + + # create + values['v0'] = 101 + i.update(v0=values['v0']) + i = DBFieldModel.objects(k0=i.k0, k1=i.k1).first() + self.assertEqual(i, DBFieldModel(**values)) + + # delete + DBFieldModel.objects(k0=i.k0, k1=i.k1).delete() + i = DBFieldModel.objects(k0=i.k0, k1=i.k1).first() + self.assertIsNone(i) + + i = DBFieldModel.create(**values) + i = DBFieldModel.objects(k0=i.k0, k1=i.k1).first() + self.assertEqual(i, DBFieldModel(**values)) + i.delete() + DBFieldModel.objects(k0=i.k0, k1=i.k1).delete() + i = DBFieldModel.objects(k0=i.k0, k1=i.k1).first() + self.assertIsNone(i) + + def test_slice(self): + values = {'k0': 1, 'k1': 3, 'c0': 3, 'v0': 4, 'v1': 5} + clustering_values = range(3) + for c in clustering_values: + values['c0'] = c + i = DBFieldModel.create(**values) + + self.assertEqual(DBFieldModel.objects(k0=i.k0, k1=i.k1).count(), len(clustering_values)) + self.assertEqual(DBFieldModel.objects(k0=i.k0, k1=i.k1, c0=i.c0).count(), 1) + self.assertEqual(DBFieldModel.objects(k0=i.k0, k1=i.k1, c0__lt=i.c0).count(), len(clustering_values[:-1])) + self.assertEqual(DBFieldModel.objects(k0=i.k0, k1=i.k1, c0__gt=0).count(), len(clustering_values[1:])) + + def test_order(self): + values = {'k0': 1, 'k1': 4, 'c0': 3, 'v0': 4, 'v1': 5} + clustering_values = range(3) + for c in clustering_values: + values['c0'] = c + i = DBFieldModel.create(**values) + self.assertEqual(DBFieldModel.objects(k0=i.k0, k1=i.k1).order_by('c0').first().c0, clustering_values[0]) + self.assertEqual(DBFieldModel.objects(k0=i.k0, k1=i.k1).order_by('-c0').first().c0, clustering_values[-1]) + + def test_index(self): + values = {'k0': 1, 'k1': 5, 'c0': 3, 'v0': 4, 'v1': 5} + clustering_values = range(3) + for c in clustering_values: + values['c0'] = c + values['v1'] = c + i = DBFieldModel.create(**values) + self.assertEqual(DBFieldModel.objects(k0=i.k0, k1=i.k1).count(), len(clustering_values)) + self.assertEqual(DBFieldModel.objects(k0=i.k0, k1=i.k1, v1=0).count(), 1) From bdb55d0218148f03a4fba56832f48c11a34b7b67 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 26 Feb 2016 15:07:00 -0600 Subject: [PATCH 0379/2005] Make python ProtocolHandler available even with Cython PYTHON-501 --- cassandra/protocol.py | 11 ++++++----- docs/api/cassandra/protocol.rst | 2 +- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index d71ab458eb..d2b9467348 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -894,15 +894,15 @@ def recv_schema_change(cls, f, protocol_version): return event -class ProtocolHandler(object): +class _ProtocolHandler(object): """ - ProtocolHander handles encoding and decoding messages. + _ProtocolHander handles encoding and decoding messages. This class can be specialized to compose Handlers which implement alternative result decoding or type deserialization. Class definitions are passed to :class:`cassandra.cluster.Cluster` on initialization. - Contracted class methods are :meth:`ProtocolHandler.encode_message` and :meth:`ProtocolHandler.decode_message`. + Contracted class methods are :meth:`_ProtocolHandler.encode_message` and :meth:`_ProtocolHandler.decode_message`. """ message_types_by_opcode = _message_types_by_opcode.copy() @@ -1039,12 +1039,12 @@ class FastResultMessage(ResultMessage): code_to_type = dict((v, k) for k, v in ResultMessage.type_codes.items()) recv_results_rows = classmethod(make_recv_results_rows(colparser)) - class CythonProtocolHandler(ProtocolHandler): + class CythonProtocolHandler(_ProtocolHandler): """ Use FastResultMessage to decode query result message messages. """ - my_opcodes = ProtocolHandler.message_types_by_opcode.copy() + my_opcodes = _ProtocolHandler.message_types_by_opcode.copy() my_opcodes[FastResultMessage.opcode] = FastResultMessage message_types_by_opcode = my_opcodes @@ -1059,6 +1059,7 @@ class CythonProtocolHandler(ProtocolHandler): LazyProtocolHandler = cython_protocol_handler(LazyParser()) else: # Use Python-based ProtocolHandler + ProtocolHandler = _ProtocolHandler LazyProtocolHandler = None diff --git a/docs/api/cassandra/protocol.rst b/docs/api/cassandra/protocol.rst index fe506b2c4b..f3a3cd5ab5 100644 --- a/docs/api/cassandra/protocol.rst +++ b/docs/api/cassandra/protocol.rst @@ -16,7 +16,7 @@ a custom QueryHandler. See :meth:`.Session.execute`, ::meth:`.Session.execute_async`, :attr:`.ResponseFuture.custom_payload`. -.. autoclass:: ProtocolHandler +.. autoclass:: _ProtocolHandler .. autoattribute:: message_types_by_opcode :annotation: = {default mapping} From bfcde9e925dd32d667616df146e901229737f2b7 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 26 Feb 2016 18:03:54 -0500 Subject: [PATCH 0380/2005] PYTHON-432: Add IF EXISTS support to UPDATE and DELETE statements of CQLEngine --- cassandra/cqlengine/models.py | 33 ++- cassandra/cqlengine/query.py | 56 ++++- cassandra/cqlengine/statements.py | 14 +- docs/api/cassandra/cqlengine/models.rst | 18 ++ docs/api/cassandra/cqlengine/query.rst | 4 + tests/integration/cqlengine/test_ifexists.py | 225 ++++++++++++++++++ .../integration/cqlengine/test_ifnotexists.py | 46 +--- 7 files changed, 340 insertions(+), 56 deletions(-) create mode 100644 tests/integration/cqlengine/test_ifexists.py diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 6f9f61c6e8..1a56c447a2 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -178,7 +178,7 @@ class IfNotExistsDescriptor(object): def __get__(self, instance, model): if instance: # instance method - def ifnotexists_setter(ife): + def ifnotexists_setter(ife=True): instance._if_not_exists = ife return instance return ifnotexists_setter @@ -189,6 +189,24 @@ def __call__(self, *args, **kwargs): raise NotImplementedError +class IfExistsDescriptor(object): + """ + return a query set descriptor with a if_exists flag specified + """ + def __get__(self, instance, model): + if instance: + # instance method + def ifexists_setter(ife=True): + instance._if_exists = ife + return instance + return ifexists_setter + + return model.objects.if_exists + + def __call__(self, *args, **kwargs): + raise NotImplementedError + + class ConsistencyDescriptor(object): """ returns a query set descriptor if called on Class, instance if it was an instance call @@ -304,6 +322,8 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): if_not_exists = IfNotExistsDescriptor() + if_exists = IfExistsDescriptor() + # _len is lazily created by __len__ __table_name__ = None @@ -324,6 +344,8 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): _if_not_exists = False # optional if_not_exists flag to check existence before insertion + _if_exists = False # optional if_exists flag to check existence before update + _table_name = None # used internally to cache a derived table name def __init__(self, **values): @@ -648,7 +670,8 @@ def save(self): consistency=self.__consistency__, if_not_exists=self._if_not_exists, transaction=self._transaction, - timeout=self._timeout).save() + timeout=self._timeout, + if_exists=self._if_exists).save() self._set_persisted() @@ -694,7 +717,8 @@ def update(self, **values): timestamp=self._timestamp, consistency=self.__consistency__, transaction=self._transaction, - timeout=self._timeout).update() + timeout=self._timeout, + if_exists=self._if_exists).update() self._set_persisted() @@ -711,7 +735,8 @@ def delete(self): batch=self._batch, timestamp=self._timestamp, consistency=self.__consistency__, - timeout=self._timeout).delete() + timeout=self._timeout, + if_exists=self._if_exists).delete() def get_changed_columns(self): """ diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 62139cde1a..986ec5a77b 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -38,6 +38,9 @@ class QueryException(CQLEngineException): class IfNotExistsWithCounterColumn(CQLEngineException): pass +class IfExistsWithCounterColumn(CQLEngineException): + pass + class LWTException(CQLEngineException): """Lightweight transaction exception. @@ -285,6 +288,7 @@ def __init__(self, model): self._timestamp = None self._if_not_exists = False self._timeout = connection.NOT_SET + self._if_exists = False @property def column_family_name(self): @@ -295,7 +299,7 @@ def _execute(self, q): return self._batch.add_query(q) else: result = connection.execute(q, consistency_level=self._consistency, timeout=self._timeout) - if self._transaction: + if self._if_not_exists or self._if_exists or self._transaction: check_applied(result) return result @@ -771,9 +775,14 @@ def defer(self, fields): return self._only_or_defer('defer', fields) def create(self, **kwargs): - return self.model(**kwargs).batch(self._batch).ttl(self._ttl).\ - consistency(self._consistency).if_not_exists(self._if_not_exists).\ - timestamp(self._timestamp).save() + return self.model(**kwargs) \ + .batch(self._batch) \ + .ttl(self._ttl) \ + .consistency(self._consistency) \ + .if_not_exists(self._if_not_exists) \ + .timestamp(self._timestamp) \ + .if_exists(self._if_exists) \ + .save() def delete(self): """ @@ -787,7 +796,8 @@ def delete(self): dq = DeleteStatement( self.column_family_name, where=self._where, - timestamp=self._timestamp + timestamp=self._timestamp, + if_exists=self._if_exists ) self._execute(dq) @@ -929,12 +939,29 @@ def timestamp(self, timestamp): return clone def if_not_exists(self): + """ + Check the existence of an object before insertion. + + If the insertion isn't applied, a LWTException is raised. + """ if self.model._has_counter: raise IfNotExistsWithCounterColumn('if_not_exists cannot be used with tables containing counter columns') clone = copy.deepcopy(self) clone._if_not_exists = True return clone + def if_exists(self): + """ + Check the existence of an object before an update or delete. + + If the update or delete isn't applied, a LWTException is raised. + """ + if self.model._has_counter: + raise IfExistsWithCounterColumn('if_exists cannot be used with tables containing counter columns') + clone = copy.deepcopy(self) + clone._if_exists = True + return clone + def update(self, **values): """ Performs an update on the row selected by the queryset. Include values to update in the @@ -1027,7 +1054,7 @@ class Row(Model): nulled_columns = set() us = UpdateStatement(self.column_family_name, where=self._where, ttl=self._ttl, - timestamp=self._timestamp, transactions=self._transaction) + timestamp=self._timestamp, transactions=self._transaction, if_exists=self._if_exists) for name, val in values.items(): col_name, col_op = self._parse_filter_arg(name) col = self.model._columns.get(col_name) @@ -1067,7 +1094,8 @@ class Row(Model): self._execute(us) if nulled_columns: - ds = DeleteStatement(self.column_family_name, fields=nulled_columns, where=self._where) + ds = DeleteStatement(self.column_family_name, fields=nulled_columns, + where=self._where, if_exists=self._if_exists) self._execute(ds) @@ -1083,9 +1111,10 @@ class DMLQuery(object): _consistency = None _timestamp = None _if_not_exists = False + _if_exists = False def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, timestamp=None, - if_not_exists=False, transaction=None, timeout=connection.NOT_SET): + if_not_exists=False, transaction=None, timeout=connection.NOT_SET, if_exists=False): self.model = model self.column_family_name = self.model.column_family_name() self.instance = instance @@ -1094,6 +1123,7 @@ def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, self._consistency = consistency self._timestamp = timestamp self._if_not_exists = if_not_exists + self._if_exists = if_exists self._transaction = transaction self._timeout = timeout @@ -1102,7 +1132,7 @@ def _execute(self, q): return self._batch.add_query(q) else: tmp = connection.execute(q, consistency_level=self._consistency, timeout=self._timeout) - if self._if_not_exists or self._transaction: + if self._if_not_exists or self._if_exists or self._transaction: check_applied(tmp) return tmp @@ -1116,7 +1146,7 @@ def _delete_null_columns(self): """ executes a delete query to remove columns that have changed to null """ - ds = DeleteStatement(self.column_family_name) + ds = DeleteStatement(self.column_family_name, if_exists=self._if_exists) deleted_fields = False for _, v in self.instance._values.items(): col = v.column @@ -1150,8 +1180,8 @@ def update(self): assert type(self.instance) == self.model null_clustering_key = False if len(self.instance._clustering_keys) == 0 else True static_changed_only = True - statement = UpdateStatement(self.column_family_name, ttl=self._ttl, - timestamp=self._timestamp, transactions=self._transaction) + statement = UpdateStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp, + transactions=self._transaction, if_exists=self._if_exists) for name, col in self.instance._clustering_keys.items(): null_clustering_key = null_clustering_key and col._val_is_null(getattr(self.instance, name, None)) # get defined fields and their column names @@ -1255,7 +1285,7 @@ def delete(self): if self.instance is None: raise CQLEngineException("DML Query instance attribute is None") - ds = DeleteStatement(self.column_family_name, timestamp=self._timestamp) + ds = DeleteStatement(self.column_family_name, timestamp=self._timestamp, if_exists=self._if_exists) for name, col in self.model._primary_keys.items(): if (not col.partition_key) and (getattr(self.instance, name) is None): continue diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index bd2eea2928..39f09307d0 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -708,7 +708,8 @@ def __init__(self, where=None, ttl=None, timestamp=None, - transactions=None): + transactions=None, + if_exists=False): super(UpdateStatement, self). __init__(table, assignments=assignments, consistency=consistency, @@ -721,6 +722,8 @@ def __init__(self, for transaction in transactions or []: self.add_transaction_clause(transaction) + self.if_exists = if_exists + def __unicode__(self): qs = ['UPDATE', self.table] @@ -744,6 +747,9 @@ def __unicode__(self): if len(self.transactions) > 0: qs += [self._get_transactions()] + if self.if_exists: + qs += ["IF EXISTS"] + return ' '.join(qs) def add_transaction_clause(self, clause): @@ -778,7 +784,7 @@ def update_context_id(self, i): class DeleteStatement(BaseCQLStatement): """ a cql delete statement """ - def __init__(self, table, fields=None, consistency=None, where=None, timestamp=None): + def __init__(self, table, fields=None, consistency=None, where=None, timestamp=None, if_exists=False): super(DeleteStatement, self).__init__( table, consistency=consistency, @@ -790,6 +796,7 @@ def __init__(self, table, fields=None, consistency=None, where=None, timestamp=N fields = [fields] for field in fields or []: self.add_field(field) + self.if_exists = if_exists def update_context_id(self, i): super(DeleteStatement, self).update_context_id(i) @@ -829,4 +836,7 @@ def __unicode__(self): if self.where_clauses: qs += [self._where] + if self.if_exists: + qs += ["IF EXISTS"] + return ' '.join(qs) diff --git a/docs/api/cassandra/cqlengine/models.rst b/docs/api/cassandra/cqlengine/models.rst index ff198a4924..f8061b97cb 100644 --- a/docs/api/cassandra/cqlengine/models.rst +++ b/docs/api/cassandra/cqlengine/models.rst @@ -100,6 +100,24 @@ Model This method is supported on Cassandra 2.0 or later. + .. method:: if_exists() + + Check the existence of an object before an update or delete. The existence of an + object is determined by its primary key(s). And please note using this flag + would incur performance cost. + + If the update or delete isn't applied, a :class:`~cassandra.cqlengine.query.LWTException` is raised. + + .. code-block:: python + + try: + TestIfExistsModel.objects(id=id).if_exists().update(count=9, text='111111111111') + except LWTException as e: + # handle failure case + pass + + This method is supported on Cassandra 2.0 or later. + .. automethod:: save .. automethod:: update diff --git a/docs/api/cassandra/cqlengine/query.rst b/docs/api/cassandra/cqlengine/query.rst index 1cca4acec5..bd45b3b9fa 100644 --- a/docs/api/cassandra/cqlengine/query.rst +++ b/docs/api/cassandra/cqlengine/query.rst @@ -26,6 +26,10 @@ The methods here are used to filter, order, and constrain results. .. automethod:: limit + .. automethod:: if_not_exists + + .. automethod:: if_exists + .. automethod:: order_by .. automethod:: allow_filtering diff --git a/tests/integration/cqlengine/test_ifexists.py b/tests/integration/cqlengine/test_ifexists.py new file mode 100644 index 0000000000..e53e03e555 --- /dev/null +++ b/tests/integration/cqlengine/test_ifexists.py @@ -0,0 +1,225 @@ +# Copyright 2013-2016 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +import mock +from uuid import uuid4 + +from cassandra.cqlengine import columns +from cassandra.cqlengine.management import sync_table, drop_table +from cassandra.cqlengine.models import Model +from cassandra.cqlengine.query import BatchQuery, LWTException, IfExistsWithCounterColumn + +from tests.integration.cqlengine.base import BaseCassEngTestCase +from tests.integration import PROTOCOL_VERSION + + +class TestIfExistsModel(Model): + + id = columns.UUID(primary_key=True, default=lambda: uuid4()) + count = columns.Integer() + text = columns.Text(required=False) + + +class TestIfExistsWithCounterModel(Model): + + id = columns.UUID(primary_key=True, default=lambda: uuid4()) + likes = columns.Counter() + + +class BaseIfExistsTest(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(BaseIfExistsTest, cls).setUpClass() + sync_table(TestIfExistsModel) + + @classmethod + def tearDownClass(cls): + super(BaseIfExistsTest, cls).tearDownClass() + drop_table(TestIfExistsModel) + + +class BaseIfExistsWithCounterTest(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(BaseIfExistsWithCounterTest, cls).setUpClass() + sync_table(TestIfExistsWithCounterModel) + + @classmethod + def tearDownClass(cls): + super(BaseIfExistsWithCounterTest, cls).tearDownClass() + drop_table(TestIfExistsWithCounterModel) + + +class IfExistsUpdateTests(BaseIfExistsTest): + + @unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") + def test_update_if_exists(self): + """ tests that update with if_exists work as expected """ + + id = uuid4() + + m = TestIfExistsModel.create(id=id, count=8, text='123456789') + m.text = 'changed' + m.if_exists().update() + m = TestIfExistsModel.get(id=id) + self.assertEqual(m.text, 'changed') + + # save() + m.text = 'changed_again' + m.if_exists().save() + m = TestIfExistsModel.get(id=id) + self.assertEqual(m.text, 'changed_again') + + m = TestIfExistsModel(id=uuid4(), count=44) # do not exists + with self.assertRaises(LWTException) as assertion: + m.if_exists().update() + + self.assertEqual(assertion.exception.existing, { + '[applied]': False, + }) + + # queryset update + with self.assertRaises(LWTException) as assertion: + TestIfExistsModel.objects(id=uuid4()).if_exists().update(count=8) + + self.assertEqual(assertion.exception.existing, { + '[applied]': False, + }) + + @unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") + def test_batch_update_if_exists_success(self): + """ tests that batch update with if_exists work as expected """ + + id = uuid4() + + m = TestIfExistsModel.create(id=id, count=8, text='123456789') + + with BatchQuery() as b: + m.text = '111111111' + m.batch(b).if_exists().update() + + with self.assertRaises(LWTException) as assertion: + with BatchQuery() as b: + m = TestIfExistsModel(id=uuid4(), count=42) # Doesn't exist + m.batch(b).if_exists().update() + + self.assertEqual(assertion.exception.existing, { + '[applied]': False, + }) + + q = TestIfExistsModel.objects(id=id) + self.assertEqual(len(q), 1) + + tm = q.first() + self.assertEqual(tm.count, 8) + self.assertEqual(tm.text, '111111111') + + @unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") + def test_delete_if_exists(self): + """ tests that delete with if_exists work as expected """ + + id = uuid4() + + m = TestIfExistsModel.create(id=id, count=8, text='123456789') + m.if_exists().delete() + q = TestIfExistsModel.objects(id=id) + self.assertEqual(len(q), 0) + + m = TestIfExistsModel(id=uuid4(), count=44) # do not exists + with self.assertRaises(LWTException) as assertion: + m.if_exists().delete() + + self.assertEqual(assertion.exception.existing, { + '[applied]': False, + }) + + # queryset delete + with self.assertRaises(LWTException) as assertion: + TestIfExistsModel.objects(id=uuid4()).if_exists().delete() + + self.assertEqual(assertion.exception.existing, { + '[applied]': False, + }) + + + @unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") + def test_batch_delete_if_exists_success(self): + """ tests that batch delete with if_exists work as expected """ + + id = uuid4() + + m = TestIfExistsModel.create(id=id, count=8, text='123456789') + + with BatchQuery() as b: + m.batch(b).if_exists().delete() + + q = TestIfExistsModel.objects(id=id) + self.assertEqual(len(q), 0) + + with self.assertRaises(LWTException) as assertion: + with BatchQuery() as b: + m = TestIfExistsModel(id=uuid4(), count=42) # Doesn't exist + m.batch(b).if_exists().delete() + + self.assertEqual(assertion.exception.existing, { + '[applied]': False, + }) + + +class IfExistsQueryTest(BaseIfExistsTest): + + def test_if_exists_included_on_queryset_update(self): + """ tests that if_exists on queryset works as expected """ + + with mock.patch.object(self.session, 'execute') as m: + TestIfExistsModel.objects(id=uuid4()).if_exists().update(count=42) + + query = m.call_args[0][0].query_string + self.assertIn("IF EXISTS", query) + + def test_if_exists_included_on_update(self): + """ tests that if_exists on models update works as expected """ + + with mock.patch.object(self.session, 'execute') as m: + TestIfExistsModel(id=uuid4()).if_exists().update(count=8) + + query = m.call_args[0][0].query_string + self.assertIn("IF EXISTS", query) + + def test_if_exists_included_on_delete(self): + """ tests that if_exists on models delete works as expected """ + + with mock.patch.object(self.session, 'execute') as m: + TestIfExistsModel(id=uuid4()).if_exists().delete() + + query = m.call_args[0][0].query_string + self.assertIn("IF EXISTS", query) + + +class IfExistWithCounterTest(BaseIfExistsWithCounterTest): + + def test_instance_raise_exception(self): + """ make sure exception is raised when calling + if_exists on table with counter column + """ + id = uuid4() + with self.assertRaises(IfExistsWithCounterColumn): + TestIfExistsWithCounterModel.if_exists() + diff --git a/tests/integration/cqlengine/test_ifnotexists.py b/tests/integration/cqlengine/test_ifnotexists.py index 43d53319a8..8c5e89c185 100644 --- a/tests/integration/cqlengine/test_ifnotexists.py +++ b/tests/integration/cqlengine/test_ifnotexists.py @@ -75,7 +75,7 @@ def tearDownClass(cls): class IfNotExistsInsertTests(BaseIfNotExistsTest): @unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") - def test_insert_if_not_exists_success(self): + def test_insert_if_not_exists(self): """ tests that insertion with if_not_exists work as expected """ id = uuid4() @@ -85,6 +85,9 @@ def test_insert_if_not_exists_success(self): with self.assertRaises(LWTException) as assertion: TestIfNotExistsModel.if_not_exists().create(id=id, count=9, text='111111111111') + with self.assertRaises(LWTException) as assertion: + TestIfNotExistsModel.objects(count=9, text='111111111111').if_not_exists().create(id=id) + self.assertEqual(assertion.exception.existing, { 'count': 8, 'id': id, @@ -99,23 +102,8 @@ def test_insert_if_not_exists_success(self): self.assertEqual(tm.count, 8) self.assertEqual(tm.text, '123456789') - def test_insert_if_not_exists_failure(self): - """ tests that insertion with if_not_exists failure """ - - id = uuid4() - - TestIfNotExistsModel.create(id=id, count=8, text='123456789') - TestIfNotExistsModel.create(id=id, count=9, text='111111111111') - - q = TestIfNotExistsModel.objects(id=id) - self.assertEqual(len(q), 1) - - tm = q.first() - self.assertEqual(tm.count, 9) - self.assertEqual(tm.text, '111111111111') - @unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") - def test_batch_insert_if_not_exists_success(self): + def test_batch_insert_if_not_exists(self): """ tests that batch insertion with if_not_exists work as expected """ id = uuid4() @@ -142,22 +130,6 @@ def test_batch_insert_if_not_exists_success(self): self.assertEqual(tm.count, 8) self.assertEqual(tm.text, '123456789') - def test_batch_insert_if_not_exists_failure(self): - """ tests that batch insertion with if_not_exists failure """ - id = uuid4() - - with BatchQuery() as b: - TestIfNotExistsModel.batch(b).create(id=id, count=8, text='123456789') - with BatchQuery() as b: - TestIfNotExistsModel.batch(b).create(id=id, count=9, text='111111111111') - - q = TestIfNotExistsModel.objects(id=id) - self.assertEqual(len(q), 1) - - tm = q.first() - self.assertEqual(tm.count, 9) - self.assertEqual(tm.text, '111111111111') - class IfNotExistsModelTest(BaseIfNotExistsTest): @@ -175,7 +147,7 @@ def test_if_not_exists_included_on_save(self): with mock.patch.object(self.session, 'execute') as m: tm = TestIfNotExistsModel(count=8) - tm.if_not_exists(True).save() + tm.if_not_exists().save() query = m.call_args[0][0].query_string self.assertIn("IF NOT EXISTS", query) @@ -198,12 +170,12 @@ class IfNotExistsInstanceTest(BaseIfNotExistsTest): def test_instance_is_returned(self): """ - ensures that we properly handle the instance.if_not_exists(True).save() + ensures that we properly handle the instance.if_not_exists().save() scenario """ o = TestIfNotExistsModel.create(text="whatever") o.text = "new stuff" - o = o.if_not_exists(True) + o = o.if_not_exists() self.assertEqual(True, o._if_not_exists) def test_if_not_exists_is_not_include_with_query_on_update(self): @@ -212,7 +184,7 @@ def test_if_not_exists_is_not_include_with_query_on_update(self): """ o = TestIfNotExistsModel.create(text="whatever") o.text = "new stuff" - o = o.if_not_exists(True) + o = o.if_not_exists() with mock.patch.object(self.session, 'execute') as m: o.save() From c87937db762bb28a1f5c99de359832e53f921a5d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 29 Feb 2016 12:40:23 -0600 Subject: [PATCH 0381/2005] cqle: fix attr error if batch executed multiple times Also adds a warning for executing batches multiple times. PYTHON-445 --- cassandra/cqlengine/query.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index b294e896c1..b7cf64f40a 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -133,6 +133,8 @@ class BatchQuery(object): http://www.datastax.com/docs/1.2/cql_cli/cql/BATCH """ + warn_multiple_exec = True + _consistency = None def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on_exception=False, @@ -163,6 +165,8 @@ def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on self._execute_on_exception = execute_on_exception self._timeout = timeout self._callbacks = [] + self._executed = False + self._context_entered = False def add_query(self, query): if not isinstance(query, BaseCQLStatement): @@ -176,9 +180,6 @@ def _execute_callbacks(self): for callback, args, kwargs in self._callbacks: callback(*args, **kwargs) - # trying to clear up the ref counts for objects mentioned in the set - del self._callbacks - def add_callback(self, fn, *args, **kwargs): """Add a function and arguments to be passed to it to be executed after the batch executes. @@ -197,6 +198,13 @@ def add_callback(self, fn, *args, **kwargs): self._callbacks.append((fn, args, kwargs)) def execute(self): + if self._executed and self.warn_multiple_exec: + msg = "Batch executed multiple times." + if self._context_entered: + msg += " If using the batch as a context manager, there is no need to call execute directly." + warn(msg) + self._executed = True + if len(self.queries) == 0: # Empty batch is a no-op # except for callbacks @@ -237,6 +245,7 @@ def execute(self): self._execute_callbacks() def __enter__(self): + self._context_entered = True return self def __exit__(self, exc_type, exc_val, exc_tb): From b3ffffbf27cbf22dcfb6536fa4274db5d9cdfeef Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 29 Feb 2016 12:41:40 -0600 Subject: [PATCH 0382/2005] PYTHON-445 tests and cleanup --- .../cqlengine/query/test_batch_query.py | 28 ++++----- .../integration/cqlengine/test_batch_query.py | 61 +++++++++++++------ 2 files changed, 58 insertions(+), 31 deletions(-) diff --git a/tests/integration/cqlengine/query/test_batch_query.py b/tests/integration/cqlengine/query/test_batch_query.py index 5aea7e9c90..126ad21e36 100644 --- a/tests/integration/cqlengine/query/test_batch_query.py +++ b/tests/integration/cqlengine/query/test_batch_query.py @@ -25,10 +25,10 @@ class TestMultiKeyModel(Model): - partition = columns.Integer(primary_key=True) - cluster = columns.Integer(primary_key=True) - count = columns.Integer(required=False) - text = columns.Text(required=False) + partition = columns.Integer(primary_key=True) + cluster = columns.Integer(primary_key=True) + count = columns.Integer(required=False) + text = columns.Text(required=False) class BatchQueryLogModel(Model): @@ -77,12 +77,12 @@ def test_update_success_case(self): inst.batch(b).save() inst2 = TestMultiKeyModel.get(partition=self.pkey, cluster=2) - assert inst2.count == 3 + self.assertEqual(inst2.count, 3) b.execute() inst3 = TestMultiKeyModel.get(partition=self.pkey, cluster=2) - assert inst3.count == 4 + self.assertEqual(inst3.count, 4) def test_delete_success_case(self): @@ -120,9 +120,9 @@ def test_bulk_delete_success_case(self): with BatchQuery() as b: TestMultiKeyModel.objects.batch(b).filter(partition=0).delete() - assert TestMultiKeyModel.filter(partition=0).count() == 5 + self.assertEqual(TestMultiKeyModel.filter(partition=0).count(), 5) - assert TestMultiKeyModel.filter(partition=0).count() == 0 + self.assertEqual(TestMultiKeyModel.filter(partition=0).count(), 0) #cleanup for m in TestMultiKeyModel.all(): m.delete() @@ -132,23 +132,23 @@ def test_none_success_case(self): b = BatchQuery() q = TestMultiKeyModel.objects.batch(b) - assert q._batch == b + self.assertEqual(q._batch, b) q = q.batch(None) - assert q._batch is None + self.assertIsNone(q._batch) def test_dml_none_success_case(self): """ Tests that passing None into the batch call clears any batch object """ b = BatchQuery() q = DMLQuery(TestMultiKeyModel, batch=b) - assert q._batch == b + self.assertEqual(q._batch, b) q.batch(None) - assert q._batch is None + self.assertIsNone(q._batch) def test_batch_execute_on_exception_succeeds(self): - # makes sure if execute_on_exception == True we still apply the batch + # makes sure if execute_on_exception == True we still apply the batch drop_table(BatchQueryLogModel) sync_table(BatchQueryLogModel) @@ -167,7 +167,7 @@ def test_batch_execute_on_exception_succeeds(self): self.assertEqual(1, len(obj)) def test_batch_execute_on_exception_skips_if_not_specified(self): - # makes sure if execute_on_exception == True we still apply the batch + # makes sure if execute_on_exception == True we still apply the batch drop_table(BatchQueryLogModel) sync_table(BatchQueryLogModel) diff --git a/tests/integration/cqlengine/test_batch_query.py b/tests/integration/cqlengine/test_batch_query.py index b28b533a64..7d136fe60c 100644 --- a/tests/integration/cqlengine/test_batch_query.py +++ b/tests/integration/cqlengine/test_batch_query.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import warnings import sure @@ -20,8 +21,9 @@ from cassandra.cqlengine.query import BatchQuery from tests.integration.cqlengine.base import BaseCassEngTestCase -class TestMultiKeyModel(Model): +from mock import patch +class TestMultiKeyModel(Model): partition = columns.Integer(primary_key=True) cluster = columns.Integer(primary_key=True) count = columns.Integer(required=False) @@ -50,7 +52,7 @@ def setUp(self): def test_insert_success_case(self): b = BatchQuery() - inst = TestMultiKeyModel.batch(b).create(partition=self.pkey, cluster=2, count=3, text='4') + TestMultiKeyModel.batch(b).create(partition=self.pkey, cluster=2, count=3, text='4') with self.assertRaises(TestMultiKeyModel.DoesNotExist): TestMultiKeyModel.get(partition=self.pkey, cluster=2) @@ -69,12 +71,12 @@ def test_update_success_case(self): inst.batch(b).save() inst2 = TestMultiKeyModel.get(partition=self.pkey, cluster=2) - assert inst2.count == 3 + self.assertEqual(inst2.count, 3) b.execute() inst3 = TestMultiKeyModel.get(partition=self.pkey, cluster=2) - assert inst3.count == 4 + self.assertEqual(inst3.count, 4) def test_delete_success_case(self): @@ -112,9 +114,9 @@ def test_bulk_delete_success_case(self): with BatchQuery() as b: TestMultiKeyModel.objects.batch(b).filter(partition=0).delete() - assert TestMultiKeyModel.filter(partition=0).count() == 5 + self.assertEqual(TestMultiKeyModel.filter(partition=0).count(), 5) - assert TestMultiKeyModel.filter(partition=0).count() == 0 + self.assertEqual(TestMultiKeyModel.filter(partition=0).count(), 0) #cleanup for m in TestMultiKeyModel.all(): m.delete() @@ -142,11 +144,11 @@ def my_callback(*args, **kwargs): batch.add_callback(my_callback, 2, named_arg='value') batch.add_callback(my_callback, 1, 3) - assert batch._callbacks == [ + self.assertEqual(batch._callbacks, [ (my_callback, (), {}), (my_callback, (2,), {'named_arg':'value'}), (my_callback, (1, 3), {}) - ] + ]) def test_callbacks_properly_execute_callables_and_tuples(self): @@ -162,8 +164,8 @@ def my_callback(*args, **kwargs): batch.execute() - assert len(call_history) == 2 - assert [(), ('more', 'args')] == call_history + self.assertEqual(len(call_history), 2) + self.assertEqual([(), ('more', 'args')], call_history) def test_callbacks_tied_to_execute(self): """Batch callbacks should NOT fire if batch is not executed in context manager mode""" @@ -174,9 +176,8 @@ def my_callback(*args, **kwargs): with BatchQuery() as batch: batch.add_callback(my_callback) - pass - assert len(call_history) == 1 + self.assertEqual(len(call_history), 1) class SomeError(Exception): pass @@ -189,16 +190,42 @@ class SomeError(Exception): raise SomeError # still same call history. Nothing added - assert len(call_history) == 1 + self.assertEqual(len(call_history), 1) # but if execute ran, even with an error bubbling through # the callbacks also would have fired with self.assertRaises(SomeError): with BatchQuery(execute_on_exception=True) as batch: batch.add_callback(my_callback) - # this error bubbling up through context manager - # should prevent callback runs (along with b.execute()) raise SomeError - # still same call history - assert len(call_history) == 2 + # updated call history + self.assertEqual(len(call_history), 2) + + def test_callbacks_work_multiple_times(self): + call_history = [] + + def my_callback(*args, **kwargs): + call_history.append(args) + + with warnings.catch_warnings(record=True) as w: + with BatchQuery() as batch: + batch.add_callback(my_callback) + batch.execute() + batch.execute() + self.assertEqual(len(w), 2) # package filter setup to warn always + self.assertRegex(str(w[0].message), r"^Batch.*multiple.*") + + def test_disable_multiple_callback_warning(self): + call_history = [] + + def my_callback(*args, **kwargs): + call_history.append(args) + + with patch('cassandra.cqlengine.query.BatchQuery.warn_multiple_exec', False): + with warnings.catch_warnings(record=True) as w: + with BatchQuery() as batch: + batch.add_callback(my_callback) + batch.execute() + batch.execute() + self.assertFalse(w) From 978cae601d0b1b91f5bb11970fea4ae81df58a74 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 29 Feb 2016 12:49:59 -0600 Subject: [PATCH 0383/2005] optional Cython BytesType->bytearray deserializer PYTHON-503 --- cassandra/deserializers.pyx | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cassandra/deserializers.pyx b/cassandra/deserializers.pyx index ac3a009caf..d3645a3285 100644 --- a/cassandra/deserializers.pyx +++ b/cassandra/deserializers.pyx @@ -46,6 +46,12 @@ cdef class DesBytesType(Deserializer): cdef deserialize(self, Buffer *buf, int protocol_version): return to_bytes(buf) +# this is to facilitate cqlengine integration, which requires bytearrays for BytesType +# It is switched in by simply overwriting DesBytesType: +# deserializers.DesBytesType = deserializers.DesBytesTypeByteArray +cdef class DesBytesTypeByteArray(Deserializer): + cdef deserialize(self, Buffer *buf, int protocol_version): + return bytearray(buf.ptr[:buf.size]) # TODO: Use libmpdec: http://www.bytereef.org/mpdecimal/index.html cdef class DesDecimalType(Deserializer): From 20b30a12f3cf9a1e8f7c55e7dce823f80ffb540b Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 24 Feb 2016 17:58:28 -0500 Subject: [PATCH 0384/2005] PYTHON-337: case sensitive support for column family name --- cassandra/cqlengine/management.py | 2 +- cassandra/cqlengine/models.py | 18 +++++++- docs/api/cassandra/cqlengine/models.rst | 2 + .../cqlengine/management/test_management.py | 43 +++++++++++++++++++ .../model/test_class_construction.py | 26 +++++++++++ .../integration/cqlengine/model/test_model.py | 17 ++++++++ 6 files changed, 105 insertions(+), 3 deletions(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index f1b66d9527..9e7b036364 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -194,7 +194,7 @@ def sync_table(model): if table.indexes.get(index_name): continue - qs = ['CREATE INDEX {0}'.format(index_name)] + qs = ['CREATE INDEX {0}'.format(metadata.protect_name(index_name))] qs += ['ON {0}'.format(cf_name)] qs += ['("{0}")'.format(column.db_field_name)] qs = ' '.join(qs) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 6f9f61c6e8..b20dca9902 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -15,7 +15,7 @@ import logging import re import six -import warnings +from warnings import warn from cassandra.cqlengine import CQLEngineException, ValidationError from cassandra.cqlengine import columns @@ -308,6 +308,8 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): __table_name__ = None + __table_name_case_sensitive__ = False + __keyspace__ = None __discriminator_value__ = None @@ -492,7 +494,14 @@ def column_family_name(cls, include_keyspace=True): def _raw_column_family_name(cls): if not cls._table_name: if cls.__table_name__: - cls._table_name = cls.__table_name__.lower() + if cls.__table_name_case_sensitive__: + cls._table_name = cls.__table_name__ + else: + table_name = cls.__table_name__.lower() + if cls.__table_name__ != table_name: + warn(("Model __table_name__ will be case sensitive by default in 4.0. " + "You should fix the __table_name__ value of the '{0}' model.").format(cls.__name__)) + cls._table_name = table_name else: if cls._is_polymorphic and not cls._is_polymorphic_base: cls._table_name = cls._polymorphic_base._raw_column_family_name() @@ -924,6 +933,11 @@ class Model(BaseModel): *Optional.* Sets the name of the CQL table for this model. If left blank, the table name will be the name of the model, with it's module name as it's prefix. Manually defined table names are not inherited. """ + __table_name_case_sensitive__ = False + """ + *Optional.* By default, __table_name__ is case insensitive. Set this to True if you want to preserve the case sensitivity. + """ + __keyspace__ = None """ Sets the name of the keyspace used by this model. diff --git a/docs/api/cassandra/cqlengine/models.rst b/docs/api/cassandra/cqlengine/models.rst index ff198a4924..efcd331b78 100644 --- a/docs/api/cassandra/cqlengine/models.rst +++ b/docs/api/cassandra/cqlengine/models.rst @@ -28,6 +28,8 @@ Model .. autoattribute:: __table_name__ + .. autoattribute:: __table_name_case_sensitive__ + .. autoattribute:: __keyspace__ .. _ttl-change: diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 93aa8dc11b..e90d82d7ef 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -235,6 +235,49 @@ def test_sync_table_works_with_primary_keys_only_tables(self): self.assertIn('SizeTieredCompactionStrategy', table_meta.as_cql_query()) +class IndexModel(Model): + + __table_name__ = 'index_model' + first_key = columns.UUID(primary_key=True) + second_key = columns.Text(index=True) + + +class IndexCaseSensitiveModel(Model): + + __table_name__ = 'IndexModel' + __table_name_case_sensitive__ = True + first_key = columns.UUID(primary_key=True) + second_key = columns.Text(index=True) + + +class IndexTests(BaseCassEngTestCase): + + def setUp(self): + drop_table(IndexModel) + + def test_sync_index(self): + + sync_table(IndexModel) + table_meta = management._get_table_metadata(IndexModel) + self.assertIn("index_index_model_second_key", table_meta.indexes) + + # index already exists + sync_table(IndexModel) + table_meta = management._get_table_metadata(IndexModel) + self.assertIn("index_index_model_second_key", table_meta.indexes) + + def test_sync_index_case_sensitive(self): + + sync_table(IndexCaseSensitiveModel) + table_meta = management._get_table_metadata(IndexCaseSensitiveModel) + self.assertIn("index_IndexModel_second_key", table_meta.indexes) + + # index already exists + sync_table(IndexCaseSensitiveModel) + table_meta = management._get_table_metadata(IndexCaseSensitiveModel) + self.assertIn("index_IndexModel_second_key", table_meta.indexes) + + class NonModelFailureTest(BaseCassEngTestCase): class FakeModel(object): pass diff --git a/tests/integration/cqlengine/model/test_class_construction.py b/tests/integration/cqlengine/model/test_class_construction.py index 7b333a58b2..c47c172382 100644 --- a/tests/integration/cqlengine/model/test_class_construction.py +++ b/tests/integration/cqlengine/model/test_class_construction.py @@ -217,6 +217,7 @@ class NoKeyspace(Model): self.assertEqual(len(warn), 0) + class TestManualTableNaming(BaseCassEngTestCase): class RenamedTest(Model): @@ -230,6 +231,31 @@ def test_proper_table_naming(self): assert self.RenamedTest.column_family_name(include_keyspace=False) == 'manual_name' assert self.RenamedTest.column_family_name(include_keyspace=True) == 'whatever.manual_name' + +class TestManualTableNamingCaseSensitive(BaseCassEngTestCase): + + class RenamedCaseInsensitiveTest(Model): + __keyspace__ = 'whatever' + __table_name__ = 'Manual_Name' + + id = columns.UUID(primary_key=True) + + class RenamedCaseSensitiveTest(Model): + __keyspace__ = 'whatever' + __table_name__ = 'Manual_Name' + __table_name_case_sensitive__ = True + + id = columns.UUID(primary_key=True) + + def test_proper_table_naming_case_insensitive(self): + self.assertEqual(self.RenamedCaseInsensitiveTest.column_family_name(include_keyspace=False), 'manual_name') + self.assertEqual(self.RenamedCaseInsensitiveTest.column_family_name(include_keyspace=True), 'whatever.manual_name') + + def test_proper_table_naming_case_sensitive(self): + self.assertEqual(self.RenamedCaseSensitiveTest.column_family_name(include_keyspace=False), '"Manual_Name"') + self.assertEqual(self.RenamedCaseSensitiveTest.column_family_name(include_keyspace=True), 'whatever."Manual_Name"') + + class AbstractModel(Model): __abstract__ = True diff --git a/tests/integration/cqlengine/model/test_model.py b/tests/integration/cqlengine/model/test_model.py index e9f795d8b2..cb6bdeb1ba 100644 --- a/tests/integration/cqlengine/model/test_model.py +++ b/tests/integration/cqlengine/model/test_model.py @@ -126,6 +126,23 @@ class TestModel(Model): # .. but we can still get the bare CF name self.assertEqual(TestModel.column_family_name(include_keyspace=False), "test_model") + def test_column_family_case_sensitive(self): + class TestModel(Model): + __table_name__ = 'TestModel' + __table_name_case_sensitive__ = True + + k = columns.Integer(primary_key=True) + + self.assertEqual(TestModel.column_family_name(), '%s."TestModel"' % (models.DEFAULT_KEYSPACE,)) + + TestModel.__keyspace__ = "my_test_keyspace" + self.assertEqual(TestModel.column_family_name(), '%s."TestModel"' % (TestModel.__keyspace__,)) + + del TestModel.__keyspace__ + with patch('cassandra.cqlengine.models.DEFAULT_KEYSPACE', None): + self.assertRaises(CQLEngineException, TestModel.column_family_name) + self.assertEqual(TestModel.column_family_name(include_keyspace=False), '"TestModel"') + class BuiltInAttributeConflictTest(unittest.TestCase): """tests Model definitions that conflict with built-in attributes/methods""" From 7362747faff0ae86380226c0f49fc55e7d6bc673 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 29 Feb 2016 13:50:55 -0600 Subject: [PATCH 0385/2005] Added documentation and slight model variation to PYTHON-351 tests --- .../cqlengine/query/test_queryset.py | 160 ++++++++++++------ 1 file changed, 109 insertions(+), 51 deletions(-) diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 9d8c7a17e2..e0cc6ab4b0 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -917,72 +917,130 @@ class DBFieldModel(Model): v1 = columns.Integer(db_field='e', index=True) +class DBFieldModelMixed1(Model): + k0 = columns.Integer(partition_key=True, db_field='a') + k1 = columns.Integer(partition_key=True) + c0 = columns.Integer(primary_key=True, db_field='c') + v0 = columns.Integer(db_field='d') + v1 = columns.Integer(index=True) + + +class DBFieldModelMixed2(Model): + k0 = columns.Integer(partition_key=True) + k1 = columns.Integer(partition_key=True, db_field='b') + c0 = columns.Integer(primary_key=True) + v0 = columns.Integer(db_field='d') + v1 = columns.Integer(index=True, db_field='e') + + class TestModelQueryWithDBField(BaseCassEngTestCase): @classmethod def setUpClass(cls): super(TestModelQueryWithDBField, cls).setUpClass() - sync_table(DBFieldModel) + cls.model_list = [DBFieldModel, DBFieldModelMixed1, DBFieldModelMixed2] + for model in cls.model_list: + sync_table(model) @classmethod def tearDownClass(cls): super(TestModelQueryWithDBField, cls).tearDownClass() - drop_table(DBFieldModel) + for model in cls.model_list: + drop_table(model) def test_basic_crud(self): - values = {'k0': 1, 'k1': 2, 'c0': 3, 'v0': 4, 'v1': 5} - - # create - i = DBFieldModel.create(**values) - i = DBFieldModel.objects(k0=i.k0, k1=i.k1).first() - self.assertEqual(i, DBFieldModel(**values)) - - # create - values['v0'] = 101 - i.update(v0=values['v0']) - i = DBFieldModel.objects(k0=i.k0, k1=i.k1).first() - self.assertEqual(i, DBFieldModel(**values)) - - # delete - DBFieldModel.objects(k0=i.k0, k1=i.k1).delete() - i = DBFieldModel.objects(k0=i.k0, k1=i.k1).first() - self.assertIsNone(i) - - i = DBFieldModel.create(**values) - i = DBFieldModel.objects(k0=i.k0, k1=i.k1).first() - self.assertEqual(i, DBFieldModel(**values)) - i.delete() - DBFieldModel.objects(k0=i.k0, k1=i.k1).delete() - i = DBFieldModel.objects(k0=i.k0, k1=i.k1).first() - self.assertIsNone(i) + """ + Tests creation update and delete of object model queries that are using db_field mappings. + + @since 3.1 + @jira_ticket PYTHON-351 + @expected_result results are properly retrieved without errors + + @test_category object_mapper + """ + for model in self.model_list: + values = {'k0': 1, 'k1': 2, 'c0': 3, 'v0': 4, 'v1': 5} + # create + i = model.create(**values) + i = model.objects(k0=i.k0, k1=i.k1).first() + self.assertEqual(i, model(**values)) + + # create + values['v0'] = 101 + i.update(v0=values['v0']) + i = model.objects(k0=i.k0, k1=i.k1).first() + self.assertEqual(i, model(**values)) + + # delete + model.objects(k0=i.k0, k1=i.k1).delete() + i = model.objects(k0=i.k0, k1=i.k1).first() + self.assertIsNone(i) + + i = model.create(**values) + i = model.objects(k0=i.k0, k1=i.k1).first() + self.assertEqual(i, model(**values)) + i.delete() + model.objects(k0=i.k0, k1=i.k1).delete() + i = model.objects(k0=i.k0, k1=i.k1).first() + self.assertIsNone(i) def test_slice(self): - values = {'k0': 1, 'k1': 3, 'c0': 3, 'v0': 4, 'v1': 5} - clustering_values = range(3) - for c in clustering_values: - values['c0'] = c - i = DBFieldModel.create(**values) + """ + Tests slice queries for object models that are using db_field mapping + + @since 3.1 + @jira_ticket PYTHON-351 + @expected_result results are properly retrieved without errors - self.assertEqual(DBFieldModel.objects(k0=i.k0, k1=i.k1).count(), len(clustering_values)) - self.assertEqual(DBFieldModel.objects(k0=i.k0, k1=i.k1, c0=i.c0).count(), 1) - self.assertEqual(DBFieldModel.objects(k0=i.k0, k1=i.k1, c0__lt=i.c0).count(), len(clustering_values[:-1])) - self.assertEqual(DBFieldModel.objects(k0=i.k0, k1=i.k1, c0__gt=0).count(), len(clustering_values[1:])) + @test_category object_mapper + """ + for model in self.model_list: + values = {'k0': 1, 'k1': 3, 'c0': 3, 'v0': 4, 'v1': 5} + clustering_values = range(3) + for c in clustering_values: + values['c0'] = c + i = model.create(**values) + + self.assertEqual(model.objects(k0=i.k0, k1=i.k1).count(), len(clustering_values)) + self.assertEqual(model.objects(k0=i.k0, k1=i.k1, c0=i.c0).count(), 1) + self.assertEqual(model.objects(k0=i.k0, k1=i.k1, c0__lt=i.c0).count(), len(clustering_values[:-1])) + self.assertEqual(model.objects(k0=i.k0, k1=i.k1, c0__gt=0).count(), len(clustering_values[1:])) def test_order(self): - values = {'k0': 1, 'k1': 4, 'c0': 3, 'v0': 4, 'v1': 5} - clustering_values = range(3) - for c in clustering_values: - values['c0'] = c - i = DBFieldModel.create(**values) - self.assertEqual(DBFieldModel.objects(k0=i.k0, k1=i.k1).order_by('c0').first().c0, clustering_values[0]) - self.assertEqual(DBFieldModel.objects(k0=i.k0, k1=i.k1).order_by('-c0').first().c0, clustering_values[-1]) + """ + Tests order by queries for object models that are using db_field mapping + + @since 3.1 + @jira_ticket PYTHON-351 + @expected_result results are properly retrieved without errors + + @test_category object_mapper + """ + for model in self.model_list: + values = {'k0': 1, 'k1': 4, 'c0': 3, 'v0': 4, 'v1': 5} + clustering_values = range(3) + for c in clustering_values: + values['c0'] = c + i = model.create(**values) + self.assertEqual(model.objects(k0=i.k0, k1=i.k1).order_by('c0').first().c0, clustering_values[0]) + self.assertEqual(model.objects(k0=i.k0, k1=i.k1).order_by('-c0').first().c0, clustering_values[-1]) def test_index(self): - values = {'k0': 1, 'k1': 5, 'c0': 3, 'v0': 4, 'v1': 5} - clustering_values = range(3) - for c in clustering_values: - values['c0'] = c - values['v1'] = c - i = DBFieldModel.create(**values) - self.assertEqual(DBFieldModel.objects(k0=i.k0, k1=i.k1).count(), len(clustering_values)) - self.assertEqual(DBFieldModel.objects(k0=i.k0, k1=i.k1, v1=0).count(), 1) + """ + Tests queries using index fields for object models using db_field mapping + + @since 3.1 + @jira_ticket PYTHON-351 + @expected_result results are properly retrieved without errors + + @test_category object_mapper + """ + for model in self.model_list: + values = {'k0': 1, 'k1': 5, 'c0': 3, 'v0': 4, 'v1': 5} + clustering_values = range(3) + for c in clustering_values: + values['c0'] = c + values['v1'] = c + i = model.create(**values) + self.assertEqual(model.objects(k0=i.k0, k1=i.k1).count(), len(clustering_values)) + self.assertEqual(model.objects(k0=i.k0, k1=i.k1, v1=0).count(), 1) From f9c4f23119278bc58bce22d6eb0d2b28be97d845 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 29 Feb 2016 15:49:26 -0600 Subject: [PATCH 0386/2005] cqle: show None values as deleted if the value is explicitly set Fixes an issue where a model instance created with explicit None fields would not persist as such on save() PYTHON-475 --- cassandra/cqlengine/columns.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 1303c5d59f..10e4e32bfa 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -37,7 +37,7 @@ def __init__(self, instance, column, value): @property def deleted(self): - return self.value is None and self.previous_value is not None + return self.value is None and (self.explicit or self.previous_value is not None) @property def changed(self): @@ -270,8 +270,6 @@ def to_database(self, value): val = super(Bytes, self).to_database(value) return bytearray(val) - def to_python(self, value): - return value Bytes = Blob From 23c74e9dffe40a39a01620bbf8db8b0aa18f872a Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 1 Mar 2016 12:12:31 -0500 Subject: [PATCH 0387/2005] PYTHON-373: Add tests for CQLEngine and range deletion --- .../cqlengine/query/test_queryset.py | 21 +++++++++++++++++++ .../statements/test_delete_statement.py | 12 +++++++++++ 2 files changed, 33 insertions(+) diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index e0cc6ab4b0..01be041d3b 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -635,6 +635,27 @@ def test_delete_without_any_where_args(self): with self.assertRaises(query.QueryException): TestModel.objects(attempt_id=0).delete() + @unittest.skipIf(CASSANDRA_VERSION < '3.0', "range deletion was introduce in C* 3.0, currently running {0}".format(CASSANDRA_VERSION)) + def test_range_deletion(self): + """ + Tests that range deletion work as expected + """ + + for i in range(10): + TestMultiClusteringModel.objects().create(one=1, two=i, three=i) + + TestMultiClusteringModel.objects(one=1, two__gte=0, two__lte=3).delete() + self.assertEqual(6, len(TestMultiClusteringModel.objects.all())) + + TestMultiClusteringModel.objects(one=1, two__gt=3, two__lt=5).delete() + self.assertEqual(5, len(TestMultiClusteringModel.objects.all())) + + TestMultiClusteringModel.objects(one=1, two__in=[8,9]).delete() + self.assertEqual(3, len(TestMultiClusteringModel.objects.all())) + + TestMultiClusteringModel.objects(one__in=[1], two__gte=0).delete() + self.assertEqual(0, len(TestMultiClusteringModel.objects.all())) + class TimeUUIDQueryModel(Model): diff --git a/tests/integration/cqlengine/statements/test_delete_statement.py b/tests/integration/cqlengine/statements/test_delete_statement.py index 701991f4fb..099294287b 100644 --- a/tests/integration/cqlengine/statements/test_delete_statement.py +++ b/tests/integration/cqlengine/statements/test_delete_statement.py @@ -60,3 +60,15 @@ def test_context(self): ds = DeleteStatement('table', None) ds.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) self.assertEqual(ds.get_context(), {'0': 'b'}) + + def test_range_deletion_rendering(self): + ds = DeleteStatement('table', None) + ds.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) + ds.add_where_clause(WhereClause('created_at', GreaterThanOrEqualOperator(), '0')) + ds.add_where_clause(WhereClause('created_at', LessThanOrEqualOperator(), '10')) + self.assertEqual(six.text_type(ds), 'DELETE FROM table WHERE "a" = %(0)s AND "created_at" >= %(1)s AND "created_at" <= %(2)s', six.text_type(ds)) + + ds = DeleteStatement('table', None) + ds.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) + ds.add_where_clause(WhereClause('created_at', InOperator(), ['0', '10', '20'])) + self.assertEqual(six.text_type(ds), 'DELETE FROM table WHERE "a" = %(0)s AND "created_at" IN %(1)s', six.text_type(ds)) From 16cc60a4fb895eb3a50b6cb473dcf31cf180d4a3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 1 Mar 2016 13:26:56 -0600 Subject: [PATCH 0388/2005] cqle: fix LWTException super __init__ Resolves "RuntimeError: maximum recursion depth exceeded while getting the str of an object" with this type. --- cassandra/cqlengine/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 46918e0eb4..6539b1d136 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -52,7 +52,7 @@ class LWTException(CQLEngineException): :param existing: The current state of the data which prevented the write. """ def __init__(self, existing): - super(LWTException, self).__init__(self) + super(LWTException, self).__init__("LWT Query was not applied") self.existing = existing From 78e83758d7df8f9dec786f90c7d251f28cf4f6b7 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 1 Mar 2016 14:19:24 -0600 Subject: [PATCH 0389/2005] Updating tests, and adding documentation for PYTHON-432 --- tests/integration/cqlengine/test_ifexists.py | 111 +++++++++++++++++-- 1 file changed, 101 insertions(+), 10 deletions(-) diff --git a/tests/integration/cqlengine/test_ifexists.py b/tests/integration/cqlengine/test_ifexists.py index e53e03e555..a063463f09 100644 --- a/tests/integration/cqlengine/test_ifexists.py +++ b/tests/integration/cqlengine/test_ifexists.py @@ -22,7 +22,7 @@ from cassandra.cqlengine import columns from cassandra.cqlengine.management import sync_table, drop_table from cassandra.cqlengine.models import Model -from cassandra.cqlengine.query import BatchQuery, LWTException, IfExistsWithCounterColumn +from cassandra.cqlengine.query import BatchQuery, BatchType, LWTException, IfExistsWithCounterColumn from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration import PROTOCOL_VERSION @@ -30,11 +30,18 @@ class TestIfExistsModel(Model): - id = columns.UUID(primary_key=True, default=lambda: uuid4()) + id = columns.Integer(primary_key=True, default=lambda: uuid4()) count = columns.Integer() text = columns.Text(required=False) +class TestIfExistsModel2(Model): + + id = columns.Integer(primary_key=True) + count = columns.Integer(primary_key=True, required=False) + text = columns.Text(required=False) + + class TestIfExistsWithCounterModel(Model): id = columns.UUID(primary_key=True, default=lambda: uuid4()) @@ -47,11 +54,13 @@ class BaseIfExistsTest(BaseCassEngTestCase): def setUpClass(cls): super(BaseIfExistsTest, cls).setUpClass() sync_table(TestIfExistsModel) + sync_table(TestIfExistsModel2) @classmethod def tearDownClass(cls): super(BaseIfExistsTest, cls).tearDownClass() drop_table(TestIfExistsModel) + drop_table(TestIfExistsModel2) class BaseIfExistsWithCounterTest(BaseCassEngTestCase): @@ -71,7 +80,15 @@ class IfExistsUpdateTests(BaseIfExistsTest): @unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_update_if_exists(self): - """ tests that update with if_exists work as expected """ + """ + Tests that update with if_exists work as expected + + @since 3.1 + @jira_ticket PYTHON-432 + @expected_result updates to be applied when primary key exists, otherwise LWT exception to be thrown + + @test_category object_mapper + """ id = uuid4() @@ -105,7 +122,15 @@ def test_update_if_exists(self): @unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_batch_update_if_exists_success(self): - """ tests that batch update with if_exists work as expected """ + """ + Tests that batch update with if_exists work as expected + + @since 3.1 + @jira_ticket PYTHON-432 + @expected_result + + @test_category object_mapper + """ id = uuid4() @@ -131,9 +156,39 @@ def test_batch_update_if_exists_success(self): self.assertEqual(tm.count, 8) self.assertEqual(tm.text, '111111111') + @unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") + def test_batch_mixed_update_if_exists_success(self): + """ + Tests that batch update with with one bad query will still fail with LWTException + + @since 3.1 + @jira_ticket PYTHON-432 + @expected_result + + @test_category object_mapper + """ + + m = TestIfExistsModel2.create(id=1, count=8, text='123456789') + with self.assertRaises(LWTException) as assertion: + with BatchQuery() as b: + m.text = '111111112' + m.batch(b).if_exists().update() # Does exist + n = TestIfExistsModel2(id=1, count=10, text="Failure") # Doesn't exist + n.batch(b).if_exists().update() + + self.assertEqual(assertion.exception.existing.get('[applied]'), False) + @unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_delete_if_exists(self): - """ tests that delete with if_exists work as expected """ + """ + Tests that delete with if_exists work, and throw proper LWT exception when they are are not applied + + @since 3.1 + @jira_ticket PYTHON-432 + @expected_result Deletes will be preformed if they exist, otherwise throw LWT exception + + @test_category object_mapper + """ id = uuid4() @@ -158,10 +213,17 @@ def test_delete_if_exists(self): '[applied]': False, }) - @unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_batch_delete_if_exists_success(self): - """ tests that batch delete with if_exists work as expected """ + """ + Tests that batch deletes with if_exists work, and throw proper LWTException when they are are not applied + + @since 3.1 + @jira_ticket PYTHON-432 + @expected_result Deletes will be preformed if they exist, otherwise throw LWTException + + @test_category object_mapper + """ id = uuid4() @@ -182,11 +244,34 @@ def test_batch_delete_if_exists_success(self): '[applied]': False, }) + @unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") + def test_batch_delete_mixed(self): + """ + Tests that batch deletes with multiple queries and throw proper LWTException when they are are not all applicable + + @since 3.1 + @jira_ticket PYTHON-432 + @expected_result If one delete clause doesn't exist all should fail. + + @test_category object_mapper + """ + + m = TestIfExistsModel2.create(id=3, count=8, text='123456789') + + with self.assertRaises(LWTException) as assertion: + with BatchQuery() as b: + m.batch(b).if_exists().delete() # Does exist + n = TestIfExistsModel2(id=3, count=42, text='1111111') # Doesn't exist + n.batch(b).if_exists().delete() + + self.assertEqual(assertion.exception.existing.get('[applied]'), False) + q = TestIfExistsModel2.objects(id=3, count=8) + self.assertEqual(len(q), 1) + class IfExistsQueryTest(BaseIfExistsTest): def test_if_exists_included_on_queryset_update(self): - """ tests that if_exists on queryset works as expected """ with mock.patch.object(self.session, 'execute') as m: TestIfExistsModel.objects(id=uuid4()).if_exists().update(count=42) @@ -216,8 +301,14 @@ def test_if_exists_included_on_delete(self): class IfExistWithCounterTest(BaseIfExistsWithCounterTest): def test_instance_raise_exception(self): - """ make sure exception is raised when calling - if_exists on table with counter column + """ + Tests if exists is used with a counter column model that exception are thrown + + @since 3.1 + @jira_ticket PYTHON-432 + @expected_result Deletes will be preformed if they exist, otherwise throw LWTException + + @test_category object_mapper """ id = uuid4() with self.assertRaises(IfExistsWithCounterColumn): From 1adf9f3a0e7b5e32b19088b6e29d672255420640 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 26 Feb 2016 17:57:17 -0600 Subject: [PATCH 0390/2005] Docs for PYTHON-346, some nit test changes. --- .../integration/cqlengine/model/test_udts.py | 32 +++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index 67d0f43ab1..dc7eb134c0 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -24,7 +24,7 @@ from cassandra.cqlengine.models import Model from cassandra.cqlengine.usertype import UserType, UserTypeDefinitionException from cassandra.cqlengine import columns, connection -from cassandra.cqlengine.management import sync_table, sync_type, create_keyspace_simple, drop_keyspace +from cassandra.cqlengine.management import sync_table, sync_type, create_keyspace_simple, drop_keyspace, drop_table from cassandra.util import Date, Time from tests.integration import PROTOCOL_VERSION @@ -447,6 +447,17 @@ class User(UserType): cluster.register_user_type.assert_called_with(models.DEFAULT_KEYSPACE, User.type_name(), User) def test_db_field_override(self): + """ + Tests for db_field override + + Tests to ensure that udt's in models can specify db_field for a particular field and that it will be honored. + + @since 3.1.0 + @jira_ticket PYTHON-346 + @expected_result The actual cassandra column will use the db_field specified. + + @test_category data_types:udt + """ class db_field_different(UserType): age = columns.Integer(db_field='a') name = columns.Text(db_field='n') @@ -461,6 +472,7 @@ class TheModel(Model): type_meta = cluster.metadata.keyspaces[TheModel._get_keyspace()].user_types[db_field_different.type_name()] type_fields = (db_field_different.age.column, db_field_different.name.column) + self.assertEqual(len(type_meta.field_names), len(type_fields)) for f in type_fields: self.assertIn(f.db_field_name, type_meta.field_names) @@ -479,17 +491,33 @@ class TheModel(Model): self.assertIsInstance(info, db_field_different) self.assertEqual(info.age, age) self.assertEqual(info.name, name) - # also excercise the db_Field mapping self.assertEqual(info.a, age) self.assertEqual(info.n, name) def test_db_field_overload(self): + """ + Tests for db_field UserTypeDefinitionException + + Test so that when we override a model's default field witha db_field that it errors appropriately + + @since 3.1.0 + @jira_ticket PYTHON-346 + @expected_result Setting a db_field to an existing field causes an exception to occur. + + @test_category data_types:udt + """ + with self.assertRaises(UserTypeDefinitionException): class something_silly(UserType): first_col = columns.Integer() second_col = columns.Text(db_field='first_col') + with self.assertRaises(UserTypeDefinitionException): + class something_silly_2(UserType): + first_col = columns.Integer(db_field="second_col") + second_col = columns.Text() + def test_set_udt_fields(self): # PYTHON-502 class User(UserType): From fad4dcaabc793f1fa39e5871ee61410596a61b50 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 1 Mar 2016 18:11:16 -0600 Subject: [PATCH 0391/2005] PYTHON-337 adding documentation to tests --- .../cqlengine/management/test_management.py | 25 +++++++-- .../model/test_class_construction.py | 54 +++++++++++++------ .../integration/cqlengine/model/test_model.py | 9 ++++ 3 files changed, 70 insertions(+), 18 deletions(-) diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index c16052b3a0..2e06ed6662 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -17,9 +17,7 @@ import unittest # noqa import mock -import warnings -from cassandra.cqlengine import CACHING_ALL, CACHING_NONE from cassandra.cqlengine.connection import get_session, get_cluster from cassandra.cqlengine import CQLEngineException from cassandra.cqlengine import management @@ -27,7 +25,7 @@ from cassandra.cqlengine.models import Model from cassandra.cqlengine import columns -from tests.integration import CASSANDRA_VERSION, PROTOCOL_VERSION +from tests.integration import PROTOCOL_VERSION, greaterthancass20 from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration.cqlengine.query.test_queryset import TestModel @@ -254,9 +252,19 @@ class IndexTests(BaseCassEngTestCase): def setUp(self): drop_table(IndexModel) + drop_table(IndexCaseSensitiveModel) def test_sync_index(self): + """ + Tests the default table creation, and ensures the table_name is created and surfaced correctly + in the table metadata + @since 3.1 + @jira_ticket PYTHON-337 + @expected_result table_name is lower case + + @test_category object_mapper + """ sync_table(IndexModel) table_meta = management._get_table_metadata(IndexModel) self.assertIn("index_index_model_second_key", table_meta.indexes) @@ -266,8 +274,18 @@ def test_sync_index(self): table_meta = management._get_table_metadata(IndexModel) self.assertIn("index_index_model_second_key", table_meta.indexes) + @greaterthancass20 def test_sync_index_case_sensitive(self): + """ + Tests the default table creation, and ensures the table_name is created correctly and surfaced correctly + in table metadata + + @since 3.1 + @jira_ticket PYTHON-337 + @expected_result table_name is lower case + @test_category object_mapper + """ sync_table(IndexCaseSensitiveModel) table_meta = management._get_table_metadata(IndexCaseSensitiveModel) self.assertIn("index_IndexModel_second_key", table_meta.indexes) @@ -286,6 +304,7 @@ def test_failure(self): with self.assertRaises(CQLEngineException): sync_table(self.FakeModel) + class StaticColumnTests(BaseCassEngTestCase): def test_static_columns(self): if PROTOCOL_VERSION < 2: diff --git a/tests/integration/cqlengine/model/test_class_construction.py b/tests/integration/cqlengine/model/test_class_construction.py index d61827c0af..1b05346edb 100644 --- a/tests/integration/cqlengine/model/test_class_construction.py +++ b/tests/integration/cqlengine/model/test_class_construction.py @@ -21,6 +21,7 @@ from tests.integration.cqlengine.base import BaseCassEngTestCase + class TestModelClassFunction(BaseCassEngTestCase): """ Tests verifying the behavior of the Model metaclass @@ -34,15 +35,15 @@ def test_column_attributes_handled_correctly(self): class TestModel(Model): - id = columns.UUID(primary_key=True, default=lambda:uuid4()) + id = columns.UUID(primary_key=True, default=lambda:uuid4()) text = columns.Text() - #check class attibutes + # check class attibutes self.assertHasAttr(TestModel, '_columns') self.assertHasAttr(TestModel, 'id') self.assertHasAttr(TestModel, 'text') - #check instance attributes + # check instance attributes inst = TestModel() self.assertHasAttr(inst, 'id') self.assertHasAttr(inst, 'text') @@ -56,7 +57,7 @@ def test_db_map(self): """ class WildDBNames(Model): - id = columns.UUID(primary_key=True, default=lambda:uuid4()) + id = columns.UUID(primary_key=True, default=lambda:uuid4()) content = columns.Text(db_field='words_and_whatnot') numbers = columns.Integer(db_field='integers_etc') @@ -92,8 +93,8 @@ def test_exception_raised_when_creating_class_without_pk(self): with self.assertRaises(ModelDefinitionException): class TestModel(Model): - count = columns.Integer() - text = columns.Text(required=False) + count = columns.Integer() + text = columns.Text(required=False) def test_value_managers_are_keeping_model_instances_isolated(self): """ @@ -101,7 +102,7 @@ def test_value_managers_are_keeping_model_instances_isolated(self): """ class Stuff(Model): - id = columns.UUID(primary_key=True, default=lambda:uuid4()) + id = columns.UUID(primary_key=True, default=lambda:uuid4()) num = columns.Integer() inst1 = Stuff(num=5) @@ -117,7 +118,7 @@ def test_superclass_fields_are_inherited(self): """ class TestModel(Model): - id = columns.UUID(primary_key=True, default=lambda:uuid4()) + id = columns.UUID(primary_key=True, default=lambda:uuid4()) text = columns.Text() class InheritedModel(TestModel): @@ -130,7 +131,7 @@ def test_column_family_name_generation(self): """ Tests that auto column family name generation works as expected """ class TestModel(Model): - id = columns.UUID(primary_key=True, default=lambda:uuid4()) + id = columns.UUID(primary_key=True, default=lambda:uuid4()) text = columns.Text() assert TestModel.column_family_name(include_keyspace=False) == 'test_model' @@ -163,7 +164,7 @@ def test_del_attribute_is_assigned_properly(self): """ Tests that columns that can be deleted have the del attribute """ class DelModel(Model): - id = columns.UUID(primary_key=True, default=lambda:uuid4()) + id = columns.UUID(primary_key=True, default=lambda:uuid4()) key = columns.Integer(primary_key=True) data = columns.Integer(required=False) @@ -177,25 +178,25 @@ def test_does_not_exist_exceptions_are_not_shared_between_model(self): class Model1(Model): - id = columns.UUID(primary_key=True, default=lambda:uuid4()) + id = columns.UUID(primary_key=True, default=lambda:uuid4()) class Model2(Model): - id = columns.UUID(primary_key=True, default=lambda:uuid4()) + id = columns.UUID(primary_key=True, default=lambda:uuid4()) try: raise Model1.DoesNotExist except Model2.DoesNotExist: assert False, "Model1 exception should not be caught by Model2" except Model1.DoesNotExist: - #expected + # expected pass def test_does_not_exist_inherits_from_superclass(self): """ Tests that a DoesNotExist exception can be caught by it's parent class DoesNotExist """ class Model1(Model): - id = columns.UUID(primary_key=True, default=lambda:uuid4()) + id = columns.UUID(primary_key=True, default=lambda:uuid4()) class Model2(Model1): pass @@ -203,7 +204,7 @@ class Model2(Model1): try: raise Model2.DoesNotExist except Model1.DoesNotExist: - #expected + # expected pass except Exception: assert False, "Model2 exception should not be caught by Model1" @@ -247,10 +248,29 @@ class RenamedCaseSensitiveTest(Model): id = columns.UUID(primary_key=True) def test_proper_table_naming_case_insensitive(self): + """ + Test to ensure case senstivity is not honored by default honored + + @since 3.1 + @jira_ticket PYTHON-337 + @expected_result table_names arel lowercase + + @test_category object_mapper + """ self.assertEqual(self.RenamedCaseInsensitiveTest.column_family_name(include_keyspace=False), 'manual_name') self.assertEqual(self.RenamedCaseInsensitiveTest.column_family_name(include_keyspace=True), 'whatever.manual_name') def test_proper_table_naming_case_sensitive(self): + """ + Test to ensure case is honored when the flag is correctly set. + + @since 3.1 + @jira_ticket PYTHON-337 + @expected_result table_name case is honored. + + @test_category object_mapper + """ + self.assertEqual(self.RenamedCaseSensitiveTest.column_family_name(include_keyspace=False), '"Manual_Name"') self.assertEqual(self.RenamedCaseSensitiveTest.column_family_name(include_keyspace=True), 'whatever."Manual_Name"') @@ -263,20 +283,24 @@ class ConcreteModel(AbstractModel): pkey = columns.Integer(primary_key=True) data = columns.Integer() + class AbstractModelWithCol(Model): __abstract__ = True pkey = columns.Integer(primary_key=True) + class ConcreteModelWithCol(AbstractModelWithCol): data = columns.Integer() + class AbstractModelWithFullCols(Model): __abstract__ = True pkey = columns.Integer(primary_key=True) data = columns.Integer() + class TestAbstractModelClasses(BaseCassEngTestCase): def test_id_field_is_not_created(self): diff --git a/tests/integration/cqlengine/model/test_model.py b/tests/integration/cqlengine/model/test_model.py index cb6bdeb1ba..e46698ff75 100644 --- a/tests/integration/cqlengine/model/test_model.py +++ b/tests/integration/cqlengine/model/test_model.py @@ -127,6 +127,15 @@ class TestModel(Model): self.assertEqual(TestModel.column_family_name(include_keyspace=False), "test_model") def test_column_family_case_sensitive(self): + """ + Test to ensure case sensitivity is honored when __table_name_case_sensitive__ flag is set + + @since 3.1 + @jira_ticket PYTHON-337 + @expected_result table_name case is respected + + @test_category object_mapper + """ class TestModel(Model): __table_name__ = 'TestModel' __table_name_case_sensitive__ = True From a60a65f10ee9bf631b07d3c60de7dbdccdaa54ff Mon Sep 17 00:00:00 2001 From: alanjds Date: Thu, 20 Aug 2015 14:31:29 +0000 Subject: [PATCH 0392/2005] Debug for automatic pagination machinery --- cassandra/cluster.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 13afecd245..5cc4aa6f6f 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2829,7 +2829,9 @@ def start_fetching_next_page(self): .. versionadded:: 2.0.0 """ + log.debug('Pagination: Fetching next page') if not self._paging_state: + log.debug('Pagination: No more pages to fetch') raise QueryExhausted() self._make_query_plan() From ddff533310cfb18e0aa01b3a67b5f15f0a823b06 Mon Sep 17 00:00:00 2001 From: alanjds Date: Thu, 20 Aug 2015 14:34:09 +0000 Subject: [PATCH 0393/2005] AbstractQuerySet.__iter__ is no more greedy. Fetches on demand. The __iter__ now yields until the end of actual page, than start to fetch the next page. --- cassandra/cqlengine/query.py | 41 ++++++++++++++++++++++++------------ 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 46918e0eb4..da4cba4fa8 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -289,8 +289,12 @@ def __init__(self, model): # results cache self._result_cache = None self._result_idx = None + self._result_generator = None self._distinct_fields = None + + self._count = None + self._batch = None self._ttl = getattr(model, '__default_ttl__', None) self._consistency = None @@ -341,7 +345,7 @@ def __deepcopy__(self, memo): def __len__(self): self._execute_query() - return len(self._result_cache) + return self.count() # ----query generation / execution---- @@ -374,7 +378,8 @@ def _execute_query(self): if self._batch: raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode") if self._result_cache is None: - self._result_cache = list(self._execute(self._select_query())) + self._result_generator = (i for i in self._execute(self._select_query())) + self._result_cache = [] self._construct_result = self._get_result_constructor() def _fill_result_cache_to_idx(self, idx): @@ -388,13 +393,24 @@ def _fill_result_cache_to_idx(self, idx): else: for idx in range(qty): self._result_idx += 1 - self._result_cache[self._result_idx] = self._construct_result(self._result_cache[self._result_idx]) + while True: + try: + self._result_cache[self._result_idx] = self._construct_result(self._result_cache[self._result_idx]) + break + except IndexError: + self._result_cache.append(next(self._result_generator)) def __iter__(self): self._execute_query() - for idx in range(len(self._result_cache)): - instance = self._result_cache[idx] + for idx in range(self.count()): + while True: + try: + instance = self._result_cache[idx] + break + except IndexError: + self._result_cache.append(next(self._result_generator)) + if isinstance(instance, dict): self._fill_result_cache_to_idx(idx) yield self._result_cache[idx] @@ -402,7 +418,7 @@ def __iter__(self): def __getitem__(self, s): self._execute_query() - num_results = len(self._result_cache) + num_results = self.count() if isinstance(s, slice): # calculate the amount of results that need to be loaded @@ -615,10 +631,10 @@ def get(self, *args, **kwargs): return self.filter(*args, **kwargs).get() self._execute_query() - if len(self._result_cache) == 0: + if self.count() == 0: raise self.model.DoesNotExist - elif len(self._result_cache) > 1: - raise self.model.MultipleObjectsReturned('{0} objects found'.format(len(self._result_cache))) + elif self.count() > 1: + raise self.model.MultipleObjectsReturned('{0} objects found'.format(self.count())) else: return self[0] @@ -679,13 +695,12 @@ def count(self): if self._batch: raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode") - if self._result_cache is None: + if self._count is None: query = self._select_query() query.count = True result = self._execute(query) - return result[0]['count'] - else: - return len(self._result_cache) + self._count = result[0]['count'] + return self._count def distinct(self, distinct_fields=None): """ From 419554ffa26915d8d04be82a244261876bceb202 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 2 Mar 2016 11:21:48 -0600 Subject: [PATCH 0394/2005] Adding docs for python-445 --- .../integration/cqlengine/test_batch_query.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/tests/integration/cqlengine/test_batch_query.py b/tests/integration/cqlengine/test_batch_query.py index 7d136fe60c..5fd9453a72 100644 --- a/tests/integration/cqlengine/test_batch_query.py +++ b/tests/integration/cqlengine/test_batch_query.py @@ -203,6 +203,15 @@ class SomeError(Exception): self.assertEqual(len(call_history), 2) def test_callbacks_work_multiple_times(self): + """ + Tests that multiple executions of execute on a batch statement + logs a warning, and that we don't encounter an attribute error. + @since 3.1 + @jira_ticket PYTHON-445 + @expected_result warning message is logged + + @test_category object_mapper + """ call_history = [] def my_callback(*args, **kwargs): @@ -217,6 +226,16 @@ def my_callback(*args, **kwargs): self.assertRegex(str(w[0].message), r"^Batch.*multiple.*") def test_disable_multiple_callback_warning(self): + """ + Tests that multiple executions of a batch statement + don't log a warning when warn_multiple_exec flag is set, and + that we don't encounter an attribute error. + @since 3.1 + @jira_ticket PYTHON-445 + @expected_result warning message is logged + + @test_category object_mapper + """ call_history = [] def my_callback(*args, **kwargs): From d3cdb168e3a9ac17211688846f4641ca0fe12ff1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 2 Mar 2016 13:50:36 -0600 Subject: [PATCH 0395/2005] cqle: make ValueManager delete recognize converted collections PYTHON-475 --- cassandra/cqlengine/columns.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 10e4e32bfa..3c353d9001 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -37,7 +37,7 @@ def __init__(self, instance, column, value): @property def deleted(self): - return self.value is None and (self.explicit or self.previous_value is not None) + return self.column._val_is_null(self.value) and (self.explicit or self.previous_value is not None) @property def changed(self): From bcd42baf314ba9792fe35ac3918b70f4351cd7c1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 2 Mar 2016 14:10:21 -0600 Subject: [PATCH 0396/2005] cqle: always set default keyspace from conn.set_session PYTHON-486 --- cassandra/cqlengine/connection.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 7d53af6917..5431f08e7d 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -81,9 +81,9 @@ def set_session(s): session = s cluster = s.cluster - # Set default keyspace from given session's keyspace if not already set - from cassandra.cqlengine import models - if not models.DEFAULT_KEYSPACE and session.keyspace: + # Set default keyspace from given session's keyspace + if session.keyspace: + from cassandra.cqlengine import models models.DEFAULT_KEYSPACE = session.keyspace _setup_session(session) From 6f1fba939a06ee3581e5ddd7253ee3c6f6173d52 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 2 Mar 2016 12:24:58 -0600 Subject: [PATCH 0397/2005] PYTHON-475 adding integration test --- .../cqlengine/model/test_model_io.py | 67 +++++++++++++++++++ .../cqlengine/query/test_updates.py | 15 +++-- 2 files changed, 75 insertions(+), 7 deletions(-) diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index f513c2e191..b05c8b7cb4 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -41,6 +41,16 @@ class TestModel(Model): a_bool = columns.Boolean(default=False) +class TestModelSave(Model): + partition = columns.UUID(primary_key=True, default=uuid4) + cluster = columns.Integer(primary_key=True) + count = columns.Integer(required=False) + text = columns.Text(required=False, index=True) + text_set = columns.Set(columns.Text, required=False) + text_list = columns.List(columns.Text, required=False) + text_map = columns.Map(columns.Text, columns.Text, required=False) + + class TestModelIO(BaseCassEngTestCase): @classmethod @@ -310,13 +320,16 @@ class TestUpdating(BaseCassEngTestCase): @classmethod def setUpClass(cls): super(TestUpdating, cls).setUpClass() + drop_table(TestModelSave) drop_table(TestMultiKeyModel) + sync_table(TestModelSave) sync_table(TestMultiKeyModel) @classmethod def tearDownClass(cls): super(TestUpdating, cls).tearDownClass() drop_table(TestMultiKeyModel) + drop_table(TestModelSave) def setUp(self): super(TestUpdating, self).setUp() @@ -448,6 +461,60 @@ def test_previous_value_tracking_on_instantiation(self): self.assertTrue(self.instance._values['count'].previous_value is None) self.assertTrue(self.instance.count is None) + def test_save_to_none(self): + """ + Test update of column value of None with save() function. + + Under specific scenarios calling save on a None value wouldn't update + previous values. This issue only manifests with a new instantiation of the model, + if existing model is modified and updated the issue will not occur. + + @since 3.0.0 + @jira_ticket PYTHON-475 + @expected_result column value should be updated to None + + @test_category object_mapper + """ + + partition = uuid4() + cluster = 1 + text = 'set' + text_list = ['set'] + text_set = set(("set",)) + text_map = {"set": 'set'} + initial = TestModelSave(partition=partition, cluster=cluster, text=text, text_list=text_list, + text_set=text_set, text_map=text_map) + initial.save() + current = TestModelSave.objects.get(partition=partition, cluster=cluster) + self.assertEqual(current.text, text) + self.assertEqual(current.text_list, text_list) + self.assertEqual(current.text_set, text_set) + self.assertEqual(current.text_map, text_map) + + next = TestModelSave(partition=partition, cluster=cluster, text=None, text_list=None, + text_set=None, text_map=None) + + next.save() + current = TestModelSave.objects.get(partition=partition, cluster=cluster) + self.assertEqual(current.text, None) + self.assertEqual(current.text_list, []) + self.assertEqual(current.text_set, set()) + self.assertEqual(current.text_map, {}) + + +def test_none_filter_fails(): + class NoneFilterModel(Model): + + pk = columns.Integer(primary_key=True) + v = columns.Integer() + sync_table(NoneFilterModel) + + try: + NoneFilterModel.objects(pk=None) + raise Exception("fail") + except CQLEngineException as e: + pass + class TestCanUpdate(BaseCassEngTestCase): diff --git a/tests/integration/cqlengine/query/test_updates.py b/tests/integration/cqlengine/query/test_updates.py index 49e6f7f145..afa3a7f096 100644 --- a/tests/integration/cqlengine/query/test_updates.py +++ b/tests/integration/cqlengine/query/test_updates.py @@ -24,13 +24,14 @@ class TestQueryUpdateModel(Model): - partition = columns.UUID(primary_key=True, default=uuid4) - cluster = columns.Integer(primary_key=True) - count = columns.Integer(required=False) - text = columns.Text(required=False, index=True) - text_set = columns.Set(columns.Text, required=False) - text_list = columns.List(columns.Text, required=False) - text_map = columns.Map(columns.Text, columns.Text, required=False) + partition = columns.UUID(primary_key=True, default=uuid4) + cluster = columns.Integer(primary_key=True) + count = columns.Integer(required=False) + text = columns.Text(required=False, index=True) + text_set = columns.Set(columns.Text, required=False) + text_list = columns.List(columns.Text, required=False) + text_map = columns.Map(columns.Text, columns.Text, required=False) + class QueryUpdateTests(BaseCassEngTestCase): From 2feb57fe96140b67cbcd7278012bb2480e84e8f0 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 2 Mar 2016 16:18:30 -0500 Subject: [PATCH 0398/2005] perf improvements on cqlengine queryset __iter__ and __getitem__ --- cassandra/cqlengine/query.py | 53 ++++++++++++++++++------------- cassandra/cqlengine/statements.py | 9 ++++-- 2 files changed, 37 insertions(+), 25 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index da4cba4fa8..2d05f7de12 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -328,7 +328,7 @@ def __call__(self, *args, **kwargs): def __deepcopy__(self, memo): clone = self.__class__(self.model) for k, v in self.__dict__.items(): - if k in ['_con', '_cur', '_result_cache', '_result_idx']: # don't clone these + if k in ['_con', '_cur', '_result_cache', '_result_idx', '_result_generator']: # don't clone these clone.__dict__[k] = None elif k == '_batch': # we need to keep the same batch instance across @@ -403,45 +403,53 @@ def _fill_result_cache_to_idx(self, idx): def __iter__(self): self._execute_query() - for idx in range(self.count()): - while True: + idx = 0 + while True: + if len(self._result_cache) <= idx: try: - instance = self._result_cache[idx] - break - except IndexError: self._result_cache.append(next(self._result_generator)) + except StopIteration: + break + instance = self._result_cache[idx] if isinstance(instance, dict): self._fill_result_cache_to_idx(idx) yield self._result_cache[idx] + idx += 1 + def __getitem__(self, s): self._execute_query() - num_results = self.count() - if isinstance(s, slice): # calculate the amount of results that need to be loaded - end = num_results if s.step is None else s.step - if end < 0: - end += num_results - else: - end -= 1 - self._fill_result_cache_to_idx(end) + end = s.stop + if s.start < 0 or s.stop < 0: + end = self.count() + + try: + self._fill_result_cache_to_idx(end) + except StopIteration: + pass + return self._result_cache[s.start:s.stop:s.step] else: - # return the object at this index - s = int(s) + try: + s = int(s) + except (ValueError, TypeError): + raise TypeError('QuerySet indices must be integers') - # handle negative indexing + # Using negative indexing is costly since we have to execute a count() if s < 0: + num_results = self.count() s += num_results - if s >= num_results: - raise IndexError - else: + try: self._fill_result_cache_to_idx(s) - return self._result_cache[s] + except StopIteration: + raise IndexError + + return self._result_cache[s] def _get_result_constructor(self): """ @@ -699,7 +707,8 @@ def count(self): query = self._select_query() query.count = True result = self._execute(query) - self._count = result[0]['count'] + count_row = result[0].popitem() + self._count = count_row[1] return self._count def distinct(self, distinct_fields=None): diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 39f09307d0..7bf6f27fcf 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -577,10 +577,13 @@ def __init__(self, def __unicode__(self): qs = ['SELECT'] - if self.count: + if self.distinct_fields: + if self.count: + qs += ['DISTINCT COUNT({0})'.format(', '.join(['"{0}"'.format(f) for f in self.distinct_fields]))] + else: + qs += ['DISTINCT {0}'.format(', '.join(['"{0}"'.format(f) for f in self.distinct_fields]))] + elif self.count: qs += ['COUNT(*)'] - elif self.distinct_fields: - qs += ['DISTINCT {0}'.format(', '.join(['"{0}"'.format(f) for f in self.distinct_fields]))] else: qs += [', '.join(['"{0}"'.format(f) for f in self.fields]) if self.fields else '*'] qs += ['FROM', self.table] From 58f5e2e033138c1389b003064be8ab0cecfdd08c Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 2 Mar 2016 16:33:01 -0500 Subject: [PATCH 0399/2005] Add the ability to disable a query limit with .limit(None) --- cassandra/cqlengine/query.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 2d05f7de12..38918ced2d 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -758,7 +758,11 @@ def limit(self, v): for user in User.objects().limit(100): print(user) """ - if not (v is None or isinstance(v, six.integer_types)): + + if v is None: + v = 0 + + if not isinstance(v, six.integer_types): raise TypeError if v == self._limit: return self From fe14a7847af81f38fefccbe8f521106d11199f4f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 3 Mar 2016 11:48:31 -0600 Subject: [PATCH 0400/2005] [ci skip] 3.1 changelog update --- CHANGELOG.rst | 34 ++++++++++++++++++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index c7b4b0d54b..a0ee012bb9 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -6,14 +6,44 @@ Features * Pass name of server auth class to AuthProvider (PYTHON-454) * Surface schema agreed flag for DDL statements (PYTHON-458) * Automatically convert float and int to Decimal on serialization (PYTHON-468) -* Expose prior state information via cqlengine LWTException (github #343) +* Eventlet Reactor IO improvement (PYTHON-495) +* Make pure Python ProtocolHandler available even when Cython is present (PYTHON-501) +* Optional Cython deserializer for bytes as bytearray (PYTHON-503) +* cqlengine: Expose prior state information via cqlengine LWTException (github #343, PYTHON-336) +* cqlengine: Collection datatype "contains" operators support (Cassandra 2.1) #278 (PYTHON-258) +* cqlengine: Add DISTINCT query operator (PYTHON-266) +* cqlengine: Tuple cqlengine api (PYTHON-306) +* cqlengine: Add support for UPDATE/DELETE ... IF EXISTS statements (PYTHON-432) +* cqlengine: Allow nested container types (PYTHON-478) +* cqlengine: Add ability to set query's fetch_size and limit (PYTHON-323) +* cqlengine: Fix inserting None for model save (PYTHON-475) +* cqlengine: Internalize default keyspace from successive set_session (PYTHON-486) +* cqlengine: Warn when Model.create() on Counters (to be deprecated) (PYTHON-333) Bug Fixes --------- * Bus error (alignment issues) when running cython on some ARM platforms (PYTHON-450) * Overflow when decoding large collections (cython) (PYTHON-459) -* Crash when updating a UDT column with a None value (github #467) * Timer heap comparison issue with Python 3 (github #466) +* Cython deserializer date overflow at 2^31 - 1 (PYTHON-452) +* Decode error encountered when cython deserializing large map results (PYTHON-459) +* Don't require Cython for build if compiler or Python header not present (PYTHON-471) +* Unorderable types in task scheduling with Python 3 (h(PYTHON-473) +* cqlengine: Fix crash when updating a UDT column with a None value (github #467) +* cqlengine: Race condition in ..connection.execute with lazy_connect (PYTHON-310) +* cqlengine: doesn't support case sensitive column family names (PYTHON-337) +* cqlengine: UserDefinedType mandatory in create or update (PYTHON-344) +* cqlengine: db_field breaks UserType (PYTHON-346) +* cqlengine: UDT badly quoted (PYTHON-347) +* cqlengine: Use of db_field on primary key prevents querying except while tracing. (PYTHON-351) +* cqlengine: DateType.deserialize being called with one argument vs two (PYTHON-354) +* cqlengine: Querying without setting up connection now throws AttributeError and not CQLEngineException (PYTHON-395) +* cqlengine: BatchQuery multiple time executing execute statements. (PYTHON-445) +* cqlengine: Better error for management functions when no connection set (PYTHON-451) +* cqlengine: Handle None values for UDT attributes in cqlengine (PYTHON-470) +* cqlengine: EQ doesn't map to a QueryOperator (setup race condition) (PYTHON-476) +* cqlengine: class.MultipleObjectsReturned has DoesNotExist as base class (PYTHON-489) +* cqlengine: Typo in cqlengine UserType __len__ breaks attribute assignment (PYTHON-502) 3.0.0 ===== From 6f67300d0af831a1143c1148e0bb136ebf5d1fff Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 3 Mar 2016 12:04:49 -0600 Subject: [PATCH 0401/2005] Some design/impl guidelines for contributing --- CONTRIBUTING.rst | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 09e307d63b..6d39d8df69 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -20,7 +20,14 @@ Pull Requests If you're able to fix a bug yourself, you can [fork the repository](https://help.github.com/articles/fork-a-repo/) and submit a [Pull Request](https://help.github.com/articles/using-pull-requests/) with the fix. Please include tests demonstrating the issue and fix. For examples of how to run the tests, consult the `dev README `_. - Contribution License Agreement ------------------------------ -To protect the community, all contributors are required to [sign the DataStax Contribution License Agreement](http://cla.datastax.com/). The process is completely electronic and should only take a few minutes. \ No newline at end of file +To protect the community, all contributors are required to [sign the DataStax Contribution License Agreement](http://cla.datastax.com/). The process is completely electronic and should only take a few minutes. + +Design and Implementation Guidelines +------------------------------------ +- We support Python 2.6+, so any changes must work in any of these runtimes (we use ``six``, ``futures``, and some internal backports for compatability) +- We have integrations (notably Cassandra cqlsh) that require pure Python and minimal external dependencies. We try to avoid new external dependencies. Where compiled extensions are concerned, there should always be a pure Python fallback implementation. +- This project follows `semantic versioning `_, so breaking API changes will only be introduced in major versions. +- Legacy ``cqlengine`` has varying degrees of overreaching client-side validation. Going forward, we will avoid client validation where server feedback is adequate and not overly expensive. +- When writing tests, try to achieve maximal coverage in unit tests (where it is faster to run across many runtimes). Integration tests are good for things where we need to test server interaction, or where it is important to test across different server versions (emulating in unit tests would not be effective). From 3e379abf8fc2a26d2907eb043b6f08769060362e Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 3 Mar 2016 13:22:52 -0500 Subject: [PATCH 0402/2005] Add QuerySet.fetch_size() support --- cassandra/cqlengine/connection.py | 3 +-- cassandra/cqlengine/query.py | 28 +++++++++++++++++++++++++++- cassandra/cqlengine/statements.py | 10 +++++++--- 3 files changed, 35 insertions(+), 6 deletions(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 7d53af6917..1e043822b5 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -162,8 +162,7 @@ def execute(query, params=None, consistency_level=None, timeout=NOT_SET): elif isinstance(query, BaseCQLStatement): params = query.get_context() - query = str(query) - query = SimpleStatement(query, consistency_level=consistency_level) + query = SimpleStatement(str(query), consistency_level=consistency_level, fetch_size=query.fetch_size) elif isinstance(query, six.string_types): query = SimpleStatement(query, consistency_level=consistency_level) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 38918ced2d..963c4dceb9 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -302,6 +302,7 @@ def __init__(self, model): self._if_not_exists = False self._timeout = connection.NOT_SET self._if_exists = False + self._fetch_size = None @property def column_family_name(self): @@ -369,7 +370,8 @@ def _select_query(self): order_by=self._order, limit=self._limit, allow_filtering=self._allow_filtering, - distinct_fields=self._distinct_fields + distinct_fields=self._distinct_fields, + fetch_size=self._fetch_size ) # ----Reads------ @@ -774,6 +776,30 @@ def limit(self, v): clone._limit = v return clone + def fetch_size(self, v): + """ + Sets the number of rows that are fetched at a time. + + *Note that driver's default fetch size is 5000. + + .. code-block:: python + + for user in User.objects().fetch_size(500): + print(user) + """ + + if not isinstance(v, six.integer_types): + raise TypeError + if v == self._fetch_size: + return self + + if v < 1: + raise QueryException("fetch size less than 1 is not allowed") + + clone = copy.deepcopy(self) + clone._fetch_size = v + return clone + def allow_filtering(self): """ Enables the (usually) unwise practive of querying on a clustering key without also defining a partition key diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 7bf6f27fcf..73f9197ed3 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -16,6 +16,7 @@ import time import six +from cassandra.query import FETCH_SIZE_UNSET from cassandra.cqlengine import UnicodeMixin from cassandra.cqlengine.functions import QueryValue from cassandra.cqlengine.operators import BaseWhereOperator, InOperator @@ -470,13 +471,14 @@ def __unicode__(self): class BaseCQLStatement(UnicodeMixin): """ The base cql statement class """ - def __init__(self, table, consistency=None, timestamp=None, where=None): + def __init__(self, table, consistency=None, timestamp=None, where=None, fetch_size=None): super(BaseCQLStatement, self).__init__() self.table = table self.consistency = consistency self.context_id = 0 self.context_counter = self.context_id self.timestamp = timestamp + self.fetch_size = fetch_size if fetch_size else FETCH_SIZE_UNSET self.where_clauses = [] for clause in where or []: @@ -556,7 +558,8 @@ def __init__(self, order_by=None, limit=None, allow_filtering=False, - distinct_fields=None): + distinct_fields=None, + fetch_size=None): """ :param where @@ -565,7 +568,8 @@ def __init__(self, super(SelectStatement, self).__init__( table, consistency=consistency, - where=where + where=where, + fetch_size=fetch_size ) self.fields = [fields] if isinstance(fields, six.string_types) else (fields or []) From 9405a5f27c069d7786ba938c39a1617d3bdf16bc Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 3 Mar 2016 13:35:02 -0600 Subject: [PATCH 0403/2005] [ci skip] reclassify ticket in changelog --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index a0ee012bb9..e5f6059f5c 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -16,7 +16,6 @@ Features * cqlengine: Add support for UPDATE/DELETE ... IF EXISTS statements (PYTHON-432) * cqlengine: Allow nested container types (PYTHON-478) * cqlengine: Add ability to set query's fetch_size and limit (PYTHON-323) -* cqlengine: Fix inserting None for model save (PYTHON-475) * cqlengine: Internalize default keyspace from successive set_session (PYTHON-486) * cqlengine: Warn when Model.create() on Counters (to be deprecated) (PYTHON-333) @@ -41,6 +40,7 @@ Bug Fixes * cqlengine: BatchQuery multiple time executing execute statements. (PYTHON-445) * cqlengine: Better error for management functions when no connection set (PYTHON-451) * cqlengine: Handle None values for UDT attributes in cqlengine (PYTHON-470) +* cqlengine: Fix inserting None for model save (PYTHON-475) * cqlengine: EQ doesn't map to a QueryOperator (setup race condition) (PYTHON-476) * cqlengine: class.MultipleObjectsReturned has DoesNotExist as base class (PYTHON-489) * cqlengine: Typo in cqlengine UserType __len__ breaks attribute assignment (PYTHON-502) From 84c9c8c04b043a64b65dbee75697c12d79207c63 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 3 Mar 2016 14:41:49 -0500 Subject: [PATCH 0404/2005] Add some tests for cqlengine queryset slicing --- .../cqlengine/query/test_queryset.py | 21 ++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index e0cc6ab4b0..a1609f3c1f 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -554,16 +554,31 @@ def test_negative_indexing_works_properly(self): def test_slicing_works_properly(self): q = TestModel.objects(test_id=0).order_by('attempt_id') expected_order = [0, 1, 2, 3] + for model, expect in zip(q[1:3], expected_order[1:3]): - assert model.attempt_id == expect + self.assertEqual(model.attempt_id, expect) + + for model, expect in zip(q[0:3:2], expected_order[0:3:2]): + self.assertEqual(model.attempt_id, expect) def test_negative_slicing(self): q = TestModel.objects(test_id=0).order_by('attempt_id') expected_order = [0, 1, 2, 3] + for model, expect in zip(q[-3:], expected_order[-3:]): - assert model.attempt_id == expect + self.assertEqual(model.attempt_id, expect) + for model, expect in zip(q[:-1], expected_order[:-1]): - assert model.attempt_id == expect + self.assertEqual(model.attempt_id, expect) + + for model, expect in zip(q[1:-1], expected_order[1:-1]): + self.assertEqual(model.attempt_id, expect) + + for model, expect in zip(q[-3:-1], expected_order[-3:-1]): + self.assertEqual(model.attempt_id, expect) + + for model, expect in zip(q[-3:-1:2], expected_order[-3:-1:2]): + self.assertEqual(model.attempt_id, expect) class TestQuerySetValidation(BaseQuerySetUsage): From 72a2b0ec60990b0e0d65a01a8ea871f009b6a53b Mon Sep 17 00:00:00 2001 From: Sergey Kirillov Date: Thu, 3 Mar 2016 22:05:11 +0200 Subject: [PATCH 0405/2005] Added default_serial_consistency_level --- cassandra/cluster.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 402a4d3822..b39a617cf8 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1429,6 +1429,7 @@ class Session(object): """ default_consistency_level = ConsistencyLevel.LOCAL_ONE + """ The default :class:`~ConsistencyLevel` for operations executed through this session. This default may be overridden by setting the @@ -1441,6 +1442,10 @@ class Session(object): default changed from ONE to LOCAL_ONE """ + default_serial_consistency_level = None + """ + """ + max_trace_wait = 2.0 """ The maximum amount of time (in seconds) the driver will wait for trace @@ -1632,6 +1637,8 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time query = query.bind(parameters) cl = query.consistency_level if query.consistency_level is not None else self.default_consistency_level + serial_cl = query.serial_consistency_level if query.serial_consistency_level is not None else self.default_serial_consistency_level + fetch_size = query.fetch_size if fetch_size is FETCH_SIZE_UNSET and self._protocol_version >= 2: fetch_size = self.default_fetch_size @@ -1648,12 +1655,12 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time if parameters: query_string = bind_params(query_string, parameters, self.encoder) message = QueryMessage( - query_string, cl, query.serial_consistency_level, + query_string, cl, serial_cl, fetch_size, timestamp=timestamp) elif isinstance(query, BoundStatement): message = ExecuteMessage( query.prepared_statement.query_id, query.values, cl, - query.serial_consistency_level, fetch_size, + serial_cl, fetch_size, timestamp=timestamp) prepared_statement = query.prepared_statement elif isinstance(query, BatchStatement): @@ -1664,7 +1671,7 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time "setting Cluster.protocol_version to 2 to support this operation.") message = BatchMessage( query.batch_type, query._statements_and_parameters, cl, - query.serial_consistency_level, timestamp) + serial_cl, timestamp) message.tracing = trace From dabb8d412b27aa91b772d625e41c6711eed1b4a1 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 3 Mar 2016 15:17:04 -0500 Subject: [PATCH 0406/2005] Add tests for distcint with count --- tests/integration/cqlengine/query/test_queryset.py | 7 +++++++ .../cqlengine/statements/test_select_statement.py | 3 +++ 2 files changed, 10 insertions(+) diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index a1609f3c1f..7fffe8137b 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -485,6 +485,13 @@ def test_zero_result(self): q = TestModel.objects.distinct(['test_id']).filter(test_id__in=[52]) self.assertEqual(len(q), 0) + def test_distinct_with_explicit_count(self): + q = TestModel.objects.distinct(['test_id']) + self.assertEqual(q.count(), 3) + + q = TestModel.objects.distinct(['test_id']).filter(test_id__in=[1, 2]) + self.assertEqual(q.count(), 2) + class TestQuerySetOrdering(BaseQuerySetUsage): diff --git a/tests/integration/cqlengine/statements/test_select_statement.py b/tests/integration/cqlengine/statements/test_select_statement.py index 33b1ef07e9..3f1c371c9e 100644 --- a/tests/integration/cqlengine/statements/test_select_statement.py +++ b/tests/integration/cqlengine/statements/test_select_statement.py @@ -64,6 +64,9 @@ def test_distinct(self): ss = SelectStatement('table', distinct_fields=['field1', 'field2']) self.assertEqual(six.text_type(ss), 'SELECT DISTINCT "field1", "field2" FROM table') + ss = SelectStatement('table', distinct_fields=['field1'], count=True) + self.assertEqual(six.text_type(ss), 'SELECT DISTINCT COUNT("field1") FROM table') + def test_context(self): ss = SelectStatement('table') ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) From a4d329eceb1bc3854d14023f6a0ba57dbcfa1d64 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 3 Mar 2016 15:35:38 -0500 Subject: [PATCH 0407/2005] Add tests for limit(None) and fetch_size(...) --- cassandra/cluster.py | 2 -- .../cqlengine/statements/test_base_statement.py | 12 ++++++++++++ .../cqlengine/statements/test_select_statement.py | 12 ++++++++++++ 3 files changed, 24 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 5cc4aa6f6f..13afecd245 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2829,9 +2829,7 @@ def start_fetching_next_page(self): .. versionadded:: 2.0.0 """ - log.debug('Pagination: Fetching next page') if not self._paging_state: - log.debug('Pagination: No more pages to fetch') raise QueryExhausted() self._make_query_plan() diff --git a/tests/integration/cqlengine/statements/test_base_statement.py b/tests/integration/cqlengine/statements/test_base_statement.py index 2a9bb80fae..1bda588eff 100644 --- a/tests/integration/cqlengine/statements/test_base_statement.py +++ b/tests/integration/cqlengine/statements/test_base_statement.py @@ -16,6 +16,7 @@ except ImportError: import unittest # noqa +from cassandra.query import FETCH_SIZE_UNSET from cassandra.cqlengine.statements import BaseCQLStatement, StatementException @@ -26,3 +27,14 @@ def test_where_clause_type_checking(self): stmt = BaseCQLStatement('table', []) with self.assertRaises(StatementException): stmt.add_where_clause('x=5') + + def test_fetch_size(self): + """ tests that fetch_size is correctly set """ + stmt = BaseCQLStatement('table', None, fetch_size=1000) + self.assertEqual(stmt.fetch_size, 1000) + + stmt = BaseCQLStatement('table', None, fetch_size=None) + self.assertEqual(stmt.fetch_size, FETCH_SIZE_UNSET) + + stmt = BaseCQLStatement('table', None) + self.assertEqual(stmt.fetch_size, FETCH_SIZE_UNSET) diff --git a/tests/integration/cqlengine/statements/test_select_statement.py b/tests/integration/cqlengine/statements/test_select_statement.py index 3f1c371c9e..6612333809 100644 --- a/tests/integration/cqlengine/statements/test_select_statement.py +++ b/tests/integration/cqlengine/statements/test_select_statement.py @@ -96,3 +96,15 @@ def test_additional_rendering(self): self.assertIn('ORDER BY x, y', qstr) self.assertIn('ALLOW FILTERING', qstr) + def test_limit_rendering(self): + ss = SelectStatement('table', None, limit=10) + qstr = six.text_type(ss) + self.assertIn('LIMIT 10', qstr) + + ss = SelectStatement('table', None, limit=0) + qstr = six.text_type(ss) + self.assertNotIn('LIMIT', qstr) + + ss = SelectStatement('table', None, limit=None) + qstr = six.text_type(ss) + self.assertNotIn('LIMIT', qstr) From a51a5736b761010f6d27076e1565a2a1bd18e187 Mon Sep 17 00:00:00 2001 From: Sergey Kirillov Date: Fri, 4 Mar 2016 11:28:32 +0200 Subject: [PATCH 0408/2005] Added documentation for default_serial_consistency_level --- cassandra/cluster.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index b39a617cf8..b71af5eb06 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1444,6 +1444,9 @@ class Session(object): default_serial_consistency_level = None """ + The default :class:`~ConsistencyLevel` for serial phase of conditional updates executed through + this session. This default may be overridden by setting the + :attr:`~.Statement.serial_consistency_level` on individual statements. """ max_trace_wait = 2.0 From 7a7012b6b3d3aeabd9134bb45d7efc1e4f88c48c Mon Sep 17 00:00:00 2001 From: Sigmund Augdal Date: Fri, 4 Mar 2016 11:34:27 +0100 Subject: [PATCH 0409/2005] Fix EventletConnection without ssl on recent versions of eventlet It seems recent versions of eventlet (tested on 0.18.4) has changed which exception is raised on timeout during recv(). This was not handled properly by PR #485 which caused a regression. --- cassandra/io/eventletreactor.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index 8fda42fb4e..dfaea8bfb4 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -38,7 +38,8 @@ def is_timeout(err): return ( err in (EINPROGRESS, EALREADY, EWOULDBLOCK) or (err == EINVAL and os.name in ('nt', 'ce')) or - (isinstance(err, ssl.SSLError) and err.args[0] == 'timed out') + (isinstance(err, ssl.SSLError) and err.args[0] == 'timed out') or + isinstance(err, socket.timeout) ) From 51632f868cf998c219b129c6893ba474bbf59e13 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 4 Mar 2016 09:43:57 -0600 Subject: [PATCH 0410/2005] doc tweaks --- cassandra/cluster.py | 6 +++--- docs/api/cassandra/cluster.rst | 2 ++ 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index b71af5eb06..c59ca8782c 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1429,7 +1429,6 @@ class Session(object): """ default_consistency_level = ConsistencyLevel.LOCAL_ONE - """ The default :class:`~ConsistencyLevel` for operations executed through this session. This default may be overridden by setting the @@ -1447,6 +1446,8 @@ class Session(object): The default :class:`~ConsistencyLevel` for serial phase of conditional updates executed through this session. This default may be overridden by setting the :attr:`~.Statement.serial_consistency_level` on individual statements. + + Only valid for ``protocol_version >= 2``. """ max_trace_wait = 2.0 @@ -3317,8 +3318,7 @@ def has_more_pages(self): @property def current_rows(self): """ - :return: the list of current page rows. May be empty if the result was empty, - or this is the last page. + :return: the list of current page rows. May be empty if the result was empty, or this is the last page. """ return self._current_rows or [] diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index ac2cbcd399..5e09884dc3 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -99,6 +99,8 @@ .. autoattribute:: default_consistency_level :annotation: = LOCAL_ONE + .. autoattribute:: default_serial_consistency_level + .. autoattribute:: row_factory .. autoattribute:: default_fetch_size From 2d200ff7c4e93168bf9b91facfc16c86949df51b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 4 Mar 2016 09:49:26 -0600 Subject: [PATCH 0411/2005] rename unit test module --- tests/unit/{test_cluster_task_scheduler.py => test_cluster.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tests/unit/{test_cluster_task_scheduler.py => test_cluster.py} (100%) diff --git a/tests/unit/test_cluster_task_scheduler.py b/tests/unit/test_cluster.py similarity index 100% rename from tests/unit/test_cluster_task_scheduler.py rename to tests/unit/test_cluster.py From 102ed21db0bbfd801b04bc3bd8ed5003bb284f6e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 4 Mar 2016 11:06:39 -0600 Subject: [PATCH 0412/2005] Test default_serial_consistency_level PR #510 --- CHANGELOG.rst | 1 + tests/unit/test_cluster.py | 37 +++++++++++++++++++++++++++++++++++-- 2 files changed, 36 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index e5f6059f5c..7d55a8b0f7 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -9,6 +9,7 @@ Features * Eventlet Reactor IO improvement (PYTHON-495) * Make pure Python ProtocolHandler available even when Cython is present (PYTHON-501) * Optional Cython deserializer for bytes as bytearray (PYTHON-503) +* Add Session.default_serial_consistency_level (github #510) * cqlengine: Expose prior state information via cqlengine LWTException (github #343, PYTHON-336) * cqlengine: Collection datatype "contains" operators support (Cassandra 2.1) #278 (PYTHON-258) * cqlengine: Add DISTINCT query operator (PYTHON-266) diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 8aaf02f9cd..763875c9f8 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -17,9 +17,11 @@ except ImportError: import unittest # noqa -from mock import patch +from mock import patch, Mock -from cassandra.cluster import _Scheduler +from cassandra import ConsistencyLevel +from cassandra.cluster import _Scheduler, Session +from cassandra.query import SimpleStatement class SchedulerTest(unittest.TestCase): @@ -36,3 +38,34 @@ def test_event_delay_timing(self, *args): sched = _Scheduler(None) sched.schedule(0, lambda: None) sched.schedule(0, lambda: None) # pre-473: "TypeError: unorderable types: function() < function()"t + + +class SessionTest(unittest.TestCase): + # TODO: this suite could be expanded; for now just adding a test covering a PR + + @patch('cassandra.cluster.ResponseFuture._make_query_plan') + def test_default_serial_consistency_level(self, *args): + """ + Make sure default_serial_consistency_level passes through to a query message. + Also make sure Statement.serial_consistency_level overrides the default. + + PR #510 + """ + s = Session(Mock(protocol_version=4), []) + + # default is None + self.assertIsNone(s.default_serial_consistency_level) + + sentinel = 1001 + for cl in (None, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL, sentinel): + s.default_serial_consistency_level = cl + + # default is passed through + f = s._create_response_future(query='', parameters=[], trace=False, custom_payload={}, timeout=100) + self.assertEqual(f.message.serial_consistency_level, cl) + + # any non-None statement setting takes precedence + for cl_override in (ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL): + f = s._create_response_future(SimpleStatement(query_string='', serial_consistency_level=cl_override), parameters=[], trace=False, custom_payload={}, timeout=100) + self.assertEqual(s.default_serial_consistency_level, cl) + self.assertEqual(f.message.serial_consistency_level, cl_override) From 9b3e4065215b56b1a6c2f9d4a78a8c470ca4ff79 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 3 Mar 2016 15:26:21 -0600 Subject: [PATCH 0413/2005] Adding integration test for PYTHON-486 --- .../columns/test_container_columns.py | 38 ++++---- .../cqlengine/connections/__init__.py | 13 +++ .../cqlengine/connections/test_connection.py | 91 +++++++++++++++++++ 3 files changed, 123 insertions(+), 19 deletions(-) create mode 100644 tests/integration/cqlengine/connections/__init__.py create mode 100644 tests/integration/cqlengine/connections/test_connection.py diff --git a/tests/integration/cqlengine/columns/test_container_columns.py b/tests/integration/cqlengine/columns/test_container_columns.py index cb8801e1db..b2034bb11b 100644 --- a/tests/integration/cqlengine/columns/test_container_columns.py +++ b/tests/integration/cqlengine/columns/test_container_columns.py @@ -566,7 +566,7 @@ def test_initial(self): """ Tests creation and insertion of tuple types with models - @since 3.2 + @since 3.1 @jira_ticket PYTHON-306 @expected_result Model is successfully crated @@ -580,7 +580,7 @@ def test_initial_retrieve(self): Tests creation and insertion of tuple types with models, and their retrieval. - @since 3.2 + @since 3.1 @jira_ticket PYTHON-306 @expected_result Model is successfully crated @@ -596,7 +596,7 @@ def test_io_success(self): Tests creation and insertion of various types with models, and their retrieval. - @since 3.2 + @since 3.1 @jira_ticket PYTHON-306 @expected_result Model is successfully created and fetched correctly @@ -617,7 +617,7 @@ def test_type_validation(self): """ Tests to make sure tuple type validation occurs - @since 3.2 + @since 3.1 @jira_ticket PYTHON-306 @expected_result validation errors are raised @@ -632,7 +632,7 @@ def test_instantiation_with_column_class(self): Tests that columns instantiated with a column class work properly and that the class is instantiated in the constructor - @since 3.2 + @since 3.1 @jira_ticket PYTHON-306 @expected_result types are instantiated correctly @@ -648,7 +648,7 @@ def test_default_empty_container_saving(self): """ Tests that the default empty container is not saved if it hasn't been updated - @since 3.2 + @since 3.1 @jira_ticket PYTHON-306 @expected_result empty tuple is not upserted @@ -667,7 +667,7 @@ def test_updates(self): """ Tests that updates can be preformed on tuple containers - @since 3.2 + @since 3.1 @jira_ticket PYTHON-306 @expected_result tuple is replaced @@ -687,7 +687,7 @@ def test_update_from_non_empty_to_empty(self): """ Tests that explcit none updates are processed correctly on tuples - @since 3.2 + @since 3.1 @jira_ticket PYTHON-306 @expected_result tuple is replaced with none @@ -705,7 +705,7 @@ def test_insert_none(self): """ Tests that Tuples can be created as none - @since 3.2 + @since 3.1 @jira_ticket PYTHON-306 @expected_result tuple is created as none @@ -719,7 +719,7 @@ def test_blind_tuple_updates_from_none(self): """ Tests that Tuples can be updated from none - @since 3.2 + @since 3.1 @jira_ticket PYTHON-306 @expected_result tuple is created as none, but upserted to contain values @@ -766,7 +766,7 @@ def test_initial(self): """ Tests creation and insertion of nested collection types with models - @since 3.2 + @since 3.1 @jira_ticket PYTHON-478 @expected_result Model is successfully created @@ -780,7 +780,7 @@ def test_initial_retrieve(self): Tests creation and insertion of nested collection types with models, and their retrieval. - @since 3.2 + @since 3.1 @jira_ticket PYTHON-478 @expected_result Model is successfully crated @@ -798,7 +798,7 @@ def test_io_success(self): Tests creation and insertion of various nested collection types with models, and their retrieval. - @since 3.2 + @since 3.1 @jira_ticket PYTHON-378 @expected_result Model is successfully created and fetched correctly @@ -825,7 +825,7 @@ def test_type_validation(self): """ Tests to make sure nested collection type validation occurs - @since 3.2 + @since 3.1 @jira_ticket PYTHON-478 @expected_result validation errors are raised @@ -852,7 +852,7 @@ def test_instantiation_with_column_class(self): Tests that columns instantiated with a column class work properly and that the class is instantiated in the constructor - @since 3.2 + @since 3.1 @jira_ticket PYTHON-478 @expected_result types are instantiated correctly @@ -872,7 +872,7 @@ def test_default_empty_container_saving(self): """ Tests that the default empty nested collection container is not saved if it hasn't been updated - @since 3.2 + @since 3.1 @jira_ticket PYTHON-478 @expected_result empty nested collection is not upserted @@ -897,7 +897,7 @@ def test_updates(self): """ Tests that updates can be preformed on nested collections - @since 3.2 + @since 3.1 @jira_ticket PYTHON-478 @expected_result nested collection is replaced @@ -926,7 +926,7 @@ def test_update_from_non_empty_to_empty(self): """ Tests that explcit none updates are processed correctly on tuples - @since 3.2 + @since 3.1 @jira_ticket PYTHON-478 @expected_result nested collection is replaced with none @@ -950,7 +950,7 @@ def test_insert_none(self): """ Tests that Tuples can be created as none - @since 3.2 + @since 3.1 @jira_ticket PYTHON-478 @expected_result nested collection is created as none diff --git a/tests/integration/cqlengine/connections/__init__.py b/tests/integration/cqlengine/connections/__init__.py new file mode 100644 index 0000000000..1c7af46e71 --- /dev/null +++ b/tests/integration/cqlengine/connections/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2013-2016 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/integration/cqlengine/connections/test_connection.py b/tests/integration/cqlengine/connections/test_connection.py new file mode 100644 index 0000000000..8900d852b2 --- /dev/null +++ b/tests/integration/cqlengine/connections/test_connection.py @@ -0,0 +1,91 @@ +# Copyright 2013-2016 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + + +from cassandra.cqlengine.models import Model +from cassandra.cqlengine import columns, connection +from cassandra.cqlengine.management import sync_table +from cassandra.cluster import Cluster +from cassandra.query import dict_factory + +from tests.integration import PROTOCOL_VERSION, execute_with_long_wait_retry +from tests.integration.cqlengine.base import BaseCassEngTestCase + + +class TestConnectModel(Model): + + id = columns.Integer(primary_key=True) + keyspace = columns.Text() + + +class ConnectionTest(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + cls.keyspace1 = 'ctest1' + cls.keyspace2 = 'ctest2' + super(ConnectionTest, cls).setUpClass() + cls.setup_cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.setup_session = cls.setup_cluster.connect() + ddl = "CREATE KEYSPACE {0} WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': '{1}'}}".format(cls.keyspace1, 1) + execute_with_long_wait_retry(cls.setup_session, ddl) + ddl = "CREATE KEYSPACE {0} WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': '{1}'}}".format(cls.keyspace2, 1) + execute_with_long_wait_retry(cls.setup_session, ddl) + + @classmethod + def tearDownClass(cls): + execute_with_long_wait_retry(cls.setup_session, "DROP KEYSPACE {0}".format(cls.keyspace1)) + execute_with_long_wait_retry(cls.setup_session, "DROP KEYSPACE {0}".format(cls.keyspace2)) + cls.setup_cluster.shutdown() + + def setUp(self): + self.c = Cluster(protocol_version=PROTOCOL_VERSION) + self.session1 = self.c.connect(keyspace=self.keyspace1) + self.session1.row_factory = dict_factory + self.session2 = self.c.connect(keyspace=self.keyspace2) + self.session2.row_factory = dict_factory + + def tearDown(self): + self.c.shutdown() + + def test_connection_session_switch(self): + """ + Test to ensure that when the default keyspace is changed in a session and that session, + is set in the connection class, that the new defaul keyspace is honored. + + @since 3.1 + @jira_ticket PYTHON-486 + @expected_result CQLENGINE adopts whatever keyspace is passed in vai the set_session method as default + + @test_category object_mapper + """ + + connection.set_session(self.session1) + sync_table(TestConnectModel) + TCM1 = TestConnectModel.create(id=1, keyspace=self.keyspace1) + connection.set_session(self.session2) + sync_table(TestConnectModel) + TCM2 = TestConnectModel.create(id=1, keyspace=self.keyspace2) + connection.set_session(self.session1) + self.assertEqual(1, TestConnectModel.objects.count()) + self.assertEqual(TestConnectModel.objects.first(), TCM1) + connection.set_session(self.session2) + self.assertEqual(1, TestConnectModel.objects.count()) + self.assertEqual(TestConnectModel.objects.first(), TCM2) + From 0f9eff30000a90bc3260768a76be64ae9c009a34 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 4 Mar 2016 16:10:01 -0500 Subject: [PATCH 0414/2005] Add release notes and documentation for python-323 --- CHANGELOG.rst | 18 ++++++++++++++++++ cassandra/cqlengine/query.py | 4 +++- docs/api/cassandra/cqlengine/query.rst | 6 ++++++ docs/cqlengine/queryset.rst | 9 ++++++++- 4 files changed, 35 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index e5f6059f5c..26dd8d9661 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -45,6 +45,24 @@ Bug Fixes * cqlengine: class.MultipleObjectsReturned has DoesNotExist as base class (PYTHON-489) * cqlengine: Typo in cqlengine UserType __len__ breaks attribute assignment (PYTHON-502) + +Other +----- + +* cqlengine: a major improvement on queryset has been introduced. It + is a lot more efficient to iterage large datasets: the rows are + now fetched on demand using the driver pagination. + +* cqlengine: the queryset len() and count() behaviors have changed. It + now executes a "SELECT COUNT(*)" of the query rather than returning + the size of the internal result_cache (loaded rows). On large + queryset, you might want to avoid using them due to the performance + cost. Note that trying to access objects using list index/slicing + with negative indices also requires a count to be + executed. + + + 3.0.0 ===== November 24, 2015 diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 085971f74a..6c2a2a2596 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -700,7 +700,9 @@ class Comment(Model): def count(self): """ - Returns the number of rows matched by this query + Returns the number of rows matched by this query. + + *Note: This function executes a SELECT COUNT() and has a performance cost on large datasets* """ if self._batch: raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode") diff --git a/docs/api/cassandra/cqlengine/query.rst b/docs/api/cassandra/cqlengine/query.rst index bd45b3b9fa..62119e3905 100644 --- a/docs/api/cassandra/cqlengine/query.rst +++ b/docs/api/cassandra/cqlengine/query.rst @@ -18,6 +18,12 @@ The methods here are used to filter, order, and constrain results. .. automethod:: count + .. method:: len(queryset) + + Returns the number of rows matched by this query. This function uses :meth:`~.cassandra.cqlengine.query.ModelQuerySet.count` internally. + + *Note: This function executes a SELECT COUNT() and has a performance cost on large datasets* + .. automethod:: distinct .. automethod:: filter diff --git a/docs/cqlengine/queryset.rst b/docs/cqlengine/queryset.rst index 0732a24426..18287f924d 100644 --- a/docs/cqlengine/queryset.rst +++ b/docs/cqlengine/queryset.rst @@ -79,6 +79,10 @@ Accessing objects in a QuerySet q[0] #returns the first result q[1] #returns the second result + .. note:: + + * CQL does not support specifying a start position in it's queries. Therefore, accessing elements using array indexing will load every result up to the index value requested + * Using negative indices requires a "SELECT COUNT()" to be executed. This has a performance cost on large datasets. * list slicing .. code-block:: python @@ -87,7 +91,10 @@ Accessing objects in a QuerySet q[1:] #returns all results except the first q[1:9] #returns a slice of the results - *Note: CQL does not support specifying a start position in it's queries. Therefore, accessing elements using array indexing / slicing will load every result up to the index value requested* + .. note:: + + * CQL does not support specifying a start position in it's queries. Therefore, accessing elements using array slicing will load every result up to the index value requested + * Using negative indices requires a "SELECT COUNT()" to be executed. This has a performance cost on large datasets. * calling :attr:`get() ` on the queryset .. code-block:: python From 11eadc945d8d98a595fa0879f6f01cfb9b333cb7 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 4 Mar 2016 16:41:48 -0500 Subject: [PATCH 0415/2005] Add some details in the docs for limit(None) and fetch_size() --- cassandra/cqlengine/query.py | 9 +++++++-- docs/api/cassandra/cqlengine/query.rst | 2 ++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 6c2a2a2596..4b2230c95d 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -753,14 +753,19 @@ class Automobile(Model): def limit(self, v): """ - Limits the number of results returned by Cassandra. + Limits the number of results returned by Cassandra. Use *0* or *None* to disable. *Note that CQL's default limit is 10,000, so all queries without a limit set explicitly will have an implicit limit of 10,000* .. code-block:: python + # Fetch 100 users for user in User.objects().limit(100): print(user) + + # Fetch all users + for user in User.objects().limit(None): + print(user) """ if v is None: @@ -782,7 +787,7 @@ def fetch_size(self, v): """ Sets the number of rows that are fetched at a time. - *Note that driver's default fetch size is 5000. + *Note that driver's default fetch size is 5000.* .. code-block:: python diff --git a/docs/api/cassandra/cqlengine/query.rst b/docs/api/cassandra/cqlengine/query.rst index 62119e3905..ad5489f207 100644 --- a/docs/api/cassandra/cqlengine/query.rst +++ b/docs/api/cassandra/cqlengine/query.rst @@ -32,6 +32,8 @@ The methods here are used to filter, order, and constrain results. .. automethod:: limit + .. automethod:: fetch_size + .. automethod:: if_not_exists .. automethod:: if_exists From eef82584c1a6929b7aed07ef6ce5025e3ded2d52 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 4 Mar 2016 17:01:16 -0500 Subject: [PATCH 0416/2005] typo --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 26dd8d9661..012355efd5 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -50,7 +50,7 @@ Other ----- * cqlengine: a major improvement on queryset has been introduced. It - is a lot more efficient to iterage large datasets: the rows are + is a lot more efficient to iterate large datasets: the rows are now fetched on demand using the driver pagination. * cqlengine: the queryset len() and count() behaviors have changed. It From 81e53d73107bf7dcb12e8fe5bb7f5f00a8da4a91 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 4 Mar 2016 11:00:27 -0600 Subject: [PATCH 0417/2005] Simple test for Python-503, slight doc tweak --- cassandra/deserializers.pyx | 2 +- tests/integration/standard/test_types.py | 31 ++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/cassandra/deserializers.pyx b/cassandra/deserializers.pyx index d3645a3285..3967ea1431 100644 --- a/cassandra/deserializers.pyx +++ b/cassandra/deserializers.pyx @@ -46,7 +46,7 @@ cdef class DesBytesType(Deserializer): cdef deserialize(self, Buffer *buf, int protocol_version): return to_bytes(buf) -# this is to facilitate cqlengine integration, which requires bytearrays for BytesType +# this is to facilitate cqlsh integration, which requires bytearrays for BytesType # It is switched in by simply overwriting DesBytesType: # deserializers.DesBytesType = deserializers.DesBytesTypeByteArray cdef class DesBytesTypeByteArray(Deserializer): diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index acac7b817b..736e7957e2 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -21,6 +21,7 @@ import math import six +import cassandra from cassandra import InvalidRequest from cassandra.cluster import Cluster from cassandra.concurrent import execute_concurrent_with_args @@ -98,6 +99,36 @@ def test_can_insert_blob_type_as_bytearray(self): for expected, actual in zip(params, results): self.assertEqual(expected, actual) + @unittest.skipIf(not hasattr(cassandra, 'deserializers'), "Cython required for to test DesBytesTypeArray deserializer") + def test_des_bytes_type_array(self): + """ + Simple test to ensure the DesBytesTypeByteArray deserializer functionally works + + @since 3.1 + @jira_ticket PYTHON-503 + @expected_result byte array should be deserialized appropriately. + + @test_category queries:custom_payload + """ + original = None + try: + + original = cassandra.deserializers.DesBytesType + cassandra.deserializers.DesBytesType = cassandra.deserializers.DesBytesTypeByteArray + s = self.session + + s.execute("CREATE TABLE blobbytes2 (a ascii PRIMARY KEY, b blob)") + + params = ['key1', bytearray(b'blob1')] + s.execute("INSERT INTO blobbytes2 (a, b) VALUES (%s, %s)", params) + + results = s.execute("SELECT * FROM blobbytes2")[0] + for expected, actual in zip(params, results): + self.assertEqual(expected, actual) + finally: + if original is not None: + cassandra.deserializers.DesBytesType=original + def test_can_insert_primitive_datatypes(self): """ Test insertion of all datatype primitives From 96d38eadfeb97f0bc3fd66ac378f66f578eed3ab Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 4 Mar 2016 16:59:05 -0600 Subject: [PATCH 0418/2005] Integration test for PYTHON-323 and other general test fixes --- .../cqlengine/connections/test_connection.py | 7 +++ .../management/test_compaction_settings.py | 6 +-- .../cqlengine/management/test_management.py | 2 +- .../cqlengine/query/test_queryset.py | 48 ++++++++++++++++++- .../integration/cqlengine/test_batch_query.py | 2 +- 5 files changed, 59 insertions(+), 6 deletions(-) diff --git a/tests/integration/cqlengine/connections/test_connection.py b/tests/integration/cqlengine/connections/test_connection.py index 8900d852b2..80771d3697 100644 --- a/tests/integration/cqlengine/connections/test_connection.py +++ b/tests/integration/cqlengine/connections/test_connection.py @@ -26,6 +26,8 @@ from tests.integration import PROTOCOL_VERSION, execute_with_long_wait_retry from tests.integration.cqlengine.base import BaseCassEngTestCase +from tests.integration.cqlengine import DEFAULT_KEYSPACE, setup_connection +from cassandra.cqlengine import models class TestConnectModel(Model): @@ -38,6 +40,7 @@ class ConnectionTest(BaseCassEngTestCase): @classmethod def setUpClass(cls): + cls.original_cluster = connection.cluster cls.keyspace1 = 'ctest1' cls.keyspace2 = 'ctest2' super(ConnectionTest, cls).setUpClass() @@ -52,7 +55,11 @@ def setUpClass(cls): def tearDownClass(cls): execute_with_long_wait_retry(cls.setup_session, "DROP KEYSPACE {0}".format(cls.keyspace1)) execute_with_long_wait_retry(cls.setup_session, "DROP KEYSPACE {0}".format(cls.keyspace2)) + models.DEFAULT_KEYSPACE = DEFAULT_KEYSPACE + cls.original_cluster.shutdown() cls.setup_cluster.shutdown() + setup_connection(DEFAULT_KEYSPACE) + models.DEFAULT_KEYSPACE def setUp(self): self.c = Cluster(protocol_version=PROTOCOL_VERSION) diff --git a/tests/integration/cqlengine/management/test_compaction_settings.py b/tests/integration/cqlengine/management/test_compaction_settings.py index e128348bad..4fe349e069 100644 --- a/tests/integration/cqlengine/management/test_compaction_settings.py +++ b/tests/integration/cqlengine/management/test_compaction_settings.py @@ -83,7 +83,7 @@ def test_alter_actually_alters(self): table_meta = _get_table_metadata(tmp) - self.assertRegex(table_meta.export_as_string(), '.*SizeTieredCompactionStrategy.*') + self.assertRegexpMatches(table_meta.export_as_string(), '.*SizeTieredCompactionStrategy.*') def test_alter_options(self): @@ -97,11 +97,11 @@ class AlterTable(Model): drop_table(AlterTable) sync_table(AlterTable) table_meta = _get_table_metadata(AlterTable) - self.assertRegex(table_meta.export_as_string(), ".*'sstable_size_in_mb': '64'.*") + self.assertRegexpMatches(table_meta.export_as_string(), ".*'sstable_size_in_mb': '64'.*") AlterTable.__options__['compaction']['sstable_size_in_mb'] = '128' sync_table(AlterTable) table_meta = _get_table_metadata(AlterTable) - self.assertRegex(table_meta.export_as_string(), ".*'sstable_size_in_mb': '128'.*") + self.assertRegexpMatches(table_meta.export_as_string(), ".*'sstable_size_in_mb': '128'.*") class OptionsTest(BaseCassEngTestCase): diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 2e06ed6662..68a24155a9 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -207,7 +207,7 @@ def test_bogus_option_update(self): option = 'no way will this ever be an option' try: ModelWithTableProperties.__options__[option] = 'what was I thinking?' - self.assertRaisesRegex(KeyError, "Invalid table option.*%s.*" % option, sync_table, ModelWithTableProperties) + self.assertRaisesRegexp(KeyError, "Invalid table option.*%s.*" % option, sync_table, ModelWithTableProperties) finally: ModelWithTableProperties.__options__.pop(option, None) diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index c5f52ae46c..0c3fd49200 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -34,7 +34,7 @@ from cassandra.cqlengine.models import Model from cassandra.cqlengine import columns from cassandra.cqlengine import query -from cassandra.cqlengine.query import QueryException +from cassandra.cqlengine.query import QueryException, BatchQuery from datetime import timedelta from datetime import tzinfo @@ -1087,3 +1087,49 @@ def test_index(self): i = model.create(**values) self.assertEqual(model.objects(k0=i.k0, k1=i.k1).count(), len(clustering_values)) self.assertEqual(model.objects(k0=i.k0, k1=i.k1, v1=0).count(), 1) + + +class TestModelSmall(Model): + + test_id = columns.Integer(primary_key=True) + + +class TestModelQueryWithFetchSize(BaseCassEngTestCase): + """ + Test FetchSize, and ensure that results are returned correctly + regardless of the paging size + + @since 3.1 + @jira_ticket PYTHON-324 + @expected_result results are properly retrieved and the correct size + + @test_category object_mapper + """ + + @classmethod + def setUpClass(cls): + super(TestModelQueryWithFetchSize, cls).setUpClass() + sync_table(TestModelSmall) + + @classmethod + def tearDownClass(cls): + super(TestModelQueryWithFetchSize, cls).tearDownClass() + drop_table(TestModelSmall) + + def test_defaultFetchSize(self): + with BatchQuery() as b: + for i in range(5100): + TestModelSmall.batch(b).create(test_id=i) + self.assertEqual(len(TestModelSmall.objects.fetch_size(1)), 5100) + self.assertEqual(len(TestModelSmall.objects.fetch_size(500)), 5100) + self.assertEqual(len(TestModelSmall.objects.fetch_size(4999)), 5100) + self.assertEqual(len(TestModelSmall.objects.fetch_size(5000)), 5100) + self.assertEqual(len(TestModelSmall.objects.fetch_size(5001)), 5100) + self.assertEqual(len(TestModelSmall.objects.fetch_size(5100)), 5100) + self.assertEqual(len(TestModelSmall.objects.fetch_size(5101)), 5100) + self.assertEqual(len(TestModelSmall.objects.fetch_size(1)), 5100) + + with self.assertRaises(QueryException): + TestModelSmall.objects.fetch_size(0) + with self.assertRaises(QueryException): + TestModelSmall.objects.fetch_size(-1) diff --git a/tests/integration/cqlengine/test_batch_query.py b/tests/integration/cqlengine/test_batch_query.py index 5fd9453a72..355a118235 100644 --- a/tests/integration/cqlengine/test_batch_query.py +++ b/tests/integration/cqlengine/test_batch_query.py @@ -223,7 +223,7 @@ def my_callback(*args, **kwargs): batch.execute() batch.execute() self.assertEqual(len(w), 2) # package filter setup to warn always - self.assertRegex(str(w[0].message), r"^Batch.*multiple.*") + self.assertRegexpMatches(str(w[0].message), r"^Batch.*multiple.*") def test_disable_multiple_callback_warning(self): """ From 464bb18e322dd2923e3d0f0acb37095c874f38ca Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 7 Mar 2016 10:16:07 -0600 Subject: [PATCH 0419/2005] Fixing typo in model type, fixing missed regex 2.7 compatibility issue --- tests/integration/cqlengine/model/test_class_construction.py | 2 +- tests/integration/cqlengine/test_ifexists.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/cqlengine/model/test_class_construction.py b/tests/integration/cqlengine/model/test_class_construction.py index 1b05346edb..8147e41079 100644 --- a/tests/integration/cqlengine/model/test_class_construction.py +++ b/tests/integration/cqlengine/model/test_class_construction.py @@ -70,7 +70,7 @@ def test_attempting_to_make_duplicate_column_names_fails(self): Tests that trying to create conflicting db column names will fail """ - with self.assertRaisesRegex(ModelException, r".*more than once$"): + with self.assertRaisesRegexp(ModelException, r".*more than once$"): class BadNames(Model): words = columns.Text(primary_key=True) content = columns.Text(db_field='words') diff --git a/tests/integration/cqlengine/test_ifexists.py b/tests/integration/cqlengine/test_ifexists.py index a063463f09..6d693f7b45 100644 --- a/tests/integration/cqlengine/test_ifexists.py +++ b/tests/integration/cqlengine/test_ifexists.py @@ -30,7 +30,7 @@ class TestIfExistsModel(Model): - id = columns.Integer(primary_key=True, default=lambda: uuid4()) + id = columns.UUID(primary_key=True, default=lambda: uuid4()) count = columns.Integer() text = columns.Text(required=False) From d724c33b5ac88bb12186d233b32b04d0a8e47d01 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 7 Mar 2016 14:38:43 -0500 Subject: [PATCH 0420/2005] cqlengine: fix index names to be compatible with C* >=2.0 --- cassandra/cqlengine/management.py | 18 +++++++++++++++--- .../cqlengine/management/test_management.py | 9 ++++----- 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 9e7b036364..8cd884d065 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -107,6 +107,18 @@ def drop_keyspace(name): execute("DROP KEYSPACE {0}".format(metadata.protect_name(name))) +def _get_index_name_by_column(table, column_name): + """ + Find the index name for a given table and column. + """ + for _, index_metadata in six.iteritems(table.indexes): + options = dict(index_metadata.index_options) + if 'target' in options and options['target'] == column_name: + return index_metadata.name + + return None + + def sync_table(model): """ Inspects the model and creates / updates the corresponding table and columns. @@ -190,11 +202,11 @@ def sync_table(model): # TODO: support multiple indexes in C* 3.0+ for column in indexes: - index_name = 'index_{0}_{1}'.format(raw_cf_name, column.db_field_name) - if table.indexes.get(index_name): + index_name = _get_index_name_by_column(table, column.db_field_name) + if index_name: continue - qs = ['CREATE INDEX {0}'.format(metadata.protect_name(index_name))] + qs = ['CREATE INDEX'] qs += ['ON {0}'.format(cf_name)] qs += ['("{0}")'.format(column.db_field_name)] qs = ' '.join(qs) diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 68a24155a9..013168cefe 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -267,14 +267,13 @@ def test_sync_index(self): """ sync_table(IndexModel) table_meta = management._get_table_metadata(IndexModel) - self.assertIn("index_index_model_second_key", table_meta.indexes) + self.assertIsNotNone(management._get_index_name_by_column(table_meta, 'second_key')) # index already exists sync_table(IndexModel) table_meta = management._get_table_metadata(IndexModel) - self.assertIn("index_index_model_second_key", table_meta.indexes) + self.assertIsNotNone(management._get_index_name_by_column(table_meta, 'second_key')) - @greaterthancass20 def test_sync_index_case_sensitive(self): """ Tests the default table creation, and ensures the table_name is created correctly and surfaced correctly @@ -288,12 +287,12 @@ def test_sync_index_case_sensitive(self): """ sync_table(IndexCaseSensitiveModel) table_meta = management._get_table_metadata(IndexCaseSensitiveModel) - self.assertIn("index_IndexModel_second_key", table_meta.indexes) + self.assertIsNotNone(management._get_index_name_by_column(table_meta, 'second_key')) # index already exists sync_table(IndexCaseSensitiveModel) table_meta = management._get_table_metadata(IndexCaseSensitiveModel) - self.assertIn("index_IndexModel_second_key", table_meta.indexes) + self.assertIsNotNone(management._get_index_name_by_column(table_meta, 'second_key')) class NonModelFailureTest(BaseCassEngTestCase): From f8c142590b5379761958a436ebaffacd0cb54f88 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 7 Mar 2016 21:17:43 -0500 Subject: [PATCH 0421/2005] Fix len() and count() with distinct queries: materialize all results --- cassandra/cqlengine/query.py | 21 +++++++++++++++++++ tests/integration/__init__.py | 1 + .../cqlengine/query/test_queryset.py | 3 ++- 3 files changed, 24 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 4b2230c95d..66b9fef780 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -384,6 +384,27 @@ def _execute_query(self): self._result_cache = [] self._construct_result = self._get_result_constructor() + # "DISTINCT COUNT()" is not supported in C* < 2.2, so we need to materialize all results to get + # len() and count() working with DISTINCT queries + if self._distinct_fields: + self._fill_result_cache() + + def _fill_result_cache(self): + """ + Fill the result cache with all results. + """ + + # iterate all results + idx = 0 + try: + while True: + idx += 1000 + self._fill_result_cache_to_idx(idx) + except StopIteration: + pass + + self._count = len(self._result_cache) + def _fill_result_cache_to_idx(self, idx): self._execute_query() if self._result_idx is None: diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 43c307fd54..2801fb3197 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -144,6 +144,7 @@ def _get_cass_version_from_dse(dse_version): greaterthanprotocolv3 = unittest.skipUnless(PROTOCOL_VERSION >= 4, 'Protocol versions less than 4 are not supported') greaterthancass20 = unittest.skipUnless(CASSANDRA_VERSION >= '2.1', 'Cassandra version 2.1 or greater required') +greaterthancass21 = unittest.skipUnless(CASSANDRA_VERSION >= '2.2', 'Cassandra version 2.2 or greater required') greaterthanorequalcass30 = unittest.skipUnless(CASSANDRA_VERSION >= '3.0', 'Cassandra version 3.0 or greater required') lessthancass30 = unittest.skipUnless(CASSANDRA_VERSION < '3.0', 'Cassandra version less then 3.0 required') diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 0c3fd49200..0097e60490 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -43,7 +43,7 @@ from cassandra.util import uuid_from_time from cassandra.cqlengine.connection import get_session -from tests.integration import PROTOCOL_VERSION, CASSANDRA_VERSION, greaterthancass20 +from tests.integration import PROTOCOL_VERSION, CASSANDRA_VERSION, greaterthancass20, greaterthancass21 class TzOffset(tzinfo): @@ -485,6 +485,7 @@ def test_zero_result(self): q = TestModel.objects.distinct(['test_id']).filter(test_id__in=[52]) self.assertEqual(len(q), 0) + @greaterthancass21 def test_distinct_with_explicit_count(self): q = TestModel.objects.distinct(['test_id']) self.assertEqual(q.count(), 3) From 7ad8d31617edc161c932d4b1a9d27db119df822d Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 8 Mar 2016 08:29:46 -0500 Subject: [PATCH 0422/2005] Fix queryset slicing issue with Python3 --- cassandra/cqlengine/query.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 66b9fef780..303800db85 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -394,7 +394,6 @@ def _fill_result_cache(self): Fill the result cache with all results. """ - # iterate all results idx = 0 try: while True: @@ -445,9 +444,11 @@ def __getitem__(self, s): self._execute_query() if isinstance(s, slice): + start = s.start if s.start else 0 + # calculate the amount of results that need to be loaded end = s.stop - if s.start < 0 or s.stop < 0: + if start < 0 or s.stop is None or s.stop < 0: end = self.count() try: @@ -455,7 +456,7 @@ def __getitem__(self, s): except StopIteration: pass - return self._result_cache[s.start:s.stop:s.step] + return self._result_cache[start:s.stop:s.step] else: try: s = int(s) From 0ba6d1e85e840a841066df30888cbbd5facca8ef Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 10 Mar 2016 15:54:58 -0500 Subject: [PATCH 0423/2005] Prepare 3.1 release --- CHANGELOG.rst | 1 + cassandra/__init__.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 4cd3052c90..1f99fb90f7 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,5 +1,6 @@ 3.1.0 ===== +March 10, 2016 Features -------- diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 2123eec03b..7f47cc6633 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 1, '0a2', 'post0') +__version_info__ = (3, 1, 0) __version__ = '.'.join(map(str, __version_info__)) From 3111c10f07402c3b51eb248b0401c1d0b83a5556 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 10 Mar 2016 16:33:15 -0500 Subject: [PATCH 0424/2005] post-release version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 7f47cc6633..22963a5378 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 1, 0) +__version_info__ = (3, 1, 0, 'post0') __version__ = '.'.join(map(str, __version_info__)) From d1cb5bfbc416bcd6cc8a81e3e03eea45289b35ee Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 10 Mar 2016 16:37:49 -0500 Subject: [PATCH 0425/2005] Add README-dev info to make the latest GA the only visible version --- README-dev.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/README-dev.rst b/README-dev.rst index bc38f2b997..deb2666fb7 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -16,6 +16,7 @@ Releasing python setup.py register python setup.py sdist upload +* On pypi, make the latest GA the only visible version * Update the docs (see below) * Append a 'postN' string to the version tuple in ``cassandra/__init__.py`` so that it looks like ``(x, y, z, 'postN')`` From 8a12772f5aced61ca57b23e8130ecb9517fd2086 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 14 Mar 2016 11:18:10 -0400 Subject: [PATCH 0426/2005] PYTHON-522: Fallback to materialize results on query --- cassandra/cqlengine/query.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 303800db85..640e64537f 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -290,6 +290,7 @@ def __init__(self, model): self._result_cache = None self._result_idx = None self._result_generator = None + self._materialize_results = True self._distinct_fields = None @@ -386,7 +387,7 @@ def _execute_query(self): # "DISTINCT COUNT()" is not supported in C* < 2.2, so we need to materialize all results to get # len() and count() working with DISTINCT queries - if self._distinct_fields: + if self._materialize_results or self._distinct_fields: self._fill_result_cache() def _fill_result_cache(self): From 6aefc9c5cc151dc6d496046f9c5b1bab1e9e2258 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 14 Mar 2016 11:40:03 -0400 Subject: [PATCH 0427/2005] PYTHON-522: Avoid sending a COUNT query with QuerySet.get() --- cassandra/cqlengine/query.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 640e64537f..677006826a 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -664,12 +664,20 @@ def get(self, *args, **kwargs): return self.filter(*args, **kwargs).get() self._execute_query() - if self.count() == 0: + + # Check that the resultset only contains one element, avoiding sending a COUNT query + try: + self[1] + raise self.model.MultipleObjectsReturned('Multiple objects found') + except IndexError: + pass + + try: + obj = self[0] + except IndexError: raise self.model.DoesNotExist - elif self.count() > 1: - raise self.model.MultipleObjectsReturned('{0} objects found'.format(self.count())) - else: - return self[0] + + return obj def _get_ordering_condition(self, colname): order_type = 'DESC' if colname.startswith('-') else 'ASC' From bd7cb3c603b1aae5670b9e41f3f4cef2573ceb98 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 14 Mar 2016 14:43:37 -0400 Subject: [PATCH 0428/2005] Prepare 3.1.1 release --- CHANGELOG.rst | 9 +++++++++ cassandra/__init__.py | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 1f99fb90f7..d002af7774 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,12 @@ +3.1.1 +===== +March 14, 2016 + +Bug Fixes +--------- + +* cqlengine: Fix performance issue related to additional "COUNT" queries (PYTHON-522) + 3.1.0 ===== March 10, 2016 diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 7f47cc6633..abce265efd 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 1, 0) +__version_info__ = (3, 1, 1) __version__ = '.'.join(map(str, __version_info__)) From fd6b988579694cface8a628c8515d7dcaf0b2a95 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 15 Mar 2016 13:10:15 -0500 Subject: [PATCH 0429/2005] consume generator when switching ResultSet to list mode --- cassandra/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 7ccb8e84c8..d8c1026e90 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3391,7 +3391,7 @@ def _enter_list_mode(self, operator): raise RuntimeError("Cannot use %s when results have been iterated." % operator) if self.response_future.has_more_pages: log.warning("Using %s on paged results causes entire result set to be materialized.", operator) - self._fetch_all() + self._fetch_all() # done regardless of paging status in case the row factory produces a generator self._list_mode = True def __eq__(self, other): From fd41d69cf417005ebeb33b555876690a7f0a4bc4 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 17 Mar 2016 11:42:43 -0500 Subject: [PATCH 0430/2005] Adding 3.4 to test matrix --- build.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/build.yaml b/build.yaml index f7ece90ff3..1d3c93915e 100644 --- a/build.yaml +++ b/build.yaml @@ -8,6 +8,7 @@ cassandra: - 2.1 - 2.2 - 3.0 + - 3.4 env: EVENT_LOOP_MANAGER: - libev From 2a31798084ba02b8bbdc350be00f0007c2ae5216 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 17 Mar 2016 12:45:44 -0500 Subject: [PATCH 0431/2005] cqle: iff transactions for DeleteStatements PYTHON-249 --- cassandra/cqlengine/models.py | 1 + cassandra/cqlengine/query.py | 9 +++++---- cassandra/cqlengine/statements.py | 28 +++++++++++++++++++++++++++- 3 files changed, 33 insertions(+), 5 deletions(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 1bc8fdab20..4fe840b83d 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -751,6 +751,7 @@ def delete(self): timestamp=self._timestamp, consistency=self.__consistency__, timeout=self._timeout, + transaction=self._transaction, if_exists=self._if_exists).delete() def get_changed_columns(self): diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 677006826a..aa8e525310 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -898,6 +898,7 @@ def delete(self): self.column_family_name, where=self._where, timestamp=self._timestamp, + transactions=self._transaction, if_exists=self._if_exists ) self._execute(dq) @@ -1196,7 +1197,7 @@ class Row(Model): if nulled_columns: ds = DeleteStatement(self.column_family_name, fields=nulled_columns, - where=self._where, if_exists=self._if_exists) + where=self._where, transactions=self._transaction, if_exists=self._if_exists) self._execute(ds) @@ -1247,7 +1248,7 @@ def _delete_null_columns(self): """ executes a delete query to remove columns that have changed to null """ - ds = DeleteStatement(self.column_family_name, if_exists=self._if_exists) + ds = DeleteStatement(self.column_family_name, transactions=self._transaction, if_exists=self._if_exists) deleted_fields = False for _, v in self.instance._values.items(): col = v.column @@ -1324,7 +1325,7 @@ def update(self): col.to_database(val) )) - if statement.get_context_size() > 0 or self.instance._has_counter: + if statement.assignments: for name, col in self.model._primary_keys.items(): # only include clustering key if clustering key is not null, and non static columns are changed to avoid cql error if (null_clustering_key or static_changed_only) and (not col.partition_key): @@ -1386,7 +1387,7 @@ def delete(self): if self.instance is None: raise CQLEngineException("DML Query instance attribute is None") - ds = DeleteStatement(self.column_family_name, timestamp=self._timestamp, if_exists=self._if_exists) + ds = DeleteStatement(self.column_family_name, timestamp=self._timestamp, transactions=self._transaction, if_exists=self._if_exists) for name, col in self.model._primary_keys.items(): if (not col.partition_key) and (getattr(self.instance, name) is None): continue diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 73f9197ed3..9613589e93 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -791,7 +791,7 @@ def update_context_id(self, i): class DeleteStatement(BaseCQLStatement): """ a cql delete statement """ - def __init__(self, table, fields=None, consistency=None, where=None, timestamp=None, if_exists=False): + def __init__(self, table, fields=None, consistency=None, where=None, timestamp=None, transactions=None, if_exists=False): super(DeleteStatement, self).__init__( table, consistency=consistency, @@ -803,6 +803,11 @@ def __init__(self, table, fields=None, consistency=None, where=None, timestamp=N fields = [fields] for field in fields or []: self.add_field(field) + + self.transactions = [] + for transaction in transactions or []: + self.add_transaction_clause(transaction) + self.if_exists = if_exists def update_context_id(self, i): @@ -815,6 +820,8 @@ def get_context(self): ctx = super(DeleteStatement, self).get_context() for field in self.fields: field.update_context(ctx) + for clause in self.transactions or []: + clause.update_context(ctx) return ctx def add_field(self, field): @@ -826,6 +833,22 @@ def add_field(self, field): self.context_counter += field.get_context_size() self.fields.append(field) + def add_transaction_clause(self, clause): + """ + Adds a iff clause to this statement + + :param clause: The clause that will be added to the iff statement + :type clause: TransactionClause + """ + if not isinstance(clause, TransactionClause): + raise StatementException('only instances of AssignmentClause can be added to statements') + clause.set_context_id(self.context_counter) + self.context_counter += clause.get_context_size() + self.transactions.append(clause) + + def _get_transactions(self): + return 'IF {0}'.format(' AND '.join([six.text_type(c) for c in self.transactions])) + def __unicode__(self): qs = ['DELETE'] if self.fields: @@ -843,6 +866,9 @@ def __unicode__(self): if self.where_clauses: qs += [self._where] + if self.transactions: + qs += [self._get_transactions()] + if self.if_exists: qs += ["IF EXISTS"] From 248ac90af1395fb1f1fbdc09d3d3822c57281760 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 17 Mar 2016 13:10:28 -0500 Subject: [PATCH 0432/2005] cqle: unit test for DeleteStatement iff PYTHON-249 --- .../cqlengine/statements/test_delete_statement.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/tests/integration/cqlengine/statements/test_delete_statement.py b/tests/integration/cqlengine/statements/test_delete_statement.py index 099294287b..c81366e1fd 100644 --- a/tests/integration/cqlengine/statements/test_delete_statement.py +++ b/tests/integration/cqlengine/statements/test_delete_statement.py @@ -13,10 +13,11 @@ # limitations under the License. from unittest import TestCase -from cassandra.cqlengine.statements import DeleteStatement, WhereClause, MapDeleteClause +from cassandra.cqlengine.statements import DeleteStatement, WhereClause, MapDeleteClause, TransactionClause from cassandra.cqlengine.operators import * import six + class DeleteStatementTests(TestCase): def test_single_field_is_listified(self): @@ -49,7 +50,7 @@ def test_where_clause_rendering(self): def test_context_update(self): ds = DeleteStatement('table', None) - ds.add_field(MapDeleteClause('d', {1: 2}, {1:2, 3: 4})) + ds.add_field(MapDeleteClause('d', {1: 2}, {1: 2, 3: 4})) ds.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) ds.update_context_id(7) @@ -72,3 +73,13 @@ def test_range_deletion_rendering(self): ds.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) ds.add_where_clause(WhereClause('created_at', InOperator(), ['0', '10', '20'])) self.assertEqual(six.text_type(ds), 'DELETE FROM table WHERE "a" = %(0)s AND "created_at" IN %(1)s', six.text_type(ds)) + + def test_delete_conditional(self): + where = [WhereClause('id', EqualsOperator(), 1)] + transactions = [TransactionClause('f0', 'value0'), TransactionClause('f1', 'value1')] + ds = DeleteStatement('table', where=where, transactions=transactions) + self.assertEqual(len(ds.transactions), len(transactions)) + self.assertEqual(six.text_type(ds), 'DELETE FROM table WHERE "id" = %(0)s IF "f0" = %(1)s AND "f1" = %(2)s', six.text_type(ds)) + fields = ['one', 'two'] + ds = DeleteStatement('table', fields=fields, where=where, transactions=transactions) + self.assertEqual(six.text_type(ds), 'DELETE "one", "two" FROM table WHERE "id" = %(0)s IF "f0" = %(1)s AND "f1" = %(2)s', six.text_type(ds)) From 0f3132d31a4ea0bf2f391679519bc91b03e2595e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 17 Mar 2016 14:50:06 -0500 Subject: [PATCH 0433/2005] cqle: integration tests for conditional deletes (and updates to None) PYTHON-249 --- .../integration/cqlengine/test_transaction.py | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/tests/integration/cqlengine/test_transaction.py b/tests/integration/cqlengine/test_transaction.py index 3f7a19ee86..91ce94982d 100644 --- a/tests/integration/cqlengine/test_transaction.py +++ b/tests/integration/cqlengine/test_transaction.py @@ -134,3 +134,45 @@ def test_batch_update_transaction(self): updated = TestTransactionModel.objects(id=id).first() self.assertEqual(updated.text, 'something else') + + def test_delete_transaction(self): + # DML path + t = TestTransactionModel.create(text='something', count=5) + self.assertEqual(TestTransactionModel.objects(id=t.id).count(), 1) + with self.assertRaises(LWTException): + t.iff(count=9999).delete() + self.assertEqual(TestTransactionModel.objects(id=t.id).count(), 1) + t.iff(count=5).delete() + self.assertEqual(TestTransactionModel.objects(id=t.id).count(), 0) + + # QuerySet path + t = TestTransactionModel.create(text='something', count=5) + self.assertEqual(TestTransactionModel.objects(id=t.id).count(), 1) + with self.assertRaises(LWTException): + TestTransactionModel.objects(id=t.id).iff(count=9999).delete() + self.assertEqual(TestTransactionModel.objects(id=t.id).count(), 1) + TestTransactionModel.objects(id=t.id).iff(count=5).delete() + self.assertEqual(TestTransactionModel.objects(id=t.id).count(), 0) + + def test_update_to_none(self): + # This test is done because updates to none are split into deletes + # for old versions of cassandra. Can be removed when we drop that code + # https://github.com/datastax/python-driver/blob/3.1.1/cassandra/cqlengine/query.py#L1197-L1200 + + # DML path + t = TestTransactionModel.create(text='something', count=5) + self.assertEqual(TestTransactionModel.objects(id=t.id).count(), 1) + with self.assertRaises(LWTException): + t.iff(count=9999).update(text=None) + self.assertIsNotNone(TestTransactionModel.objects(id=t.id).first().text) + t.iff(count=5).update(text=None) + self.assertIsNone(TestTransactionModel.objects(id=t.id).first().text) + + # QuerySet path + t = TestTransactionModel.create(text='something', count=5) + self.assertEqual(TestTransactionModel.objects(id=t.id).count(), 1) + with self.assertRaises(LWTException): + TestTransactionModel.objects(id=t.id).iff(count=9999).update(text=None) + self.assertIsNotNone(TestTransactionModel.objects(id=t.id).first().text) + TestTransactionModel.objects(id=t.id).iff(count=5).update(text=None) + self.assertIsNone(TestTransactionModel.objects(id=t.id).first().text) From c4db355311c249077ebb52a83acef9ba064a3483 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 17 Mar 2016 16:10:15 -0500 Subject: [PATCH 0434/2005] cqle: refactor transactions to base CQL statement --- cassandra/cqlengine/statements.py | 73 ++++++++++++------------------- 1 file changed, 28 insertions(+), 45 deletions(-) diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 9613589e93..bb34f66f4b 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -471,7 +471,7 @@ def __unicode__(self): class BaseCQLStatement(UnicodeMixin): """ The base cql statement class """ - def __init__(self, table, consistency=None, timestamp=None, where=None, fetch_size=None): + def __init__(self, table, consistency=None, timestamp=None, where=None, fetch_size=None, transactions=None): super(BaseCQLStatement, self).__init__() self.table = table self.consistency = consistency @@ -484,6 +484,10 @@ def __init__(self, table, consistency=None, timestamp=None, where=None, fetch_si for clause in where or []: self.add_where_clause(clause) + self.transactions = [] + for transaction in transactions or []: + self.add_transaction_clause(transaction) + def add_where_clause(self, clause): """ adds a where clause to this statement @@ -506,6 +510,22 @@ def get_context(self): clause.update_context(ctx) return ctx + def add_transaction_clause(self, clause): + """ + Adds a iff clause to this statement + + :param clause: The clause that will be added to the iff statement + :type clause: TransactionClause + """ + if not isinstance(clause, TransactionClause): + raise StatementException('only instances of AssignmentClause can be added to statements') + clause.set_context_id(self.context_counter) + self.context_counter += clause.get_context_size() + self.transactions.append(clause) + + def _get_transactions(self): + return 'IF {0}'.format(' AND '.join([six.text_type(c) for c in self.transactions])) + def get_context_size(self): return len(self.get_context()) @@ -616,11 +636,13 @@ def __init__(self, consistency=None, where=None, ttl=None, - timestamp=None): + timestamp=None, + transactions=None): super(AssignmentStatement, self).__init__( table, consistency=consistency, where=where, + transactions=transactions ) self.ttl = ttl self.timestamp = timestamp @@ -722,12 +744,8 @@ def __init__(self, consistency=consistency, where=where, ttl=ttl, - timestamp=timestamp) - - # Add iff statements - self.transactions = [] - for transaction in transactions or []: - self.add_transaction_clause(transaction) + timestamp=timestamp, + transactions=transactions) self.if_exists = if_exists @@ -759,28 +777,12 @@ def __unicode__(self): return ' '.join(qs) - def add_transaction_clause(self, clause): - """ - Adds a iff clause to this statement - - :param clause: The clause that will be added to the iff statement - :type clause: TransactionClause - """ - if not isinstance(clause, TransactionClause): - raise StatementException('only instances of AssignmentClause can be added to statements') - clause.set_context_id(self.context_counter) - self.context_counter += clause.get_context_size() - self.transactions.append(clause) - def get_context(self): ctx = super(UpdateStatement, self).get_context() for clause in self.transactions or []: clause.update_context(ctx) return ctx - def _get_transactions(self): - return 'IF {0}'.format(' AND '.join([six.text_type(c) for c in self.transactions])) - def update_context_id(self, i): super(UpdateStatement, self).update_context_id(i) for transaction in self.transactions: @@ -796,7 +798,8 @@ def __init__(self, table, fields=None, consistency=None, where=None, timestamp=N table, consistency=consistency, where=where, - timestamp=timestamp + timestamp=timestamp, + transactions=transactions ) self.fields = [] if isinstance(fields, six.string_types): @@ -804,10 +807,6 @@ def __init__(self, table, fields=None, consistency=None, where=None, timestamp=N for field in fields or []: self.add_field(field) - self.transactions = [] - for transaction in transactions or []: - self.add_transaction_clause(transaction) - self.if_exists = if_exists def update_context_id(self, i): @@ -833,22 +832,6 @@ def add_field(self, field): self.context_counter += field.get_context_size() self.fields.append(field) - def add_transaction_clause(self, clause): - """ - Adds a iff clause to this statement - - :param clause: The clause that will be added to the iff statement - :type clause: TransactionClause - """ - if not isinstance(clause, TransactionClause): - raise StatementException('only instances of AssignmentClause can be added to statements') - clause.set_context_id(self.context_counter) - self.context_counter += clause.get_context_size() - self.transactions.append(clause) - - def _get_transactions(self): - return 'IF {0}'.format(' AND '.join([six.text_type(c) for c in self.transactions])) - def __unicode__(self): qs = ['DELETE'] if self.fields: From 67e51b47664fc81359b067547a574560ab6e537b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 18 Mar 2016 09:20:16 -0500 Subject: [PATCH 0435/2005] cqle: transaction --> conditional To remove overload with other transaction types (ife, ine). --- cassandra/cqlengine/models.py | 32 +++---- cassandra/cqlengine/query.py | 36 ++++---- cassandra/cqlengine/statements.py | 55 ++++++------ .../statements/test_delete_statement.py | 10 +-- .../integration/cqlengine/test_transaction.py | 90 +++++++++---------- 5 files changed, 113 insertions(+), 110 deletions(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 4fe840b83d..32d9beef45 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -101,28 +101,28 @@ def __call__(self, *args, **kwargs): raise NotImplementedError -class TransactionDescriptor(object): +class ConditionalDescriptor(object): """ returns a query set descriptor """ def __get__(self, instance, model): if instance: - def transaction_setter(*prepared_transaction, **unprepared_transactions): - if len(prepared_transaction) > 0: - transactions = prepared_transaction[0] + def conditional_setter(*prepared_conditional, **unprepared_conditionals): + if len(prepared_conditional) > 0: + conditionals = prepared_conditional[0] else: - transactions = instance.objects.iff(**unprepared_transactions)._transaction - instance._transaction = transactions + conditionals = instance.objects.iff(**unprepared_conditionals)._conditional + instance._conditional = conditionals return instance - return transaction_setter + return conditional_setter qs = model.__queryset__(model) - def transaction_setter(**unprepared_transactions): - transactions = model.objects.iff(**unprepared_transactions)._transaction - qs._transaction = transactions + def conditional_setter(**unprepared_conditionals): + conditionals = model.objects.iff(**unprepared_conditionals)._conditional + qs._conditional = conditionals return qs - return transaction_setter + return conditional_setter def __call__(self, *args, **kwargs): raise NotImplementedError @@ -314,7 +314,7 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): objects = QuerySetDescriptor() ttl = TTLDescriptor() consistency = ConsistencyDescriptor() - iff = TransactionDescriptor() + iff = ConditionalDescriptor() # custom timestamps, see USING TIMESTAMP X timestamp = TimestampDescriptor() @@ -352,7 +352,7 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): def __init__(self, **values): self._ttl = self.__default_ttl__ self._timestamp = None - self._transaction = None + self._conditional = None self._batch = None self._timeout = connection.NOT_SET self._is_persisted = False @@ -684,7 +684,7 @@ def save(self): timestamp=self._timestamp, consistency=self.__consistency__, if_not_exists=self._if_not_exists, - transaction=self._transaction, + conditional=self._conditional, timeout=self._timeout, if_exists=self._if_exists).save() @@ -731,7 +731,7 @@ def update(self, **values): ttl=self._ttl, timestamp=self._timestamp, consistency=self.__consistency__, - transaction=self._transaction, + conditional=self._conditional, timeout=self._timeout, if_exists=self._if_exists).update() @@ -751,7 +751,7 @@ def delete(self): timestamp=self._timestamp, consistency=self.__consistency__, timeout=self._timeout, - transaction=self._transaction, + conditional=self._conditional, if_exists=self._if_exists).delete() def get_changed_columns(self): diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index aa8e525310..8d01c07283 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -28,7 +28,7 @@ UpdateStatement, AssignmentClause, InsertStatement, BaseCQLStatement, MapUpdateClause, MapDeleteClause, ListUpdateClause, SetUpdateClause, CounterUpdateClause, - TransactionClause) + ConditionalClause) class QueryException(CQLEngineException): @@ -43,7 +43,7 @@ class IfExistsWithCounterColumn(CQLEngineException): class LWTException(CQLEngineException): - """Lightweight transaction exception. + """Lightweight conditional exception. This exception will be raised when a write using an `IF` clause could not be applied due to existing data violating the condition. The existing data is @@ -146,7 +146,7 @@ def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on :param batch_type: (optional) One of batch type values available through BatchType enum :type batch_type: str or None :param timestamp: (optional) A datetime or timedelta object with desired timestamp to be applied - to the batch transaction. + to the batch conditional. :type timestamp: datetime or timedelta or None :param consistency: (optional) One of consistency values ("ANY", "ONE", "QUORUM" etc) :type consistency: The :class:`.ConsistencyLevel` to be used for the batch query, or None. @@ -267,8 +267,8 @@ def __init__(self, model): # Where clause filters self._where = [] - # Transaction clause filters - self._transaction = [] + # Conditional clause filters + self._conditional = [] # ordering arguments self._order = [] @@ -314,7 +314,7 @@ def _execute(self, q): return self._batch.add_query(q) else: result = connection.execute(q, consistency_level=self._consistency, timeout=self._timeout) - if self._if_not_exists or self._if_exists or self._transaction: + if self._if_not_exists or self._if_exists or self._conditional: check_applied(result) return result @@ -545,9 +545,9 @@ def iff(self, *args, **kwargs): clone = copy.deepcopy(self) for operator in args: - if not isinstance(operator, TransactionClause): + if not isinstance(operator, ConditionalClause): raise QueryException('{0} is not a valid query operator'.format(operator)) - clone._transaction.append(operator) + clone._conditional.append(operator) for col_name, val in kwargs.items(): exists = False @@ -576,7 +576,7 @@ def iff(self, *args, **kwargs): else: query_val = column.to_database(val) - clone._transaction.append(TransactionClause(col_name, query_val)) + clone._conditional.append(ConditionalClause(col_name, query_val)) return clone @@ -898,7 +898,7 @@ def delete(self): self.column_family_name, where=self._where, timestamp=self._timestamp, - transactions=self._transaction, + conditionals=self._conditional, if_exists=self._if_exists ) self._execute(dq) @@ -1156,7 +1156,7 @@ class Row(Model): nulled_columns = set() us = UpdateStatement(self.column_family_name, where=self._where, ttl=self._ttl, - timestamp=self._timestamp, transactions=self._transaction, if_exists=self._if_exists) + timestamp=self._timestamp, conditionals=self._conditional, if_exists=self._if_exists) for name, val in values.items(): col_name, col_op = self._parse_filter_arg(name) col = self.model._columns.get(col_name) @@ -1197,7 +1197,7 @@ class Row(Model): if nulled_columns: ds = DeleteStatement(self.column_family_name, fields=nulled_columns, - where=self._where, transactions=self._transaction, if_exists=self._if_exists) + where=self._where, conditionals=self._conditional, if_exists=self._if_exists) self._execute(ds) @@ -1216,7 +1216,7 @@ class DMLQuery(object): _if_exists = False def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, timestamp=None, - if_not_exists=False, transaction=None, timeout=connection.NOT_SET, if_exists=False): + if_not_exists=False, conditional=None, timeout=connection.NOT_SET, if_exists=False): self.model = model self.column_family_name = self.model.column_family_name() self.instance = instance @@ -1226,7 +1226,7 @@ def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, self._timestamp = timestamp self._if_not_exists = if_not_exists self._if_exists = if_exists - self._transaction = transaction + self._conditional = conditional self._timeout = timeout def _execute(self, q): @@ -1234,7 +1234,7 @@ def _execute(self, q): return self._batch.add_query(q) else: tmp = connection.execute(q, consistency_level=self._consistency, timeout=self._timeout) - if self._if_not_exists or self._if_exists or self._transaction: + if self._if_not_exists or self._if_exists or self._conditional: check_applied(tmp) return tmp @@ -1248,7 +1248,7 @@ def _delete_null_columns(self): """ executes a delete query to remove columns that have changed to null """ - ds = DeleteStatement(self.column_family_name, transactions=self._transaction, if_exists=self._if_exists) + ds = DeleteStatement(self.column_family_name, conditionals=self._conditional, if_exists=self._if_exists) deleted_fields = False for _, v in self.instance._values.items(): col = v.column @@ -1283,7 +1283,7 @@ def update(self): null_clustering_key = False if len(self.instance._clustering_keys) == 0 else True static_changed_only = True statement = UpdateStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp, - transactions=self._transaction, if_exists=self._if_exists) + conditionals=self._conditional, if_exists=self._if_exists) for name, col in self.instance._clustering_keys.items(): null_clustering_key = null_clustering_key and col._val_is_null(getattr(self.instance, name, None)) # get defined fields and their column names @@ -1387,7 +1387,7 @@ def delete(self): if self.instance is None: raise CQLEngineException("DML Query instance attribute is None") - ds = DeleteStatement(self.column_family_name, timestamp=self._timestamp, transactions=self._transaction, if_exists=self._if_exists) + ds = DeleteStatement(self.column_family_name, timestamp=self._timestamp, conditionals=self._conditional, if_exists=self._if_exists) for name, col in self.model._primary_keys.items(): if (not col.partition_key) and (getattr(self.instance, name) is None): continue diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index bb34f66f4b..8ae2b6f121 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -148,7 +148,7 @@ def insert_tuple(self): return self.field, self.context_id -class TransactionClause(BaseClause): +class ConditionalClause(BaseClause): """ A single variable iff statement """ def __unicode__(self): @@ -471,7 +471,7 @@ def __unicode__(self): class BaseCQLStatement(UnicodeMixin): """ The base cql statement class """ - def __init__(self, table, consistency=None, timestamp=None, where=None, fetch_size=None, transactions=None): + def __init__(self, table, consistency=None, timestamp=None, where=None, fetch_size=None, conditionals=None): super(BaseCQLStatement, self).__init__() self.table = table self.consistency = consistency @@ -484,9 +484,9 @@ def __init__(self, table, consistency=None, timestamp=None, where=None, fetch_si for clause in where or []: self.add_where_clause(clause) - self.transactions = [] - for transaction in transactions or []: - self.add_transaction_clause(transaction) + self.conditionals = [] + for conditional in conditionals or []: + self.add_conditional_clause(conditional) def add_where_clause(self, clause): """ @@ -510,21 +510,21 @@ def get_context(self): clause.update_context(ctx) return ctx - def add_transaction_clause(self, clause): + def add_conditional_clause(self, clause): """ Adds a iff clause to this statement :param clause: The clause that will be added to the iff statement - :type clause: TransactionClause + :type clause: ConditionalClause """ - if not isinstance(clause, TransactionClause): + if not isinstance(clause, ConditionalClause): raise StatementException('only instances of AssignmentClause can be added to statements') clause.set_context_id(self.context_counter) self.context_counter += clause.get_context_size() - self.transactions.append(clause) + self.conditionals.append(clause) - def _get_transactions(self): - return 'IF {0}'.format(' AND '.join([six.text_type(c) for c in self.transactions])) + def _get_conditionals(self): + return 'IF {0}'.format(' AND '.join([six.text_type(c) for c in self.conditionals])) def get_context_size(self): return len(self.get_context()) @@ -637,12 +637,12 @@ def __init__(self, where=None, ttl=None, timestamp=None, - transactions=None): + conditionals=None): super(AssignmentStatement, self).__init__( table, consistency=consistency, where=where, - transactions=transactions + conditionals=conditionals ) self.ttl = ttl self.timestamp = timestamp @@ -737,7 +737,7 @@ def __init__(self, where=None, ttl=None, timestamp=None, - transactions=None, + conditionals=None, if_exists=False): super(UpdateStatement, self). __init__(table, assignments=assignments, @@ -745,7 +745,7 @@ def __init__(self, where=where, ttl=ttl, timestamp=timestamp, - transactions=transactions) + conditionals=conditionals) self.if_exists = if_exists @@ -769,8 +769,8 @@ def __unicode__(self): if self.where_clauses: qs += [self._where] - if len(self.transactions) > 0: - qs += [self._get_transactions()] + if len(self.conditionals) > 0: + qs += [self._get_conditionals()] if self.if_exists: qs += ["IF EXISTS"] @@ -779,27 +779,27 @@ def __unicode__(self): def get_context(self): ctx = super(UpdateStatement, self).get_context() - for clause in self.transactions or []: + for clause in self.conditionals: clause.update_context(ctx) return ctx def update_context_id(self, i): super(UpdateStatement, self).update_context_id(i) - for transaction in self.transactions: - transaction.set_context_id(self.context_counter) - self.context_counter += transaction.get_context_size() + for conditional in self.conditionals: + conditional.set_context_id(self.context_counter) + self.context_counter += conditional.get_context_size() class DeleteStatement(BaseCQLStatement): """ a cql delete statement """ - def __init__(self, table, fields=None, consistency=None, where=None, timestamp=None, transactions=None, if_exists=False): + def __init__(self, table, fields=None, consistency=None, where=None, timestamp=None, conditionals=None, if_exists=False): super(DeleteStatement, self).__init__( table, consistency=consistency, where=where, timestamp=timestamp, - transactions=transactions + conditionals=conditionals ) self.fields = [] if isinstance(fields, six.string_types): @@ -814,12 +814,15 @@ def update_context_id(self, i): for field in self.fields: field.set_context_id(self.context_counter) self.context_counter += field.get_context_size() + for t in self.conditionals: + t.set_context_id(self.context_counter) + self.context_counter += t.get_context_size() def get_context(self): ctx = super(DeleteStatement, self).get_context() for field in self.fields: field.update_context(ctx) - for clause in self.transactions or []: + for clause in self.conditionals: clause.update_context(ctx) return ctx @@ -849,8 +852,8 @@ def __unicode__(self): if self.where_clauses: qs += [self._where] - if self.transactions: - qs += [self._get_transactions()] + if self.conditionals: + qs += [self._get_conditionals()] if self.if_exists: qs += ["IF EXISTS"] diff --git a/tests/integration/cqlengine/statements/test_delete_statement.py b/tests/integration/cqlengine/statements/test_delete_statement.py index c81366e1fd..6fb7c5317e 100644 --- a/tests/integration/cqlengine/statements/test_delete_statement.py +++ b/tests/integration/cqlengine/statements/test_delete_statement.py @@ -13,7 +13,7 @@ # limitations under the License. from unittest import TestCase -from cassandra.cqlengine.statements import DeleteStatement, WhereClause, MapDeleteClause, TransactionClause +from cassandra.cqlengine.statements import DeleteStatement, WhereClause, MapDeleteClause, ConditionalClause from cassandra.cqlengine.operators import * import six @@ -76,10 +76,10 @@ def test_range_deletion_rendering(self): def test_delete_conditional(self): where = [WhereClause('id', EqualsOperator(), 1)] - transactions = [TransactionClause('f0', 'value0'), TransactionClause('f1', 'value1')] - ds = DeleteStatement('table', where=where, transactions=transactions) - self.assertEqual(len(ds.transactions), len(transactions)) + conditionals = [ConditionalClause('f0', 'value0'), ConditionalClause('f1', 'value1')] + ds = DeleteStatement('table', where=where, conditionals=conditionals) + self.assertEqual(len(ds.conditionals), len(conditionals)) self.assertEqual(six.text_type(ds), 'DELETE FROM table WHERE "id" = %(0)s IF "f0" = %(1)s AND "f1" = %(2)s', six.text_type(ds)) fields = ['one', 'two'] - ds = DeleteStatement('table', fields=fields, where=where, transactions=transactions) + ds = DeleteStatement('table', fields=fields, where=where, conditionals=conditionals) self.assertEqual(six.text_type(ds), 'DELETE "one", "two" FROM table WHERE "id" = %(0)s IF "f0" = %(1)s AND "f1" = %(2)s', six.text_type(ds)) diff --git a/tests/integration/cqlengine/test_transaction.py b/tests/integration/cqlengine/test_transaction.py index 91ce94982d..2d4e3181c7 100644 --- a/tests/integration/cqlengine/test_transaction.py +++ b/tests/integration/cqlengine/test_transaction.py @@ -24,33 +24,33 @@ from cassandra.cqlengine.management import sync_table, drop_table from cassandra.cqlengine.models import Model from cassandra.cqlengine.query import BatchQuery, LWTException -from cassandra.cqlengine.statements import TransactionClause +from cassandra.cqlengine.statements import ConditionalClause from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration import CASSANDRA_VERSION -class TestTransactionModel(Model): +class TestConditionalModel(Model): id = columns.UUID(primary_key=True, default=uuid4) count = columns.Integer() text = columns.Text(required=False) -@unittest.skipUnless(CASSANDRA_VERSION >= '2.0.0', "transactions only supported on cassandra 2.0 or higher") -class TestTransaction(BaseCassEngTestCase): +@unittest.skipUnless(CASSANDRA_VERSION >= '2.0.0', "conditionals only supported on cassandra 2.0 or higher") +class TestConditional(BaseCassEngTestCase): @classmethod def setUpClass(cls): - super(TestTransaction, cls).setUpClass() - sync_table(TestTransactionModel) + super(TestConditional, cls).setUpClass() + sync_table(TestConditionalModel) @classmethod def tearDownClass(cls): - super(TestTransaction, cls).tearDownClass() - drop_table(TestTransactionModel) + super(TestConditional, cls).tearDownClass() + drop_table(TestConditionalModel) - def test_update_using_transaction(self): - t = TestTransactionModel.create(text='blah blah') + def test_update_using_conditional(self): + t = TestConditionalModel.create(text='blah blah') t.text = 'new blah' with mock.patch.object(self.session, 'execute') as m: t.iff(text='blah blah').save() @@ -58,18 +58,18 @@ def test_update_using_transaction(self): args = m.call_args self.assertIn('IF "text" = %(0)s', args[0][0].query_string) - def test_update_transaction_success(self): - t = TestTransactionModel.create(text='blah blah', count=5) + def test_update_conditional_success(self): + t = TestConditionalModel.create(text='blah blah', count=5) id = t.id t.text = 'new blah' t.iff(text='blah blah').save() - updated = TestTransactionModel.objects(id=id).first() + updated = TestConditionalModel.objects(id=id).first() self.assertEqual(updated.count, 5) self.assertEqual(updated.text, 'new blah') def test_update_failure(self): - t = TestTransactionModel.create(text='blah blah') + t = TestConditionalModel.create(text='blah blah') t.text = 'new blah' t = t.iff(text='something wrong') @@ -82,21 +82,21 @@ def test_update_failure(self): }) def test_blind_update(self): - t = TestTransactionModel.create(text='blah blah') + t = TestConditionalModel.create(text='blah blah') t.text = 'something else' uid = t.id with mock.patch.object(self.session, 'execute') as m: - TestTransactionModel.objects(id=uid).iff(text='blah blah').update(text='oh hey der') + TestConditionalModel.objects(id=uid).iff(text='blah blah').update(text='oh hey der') args = m.call_args self.assertIn('IF "text" = %(1)s', args[0][0].query_string) def test_blind_update_fail(self): - t = TestTransactionModel.create(text='blah blah') + t = TestConditionalModel.create(text='blah blah') t.text = 'something else' uid = t.id - qs = TestTransactionModel.objects(id=uid).iff(text='Not dis!') + qs = TestConditionalModel.objects(id=uid).iff(text='Not dis!') with self.assertRaises(LWTException) as assertion: qs.update(text='this will never work') @@ -105,20 +105,20 @@ def test_blind_update_fail(self): '[applied]': False, }) - def test_transaction_clause(self): - tc = TransactionClause('some_value', 23) + def test_conditional_clause(self): + tc = ConditionalClause('some_value', 23) tc.set_context_id(3) self.assertEqual('"some_value" = %(3)s', six.text_type(tc)) self.assertEqual('"some_value" = %(3)s', str(tc)) - def test_batch_update_transaction(self): - t = TestTransactionModel.create(text='something', count=5) + def test_batch_update_conditional(self): + t = TestConditionalModel.create(text='something', count=5) id = t.id with BatchQuery() as b: t.batch(b).iff(count=5).update(text='something else') - updated = TestTransactionModel.objects(id=id).first() + updated = TestConditionalModel.objects(id=id).first() self.assertEqual(updated.text, 'something else') b = BatchQuery() @@ -132,27 +132,27 @@ def test_batch_update_transaction(self): '[applied]': False, }) - updated = TestTransactionModel.objects(id=id).first() + updated = TestConditionalModel.objects(id=id).first() self.assertEqual(updated.text, 'something else') - def test_delete_transaction(self): + def test_delete_conditional(self): # DML path - t = TestTransactionModel.create(text='something', count=5) - self.assertEqual(TestTransactionModel.objects(id=t.id).count(), 1) + t = TestConditionalModel.create(text='something', count=5) + self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1) with self.assertRaises(LWTException): t.iff(count=9999).delete() - self.assertEqual(TestTransactionModel.objects(id=t.id).count(), 1) + self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1) t.iff(count=5).delete() - self.assertEqual(TestTransactionModel.objects(id=t.id).count(), 0) + self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 0) # QuerySet path - t = TestTransactionModel.create(text='something', count=5) - self.assertEqual(TestTransactionModel.objects(id=t.id).count(), 1) + t = TestConditionalModel.create(text='something', count=5) + self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1) with self.assertRaises(LWTException): - TestTransactionModel.objects(id=t.id).iff(count=9999).delete() - self.assertEqual(TestTransactionModel.objects(id=t.id).count(), 1) - TestTransactionModel.objects(id=t.id).iff(count=5).delete() - self.assertEqual(TestTransactionModel.objects(id=t.id).count(), 0) + TestConditionalModel.objects(id=t.id).iff(count=9999).delete() + self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1) + TestConditionalModel.objects(id=t.id).iff(count=5).delete() + self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 0) def test_update_to_none(self): # This test is done because updates to none are split into deletes @@ -160,19 +160,19 @@ def test_update_to_none(self): # https://github.com/datastax/python-driver/blob/3.1.1/cassandra/cqlengine/query.py#L1197-L1200 # DML path - t = TestTransactionModel.create(text='something', count=5) - self.assertEqual(TestTransactionModel.objects(id=t.id).count(), 1) + t = TestConditionalModel.create(text='something', count=5) + self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1) with self.assertRaises(LWTException): t.iff(count=9999).update(text=None) - self.assertIsNotNone(TestTransactionModel.objects(id=t.id).first().text) + self.assertIsNotNone(TestConditionalModel.objects(id=t.id).first().text) t.iff(count=5).update(text=None) - self.assertIsNone(TestTransactionModel.objects(id=t.id).first().text) + self.assertIsNone(TestConditionalModel.objects(id=t.id).first().text) # QuerySet path - t = TestTransactionModel.create(text='something', count=5) - self.assertEqual(TestTransactionModel.objects(id=t.id).count(), 1) + t = TestConditionalModel.create(text='something', count=5) + self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1) with self.assertRaises(LWTException): - TestTransactionModel.objects(id=t.id).iff(count=9999).update(text=None) - self.assertIsNotNone(TestTransactionModel.objects(id=t.id).first().text) - TestTransactionModel.objects(id=t.id).iff(count=5).update(text=None) - self.assertIsNone(TestTransactionModel.objects(id=t.id).first().text) + TestConditionalModel.objects(id=t.id).iff(count=9999).update(text=None) + self.assertIsNotNone(TestConditionalModel.objects(id=t.id).first().text) + TestConditionalModel.objects(id=t.id).iff(count=5).update(text=None) + self.assertIsNone(TestConditionalModel.objects(id=t.id).first().text) From 57f8d5c926c142d4f0e39d5f99b59e1321e64405 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 18 Mar 2016 09:24:13 -0500 Subject: [PATCH 0436/2005] cqle: rename lwt conditional test --- .../cqlengine/{test_transaction.py => test_lwt_conditional.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tests/integration/cqlengine/{test_transaction.py => test_lwt_conditional.py} (100%) diff --git a/tests/integration/cqlengine/test_transaction.py b/tests/integration/cqlengine/test_lwt_conditional.py similarity index 100% rename from tests/integration/cqlengine/test_transaction.py rename to tests/integration/cqlengine/test_lwt_conditional.py From b52ad946a6d5412406a45b9dda86299126b56aac Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 18 Mar 2016 15:29:25 -0500 Subject: [PATCH 0437/2005] Fixing timing issues in connection, and query trace tests --- tests/integration/standard/test_connection.py | 18 ++++++++++--- tests/integration/standard/test_query.py | 27 +++++++++++++++++++ 2 files changed, 41 insertions(+), 4 deletions(-) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index d8c455114d..51dd11a74b 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -227,10 +227,20 @@ def send_msgs(conn, event): t.join() def test_connect_timeout(self): - start = time.time() - self.assertRaises(Exception, self.get_connection, timeout=sys.float_info.min) - end = time.time() - self.assertAlmostEqual(start, end, 1) + # Underlying socket implementations don't always throw a socket timeout even with min float + # This can be timing sensitive, added retry to ensure failure occurs if it can + max_retry_count = 10 + exception_thrown = False + for i in range(max_retry_count): + start = time.time() + try: + self.get_connection(timeout=sys.float_info.min) + except Exception as e: + end = time.time() + self.assertAlmostEqual(start, end, 1) + exception_thrown = True + break + self.assertTrue(exception_thrown) class AsyncoreConnectionTests(ConnectionTests, unittest.TestCase): diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 4ae636063e..51ab1f514d 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -28,6 +28,7 @@ from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, get_server_versions, greaterthanprotocolv3 +import time import re @@ -155,15 +156,18 @@ def test_incomplete_query_trace(self): # Create table and run insert, then select self.session.execute("CREATE TABLE {0} (k INT, i INT, PRIMARY KEY(k, i))".format(self.keyspace_table_name)) self.session.execute("INSERT INTO {0} (k, i) VALUES (0, 1)".format(self.keyspace_table_name)) + response_future = self.session.execute_async("SELECT i FROM {0} WHERE k=0".format(self.keyspace_table_name), trace=True) response_future.result() self.assertEqual(len(response_future._query_traces), 1) trace = response_future._query_traces[0] + self.assertTrue(self._wait_for_trace_to_populate(trace.trace_id)) # Delete trace duration from the session (this is what the driver polls for "complete") delete_statement = SimpleStatement("DELETE duration FROM system_traces.sessions WHERE session_id = {}".format(trace.trace_id), consistency_level=ConsistencyLevel.ALL) self.session.execute(delete_statement) + self.assertTrue(self._wait_for_trace_to_delete(trace.trace_id)) # should raise because duration is not set self.assertRaises(TraceUnavailable, trace.populate, max_wait=0.2, wait_for_complete=True) @@ -178,6 +182,29 @@ def test_incomplete_query_trace(self): self.assertTrue(trace.events) # non-zero list len self.assertIsNotNone(trace.started_at) + def _wait_for_trace_to_populate(self, trace_id): + count = 0 + retry_max = 10 + while(not self._is_trace_present(trace_id) and count < retry_max): + time.sleep(.2) + count += 1 + return count != retry_max + + def _wait_for_trace_to_delete(self, trace_id): + count = 0 + retry_max = 10 + while(self._is_trace_present(trace_id) and count < retry_max): + time.sleep(.2) + count += 1 + return count != retry_max + + def _is_trace_present(self, trace_id): + select_statement = SimpleStatement("SElECT duration FROM system_traces.sessions WHERE session_id = {}".format(trace_id), consistency_level=ConsistencyLevel.ALL) + ssrs = self.session.execute(select_statement) + if(ssrs[0].duration is None): + return False + return True + def test_column_names(self): """ Test to validate the columns are present on the result set. From 969ffdfdd44d3804fc9627a5245fcdab5f000511 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 22 Mar 2016 10:01:02 -0500 Subject: [PATCH 0438/2005] cqle: simplify conditional in MapUpdateClause --- cassandra/cqlengine/statements.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 73f9197ed3..2bab3b194d 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -364,17 +364,13 @@ def _analyze(self): self._analyzed = True def get_context_size(self): - if not self._analyzed: - self._analyze() - if self.previous is None and not self._updates: + if self.is_assignment: return 1 return len(self._updates or []) * 2 def update_context(self, ctx): - if not self._analyzed: - self._analyze() ctx_id = self.context_id - if self.previous is None and not self._updates: + if self.is_assignment: ctx[str(ctx_id)] = {} else: for key in self._updates or []: @@ -383,13 +379,17 @@ def update_context(self, ctx): ctx[str(ctx_id + 1)] = self._column.value_col.to_database(val) if self._column else val ctx_id += 2 - def __unicode__(self): + @property + def is_assignment(self): if not self._analyzed: self._analyze() + return self.previous is None and not self._updates + + def __unicode__(self): qs = [] ctx_id = self.context_id - if self.previous is None and not self._updates: + if self.is_assignment: qs += ['"{0}" = %({1})s'.format(self.field, ctx_id)] else: for _ in self._updates or []: From 08219104b50418a2b84027b877cbffb76ef08908 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 22 Mar 2016 11:42:07 -0500 Subject: [PATCH 0439/2005] cqle: warn when column or field types are different from the model PYTHON-260 --- cassandra/cqlengine/management.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 8cd884d065..344b262c65 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -176,6 +176,7 @@ def sync_table(model): else: log.debug("sync_table checking existing table %s", cf_name) # see if we're missing any columns + table_meta = tables[raw_cf_name] field_names = _get_non_pk_field_names(tables[raw_cf_name]) model_fields = set() # # TODO: does this work with db_name?? @@ -184,6 +185,13 @@ def sync_table(model): continue # we can't mess with the PK model_fields.add(name) if col.db_field_name in field_names: + col_meta = table_meta.columns[col.db_field_name] + + if col_meta.cql_type != col.db_type: + msg = 'Existing table {0} has column "{1}" with a type ({2}) differing from the model type ({3}).' \ + ' Model should be updated.'.format(cf_name, col.db_field_name, col_meta.cql_type, col.db_type) + warnings.warn(msg) + log.warning(msg) continue # skip columns already defined # add missing column using the column def @@ -259,12 +267,20 @@ def _sync_type(ks_name, type_model, omit_subtypes=None): cluster.refresh_user_type_metadata(ks_name, type_name) type_model.register_for_keyspace(ks_name) else: - defined_fields = defined_types[type_name].field_names + type_meta = defined_types[type_name] + defined_fields = type_meta.field_names model_fields = set() for field in type_model._fields.values(): model_fields.add(field.db_field_name) if field.db_field_name not in defined_fields: execute("ALTER TYPE {0} ADD {1}".format(type_name_qualified, field.get_column_def())) + else: + field_type = type_meta.field_types[defined_fields.index(field.db_field_name)] + if field_type != field.db_type: + msg = 'Existing user type {0} has field "{1}" with a type ({2}) differing from the model user type ({3}).' \ + ' UserType should be updated.'.format(type_name_qualified, field.db_field_name, field_type, field.db_type) + warnings.warn(msg) + log.warning(msg) type_model.register_for_keyspace(ks_name) From 3a9c7e17f247271c62a7c3e8b71646dc349c45a0 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 22 Mar 2016 13:15:16 -0500 Subject: [PATCH 0440/2005] cqle: remove unused variable in iff --- cassandra/cqlengine/query.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 677006826a..9415a231fd 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -550,7 +550,6 @@ def iff(self, *args, **kwargs): clone._transaction.append(operator) for col_name, val in kwargs.items(): - exists = False try: column = self.model._get_column(col_name) except KeyError: @@ -571,7 +570,7 @@ def iff(self, *args, **kwargs): len(val.value), len(partition_columns))) val.set_columns(partition_columns) - if isinstance(val, BaseQueryFunction) or exists is True: + if isinstance(val, BaseQueryFunction): query_val = val else: query_val = column.to_database(val) From 49ae2e39d8a4dd3fad7a08a1c5f8d0e4d4599e4b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 22 Mar 2016 13:28:05 -0500 Subject: [PATCH 0441/2005] cqle: don't mess with Token values in iff conditional --- cassandra/cqlengine/query.py | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 9415a231fd..3c68333fb9 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -550,25 +550,13 @@ def iff(self, *args, **kwargs): clone._transaction.append(operator) for col_name, val in kwargs.items(): + if isinstance(val, Token): + raise QueryException("Token() values are not valid in conditionals") + try: column = self.model._get_column(col_name) except KeyError: - if col_name == 'pk__token': - if not isinstance(val, Token): - raise QueryException("Virtual column 'pk__token' may only be compared to Token() values") - column = columns._PartitionKeysToken(self.model) - else: - raise QueryException("Can't resolve column name: '{0}'".format(col_name)) - - if isinstance(val, Token): - if col_name != 'pk__token': - raise QueryException("Token() values may only be compared to the 'pk__token' virtual column") - partition_columns = column.partition_columns - if len(partition_columns) != len(val.value): - raise QueryException( - 'Token() received {0} arguments but model has {1} partition keys'.format( - len(val.value), len(partition_columns))) - val.set_columns(partition_columns) + raise QueryException("Can't resolve column name: '{0}'".format(col_name)) if isinstance(val, BaseQueryFunction): query_val = val From 2992424c0a61a42e8778fc6bb108b67c03fb3548 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 22 Mar 2016 14:17:44 -0500 Subject: [PATCH 0442/2005] cql: simplify filter processing around Token function PYTHON-260 --- cassandra/cqlengine/query.py | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 3c68333fb9..004e31dd69 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -588,21 +588,19 @@ def filter(self, *args, **kwargs): for arg, val in kwargs.items(): col_name, col_op = self._parse_filter_arg(arg) quote_field = True - # resolve column and operator - try: - column = self.model._get_column(col_name) - except KeyError: - if col_name == 'pk__token': - if not isinstance(val, Token): - raise QueryException("Virtual column 'pk__token' may only be compared to Token() values") - column = columns._PartitionKeysToken(self.model) - quote_field = False - else: - raise QueryException("Can't resolve column name: '{0}'".format(col_name)) - if isinstance(val, Token): + if not isinstance(val, Token): + try: + column = self.model._get_column(col_name) + except KeyError: + raise QueryException("Can't resolve column name: '{0}'".format(col_name)) + else: if col_name != 'pk__token': raise QueryException("Token() values may only be compared to the 'pk__token' virtual column") + + column = columns._PartitionKeysToken(self.model) + quote_field = False + partition_columns = column.partition_columns if len(partition_columns) != len(val.value): raise QueryException( @@ -941,13 +939,13 @@ def _validate_select_where(self): # check that there's either a =, a IN or a CONTAINS (collection) relationship with a primary key or indexed field equal_ops = [self.model._get_column_by_db_name(w.field) for w in self._where if isinstance(w.operator, EqualsOperator)] token_comparison = any([w for w in self._where if isinstance(w.value, Token)]) - if not any([w.primary_key or w.index for w in equal_ops]) and not token_comparison and not self._allow_filtering: + if not any(w.primary_key or w.index for w in equal_ops) and not token_comparison and not self._allow_filtering: raise QueryException(('Where clauses require either =, a IN or a CONTAINS (collection) ' 'comparison with either a primary key or indexed field')) if not self._allow_filtering: # if the query is not on an indexed field - if not any([w.index for w in equal_ops]): + if not any(w.index for w in equal_ops): if not any([w.partition_key for w in equal_ops]) and not token_comparison: raise QueryException('Filtering on a clustering key without a partition key is not allowed unless allow_filtering() is called on the querset') From 5c3eb8e6f23742239bace4dd721b10dfe0890f76 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 22 Mar 2016 14:18:16 -0500 Subject: [PATCH 0443/2005] cqle: make NameTable produce part. keys for Token function PYTHON-260 --- cassandra/cqlengine/columns.py | 9 --------- cassandra/cqlengine/functions.py | 2 -- cassandra/cqlengine/named.py | 20 ++++++++++++++++++++ 3 files changed, 20 insertions(+), 11 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 3c353d9001..1981287dc3 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -881,12 +881,3 @@ def __init__(self, model): @property def db_field_name(self): return 'token({0})'.format(', '.join(['"{0}"'.format(c.db_field_name) for c in self.partition_columns])) - - def to_database(self, value): - from cqlengine.functions import Token - assert isinstance(value, Token) - value.set_columns(self.partition_columns) - return value - - def get_cql(self): - return "token({0})".format(", ".join(c.cql for c in self.partition_columns)) diff --git a/cassandra/cqlengine/functions.py b/cassandra/cqlengine/functions.py index 0200f1feb1..ccfe9de93a 100644 --- a/cassandra/cqlengine/functions.py +++ b/cassandra/cqlengine/functions.py @@ -100,14 +100,12 @@ class MaxTimeUUID(TimeUUIDQueryFunction): format_string = 'MaxTimeUUID(%({0})s)' - class Token(BaseQueryFunction): """ compute the token for a given partition key http://cassandra.apache.org/doc/cql3/CQL.html#tokenFun """ - def __init__(self, *values): if len(values) == 1 and isinstance(values[0], (list, tuple)): values = values[0] diff --git a/cassandra/cqlengine/named.py b/cassandra/cqlengine/named.py index f2f26aedc2..90a0d8fdf2 100644 --- a/cassandra/cqlengine/named.py +++ b/cassandra/cqlengine/named.py @@ -12,7 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. +from cassandra.util import OrderedDict + from cassandra.cqlengine import CQLEngineException +from cassandra.cqlengine.columns import Column +from cassandra.cqlengine.connection import get_cluster from cassandra.cqlengine.query import AbstractQueryableColumn, SimpleQuerySet from cassandra.cqlengine.query import DoesNotExist as _DoesNotExist from cassandra.cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned @@ -78,6 +82,8 @@ class NamedTable(object): objects = QuerySetDescriptor() + __partition_keys = None + class DoesNotExist(_DoesNotExist): pass @@ -88,6 +94,20 @@ def __init__(self, keyspace, name): self.keyspace = keyspace self.name = name + @property + def _partition_keys(self): + if not self.__partition_keys: + self._get_partition_keys() + return self.__partition_keys + + def _get_partition_keys(self): + try: + table_meta = get_cluster().metadata.keyspaces[self.keyspace].tables[self.name] + self.__partition_keys = OrderedDict((pk.name, Column(primary_key=True, partition_key=True, db_field=pk.name)) for pk in table_meta.partition_key) + except Exception as e: + raise CQLEngineException("Failed inspecting partition keys for {0}." + "Ensure cqlengine is connected before attempting this with NamedTable.".format(self.column_family_name())) + def column(self, name): return NamedColumn(name) From ef5edd0ba5df78362bec1baec84659f5fa6ef315 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 23 Mar 2016 15:12:57 -0500 Subject: [PATCH 0444/2005] cython: emulate cpython rounding for datetime deserialization PYTHON-480 --- cassandra/cython_utils.pyx | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/cassandra/cython_utils.pyx b/cassandra/cython_utils.pyx index c9caf01f7a..3c6fae036b 100644 --- a/cassandra/cython_utils.pyx +++ b/cassandra/cython_utils.pyx @@ -17,6 +17,8 @@ Duplicate module of util.py, with some accelerated functions used for deserialization. """ +from libc.math cimport modf, round, fabs + from cpython.datetime cimport ( timedelta_new, # cdef inline object timedelta_new(int days, int seconds, int useconds) @@ -44,6 +46,17 @@ cdef datetime_from_timestamp(double timestamp): cdef int days = (timestamp / DAY_IN_SECONDS) cdef int64_t days_in_seconds = ( days) * DAY_IN_SECONDS cdef int seconds = (timestamp - days_in_seconds) - cdef int microseconds = ((timestamp - days_in_seconds - seconds) * 1000000) + cdef double tmp + cdef double micros_left = modf(timestamp, &tmp) * 1000000. + micros_left = modf(micros_left, &tmp) + cdef int microseconds = tmp + + # rounding to emulate fp math in delta_new + cdef int x_odd + tmp = round(micros_left) + if fabs(tmp - micros_left) == 0.5: + x_odd = microseconds & 1 + tmp = 2.0 * round((micros_left + x_odd) * 0.5) - x_odd + microseconds += tmp return DATETIME_EPOC + timedelta_new(days, seconds, microseconds) From 5c3200b0e88300243f08cb15059eef347e5689da Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 23 Mar 2016 15:13:29 -0500 Subject: [PATCH 0445/2005] test: side-by-side datetime deserialization tests PYTHON-480 --- tests/unit/cython/test_types.py | 4 +++ tests/unit/cython/types_testhelper.pyx | 40 ++++++++++++++++++++++++-- 2 files changed, 42 insertions(+), 2 deletions(-) diff --git a/tests/unit/cython/test_types.py b/tests/unit/cython/test_types.py index 0286450b57..d9f3a746e1 100644 --- a/tests/unit/cython/test_types.py +++ b/tests/unit/cython/test_types.py @@ -26,3 +26,7 @@ class TypesTest(unittest.TestCase): @cythontest def test_datetype(self): types_testhelper.test_datetype(self.assertEqual) + + @cythontest + def test_date_side_by_side(self): + types_testhelper.test_date_side_by_side(self.assertEqual) diff --git a/tests/unit/cython/types_testhelper.pyx b/tests/unit/cython/types_testhelper.pyx index 0574d406fc..3cd60c550f 100644 --- a/tests/unit/cython/types_testhelper.pyx +++ b/tests/unit/cython/types_testhelper.pyx @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. - -import time +import calendar import datetime +import time include '../../../cassandra/ioutils.pyx' @@ -70,3 +70,39 @@ def test_datetype(assert_equal): # Large date overflow (PYTHON-452) expected = 2177403010.123 assert_equal(deserialize(expected), datetime.datetime(2038, 12, 31, 10, 10, 10, 123000)) + + +def test_date_side_by_side(assert_equal): + # Test pure python and cython date deserialization side-by-side + # This is meant to detect inconsistent rounding or conversion (PYTHON-480 for example) + # The test covers the full range of time deserializable in Python. It bounds through + # the range in factors of two to cover floating point scale. At each bound it sweeps + # all combinations of fractional seconds to verify rounding + + cdef BytesIOReader reader + cdef Buffer buf + cdef Deserializer cython_deserializer = find_deserializer(DateType) + import time + + def verify_time(ms): + blob = DateType.serialize(ms, 0) + bior = BytesIOReader(blob) + buf.ptr = bior.read() + buf.size = bior.size + cython_deserialized = from_binary(cython_deserializer, &buf, 0) + python_deserialized = DateType.deserialize(blob, 0) + assert_equal(cython_deserialized, python_deserialized) + + # min -> 0 + x = int(calendar.timegm(datetime.datetime(1, 1, 1).utctimetuple()) * 1000) + while x < -1: # for some reason -1 // 2 == -1 so we can't wait for zero + for y in range(1000): + verify_time(x + y) + x //= 2 + + # max -> 0 + x = int(calendar.timegm(datetime.datetime(9999, 12, 31, 23, 59, 59, 999999).utctimetuple()) * 1000) + while x: + for ms in range(1000): + verify_time(x - ms) + x //= 2 From 4d691abfa89cebdbd648cb31ceaa0ea0d4ab194a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 23 Mar 2016 16:30:20 -0500 Subject: [PATCH 0446/2005] cqle: Simplify Model __eq__ a little --- cassandra/cqlengine/models.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 32d9beef45..4e71ccaaa7 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -492,12 +492,7 @@ def __eq__(self, other): if keys != other_keys: return False - # check that all of the attributes match - for key in other_keys: - if getattr(self, key, None) != getattr(other, key, None): - return False - - return True + return all(getattr(self, key, None) == getattr(other, key, None) for key in other_keys) def __ne__(self, other): return not self.__eq__(other) From ec868395340eabc9656a58a17d55564581191081 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 23 Mar 2016 16:30:54 -0500 Subject: [PATCH 0447/2005] cqle: add the option to truncate datetime values to match DB quantization PYTHON-273 --- cassandra/cqlengine/columns.py | 20 ++++++++++++++++++-- docs/api/cassandra/cqlengine/columns.rst | 2 ++ 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 1981287dc3..c441b8f23b 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -13,7 +13,7 @@ # limitations under the License. from copy import deepcopy, copy -from datetime import date, datetime +from datetime import date, datetime, timedelta import logging import six from uuid import UUID as _UUID @@ -429,11 +429,27 @@ class DateTime(Column): """ db_type = 'timestamp' + truncate_microseconds = False + """ + Set this ``True`` to have model instances truncate the date, quantizing it in the same way it will be in the database. + This allows equality comparison between assigned values and values read back from the database:: + + DateTime.truncate_microseconds = True + assert Model.create(id=0, d=datetime.utcnow()) == Model.objects(id=0).first() + + Defaults to ``False`` to preserve legacy behavior. May change in the future. + """ + def to_python(self, value): if value is None: return if isinstance(value, datetime): - return value + if DateTime.truncate_microseconds: + us = value.microsecond + truncated_us = us // 1000 * 1000 + return value - timedelta(microseconds=us - truncated_us) + else: + return value elif isinstance(value, date): return datetime(*(value.timetuple()[:6])) diff --git a/docs/api/cassandra/cqlengine/columns.rst b/docs/api/cassandra/cqlengine/columns.rst index 3f83eac5e6..670633f73a 100644 --- a/docs/api/cassandra/cqlengine/columns.rst +++ b/docs/api/cassandra/cqlengine/columns.rst @@ -54,6 +54,8 @@ Columns of all types are initialized by passing :class:`.Column` attributes to t .. autoclass:: DateTime(**kwargs) + .. autoattribute:: truncate_microseconds + .. autoclass:: Decimal(**kwargs) .. autoclass:: Double(**kwargs) From fc0ae79f267727342a491ef674ff2d2c352b25b2 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 23 Mar 2016 17:38:20 -0500 Subject: [PATCH 0448/2005] Updating workload logic to handle spark worker resource allocation --- tests/integration/__init__.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 2801fb3197..c2f201a256 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -261,6 +261,9 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[]): CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True}) if CASSANDRA_VERSION >= '3.0': CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True}) + if 'spark' in workloads: + config_options = {"initial_spark_worker_resources": 0.1} + CCM_CLUSTER.set_dse_configuration_options(config_options) common.switch_cluster(path, cluster_name) CCM_CLUSTER.populate(nodes, ipformat=ipformat) try: From 25a9f08ba0e5c38da9cddc54b16f6c2c79d46725 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 24 Mar 2016 13:58:58 -0500 Subject: [PATCH 0449/2005] Check deferred/only fields before copy --- cassandra/cqlengine/query.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 5626c3481d..868716d74e 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -832,10 +832,11 @@ def allow_filtering(self): return clone def _only_or_defer(self, action, fields): - clone = copy.deepcopy(self) - if clone._defer_fields or clone._only_fields: + if self._defer_fields or self._only_fields: raise QueryException("QuerySet already has only or defer fields defined") + clone = copy.deepcopy(self) + # check for strange fields missing_fields = [f for f in fields if f not in self.model._columns.keys()] if missing_fields: From f571cec9eedddcd410a288accd93931b2ee24065 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 24 Mar 2016 14:17:10 -0500 Subject: [PATCH 0450/2005] cqle: don't select back known values in the where clause Instead, we track the values from filter, and use those when constructing model instances. PYTHON-520 --- cassandra/cqlengine/query.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 868716d74e..e8fa1546c3 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -14,6 +14,7 @@ import copy from datetime import datetime, timedelta +from functools import partial import time import six from warnings import warn @@ -281,6 +282,7 @@ def __init__(self, model): # see the defer and only methods self._defer_fields = [] + self._deferred_values = {} self._only_fields = [] self._values_list = False @@ -383,7 +385,7 @@ def _execute_query(self): if self._result_cache is None: self._result_generator = (i for i in self._execute(self._select_query())) self._result_cache = [] - self._construct_result = self._get_result_constructor() + self._construct_result = self._maybe_inject_deferred(self._get_result_constructor()) # "DISTINCT COUNT()" is not supported in C* < 2.2, so we need to materialize all results to get # len() and count() working with DISTINCT queries @@ -482,6 +484,15 @@ def _get_result_constructor(self): """ raise NotImplementedError + @staticmethod + def _construct_with_deferred(f, deferred, row): + row.update(deferred) + return f(row) + + def _maybe_inject_deferred(self, constructor): + return partial(self._construct_with_deferred, constructor, self._deferred_values)\ + if self._deferred_values else constructor + def batch(self, batch_obj): """ Set a batch object to run the query on. @@ -624,6 +635,9 @@ def filter(self, *args, **kwargs): query_val = val else: query_val = column.to_database(val) + if not col_op: # only equal values should be deferred + clone._defer_fields.append(col_name) + clone._deferred_values[column.db_field_name] = val # map by db field name for substitution in results clone._where.append(WhereClause(column.db_field_name, operator, query_val, quote_field=quote_field)) From 9b25f70e662094664e6e8b46c71a079c7a923d4b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 24 Mar 2016 15:17:45 -0500 Subject: [PATCH 0451/2005] cqle: make deferred fields a set Allows deferred values while still using deferred key optimization PYTHON-520 --- cassandra/cqlengine/query.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index e8fa1546c3..3f84afc550 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -281,7 +281,7 @@ def __init__(self, model): self._limit = 10000 # see the defer and only methods - self._defer_fields = [] + self._defer_fields = set() self._deferred_values = {} self._only_fields = [] @@ -636,7 +636,7 @@ def filter(self, *args, **kwargs): else: query_val = column.to_database(val) if not col_op: # only equal values should be deferred - clone._defer_fields.append(col_name) + clone._defer_fields.add(col_name) clone._deferred_values[column.db_field_name] = val # map by db field name for substitution in results clone._where.append(WhereClause(column.db_field_name, operator, query_val, quote_field=quote_field)) @@ -846,8 +846,8 @@ def allow_filtering(self): return clone def _only_or_defer(self, action, fields): - if self._defer_fields or self._only_fields: - raise QueryException("QuerySet already has only or defer fields defined") + if action == 'only' and self._only_fields: + raise QueryException("QuerySet already has 'only' fields defined") clone = copy.deepcopy(self) @@ -859,7 +859,7 @@ def _only_or_defer(self, action, fields): ', '.join(missing_fields), self.model.__name__)) if action == 'defer': - clone._defer_fields = fields + clone._defer_fields.update(fields) elif action == 'only': clone._only_fields = fields else: From 74beacef658e0e19ce8b902460297b5c8079be5d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 24 Mar 2016 15:19:06 -0500 Subject: [PATCH 0452/2005] cqle: make flat values list more robust PYTHON-520 --- cassandra/cqlengine/query.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 3f84afc550..84616839c2 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -980,7 +980,8 @@ def _get_result_constructor(self): if not self._values_list: # we want models return lambda rows: self.model._construct_instance(rows) elif self._flat_values_list: # the user has requested flattened list (1 value per row) - return lambda row: row.popitem()[1] + key = self._only_fields[0] + return lambda row: row[key] else: return lambda row: self._get_row_value_list(self._only_fields, row) From 1aee458957da09ab793f5acb5f30ea97bd83f8da Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 24 Mar 2016 15:23:28 -0500 Subject: [PATCH 0453/2005] cqle: simplify result constructors --- cassandra/cqlengine/query.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 84616839c2..9fe0ccfbc4 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -978,18 +978,12 @@ def _select_fields(self): def _get_result_constructor(self): """ Returns a function that will be used to instantiate query results """ if not self._values_list: # we want models - return lambda rows: self.model._construct_instance(rows) + return self.model._construct_instance elif self._flat_values_list: # the user has requested flattened list (1 value per row) key = self._only_fields[0] return lambda row: row[key] else: - return lambda row: self._get_row_value_list(self._only_fields, row) - - def _get_row_value_list(self, fields, row): - result = [] - for x in fields: - result.append(row[x]) - return result + return lambda row: [row[f] for f in self._only_fields] def _get_ordering_condition(self, colname): colname, order_type = super(ModelQuerySet, self)._get_ordering_condition(colname) From abb8ad0c6631d509eec874baad70c80a957307e9 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 24 Mar 2016 15:43:47 -0500 Subject: [PATCH 0454/2005] cqle: simplify result constructor for SimpleQuerySet --- cassandra/cqlengine/query.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 9fe0ccfbc4..3f82b54e78 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -935,16 +935,14 @@ def __getattr__(self, item): class SimpleQuerySet(AbstractQuerySet): """ - + Overrides _get_result_constructor for querysets that do not define a model (e.g. NamedTable queries) """ def _get_result_constructor(self): """ Returns a function that will be used to instantiate query results """ - def _construct_instance(values): - return ResultObject(values) - return _construct_instance + return ResultObject class ModelQuerySet(AbstractQuerySet): From 340479fb64e127901cc240f0b7e638f4a9e06ea7 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 25 Mar 2016 13:28:47 -0500 Subject: [PATCH 0455/2005] PYTHON-260 added test for additional logging --- tests/integration/__init__.py | 20 ++++++ .../cqlengine/management/test_management.py | 64 ++++++++++++++++++- 2 files changed, 81 insertions(+), 3 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index c2f201a256..4b3afaa045 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -484,6 +484,26 @@ def drop_function_table(self): execute_until_pass(self.session, ddl) +class MockLoggingHandler(logging.Handler): + """Mock logging handler to check for expected logs.""" + + def __init__(self, *args, **kwargs): + self.reset() + logging.Handler.__init__(self, *args, **kwargs) + + def emit(self, record): + self.messages[record.levelname.lower()].append(record.getMessage()) + + def reset(self): + self.messages = { + 'debug': [], + 'info': [], + 'warning': [], + 'error': [], + 'critical': [], + } + + class BasicExistingKeyspaceUnitTestCase(BasicKeyspaceUnitTestCase): """ This is basic unit test defines class level teardown and setup methods. It assumes that keyspace is already defined, or created as part of the test. diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 013168cefe..2dc055353f 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -17,17 +17,19 @@ import unittest # noqa import mock - +import logging from cassandra.cqlengine.connection import get_session, get_cluster from cassandra.cqlengine import CQLEngineException from cassandra.cqlengine import management -from cassandra.cqlengine.management import _get_non_pk_field_names, _get_table_metadata, sync_table, drop_table +from cassandra.cqlengine.management import _get_non_pk_field_names, _get_table_metadata, sync_table, drop_table, sync_type from cassandra.cqlengine.models import Model from cassandra.cqlengine import columns -from tests.integration import PROTOCOL_VERSION, greaterthancass20 +from tests.integration import PROTOCOL_VERSION, greaterthancass20, MockLoggingHandler from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration.cqlengine.query.test_queryset import TestModel +from cassandra.cqlengine.usertype import UserType +from tests.integration.cqlengine import DEFAULT_KEYSPACE class KeyspaceManagementTest(BaseCassEngTestCase): @@ -248,6 +250,62 @@ class IndexCaseSensitiveModel(Model): second_key = columns.Text(index=True) +class BaseInconsistent(Model): + + __table_name__ = 'inconsistent' + first_key = columns.UUID(primary_key=True) + second_key = columns.Integer(index=True) + third_key = columns.Integer(index=True) + + +class ChangedInconsistent(Model): + + __table_name__ = 'inconsistent' + __table_name_case_sensitive__ = True + first_key = columns.UUID(primary_key=True) + second_key = columns.Text(index=True) + + +class BaseInconsistentType(UserType): + __type_name__ = 'type_inconsistent' + age = columns.Integer() + name = columns.Text() + + +class ChangedInconsistentType(UserType): + __type_name__ = 'type_inconsistent' + age = columns.Integer() + name = columns.Integer() + + +class InconsistentTable(BaseCassEngTestCase): + + def setUp(self): + drop_table(IndexModel) + + def test_sync_warnings(self): + """ + Test to insure when inconsistent changes are made to a table, or type as part of a sync call that the proper logging messages are surfaced + + @since 3.2 + @jira_ticket PYTHON-260 + @expected_result warnings are logged + + @test_category object_mapper + """ + mock_handler = MockLoggingHandler() + logger = logging.getLogger(management.__name__) + logger.addHandler(mock_handler) + sync_table(BaseInconsistent) + sync_table(ChangedInconsistent) + self.assertTrue('differing from the model type' in mock_handler.messages.get('warning')[0]) + sync_type(DEFAULT_KEYSPACE, BaseInconsistentType) + mock_handler.reset() + sync_type(DEFAULT_KEYSPACE, ChangedInconsistentType) + self.assertTrue('differing from the model user type' in mock_handler.messages.get('warning')[0]) + logger.removeHandler(mock_handler) + + class IndexTests(BaseCassEngTestCase): def setUp(self): From 877281080ba714a22cb1553d05d087a4c79fa863 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 25 Mar 2016 15:42:09 -0500 Subject: [PATCH 0456/2005] PYTHON-272 adding simple test for token function and named_tables --- .../cqlengine/query/test_queryoperators.py | 30 +++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/tests/integration/cqlengine/query/test_queryoperators.py b/tests/integration/cqlengine/query/test_queryoperators.py index af10435ca4..ae19f2498e 100644 --- a/tests/integration/cqlengine/query/test_queryoperators.py +++ b/tests/integration/cqlengine/query/test_queryoperators.py @@ -19,11 +19,13 @@ from cassandra.cqlengine import query from cassandra.cqlengine.management import sync_table, drop_table from cassandra.cqlengine.models import Model +from cassandra.cqlengine.named import NamedTable from cassandra.cqlengine.operators import EqualsOperator from cassandra.cqlengine.statements import WhereClause - +from tests.integration.cqlengine import DEFAULT_KEYSPACE from tests.integration.cqlengine.base import BaseCassEngTestCase + class TestQuerySetOperation(BaseCassEngTestCase): def test_maxtimeuuid_function(self): @@ -54,7 +56,7 @@ def test_mintimeuuid_function(self): class TokenTestModel(Model): - + __table_name__ = "token_test_model" key = columns.Integer(primary_key=True) val = columns.Integer() @@ -124,3 +126,27 @@ class TestModel(Model): # The # of arguments to Token must match the # of partition keys func = functions.Token('a') self.assertRaises(query.QueryException, TestModel.objects.filter, pk__token__gt=func) + + def test_named_table_pk_token_function(self): + """ + Test to ensure that token function work with named tables. + + @since 3.2 + @jira_ticket PYTHON-272 + @expected_result partition key token functions should all for pagination. Prior to Python-272 + this would fail with an AttributeError + + @test_category object_mapper + """ + + for i in range(5): + TokenTestModel.create(key=i, val=i) + named = NamedTable(DEFAULT_KEYSPACE, TokenTestModel.__table_name__) + + query = named.objects.all().limit(1) + first_page = list(query) + last = first_page[-1] + self.assertTrue(len(first_page) is 1) + next_page = list(query.filter(pk__token__gt=functions.Token(last.key))) + self.assertTrue(len(next_page) is 1) + From 31216ea21343c8074e050451aa821ba04611fb4f Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 29 Mar 2016 12:42:05 -0500 Subject: [PATCH 0457/2005] PYTHON-273 adding simple test for datetime microsecond truncation --- .../cqlengine/columns/test_validation.py | 35 +++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 7b51080bff..0480fe43e8 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -105,6 +105,29 @@ def test_datetime_large(self): dt2 = self.DatetimeTest.objects(test_id=6).first() assert dt2.created_at == dt_value + def test_datetime_truncate_microseconds(self): + """ + Test to ensure that truncate microseconds works as expected. + This will be default behavior in the future and we will need to modify the tests to comply + with new behavior + + @since 3.2 + @jira_ticket PYTHON-273 + @expected_result microseconds should be to the nearest thousand when truncate is set. + + @test_category object_mapper + """ + DateTime.truncate_microseconds = True + try: + dt_value = datetime(2024, 12, 31, 10, 10, 10, 923567) + dt_truncated = datetime(2024, 12, 31, 10, 10, 10, 923000) + self.DatetimeTest.objects.create(test_id=6, created_at=dt_value) + dt2 = self.DatetimeTest.objects(test_id=6).first() + self.assertEqual(dt2.created_at,dt_truncated) + finally: + # We need to always return behavior to default + DateTime.truncate_microseconds = False + class TestBoolDefault(BaseCassEngTestCase): class BoolDefaultValueTest(Model): @@ -122,6 +145,7 @@ def test_default_is_set(self): tmp2 = self.BoolDefaultValueTest.get(test_id=1) self.assertEqual(True, tmp2.stuff) + class TestBoolValidation(BaseCassEngTestCase): class BoolValidationTest(Model): @@ -138,6 +162,7 @@ def test_validation_preserves_none(self): test_obj.validate() self.assertIsNone(test_obj.bool_column) + class TestVarInt(BaseCassEngTestCase): class VarIntTest(Model): @@ -228,6 +253,7 @@ def test_decimal_io(self): dt2 = self.DecimalTest.objects(test_id=0).first() assert dt2.dec_val == D('5') + class TestUUID(BaseCassEngTestCase): class UUIDTest(Model): @@ -261,6 +287,7 @@ def test_uuid_with_upcase(self): t1 = self.UUIDTest.get(test_id=0) assert a_uuid == t1.a_uuid + class TestTimeUUID(BaseCassEngTestCase): class TimeUUIDTest(Model): @@ -285,6 +312,7 @@ def test_timeuuid_io(self): assert t1.timeuuid.time == t1.timeuuid.time + class TestInteger(BaseCassEngTestCase): class IntegerTest(Model): @@ -296,6 +324,7 @@ def test_default_zero_fields_validate(self): it = self.IntegerTest() it.validate() + class TestBigInt(BaseCassEngTestCase): class BigIntTest(Model): @@ -307,6 +336,7 @@ def test_default_zero_fields_validate(self): it = self.BigIntTest() it.validate() + class TestText(BaseCassEngTestCase): def test_min_length(self): @@ -352,8 +382,6 @@ def test_non_required_validation(self): Text().validate(None) - - class TestExtraFieldsRaiseException(BaseCassEngTestCase): class TestModel(Model): @@ -363,6 +391,7 @@ def test_extra_field(self): with self.assertRaises(ValidationError): self.TestModel.create(bacon=5000) + class TestPythonDoesntDieWhenExtraFieldIsInCassandra(BaseCassEngTestCase): class TestModel(Model): @@ -376,6 +405,7 @@ def test_extra_field(self): execute("ALTER TABLE {0} add blah int".format(self.TestModel.column_family_name(include_keyspace=True))) self.TestModel.objects().all() + class TestTimeUUIDFromDatetime(BaseCassEngTestCase): def test_conversion_specific_date(self): dt = datetime(1981, 7, 11, microsecond=555000) @@ -391,6 +421,7 @@ def test_conversion_specific_date(self): # checks that we created a UUID1 with the proper timestamp assert new_dt == dt + class TestInet(BaseCassEngTestCase): class InetTestModel(Model): From 6c65df3287226e9195a94e8920bb68e22f1fd332 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 29 Mar 2016 14:37:26 -0500 Subject: [PATCH 0458/2005] cqle: make queryset update clause respect db_field PYTHON-530 --- cassandra/cqlengine/query.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 3f82b54e78..3e787f5c78 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -1180,10 +1180,10 @@ class Row(Model): klass = MapUpdateClause else: raise RuntimeError - us.add_assignment_clause(klass(col_name, col.to_database(val), operation=col_op)) + us.add_assignment_clause(klass(col.db_field_name, col.to_database(val), operation=col_op)) else: us.add_assignment_clause(AssignmentClause( - col_name, col.to_database(val))) + col.db_field_name, col.to_database(val))) if us.assignments: self._execute(us) From 45165873ea9fade57ba671e57089ef0e2c0dd234 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 29 Mar 2016 15:39:59 -0500 Subject: [PATCH 0459/2005] cqle: refactor ContainerUpdateClause init don't pass redundant information, and always use the column db_field_name PYTHON-530 --- cassandra/cqlengine/query.py | 6 +- cassandra/cqlengine/statements.py | 24 +++----- .../statements/test_assignment_clauses.py | 59 ++++++++++--------- .../statements/test_update_statement.py | 11 ++-- 4 files changed, 48 insertions(+), 52 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 3e787f5c78..fe95bec55c 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -1180,7 +1180,7 @@ class Row(Model): klass = MapUpdateClause else: raise RuntimeError - us.add_assignment_clause(klass(col.db_field_name, col.to_database(val), operation=col_op)) + us.add_assignment_clause(klass(col, col.to_database(val), operation=col_op)) else: us.add_assignment_clause(AssignmentClause( col.db_field_name, col.to_database(val))) @@ -1308,8 +1308,8 @@ def update(self): else: raise RuntimeError - clause = klass(col.db_field_name, val, - previous=val_mgr.previous_value, column=col) + clause = klass(col, val, + previous=val_mgr.previous_value) if clause.get_context_size() > 0: statement.add_assignment_clause(clause) else: diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 6a1566bf2e..3e955e826c 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -160,8 +160,8 @@ def insert_tuple(self): class ContainerUpdateClause(AssignmentClause): - def __init__(self, field, value, operation=None, previous=None, column=None): - super(ContainerUpdateClause, self).__init__(field, value) + def __init__(self, column, value, operation=None, previous=None): + super(ContainerUpdateClause, self).__init__(column.db_field_name, value) self.previous = previous self._assignments = None self._operation = operation @@ -184,10 +184,8 @@ def update_context(self, ctx): class SetUpdateClause(ContainerUpdateClause): """ updates a set collection """ - def __init__(self, field, value, operation=None, previous=None, column=None): - super(SetUpdateClause, self).__init__(field, value, operation, previous, column=column) - self._additions = None - self._removals = None + _additions = None + _removals = None def __unicode__(self): qs = [] @@ -256,10 +254,8 @@ def update_context(self, ctx): class ListUpdateClause(ContainerUpdateClause): """ updates a list collection """ - def __init__(self, field, value, operation=None, previous=None, column=None): - super(ListUpdateClause, self).__init__(field, value, operation, previous, column=column) - self._append = None - self._prepend = None + _append = None + _prepend = None def __unicode__(self): if not self._analyzed: @@ -349,9 +345,7 @@ def _analyze(self): class MapUpdateClause(ContainerUpdateClause): """ updates a map collection """ - def __init__(self, field, value, operation=None, previous=None, column=None): - super(MapUpdateClause, self).__init__(field, value, operation, previous, column=column) - self._updates = None + _updates = None def _analyze(self): if self._operation == "update": @@ -401,8 +395,8 @@ def __unicode__(self): class CounterUpdateClause(ContainerUpdateClause): - def __init__(self, field, value, previous=None, column=None): - super(CounterUpdateClause, self).__init__(field, value, previous=previous, column=column) + def __init__(self, column, value, previous=None): + super(CounterUpdateClause, self).__init__(column, value, previous=previous) self.previous = self.previous or 0 def get_context_size(self): diff --git a/tests/integration/cqlengine/statements/test_assignment_clauses.py b/tests/integration/cqlengine/statements/test_assignment_clauses.py index 4fc3a28374..6afc9ff4b5 100644 --- a/tests/integration/cqlengine/statements/test_assignment_clauses.py +++ b/tests/integration/cqlengine/statements/test_assignment_clauses.py @@ -16,6 +16,7 @@ except ImportError: import unittest # noqa +from cassandra.cqlengine.columns import * from cassandra.cqlengine.statements import AssignmentClause, SetUpdateClause, ListUpdateClause, MapUpdateClause, MapDeleteClause, FieldDeleteClause, CounterUpdateClause @@ -33,7 +34,7 @@ def test_insert_tuple(self): class SetUpdateClauseTests(unittest.TestCase): def test_update_from_none(self): - c = SetUpdateClause('s', set((1, 2)), previous=None) + c = SetUpdateClause(Set(Integer, db_field='s'), set((1, 2)), previous=None) c._analyze() c.set_context_id(0) @@ -50,7 +51,7 @@ def test_update_from_none(self): def test_null_update(self): """ tests setting a set to None creates an empty update statement """ - c = SetUpdateClause('s', None, previous=set((1, 2))) + c = SetUpdateClause(Set(Integer, db_field='s'), None, previous=set((1, 2))) c._analyze() c.set_context_id(0) @@ -67,7 +68,7 @@ def test_null_update(self): def test_no_update(self): """ tests an unchanged value creates an empty update statement """ - c = SetUpdateClause('s', set((1, 2)), previous=set((1, 2))) + c = SetUpdateClause(Set(Integer, db_field='s'), set((1, 2)), previous=set((1, 2))) c._analyze() c.set_context_id(0) @@ -85,7 +86,7 @@ def test_no_update(self): def test_update_empty_set(self): """tests assigning a set to an empty set creates a nonempty update statement and nonzero context size.""" - c = SetUpdateClause(field='s', value=set()) + c = SetUpdateClause(column=Set(Integer, db_field='s'), value=set()) c._analyze() c.set_context_id(0) @@ -101,7 +102,7 @@ def test_update_empty_set(self): self.assertEqual(ctx, {'0': set()}) def test_additions(self): - c = SetUpdateClause('s', set((1, 2, 3)), previous=set((1, 2))) + c = SetUpdateClause(Set(Integer, db_field='s'), set((1, 2, 3)), previous=set((1, 2))) c._analyze() c.set_context_id(0) @@ -117,7 +118,7 @@ def test_additions(self): self.assertEqual(ctx, {'0': set((3,))}) def test_removals(self): - c = SetUpdateClause('s', set((1, 2)), previous=set((1, 2, 3))) + c = SetUpdateClause(Set(Integer, db_field='s'), set((1, 2)), previous=set((1, 2, 3))) c._analyze() c.set_context_id(0) @@ -133,7 +134,7 @@ def test_removals(self): self.assertEqual(ctx, {'0': set((3,))}) def test_additions_and_removals(self): - c = SetUpdateClause('s', set((2, 3)), previous=set((1, 2))) + c = SetUpdateClause(Set(Integer, db_field='s'), set((2, 3)), previous=set((1, 2))) c._analyze() c.set_context_id(0) @@ -152,7 +153,7 @@ def test_additions_and_removals(self): class ListUpdateClauseTests(unittest.TestCase): def test_update_from_none(self): - c = ListUpdateClause('s', [1, 2, 3]) + c = ListUpdateClause(List(Integer, db_field='l'), [1, 2, 3]) c._analyze() c.set_context_id(0) @@ -161,14 +162,14 @@ def test_update_from_none(self): self.assertIsNone(c._prepend) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"s" = %(0)s') + self.assertEqual(str(c), '"l" = %(0)s') ctx = {} c.update_context(ctx) self.assertEqual(ctx, {'0': [1, 2, 3]}) def test_update_from_empty(self): - c = ListUpdateClause('s', [1, 2, 3], previous=[]) + c = ListUpdateClause(List(Integer, db_field='l'), [1, 2, 3], previous=[]) c._analyze() c.set_context_id(0) @@ -177,14 +178,14 @@ def test_update_from_empty(self): self.assertIsNone(c._prepend) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"s" = %(0)s') + self.assertEqual(str(c), '"l" = %(0)s') ctx = {} c.update_context(ctx) self.assertEqual(ctx, {'0': [1, 2, 3]}) def test_update_from_different_list(self): - c = ListUpdateClause('s', [1, 2, 3], previous=[3, 2, 1]) + c = ListUpdateClause(List(Integer, db_field='l'), [1, 2, 3], previous=[3, 2, 1]) c._analyze() c.set_context_id(0) @@ -193,14 +194,14 @@ def test_update_from_different_list(self): self.assertIsNone(c._prepend) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"s" = %(0)s') + self.assertEqual(str(c), '"l" = %(0)s') ctx = {} c.update_context(ctx) self.assertEqual(ctx, {'0': [1, 2, 3]}) def test_append(self): - c = ListUpdateClause('s', [1, 2, 3, 4], previous=[1, 2]) + c = ListUpdateClause(List(Integer, db_field='l'), [1, 2, 3, 4], previous=[1, 2]) c._analyze() c.set_context_id(0) @@ -209,14 +210,14 @@ def test_append(self): self.assertIsNone(c._prepend) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"s" = "s" + %(0)s') + self.assertEqual(str(c), '"l" = "l" + %(0)s') ctx = {} c.update_context(ctx) self.assertEqual(ctx, {'0': [3, 4]}) def test_prepend(self): - c = ListUpdateClause('s', [1, 2, 3, 4], previous=[3, 4]) + c = ListUpdateClause(List(Integer, db_field='l'), [1, 2, 3, 4], previous=[3, 4]) c._analyze() c.set_context_id(0) @@ -225,14 +226,14 @@ def test_prepend(self): self.assertEqual(c._prepend, [1, 2]) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"s" = %(0)s + "s"') + self.assertEqual(str(c), '"l" = %(0)s + "l"') ctx = {} c.update_context(ctx) self.assertEqual(ctx, {'0': [1, 2]}) def test_append_and_prepend(self): - c = ListUpdateClause('s', [1, 2, 3, 4, 5, 6], previous=[3, 4]) + c = ListUpdateClause(List(Integer, db_field='l'), [1, 2, 3, 4, 5, 6], previous=[3, 4]) c._analyze() c.set_context_id(0) @@ -241,7 +242,7 @@ def test_append_and_prepend(self): self.assertEqual(c._prepend, [1, 2]) self.assertEqual(c.get_context_size(), 2) - self.assertEqual(str(c), '"s" = %(0)s + "s", "s" = "s" + %(1)s') + self.assertEqual(str(c), '"l" = %(0)s + "l", "l" = "l" + %(1)s') ctx = {} c.update_context(ctx) @@ -249,7 +250,7 @@ def test_append_and_prepend(self): def test_shrinking_list_update(self): """ tests that updating to a smaller list results in an insert statement """ - c = ListUpdateClause('s', [1, 2, 3], previous=[1, 2, 3, 4]) + c = ListUpdateClause(List(Integer, db_field='l'), [1, 2, 3], previous=[1, 2, 3, 4]) c._analyze() c.set_context_id(0) @@ -258,7 +259,7 @@ def test_shrinking_list_update(self): self.assertIsNone(c._prepend) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"s" = %(0)s') + self.assertEqual(str(c), '"l" = %(0)s') ctx = {} c.update_context(ctx) @@ -268,33 +269,33 @@ def test_shrinking_list_update(self): class MapUpdateTests(unittest.TestCase): def test_update(self): - c = MapUpdateClause('s', {3: 0, 5: 6}, previous={5: 0, 3: 4}) + c = MapUpdateClause(Map(Integer, Integer, db_field='m'), {3: 0, 5: 6}, previous={5: 0, 3: 4}) c._analyze() c.set_context_id(0) self.assertEqual(c._updates, [3, 5]) self.assertEqual(c.get_context_size(), 4) - self.assertEqual(str(c), '"s"[%(0)s] = %(1)s, "s"[%(2)s] = %(3)s') + self.assertEqual(str(c), '"m"[%(0)s] = %(1)s, "m"[%(2)s] = %(3)s') ctx = {} c.update_context(ctx) self.assertEqual(ctx, {'0': 3, "1": 0, '2': 5, '3': 6}) def test_update_from_null(self): - c = MapUpdateClause('s', {3: 0, 5: 6}) + c = MapUpdateClause(Map(Integer, Integer, db_field='m'), {3: 0, 5: 6}) c._analyze() c.set_context_id(0) self.assertEqual(c._updates, [3, 5]) self.assertEqual(c.get_context_size(), 4) - self.assertEqual(str(c), '"s"[%(0)s] = %(1)s, "s"[%(2)s] = %(3)s') + self.assertEqual(str(c), '"m"[%(0)s] = %(1)s, "m"[%(2)s] = %(3)s') ctx = {} c.update_context(ctx) self.assertEqual(ctx, {'0': 3, "1": 0, '2': 5, '3': 6}) def test_nulled_columns_arent_included(self): - c = MapUpdateClause('s', {3: 0}, {1: 2, 3: 4}) + c = MapUpdateClause(Map(Integer, Integer, db_field='m'), {3: 0}, {1: 2, 3: 4}) c._analyze() c.set_context_id(0) @@ -304,7 +305,7 @@ def test_nulled_columns_arent_included(self): class CounterUpdateTests(unittest.TestCase): def test_positive_update(self): - c = CounterUpdateClause('a', 5, 3) + c = CounterUpdateClause(Counter(db_field='a'), 5, 3) c.set_context_id(5) self.assertEqual(c.get_context_size(), 1) @@ -315,7 +316,7 @@ def test_positive_update(self): self.assertEqual(ctx, {'5': 2}) def test_negative_update(self): - c = CounterUpdateClause('a', 4, 7) + c = CounterUpdateClause(Counter(db_field='a'), 4, 7) c.set_context_id(3) self.assertEqual(c.get_context_size(), 1) @@ -326,7 +327,7 @@ def test_negative_update(self): self.assertEqual(ctx, {'3': 3}) def noop_update(self): - c = CounterUpdateClause('a', 5, 5) + c = CounterUpdateClause(Counter(db_field='a'), 5, 5) c.set_context_id(5) self.assertEqual(c.get_context_size(), 1) diff --git a/tests/integration/cqlengine/statements/test_update_statement.py b/tests/integration/cqlengine/statements/test_update_statement.py index a8ff865e25..8e222b484e 100644 --- a/tests/integration/cqlengine/statements/test_update_statement.py +++ b/tests/integration/cqlengine/statements/test_update_statement.py @@ -16,6 +16,7 @@ except ImportError: import unittest # noqa +from cassandra.cqlengine.columns import * from cassandra.cqlengine.operators import * from cassandra.cqlengine.statements import (UpdateStatement, WhereClause, AssignmentClause, SetUpdateClause, @@ -62,25 +63,25 @@ def test_additional_rendering(self): def test_update_set_add(self): us = UpdateStatement('table') - us.add_assignment_clause(SetUpdateClause('a', set((1,)), operation='add')) + us.add_assignment_clause(SetUpdateClause(Set(Integer, db_field='a'), set((1,)), operation='add')) self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" + %(0)s') def test_update_empty_set_add_does_not_assign(self): us = UpdateStatement('table') - us.add_assignment_clause(SetUpdateClause('a', set(), operation='add')) + us.add_assignment_clause(SetUpdateClause(Set(Integer, db_field='a'), set(), operation='add')) self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" + %(0)s') def test_update_empty_set_removal_does_not_assign(self): us = UpdateStatement('table') - us.add_assignment_clause(SetUpdateClause('a', set(), operation='remove')) + us.add_assignment_clause(SetUpdateClause(Set(Integer, db_field='a'), set(), operation='remove')) self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" - %(0)s') def test_update_list_prepend_with_empty_list(self): us = UpdateStatement('table') - us.add_assignment_clause(ListUpdateClause('a', [], operation='prepend')) + us.add_assignment_clause(ListUpdateClause(List(Integer, db_field='a'), [], operation='prepend')) self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(0)s + "a"') def test_update_list_append_with_empty_list(self): us = UpdateStatement('table') - us.add_assignment_clause(ListUpdateClause('a', [], operation='append')) + us.add_assignment_clause(ListUpdateClause(List(Integer, db_field='a'), [], operation='append')) self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" + %(0)s') From ffe51068f7d6eb80f8dc4c899e307de8fcf7ae76 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 29 Mar 2016 15:45:00 -0500 Subject: [PATCH 0460/2005] cqle: assume we always have the column for container updates PYTHON-530 --- cassandra/cqlengine/statements.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 3e955e826c..9332c4db34 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -169,7 +169,7 @@ def __init__(self, column, value, operation=None, previous=None): self._column = column def _to_database(self, val): - return self._column.to_database(val) if self._column else val + return self._column.to_database(val) def _analyze(self): raise NotImplementedError @@ -369,8 +369,8 @@ def update_context(self, ctx): else: for key in self._updates or []: val = self.value.get(key) - ctx[str(ctx_id)] = self._column.key_col.to_database(key) if self._column else key - ctx[str(ctx_id + 1)] = self._column.value_col.to_database(val) if self._column else val + ctx[str(ctx_id)] = self._column.key_col.to_database(key) + ctx[str(ctx_id + 1)] = self._column.value_col.to_database(val) ctx_id += 2 @property From f2b2d2bd0c6eddc6a12cbc6a6e013b835bc6b60d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 30 Mar 2016 09:43:42 -0500 Subject: [PATCH 0461/2005] cqle: refactor common update lagic to UpdateStatement PYTHON-530 --- cassandra/cqlengine/query.py | 41 ++----------------------------- cassandra/cqlengine/statements.py | 40 +++++++++++++++++++++++++++--- 2 files changed, 38 insertions(+), 43 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index fe95bec55c..f9e1a75d42 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -1167,23 +1167,7 @@ class Row(Model): nulled_columns.add(col_name) continue - # add the update statements - if isinstance(col, columns.Counter): - # TODO: implement counter updates - raise NotImplementedError - elif isinstance(col, (columns.List, columns.Set, columns.Map)): - if isinstance(col, columns.List): - klass = ListUpdateClause - elif isinstance(col, columns.Set): - klass = SetUpdateClause - elif isinstance(col, columns.Map): - klass = MapUpdateClause - else: - raise RuntimeError - us.add_assignment_clause(klass(col, col.to_database(val), operation=col_op)) - else: - us.add_assignment_clause(AssignmentClause( - col.db_field_name, col.to_database(val))) + us.add_update(col, val, operation=col_op) if us.assignments: self._execute(us) @@ -1295,28 +1279,7 @@ def update(self): continue static_changed_only = static_changed_only and col.static - if isinstance(col, (columns.BaseContainerColumn, columns.Counter)): - # get appropriate clause - if isinstance(col, columns.List): - klass = ListUpdateClause - elif isinstance(col, columns.Map): - klass = MapUpdateClause - elif isinstance(col, columns.Set): - klass = SetUpdateClause - elif isinstance(col, columns.Counter): - klass = CounterUpdateClause - else: - raise RuntimeError - - clause = klass(col, val, - previous=val_mgr.previous_value) - if clause.get_context_size() > 0: - statement.add_assignment_clause(clause) - else: - statement.add_assignment_clause(AssignmentClause( - col.db_field_name, - col.to_database(val) - )) + statement.add_update(col, val, previous=val_mgr.previous_value) if statement.assignments: for name, col in self.model._primary_keys.items(): diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 9332c4db34..14b592b8a9 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -17,6 +17,7 @@ import six from cassandra.query import FETCH_SIZE_UNSET +from cassandra.cqlengine import columns from cassandra.cqlengine import UnicodeMixin from cassandra.cqlengine.functions import QueryValue from cassandra.cqlengine.operators import BaseWhereOperator, InOperator @@ -158,6 +159,17 @@ def insert_tuple(self): return self.field, self.context_id +class ContainerUpdateClauseMapMeta(type): + + def __init__(cls, name, bases, dct): + if not hasattr(cls, 'clause_map'): + cls.clause_map = {} + else: + cls.clause_map[cls.col_type] = cls + super(ContainerUpdateClauseMapMeta, cls).__init__(name, bases, dct) + + +@six.add_metaclass(ContainerUpdateClauseMapMeta) class ContainerUpdateClause(AssignmentClause): def __init__(self, column, value, operation=None, previous=None): @@ -184,6 +196,8 @@ def update_context(self, ctx): class SetUpdateClause(ContainerUpdateClause): """ updates a set collection """ + col_type = columns.Set + _additions = None _removals = None @@ -254,6 +268,8 @@ def update_context(self, ctx): class ListUpdateClause(ContainerUpdateClause): """ updates a list collection """ + col_type = columns.List + _append = None _prepend = None @@ -345,6 +361,8 @@ def _analyze(self): class MapUpdateClause(ContainerUpdateClause): """ updates a map collection """ + col_type = columns.Map + _updates = None def _analyze(self): @@ -393,17 +411,19 @@ def __unicode__(self): return ', '.join(qs) -class CounterUpdateClause(ContainerUpdateClause): +class CounterUpdateClause(AssignmentClause): + + col_type = columns.Counter def __init__(self, column, value, previous=None): - super(CounterUpdateClause, self).__init__(column, value, previous=previous) - self.previous = self.previous or 0 + super(CounterUpdateClause, self).__init__(column.db_field_name, column.to_database(value)) + self.previous = column.to_database(previous) if previous else 0 def get_context_size(self): return 1 def update_context(self, ctx): - ctx[str(self.context_id)] = self._to_database(abs(self.value - self.previous)) + ctx[str(self.context_id)] = abs(self.value - self.previous) def __unicode__(self): delta = self.value - self.previous @@ -783,6 +803,18 @@ def update_context_id(self, i): conditional.set_context_id(self.context_counter) self.context_counter += conditional.get_context_size() + def add_update(self, column, value, operation=None, previous=None): + col_type = type(column) + container_update_type = ContainerUpdateClause.clause_map.get(col_type) + if container_update_type: + clause = container_update_type(column, value, operation, previous) + elif col_type == columns.Counter: + clause = CounterUpdateClause(column, value) + else: + clause = AssignmentClause(column.db_field_name, column.to_database(value)) + if clause.get_context_size(): # this is to exclude map removals from updates. Can go away if we drop support for C* < 1.2.4 and remove two-phase updates + self.add_assignment_clause(clause) + class DeleteStatement(BaseCQLStatement): """ a cql delete statement """ From 8a18b4a5988ff12abfaacba00099b4918cf70842 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 30 Mar 2016 10:19:14 -0500 Subject: [PATCH 0462/2005] cqle: refactor to_database conversions for update clauses PYTHON-530 --- cassandra/cqlengine/statements.py | 56 +++++++++--------- .../statements/test_assignment_clauses.py | 59 +++++++++---------- .../statements/test_update_statement.py | 11 ++-- 3 files changed, 61 insertions(+), 65 deletions(-) diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 14b592b8a9..f5f626a49b 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -159,29 +159,25 @@ def insert_tuple(self): return self.field, self.context_id -class ContainerUpdateClauseMapMeta(type): +class ContainerUpdateTypeMapMeta(type): def __init__(cls, name, bases, dct): - if not hasattr(cls, 'clause_map'): - cls.clause_map = {} + if not hasattr(cls, 'type_map'): + cls.type_map = {} else: - cls.clause_map[cls.col_type] = cls - super(ContainerUpdateClauseMapMeta, cls).__init__(name, bases, dct) + cls.type_map[cls.col_type] = cls + super(ContainerUpdateTypeMapMeta, cls).__init__(name, bases, dct) -@six.add_metaclass(ContainerUpdateClauseMapMeta) +@six.add_metaclass(ContainerUpdateTypeMapMeta) class ContainerUpdateClause(AssignmentClause): - def __init__(self, column, value, operation=None, previous=None): - super(ContainerUpdateClause, self).__init__(column.db_field_name, value) + def __init__(self, field, value, operation=None, previous=None): + super(ContainerUpdateClause, self).__init__(field, value) self.previous = previous self._assignments = None self._operation = operation self._analyzed = False - self._column = column - - def _to_database(self, val): - return self._column.to_database(val) def _analyze(self): raise NotImplementedError @@ -254,15 +250,15 @@ def update_context(self, ctx): self._assignments is None and self._additions is None and self._removals is None): - ctx[str(ctx_id)] = self._to_database({}) + ctx[str(ctx_id)] = set() if self._assignments is not None: - ctx[str(ctx_id)] = self._to_database(self._assignments) + ctx[str(ctx_id)] = self._assignments ctx_id += 1 if self._additions is not None: - ctx[str(ctx_id)] = self._to_database(self._additions) + ctx[str(ctx_id)] = self._additions ctx_id += 1 if self._removals is not None: - ctx[str(ctx_id)] = self._to_database(self._removals) + ctx[str(ctx_id)] = self._removals class ListUpdateClause(ContainerUpdateClause): @@ -301,13 +297,13 @@ def update_context(self, ctx): self._analyze() ctx_id = self.context_id if self._assignments is not None: - ctx[str(ctx_id)] = self._to_database(self._assignments) + ctx[str(ctx_id)] = self._assignments ctx_id += 1 if self._prepend is not None: - ctx[str(ctx_id)] = self._to_database(self._prepend) + ctx[str(ctx_id)] = self._prepend ctx_id += 1 if self._append is not None: - ctx[str(ctx_id)] = self._to_database(self._append) + ctx[str(ctx_id)] = self._append def _analyze(self): """ works out the updates to be performed """ @@ -387,8 +383,8 @@ def update_context(self, ctx): else: for key in self._updates or []: val = self.value.get(key) - ctx[str(ctx_id)] = self._column.key_col.to_database(key) - ctx[str(ctx_id + 1)] = self._column.value_col.to_database(val) + ctx[str(ctx_id)] = key + ctx[str(ctx_id + 1)] = val ctx_id += 2 @property @@ -415,9 +411,9 @@ class CounterUpdateClause(AssignmentClause): col_type = columns.Counter - def __init__(self, column, value, previous=None): - super(CounterUpdateClause, self).__init__(column.db_field_name, column.to_database(value)) - self.previous = column.to_database(previous) if previous else 0 + def __init__(self, field, value, previous=None): + super(CounterUpdateClause, self).__init__(field, value) + self.previous = previous or 0 def get_context_size(self): return 1 @@ -804,15 +800,17 @@ def update_context_id(self, i): self.context_counter += conditional.get_context_size() def add_update(self, column, value, operation=None, previous=None): + value = column.to_database(value) col_type = type(column) - container_update_type = ContainerUpdateClause.clause_map.get(col_type) + container_update_type = ContainerUpdateClause.type_map.get(col_type) if container_update_type: - clause = container_update_type(column, value, operation, previous) + previous = column.to_database(previous) + clause = container_update_type(column.db_field_name, value, operation, previous) elif col_type == columns.Counter: - clause = CounterUpdateClause(column, value) + clause = CounterUpdateClause(column.db_field_name, value) else: - clause = AssignmentClause(column.db_field_name, column.to_database(value)) - if clause.get_context_size(): # this is to exclude map removals from updates. Can go away if we drop support for C* < 1.2.4 and remove two-phase updates + clause = AssignmentClause(column.db_field_name, value) + if clause.get_context_size(): # this is to exclude map removals from updates. Can go away if we drop support for C* < 1.2.4 and remove two-phase updates self.add_assignment_clause(clause) diff --git a/tests/integration/cqlengine/statements/test_assignment_clauses.py b/tests/integration/cqlengine/statements/test_assignment_clauses.py index 6afc9ff4b5..4fc3a28374 100644 --- a/tests/integration/cqlengine/statements/test_assignment_clauses.py +++ b/tests/integration/cqlengine/statements/test_assignment_clauses.py @@ -16,7 +16,6 @@ except ImportError: import unittest # noqa -from cassandra.cqlengine.columns import * from cassandra.cqlengine.statements import AssignmentClause, SetUpdateClause, ListUpdateClause, MapUpdateClause, MapDeleteClause, FieldDeleteClause, CounterUpdateClause @@ -34,7 +33,7 @@ def test_insert_tuple(self): class SetUpdateClauseTests(unittest.TestCase): def test_update_from_none(self): - c = SetUpdateClause(Set(Integer, db_field='s'), set((1, 2)), previous=None) + c = SetUpdateClause('s', set((1, 2)), previous=None) c._analyze() c.set_context_id(0) @@ -51,7 +50,7 @@ def test_update_from_none(self): def test_null_update(self): """ tests setting a set to None creates an empty update statement """ - c = SetUpdateClause(Set(Integer, db_field='s'), None, previous=set((1, 2))) + c = SetUpdateClause('s', None, previous=set((1, 2))) c._analyze() c.set_context_id(0) @@ -68,7 +67,7 @@ def test_null_update(self): def test_no_update(self): """ tests an unchanged value creates an empty update statement """ - c = SetUpdateClause(Set(Integer, db_field='s'), set((1, 2)), previous=set((1, 2))) + c = SetUpdateClause('s', set((1, 2)), previous=set((1, 2))) c._analyze() c.set_context_id(0) @@ -86,7 +85,7 @@ def test_no_update(self): def test_update_empty_set(self): """tests assigning a set to an empty set creates a nonempty update statement and nonzero context size.""" - c = SetUpdateClause(column=Set(Integer, db_field='s'), value=set()) + c = SetUpdateClause(field='s', value=set()) c._analyze() c.set_context_id(0) @@ -102,7 +101,7 @@ def test_update_empty_set(self): self.assertEqual(ctx, {'0': set()}) def test_additions(self): - c = SetUpdateClause(Set(Integer, db_field='s'), set((1, 2, 3)), previous=set((1, 2))) + c = SetUpdateClause('s', set((1, 2, 3)), previous=set((1, 2))) c._analyze() c.set_context_id(0) @@ -118,7 +117,7 @@ def test_additions(self): self.assertEqual(ctx, {'0': set((3,))}) def test_removals(self): - c = SetUpdateClause(Set(Integer, db_field='s'), set((1, 2)), previous=set((1, 2, 3))) + c = SetUpdateClause('s', set((1, 2)), previous=set((1, 2, 3))) c._analyze() c.set_context_id(0) @@ -134,7 +133,7 @@ def test_removals(self): self.assertEqual(ctx, {'0': set((3,))}) def test_additions_and_removals(self): - c = SetUpdateClause(Set(Integer, db_field='s'), set((2, 3)), previous=set((1, 2))) + c = SetUpdateClause('s', set((2, 3)), previous=set((1, 2))) c._analyze() c.set_context_id(0) @@ -153,7 +152,7 @@ def test_additions_and_removals(self): class ListUpdateClauseTests(unittest.TestCase): def test_update_from_none(self): - c = ListUpdateClause(List(Integer, db_field='l'), [1, 2, 3]) + c = ListUpdateClause('s', [1, 2, 3]) c._analyze() c.set_context_id(0) @@ -162,14 +161,14 @@ def test_update_from_none(self): self.assertIsNone(c._prepend) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"l" = %(0)s') + self.assertEqual(str(c), '"s" = %(0)s') ctx = {} c.update_context(ctx) self.assertEqual(ctx, {'0': [1, 2, 3]}) def test_update_from_empty(self): - c = ListUpdateClause(List(Integer, db_field='l'), [1, 2, 3], previous=[]) + c = ListUpdateClause('s', [1, 2, 3], previous=[]) c._analyze() c.set_context_id(0) @@ -178,14 +177,14 @@ def test_update_from_empty(self): self.assertIsNone(c._prepend) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"l" = %(0)s') + self.assertEqual(str(c), '"s" = %(0)s') ctx = {} c.update_context(ctx) self.assertEqual(ctx, {'0': [1, 2, 3]}) def test_update_from_different_list(self): - c = ListUpdateClause(List(Integer, db_field='l'), [1, 2, 3], previous=[3, 2, 1]) + c = ListUpdateClause('s', [1, 2, 3], previous=[3, 2, 1]) c._analyze() c.set_context_id(0) @@ -194,14 +193,14 @@ def test_update_from_different_list(self): self.assertIsNone(c._prepend) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"l" = %(0)s') + self.assertEqual(str(c), '"s" = %(0)s') ctx = {} c.update_context(ctx) self.assertEqual(ctx, {'0': [1, 2, 3]}) def test_append(self): - c = ListUpdateClause(List(Integer, db_field='l'), [1, 2, 3, 4], previous=[1, 2]) + c = ListUpdateClause('s', [1, 2, 3, 4], previous=[1, 2]) c._analyze() c.set_context_id(0) @@ -210,14 +209,14 @@ def test_append(self): self.assertIsNone(c._prepend) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"l" = "l" + %(0)s') + self.assertEqual(str(c), '"s" = "s" + %(0)s') ctx = {} c.update_context(ctx) self.assertEqual(ctx, {'0': [3, 4]}) def test_prepend(self): - c = ListUpdateClause(List(Integer, db_field='l'), [1, 2, 3, 4], previous=[3, 4]) + c = ListUpdateClause('s', [1, 2, 3, 4], previous=[3, 4]) c._analyze() c.set_context_id(0) @@ -226,14 +225,14 @@ def test_prepend(self): self.assertEqual(c._prepend, [1, 2]) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"l" = %(0)s + "l"') + self.assertEqual(str(c), '"s" = %(0)s + "s"') ctx = {} c.update_context(ctx) self.assertEqual(ctx, {'0': [1, 2]}) def test_append_and_prepend(self): - c = ListUpdateClause(List(Integer, db_field='l'), [1, 2, 3, 4, 5, 6], previous=[3, 4]) + c = ListUpdateClause('s', [1, 2, 3, 4, 5, 6], previous=[3, 4]) c._analyze() c.set_context_id(0) @@ -242,7 +241,7 @@ def test_append_and_prepend(self): self.assertEqual(c._prepend, [1, 2]) self.assertEqual(c.get_context_size(), 2) - self.assertEqual(str(c), '"l" = %(0)s + "l", "l" = "l" + %(1)s') + self.assertEqual(str(c), '"s" = %(0)s + "s", "s" = "s" + %(1)s') ctx = {} c.update_context(ctx) @@ -250,7 +249,7 @@ def test_append_and_prepend(self): def test_shrinking_list_update(self): """ tests that updating to a smaller list results in an insert statement """ - c = ListUpdateClause(List(Integer, db_field='l'), [1, 2, 3], previous=[1, 2, 3, 4]) + c = ListUpdateClause('s', [1, 2, 3], previous=[1, 2, 3, 4]) c._analyze() c.set_context_id(0) @@ -259,7 +258,7 @@ def test_shrinking_list_update(self): self.assertIsNone(c._prepend) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"l" = %(0)s') + self.assertEqual(str(c), '"s" = %(0)s') ctx = {} c.update_context(ctx) @@ -269,33 +268,33 @@ def test_shrinking_list_update(self): class MapUpdateTests(unittest.TestCase): def test_update(self): - c = MapUpdateClause(Map(Integer, Integer, db_field='m'), {3: 0, 5: 6}, previous={5: 0, 3: 4}) + c = MapUpdateClause('s', {3: 0, 5: 6}, previous={5: 0, 3: 4}) c._analyze() c.set_context_id(0) self.assertEqual(c._updates, [3, 5]) self.assertEqual(c.get_context_size(), 4) - self.assertEqual(str(c), '"m"[%(0)s] = %(1)s, "m"[%(2)s] = %(3)s') + self.assertEqual(str(c), '"s"[%(0)s] = %(1)s, "s"[%(2)s] = %(3)s') ctx = {} c.update_context(ctx) self.assertEqual(ctx, {'0': 3, "1": 0, '2': 5, '3': 6}) def test_update_from_null(self): - c = MapUpdateClause(Map(Integer, Integer, db_field='m'), {3: 0, 5: 6}) + c = MapUpdateClause('s', {3: 0, 5: 6}) c._analyze() c.set_context_id(0) self.assertEqual(c._updates, [3, 5]) self.assertEqual(c.get_context_size(), 4) - self.assertEqual(str(c), '"m"[%(0)s] = %(1)s, "m"[%(2)s] = %(3)s') + self.assertEqual(str(c), '"s"[%(0)s] = %(1)s, "s"[%(2)s] = %(3)s') ctx = {} c.update_context(ctx) self.assertEqual(ctx, {'0': 3, "1": 0, '2': 5, '3': 6}) def test_nulled_columns_arent_included(self): - c = MapUpdateClause(Map(Integer, Integer, db_field='m'), {3: 0}, {1: 2, 3: 4}) + c = MapUpdateClause('s', {3: 0}, {1: 2, 3: 4}) c._analyze() c.set_context_id(0) @@ -305,7 +304,7 @@ def test_nulled_columns_arent_included(self): class CounterUpdateTests(unittest.TestCase): def test_positive_update(self): - c = CounterUpdateClause(Counter(db_field='a'), 5, 3) + c = CounterUpdateClause('a', 5, 3) c.set_context_id(5) self.assertEqual(c.get_context_size(), 1) @@ -316,7 +315,7 @@ def test_positive_update(self): self.assertEqual(ctx, {'5': 2}) def test_negative_update(self): - c = CounterUpdateClause(Counter(db_field='a'), 4, 7) + c = CounterUpdateClause('a', 4, 7) c.set_context_id(3) self.assertEqual(c.get_context_size(), 1) @@ -327,7 +326,7 @@ def test_negative_update(self): self.assertEqual(ctx, {'3': 3}) def noop_update(self): - c = CounterUpdateClause(Counter(db_field='a'), 5, 5) + c = CounterUpdateClause('a', 5, 5) c.set_context_id(5) self.assertEqual(c.get_context_size(), 1) diff --git a/tests/integration/cqlengine/statements/test_update_statement.py b/tests/integration/cqlengine/statements/test_update_statement.py index 8e222b484e..a8ff865e25 100644 --- a/tests/integration/cqlengine/statements/test_update_statement.py +++ b/tests/integration/cqlengine/statements/test_update_statement.py @@ -16,7 +16,6 @@ except ImportError: import unittest # noqa -from cassandra.cqlengine.columns import * from cassandra.cqlengine.operators import * from cassandra.cqlengine.statements import (UpdateStatement, WhereClause, AssignmentClause, SetUpdateClause, @@ -63,25 +62,25 @@ def test_additional_rendering(self): def test_update_set_add(self): us = UpdateStatement('table') - us.add_assignment_clause(SetUpdateClause(Set(Integer, db_field='a'), set((1,)), operation='add')) + us.add_assignment_clause(SetUpdateClause('a', set((1,)), operation='add')) self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" + %(0)s') def test_update_empty_set_add_does_not_assign(self): us = UpdateStatement('table') - us.add_assignment_clause(SetUpdateClause(Set(Integer, db_field='a'), set(), operation='add')) + us.add_assignment_clause(SetUpdateClause('a', set(), operation='add')) self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" + %(0)s') def test_update_empty_set_removal_does_not_assign(self): us = UpdateStatement('table') - us.add_assignment_clause(SetUpdateClause(Set(Integer, db_field='a'), set(), operation='remove')) + us.add_assignment_clause(SetUpdateClause('a', set(), operation='remove')) self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" - %(0)s') def test_update_list_prepend_with_empty_list(self): us = UpdateStatement('table') - us.add_assignment_clause(ListUpdateClause(List(Integer, db_field='a'), [], operation='prepend')) + us.add_assignment_clause(ListUpdateClause('a', [], operation='prepend')) self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(0)s + "a"') def test_update_list_append_with_empty_list(self): us = UpdateStatement('table') - us.add_assignment_clause(ListUpdateClause(List(Integer, db_field='a'), [], operation='append')) + us.add_assignment_clause(ListUpdateClause('a', [], operation='append')) self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" + %(0)s') From 56c8ec4442708d3acb0bb9e2f78aa7817ea243b0 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 30 Mar 2016 10:31:29 -0500 Subject: [PATCH 0463/2005] PYTHON-520 adding test for deffered item population --- .../cqlengine/query/test_queryset.py | 73 ++++++++++++++++++- 1 file changed, 72 insertions(+), 1 deletion(-) diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 0097e60490..79043753ef 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -22,7 +22,6 @@ import time from uuid import uuid1, uuid4 import uuid -import sys from cassandra.cluster import Session from cassandra import InvalidRequest @@ -1134,3 +1133,75 @@ def test_defaultFetchSize(self): TestModelSmall.objects.fetch_size(0) with self.assertRaises(QueryException): TestModelSmall.objects.fetch_size(-1) + + +class People(Model): + __table_name__ = "people" + last_name = columns.Text(primary_key=True, partition_key=True) + first_name = columns.Text(primary_key=True) + birthday = columns.DateTime() + + +class People2(Model): + __table_name__ = "people" + last_name = columns.Text(primary_key=True, partition_key=True) + first_name = columns.Text(primary_key=True) + middle_name = columns.Text() + birthday = columns.DateTime() + + +class TestModelQueryWithDifferedFeld(BaseCassEngTestCase): + """ + Tests that selects with filter will deffer population of known values until after the results are returned. + I.E. Instead of generating SELECT * FROM People WHERE last_name="Smith" It will generate + SELECT first_name, birthday FROM People WHERE last_name="Smith" + Where last_name 'smith' will populated post query + + @since 3.2 + @jira_ticket PYTHON-520 + @expected_result only needed fields are included in the query + + @test_category object_mapper + """ + @classmethod + def setUpClass(cls): + super(TestModelQueryWithDifferedFeld, cls).setUpClass() + sync_table(People) + + @classmethod + def tearDownClass(cls): + super(TestModelQueryWithDifferedFeld, cls).tearDownClass() + drop_table(People) + + def test_defaultFetchSize(self): + # Populate Table + People.objects.create(last_name="Smith", first_name="John", birthday=datetime.now()) + People.objects.create(last_name="Bestwater", first_name="Alan", birthday=datetime.now()) + People.objects.create(last_name="Smith", first_name="Greg", birthday=datetime.now()) + People.objects.create(last_name="Smith", first_name="Adam", birthday=datetime.now()) + + # Check query constructions + expected_fields = ['first_name', 'birthday'] + self.assertEqual(People.filter(last_name="Smith")._select_fields(), expected_fields) + # Validate correct fields are fetched + smiths = list(People.filter(last_name="Smith")) + self.assertEqual(len(smiths), 3) + self.assertTrue(smiths[0].last_name is not None) + + # Modify table with new value + sync_table(People2) + + # populate new format + People2.objects.create(last_name="Smith", first_name="Chris", middle_name="Raymond", birthday=datetime.now()) + People2.objects.create(last_name="Smith", first_name="Andrew", middle_name="Micheal", birthday=datetime.now()) + + # validate query construction + expected_fields = ['first_name', 'middle_name', 'birthday'] + self.assertEqual(People2.filter(last_name="Smith")._select_fields(), expected_fields) + + # validate correct items are returneds + smiths = list(People2.filter(last_name="Smith")) + self.assertEqual(len(smiths), 5) + self.assertTrue(smiths[0].last_name is not None) + + From 582eb042aa095d4ca2c4a21565bb50cc55a1f3f2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 30 Mar 2016 10:52:57 -0500 Subject: [PATCH 0464/2005] 3.2a1 relase version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 8bcb645cd6..6a27e19a87 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 1, 1, 'post0') +__version_info__ = (3, 2, 0, 'a1') __version__ = '.'.join(map(str, __version_info__)) From d20086758106e2f6726f67549f6c89248a7f7d2c Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 30 Mar 2016 18:08:21 -0500 Subject: [PATCH 0465/2005] Skipping UDT warning test for C* 2.1 --- .../cqlengine/management/test_management.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 2dc055353f..109ab22fac 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -25,7 +25,7 @@ from cassandra.cqlengine.models import Model from cassandra.cqlengine import columns -from tests.integration import PROTOCOL_VERSION, greaterthancass20, MockLoggingHandler +from tests.integration import PROTOCOL_VERSION, greaterthancass20, MockLoggingHandler, CASSANDRA_VERSION from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration.cqlengine.query.test_queryset import TestModel from cassandra.cqlengine.usertype import UserType @@ -299,10 +299,11 @@ def test_sync_warnings(self): sync_table(BaseInconsistent) sync_table(ChangedInconsistent) self.assertTrue('differing from the model type' in mock_handler.messages.get('warning')[0]) - sync_type(DEFAULT_KEYSPACE, BaseInconsistentType) - mock_handler.reset() - sync_type(DEFAULT_KEYSPACE, ChangedInconsistentType) - self.assertTrue('differing from the model user type' in mock_handler.messages.get('warning')[0]) + if CASSANDRA_VERSION >= '2.1': + sync_type(DEFAULT_KEYSPACE, BaseInconsistentType) + mock_handler.reset() + sync_type(DEFAULT_KEYSPACE, ChangedInconsistentType) + self.assertTrue('differing from the model user type' in mock_handler.messages.get('warning')[0]) logger.removeHandler(mock_handler) From 8599b8322985817cf1e28c21f0d43b1cb431991a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 30 Mar 2016 11:51:06 -0500 Subject: [PATCH 0466/2005] post-release version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 6a27e19a87..33fe14c384 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 2, 0, 'a1') +__version_info__ = (3, 2, 0, 'a1', 'post0') __version__ = '.'.join(map(str, __version_info__)) From 8c473819ead387cd263b94e3f946664ad902fd31 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 31 Mar 2016 11:04:51 -0500 Subject: [PATCH 0467/2005] cql: raise when a model sync requires PK changes PYTHON-532 --- cassandra/cqlengine/management.py | 44 ++++++++++++++----------------- 1 file changed, 20 insertions(+), 24 deletions(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 344b262c65..edb8a3dbed 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -161,7 +161,6 @@ def sync_table(model): for udt in [u for u in udts if u not in syncd_types]: _sync_type(ks_name, udt, syncd_types) - # check for an existing column family if raw_cf_name not in tables: log.debug("sync_table creating new table %s", cf_name) qs = _get_create_table(model) @@ -175,32 +174,36 @@ def sync_table(model): raise else: log.debug("sync_table checking existing table %s", cf_name) - # see if we're missing any columns table_meta = tables[raw_cf_name] - field_names = _get_non_pk_field_names(tables[raw_cf_name]) + table_columns = table_meta.columns model_fields = set() # # TODO: does this work with db_name?? - for name, col in model._columns.items(): - if col.primary_key or col.partition_key: - continue # we can't mess with the PK - model_fields.add(name) - if col.db_field_name in field_names: - col_meta = table_meta.columns[col.db_field_name] + for model_name, col in model._columns.items(): + db_name = col.db_field_name + model_fields.add(db_name) + if db_name in table_columns: + col_meta = table_columns[db_name] if col_meta.cql_type != col.db_type: - msg = 'Existing table {0} has column "{1}" with a type ({2}) differing from the model type ({3}).' \ - ' Model should be updated.'.format(cf_name, col.db_field_name, col_meta.cql_type, col.db_type) - warnings.warn(msg) - log.warning(msg) - continue # skip columns already defined + if col.primary_key or col.partition_key: + raise CQLEngineException("Table {0} already exists, but has different types for primary key column '{1}' ({2}) (model field '{3}' ({4})). " + "Update the model or remove the table.".format(cf_name, db_name, col_meta.cql_type, model_name, col.db_type)) + else: + msg = 'Existing table {0} has column "{1}" with a type ({2}) differing from the model type ({3}).' \ + ' Model should be updated.'.format(cf_name, db_name, col_meta.cql_type, col.db_type) + warnings.warn(msg) + log.warning(msg) + continue + + if col.primary_key or col.primary_key: + raise CQLEngineException("Cannot add primary key '{0}' (with db_field '{1}') to existing table {2}".format(model_name, db_name, cf_name)) - # add missing column using the column def query = "ALTER TABLE {0} add {1}".format(cf_name, col.get_column_def()) execute(query) - db_fields_not_in_model = model_fields.symmetric_difference(field_names) + db_fields_not_in_model = model_fields.symmetric_difference(table_columns) if db_fields_not_in_model: - log.info("Table %s has fields not referenced by model: %s", cf_name, db_fields_not_in_model) + log.info("Table {0} has fields not referenced by model: {1}".format(cf_name, db_fields_not_in_model)) _update_options(model) @@ -339,13 +342,6 @@ def add_column(col): return ' '.join(query_strings) -def _get_non_pk_field_names(table_meta): - # returns all fields that aren't part of the PK - pk_names = set(col.name for col in table_meta.primary_key) - field_names = [name for name in table_meta.columns.keys() if name not in pk_names] - return field_names - - def _get_table_metadata(model): # returns the table as provided by the native driver for a given model cluster = get_cluster() From ba1f54391b11fb3bc011b2fb7ab0623a1b38b77a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 31 Mar 2016 11:05:17 -0500 Subject: [PATCH 0468/2005] cqle: improve one of the sync_table tests PYTHON-532 --- .../cqlengine/management/test_management.py | 34 ++++++++++++------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 109ab22fac..e4b35e2136 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -21,7 +21,7 @@ from cassandra.cqlengine.connection import get_session, get_cluster from cassandra.cqlengine import CQLEngineException from cassandra.cqlengine import management -from cassandra.cqlengine.management import _get_non_pk_field_names, _get_table_metadata, sync_table, drop_table, sync_type +from cassandra.cqlengine.management import _get_table_metadata, sync_table, drop_table, sync_type from cassandra.cqlengine.models import Model from cassandra.cqlengine import columns @@ -128,7 +128,7 @@ class FourthModel(Model): first_key = columns.UUID(primary_key=True) second_key = columns.UUID() third_key = columns.Text() - # removed fourth key, but it should stay in the DB + # renamed model field, but map to existing column renamed = columns.Map(columns.Text, columns.Text, db_field='blah') @@ -138,23 +138,31 @@ def setUp(self): def test_add_column(self): sync_table(FirstModel) - fields = _get_non_pk_field_names(_get_table_metadata(FirstModel)) + meta_columns = _get_table_metadata(FirstModel).columns + self.assertEqual(set(meta_columns), set(FirstModel._columns)) - # this should contain the second key - self.assertEqual(len(fields), 2) - # get schema sync_table(SecondModel) - - fields = _get_non_pk_field_names(_get_table_metadata(FirstModel)) - self.assertEqual(len(fields), 3) + meta_columns = _get_table_metadata(FirstModel).columns + self.assertEqual(set(meta_columns), set(SecondModel._columns)) sync_table(ThirdModel) - fields = _get_non_pk_field_names(_get_table_metadata(FirstModel)) - self.assertEqual(len(fields), 4) + meta_columns = _get_table_metadata(FirstModel).columns + self.assertEqual(len(meta_columns), 5) + self.assertEqual(len(ThirdModel._columns), 4) + self.assertIn('fourth_key', meta_columns) + self.assertNotIn('fourth_key', ThirdModel._columns) + self.assertIn('blah', ThirdModel._columns) + self.assertIn('blah', meta_columns) sync_table(FourthModel) - fields = _get_non_pk_field_names(_get_table_metadata(FirstModel)) - self.assertEqual(len(fields), 4) + meta_columns = _get_table_metadata(FirstModel).columns + self.assertEqual(len(meta_columns), 5) + self.assertEqual(len(ThirdModel._columns), 4) + self.assertIn('fourth_key', meta_columns) + self.assertNotIn('fourth_key', FourthModel._columns) + self.assertIn('renamed', FourthModel._columns) + self.assertNotIn('renamed', meta_columns) + self.assertIn('blah', meta_columns) class ModelWithTableProperties(Model): From b1df7369735f7e3f41c2f3b51ca885d37ef2bf81 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 31 Mar 2016 11:47:17 -0500 Subject: [PATCH 0469/2005] cqle: sync_table: verify pk composition and not just type matching PYTHON-532 --- cassandra/cqlengine/management.py | 36 ++++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index edb8a3dbed..800d6bf68a 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -13,6 +13,7 @@ # limitations under the License. from collections import namedtuple +from itertools import chain import json import logging import os @@ -175,24 +176,23 @@ def sync_table(model): else: log.debug("sync_table checking existing table %s", cf_name) table_meta = tables[raw_cf_name] + + _validate_pk(model, table_meta) + table_columns = table_meta.columns model_fields = set() - # # TODO: does this work with db_name?? + for model_name, col in model._columns.items(): db_name = col.db_field_name model_fields.add(db_name) if db_name in table_columns: col_meta = table_columns[db_name] - if col_meta.cql_type != col.db_type: - if col.primary_key or col.partition_key: - raise CQLEngineException("Table {0} already exists, but has different types for primary key column '{1}' ({2}) (model field '{3}' ({4})). " - "Update the model or remove the table.".format(cf_name, db_name, col_meta.cql_type, model_name, col.db_type)) - else: - msg = 'Existing table {0} has column "{1}" with a type ({2}) differing from the model type ({3}).' \ - ' Model should be updated.'.format(cf_name, db_name, col_meta.cql_type, col.db_type) - warnings.warn(msg) - log.warning(msg) + msg = 'Existing table {0} has column "{1}" with a type ({2}) differing from the model type ({3}).' \ + ' Model should be updated.'.format(cf_name, db_name, col_meta.cql_type, col.db_type) + warnings.warn(msg) + log.warning(msg) + continue if col.primary_key or col.primary_key: @@ -224,6 +224,22 @@ def sync_table(model): execute(qs) +def _validate_pk(model, table_meta): + model_partition = [c.db_field_name for c in model._partition_keys.values()] + meta_partition = [c.name for c in table_meta.partition_key] + model_clustering = [c.db_field_name for c in model._clustering_keys.values()] + meta_clustering = [c.name for c in table_meta.clustering_key] + + if model_partition != meta_partition or model_clustering != meta_clustering: + def _pk_string(partition, clustering): + return "PRIMARY KEY (({0}){1})".format(', '.join(partition), ', ' + ', '.join(clustering) if clustering else '') + raise CQLEngineException("Model {0} PRIMARY KEY composition does not match existing table {1}. " + "Model: {2}; Table: {3}. " + "Update model or drop the table.".format(model, model.column_family_name(), + _pk_string(model_partition, model_clustering), + _pk_string(meta_partition, meta_clustering))) + + def sync_type(ks_name, type_model): """ Inspects the type_model and creates / updates the corresponding type. From 7f1da283ea54cab6112acd07c5c1f748cce4d65e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 31 Mar 2016 11:52:08 -0500 Subject: [PATCH 0470/2005] cqle: remove KeyError for missing keyspace on sync --- cassandra/cqlengine/management.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 800d6bf68a..45167442e9 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -152,7 +152,11 @@ def sync_table(model): cluster = get_cluster() - keyspace = cluster.metadata.keyspaces[ks_name] + try: + keyspace = cluster.metadata.keyspaces[ks_name] + except KeyError: + raise CQLEngineException("Keyspace '{0}' for model {1} does not exist.".format(ks_name, model)) + tables = keyspace.tables syncd_types = set() From 2ce916836d34e51fca8d2ed21f757f06bb927c77 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 31 Mar 2016 12:37:36 -0500 Subject: [PATCH 0471/2005] remove unused import --- cassandra/cqlengine/management.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 45167442e9..19f8e9fa16 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -13,7 +13,6 @@ # limitations under the License. from collections import namedtuple -from itertools import chain import json import logging import os From 3c2bc6664f8745409fd07127a276025fc1ebb33a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 31 Mar 2016 15:20:06 -0500 Subject: [PATCH 0472/2005] remove unused imports --- cassandra/cqlengine/query.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index f9e1a75d42..9ebb040819 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -27,9 +27,7 @@ LessThanOrEqualOperator, ContainsOperator, BaseWhereOperator) from cassandra.cqlengine.statements import (WhereClause, SelectStatement, DeleteStatement, UpdateStatement, AssignmentClause, InsertStatement, - BaseCQLStatement, MapUpdateClause, MapDeleteClause, - ListUpdateClause, SetUpdateClause, CounterUpdateClause, - ConditionalClause) + BaseCQLStatement, MapDeleteClause, ConditionalClause) class QueryException(CQLEngineException): From f03e6381e7bb62bcda868549bb323ac7fd5ffb43 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 31 Mar 2016 15:20:27 -0500 Subject: [PATCH 0473/2005] cqle: partition key index on model columns PYTHON-535 --- cassandra/cqlengine/columns.py | 1 + cassandra/cqlengine/models.py | 4 ++++ cassandra/cqlengine/query.py | 1 + 3 files changed, 6 insertions(+) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index c441b8f23b..f6f843db90 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -159,6 +159,7 @@ def __init__(self, # the column name in the model definition self.column_name = None + self._partition_key_index = None self.static = static self.value = None diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 4e71ccaaa7..ff73207523 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -843,6 +843,7 @@ def _get_polymorphic_base(bases): has_partition_keys = any(v.partition_key for (k, v) in column_definitions) + partition_key_index = 0 # transform column definitions for k, v in column_definitions: # don't allow a column with the same name as a built-in attribute or method @@ -858,6 +859,9 @@ def _get_polymorphic_base(bases): if not has_partition_keys and v.primary_key: v.partition_key = True has_partition_keys = True + if v.partition_key: + v._partition_key_index = partition_key_index + partition_key_index += 1 _transform_column(k, v) partition_keys = OrderedDict(k for k in primary_keys.items() if k[1].partition_key) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 9ebb040819..9e38c21605 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -37,6 +37,7 @@ class QueryException(CQLEngineException): class IfNotExistsWithCounterColumn(CQLEngineException): pass + class IfExistsWithCounterColumn(CQLEngineException): pass From e728c921aa1ec490bdc422ded9119f7517eaa471 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 31 Mar 2016 16:52:22 -0500 Subject: [PATCH 0474/2005] PYTHON-530 adding test to check cql for correct dbfield values --- .../cqlengine/query/test_queryset.py | 33 +++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 0097e60490..43425f12bb 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -1089,6 +1089,39 @@ def test_index(self): self.assertEqual(model.objects(k0=i.k0, k1=i.k1).count(), len(clustering_values)) self.assertEqual(model.objects(k0=i.k0, k1=i.k1, v1=0).count(), 1) + def test_db_field_names_used(self): + """ + Tests to ensure that with generated cql update statements correctly utilize the db_field values. + + @since 3.2 + @jira_ticket PYTHON-530 + @expected_result resulting cql_statements will use the db_field values + + @test_category object_mapper + """ + + values = ('k0', 'k1', 'c0', 'v0', 'v1') + # Test QuerySet Path + b = BatchQuery() + DBFieldModel.objects(k0=1).batch(b).update( + v0=0, + v1=9, + ) + for value in values: + self.assertTrue(value not in str(b.queries[0])) + + # Test DML path + b2 = BatchQuery() + dml_field_model = DBFieldModel.create(k0=1, k1=5, c0=3, v0=4, v1=5) + dml_field_model.batch(b2).update( + v0=0, + v1=9, + ) + for value in values: + self.assertTrue(value not in str(b2.queries[0])) + + + class TestModelSmall(Model): From 1ffd4dd0bbd831cf229f88d473215278f88f83f4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 1 Apr 2016 10:14:48 -0500 Subject: [PATCH 0475/2005] cqle: make statements collect primary key values initially for DML path; still need to address queryset PYTHON-535 --- cassandra/cqlengine/query.py | 27 +++------- cassandra/cqlengine/statements.py | 51 ++++++++++++------- .../statements/test_assignment_statement.py | 28 ---------- .../statements/test_base_statement.py | 8 +-- .../statements/test_delete_statement.py | 18 ++++--- .../statements/test_insert_statement.py | 13 ++--- .../statements/test_select_statement.py | 11 ++-- .../statements/test_update_statement.py | 41 +++++++-------- 8 files changed, 83 insertions(+), 114 deletions(-) delete mode 100644 tests/integration/cqlengine/statements/test_assignment_statement.py diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 9e38c21605..613fd112c7 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -1239,11 +1239,7 @@ def _delete_null_columns(self): if deleted_fields: for name, col in self.model._primary_keys.items(): - ds.add_where_clause(WhereClause( - col.db_field_name, - EqualsOperator(), - col.to_database(getattr(self.instance, name)) - )) + ds.add_where(col, EqualsOperator(), getattr(self.instance, name)) self._execute(ds) def update(self): @@ -1285,11 +1281,7 @@ def update(self): # only include clustering key if clustering key is not null, and non static columns are changed to avoid cql error if (null_clustering_key or static_changed_only) and (not col.partition_key): continue - statement.add_where_clause(WhereClause( - col.db_field_name, - EqualsOperator(), - col.to_database(getattr(self.instance, name)) - )) + statement.add_where(col, EqualsOperator(), getattr(self.instance, name)) self._execute(statement) if not null_clustering_key: @@ -1324,10 +1316,7 @@ def save(self): if self.instance._values[name].changed: nulled_fields.add(col.db_field_name) continue - insert.add_assignment_clause(AssignmentClause( - col.db_field_name, - col.to_database(getattr(self.instance, name, None)) - )) + insert.add_assignment(col, getattr(self.instance, name, None)) # skip query execution if it's empty # caused by pointless update queries @@ -1344,12 +1333,8 @@ def delete(self): ds = DeleteStatement(self.column_family_name, timestamp=self._timestamp, conditionals=self._conditional, if_exists=self._if_exists) for name, col in self.model._primary_keys.items(): - if (not col.partition_key) and (getattr(self.instance, name) is None): + val = getattr(self.instance, name) + if val is None and not col.parition_key: continue - - ds.add_where_clause(WhereClause( - col.db_field_name, - EqualsOperator(), - col.to_database(getattr(self.instance, name)) - )) + ds.add_where(col, EqualsOperator(), val) self._execute(ds) diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index f5f626a49b..2156751b2e 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -481,6 +481,8 @@ def __unicode__(self): class BaseCQLStatement(UnicodeMixin): """ The base cql statement class """ + parition_key_values = None + def __init__(self, table, consistency=None, timestamp=None, where=None, fetch_size=None, conditionals=None): super(BaseCQLStatement, self).__init__() self.table = table @@ -492,20 +494,32 @@ def __init__(self, table, consistency=None, timestamp=None, where=None, fetch_si self.where_clauses = [] for clause in where or []: - self.add_where_clause(clause) + self._add_where_clause(clause) self.conditionals = [] for conditional in conditionals or []: self.add_conditional_clause(conditional) - def add_where_clause(self, clause): - """ - adds a where clause to this statement - :param clause: the clause to add - :type clause: WhereClause - """ - if not isinstance(clause, WhereClause): - raise StatementException("only instances of WhereClause can be added to statements") + def _update_partition_key(self, column, value): + if column.partition_key: + if self.parition_key_values: + self.parition_key_values.append(value) + else: + self.parition_key_values = [value] + # assert part keys are added in order + # this is an optimization based on the way statements are constructed in + # cqlengine.query (columns always iterated in order). If that assumption + # goes away we can preallocate the key values list and insert using + # self.partition_key_values + assert column._partition_key_index == len(self.parition_key_values) - 1 + + def add_where(self, column, operator, value, quote_field=True): + value = column.to_database(value) + clause = WhereClause(column.db_field_name, operator, value, quote_field) + self._add_where_clause(clause) + self._update_partition_key(column, value) + + def _add_where_clause(self, clause): clause.set_context_id(self.context_counter) self.context_counter += clause.get_context_size() self.where_clauses.append(clause) @@ -660,7 +674,7 @@ def __init__(self, # add assignments self.assignments = [] for assignment in assignments or []: - self.add_assignment_clause(assignment) + self._add_assignment_clause(assignment) def update_context_id(self, i): super(AssignmentStatement, self).update_context_id(i) @@ -668,14 +682,13 @@ def update_context_id(self, i): assignment.set_context_id(self.context_counter) self.context_counter += assignment.get_context_size() - def add_assignment_clause(self, clause): - """ - adds an assignment clause to this statement - :param clause: the clause to add - :type clause: AssignmentClause - """ - if not isinstance(clause, AssignmentClause): - raise StatementException("only instances of AssignmentClause can be added to statements") + def add_assignment(self, column, value): + value = column.to_database(value) + clause = AssignmentClause(column.db_field_name, value) + self._add_assignment_clause(clause) + self._update_partition_key(column, value) + + def _add_assignment_clause(self, clause): clause.set_context_id(self.context_counter) self.context_counter += clause.get_context_size() self.assignments.append(clause) @@ -811,7 +824,7 @@ def add_update(self, column, value, operation=None, previous=None): else: clause = AssignmentClause(column.db_field_name, value) if clause.get_context_size(): # this is to exclude map removals from updates. Can go away if we drop support for C* < 1.2.4 and remove two-phase updates - self.add_assignment_clause(clause) + self._add_assignment_clause(clause) class DeleteStatement(BaseCQLStatement): diff --git a/tests/integration/cqlengine/statements/test_assignment_statement.py b/tests/integration/cqlengine/statements/test_assignment_statement.py deleted file mode 100644 index 9d5481d98c..0000000000 --- a/tests/integration/cqlengine/statements/test_assignment_statement.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2013-2016 DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa - -from cassandra.cqlengine.statements import AssignmentStatement, StatementException - - -class AssignmentStatementTest(unittest.TestCase): - - def test_add_assignment_type_checking(self): - """ tests that only assignment clauses can be added to queries """ - stmt = AssignmentStatement('table', []) - with self.assertRaises(StatementException): - stmt.add_assignment_clause('x=5') diff --git a/tests/integration/cqlengine/statements/test_base_statement.py b/tests/integration/cqlengine/statements/test_base_statement.py index 1bda588eff..fb98966854 100644 --- a/tests/integration/cqlengine/statements/test_base_statement.py +++ b/tests/integration/cqlengine/statements/test_base_statement.py @@ -17,17 +17,11 @@ import unittest # noqa from cassandra.query import FETCH_SIZE_UNSET -from cassandra.cqlengine.statements import BaseCQLStatement, StatementException +from cassandra.cqlengine.statements import BaseCQLStatement class BaseStatementTest(unittest.TestCase): - def test_where_clause_type_checking(self): - """ tests that only assignment clauses can be added to queries """ - stmt = BaseCQLStatement('table', []) - with self.assertRaises(StatementException): - stmt.add_where_clause('x=5') - def test_fetch_size(self): """ tests that fetch_size is correctly set """ stmt = BaseCQLStatement('table', None, fetch_size=1000) diff --git a/tests/integration/cqlengine/statements/test_delete_statement.py b/tests/integration/cqlengine/statements/test_delete_statement.py index 6fb7c5317e..2767c7da33 100644 --- a/tests/integration/cqlengine/statements/test_delete_statement.py +++ b/tests/integration/cqlengine/statements/test_delete_statement.py @@ -13,6 +13,8 @@ # limitations under the License. from unittest import TestCase + +from cassandra.cqlengine.columns import Column from cassandra.cqlengine.statements import DeleteStatement, WhereClause, MapDeleteClause, ConditionalClause from cassandra.cqlengine.operators import * import six @@ -45,13 +47,13 @@ def test_table_rendering(self): def test_where_clause_rendering(self): ds = DeleteStatement('table', None) - ds.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) + ds.add_where(Column(db_field='a'), EqualsOperator(), 'b') self.assertEqual(six.text_type(ds), 'DELETE FROM table WHERE "a" = %(0)s', six.text_type(ds)) def test_context_update(self): ds = DeleteStatement('table', None) ds.add_field(MapDeleteClause('d', {1: 2}, {1: 2, 3: 4})) - ds.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) + ds.add_where(Column(db_field='a'), EqualsOperator(), 'b') ds.update_context_id(7) self.assertEqual(six.text_type(ds), 'DELETE "d"[%(8)s] FROM table WHERE "a" = %(7)s') @@ -59,19 +61,19 @@ def test_context_update(self): def test_context(self): ds = DeleteStatement('table', None) - ds.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) + ds.add_where(Column(db_field='a'), EqualsOperator(), 'b') self.assertEqual(ds.get_context(), {'0': 'b'}) def test_range_deletion_rendering(self): ds = DeleteStatement('table', None) - ds.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) - ds.add_where_clause(WhereClause('created_at', GreaterThanOrEqualOperator(), '0')) - ds.add_where_clause(WhereClause('created_at', LessThanOrEqualOperator(), '10')) + ds.add_where(Column(db_field='a'), EqualsOperator(), 'b') + ds.add_where(Column(db_field='created_at'), GreaterThanOrEqualOperator(), '0') + ds.add_where(Column(db_field='created_at'), LessThanOrEqualOperator(), '10') self.assertEqual(six.text_type(ds), 'DELETE FROM table WHERE "a" = %(0)s AND "created_at" >= %(1)s AND "created_at" <= %(2)s', six.text_type(ds)) ds = DeleteStatement('table', None) - ds.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) - ds.add_where_clause(WhereClause('created_at', InOperator(), ['0', '10', '20'])) + ds.add_where(Column(db_field='a'), EqualsOperator(), 'b') + ds.add_where(Column(db_field='created_at'), InOperator(), ['0', '10', '20']) self.assertEqual(six.text_type(ds), 'DELETE FROM table WHERE "a" = %(0)s AND "created_at" IN %(1)s', six.text_type(ds)) def test_delete_conditional(self): diff --git a/tests/integration/cqlengine/statements/test_insert_statement.py b/tests/integration/cqlengine/statements/test_insert_statement.py index dc6465e247..df2aa0f594 100644 --- a/tests/integration/cqlengine/statements/test_insert_statement.py +++ b/tests/integration/cqlengine/statements/test_insert_statement.py @@ -16,6 +16,7 @@ except ImportError: import unittest # noqa +from cassandra.cqlengine.columns import Column from cassandra.cqlengine.statements import InsertStatement, StatementException, AssignmentClause import six @@ -30,8 +31,8 @@ def test_where_clause_failure(self): def test_statement(self): ist = InsertStatement('table', None) - ist.add_assignment_clause(AssignmentClause('a', 'b')) - ist.add_assignment_clause(AssignmentClause('c', 'd')) + ist.add_assignment(Column(db_field='a'), 'b') + ist.add_assignment(Column(db_field='c'), 'd') self.assertEqual( six.text_type(ist), @@ -40,8 +41,8 @@ def test_statement(self): def test_context_update(self): ist = InsertStatement('table', None) - ist.add_assignment_clause(AssignmentClause('a', 'b')) - ist.add_assignment_clause(AssignmentClause('c', 'd')) + ist.add_assignment(Column(db_field='a'), 'b') + ist.add_assignment(Column(db_field='c'), 'd') ist.update_context_id(4) self.assertEqual( @@ -53,6 +54,6 @@ def test_context_update(self): def test_additional_rendering(self): ist = InsertStatement('table', ttl=60) - ist.add_assignment_clause(AssignmentClause('a', 'b')) - ist.add_assignment_clause(AssignmentClause('c', 'd')) + ist.add_assignment(Column(db_field='a'), 'b') + ist.add_assignment(Column(db_field='c'), 'd') self.assertIn('USING TTL 60', six.text_type(ist)) diff --git a/tests/integration/cqlengine/statements/test_select_statement.py b/tests/integration/cqlengine/statements/test_select_statement.py index 6612333809..6d728acfbc 100644 --- a/tests/integration/cqlengine/statements/test_select_statement.py +++ b/tests/integration/cqlengine/statements/test_select_statement.py @@ -16,6 +16,7 @@ except ImportError: import unittest # noqa +from cassandra.cqlengine.columns import Column from cassandra.cqlengine.statements import SelectStatement, WhereClause from cassandra.cqlengine.operators import * import six @@ -46,19 +47,19 @@ def test_table_rendering(self): def test_where_clause_rendering(self): ss = SelectStatement('table') - ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) + ss.add_where(Column(db_field='a'), EqualsOperator(), 'b') self.assertEqual(six.text_type(ss), 'SELECT * FROM table WHERE "a" = %(0)s', six.text_type(ss)) def test_count(self): ss = SelectStatement('table', count=True, limit=10, order_by='d') - ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) + ss.add_where(Column(db_field='a'), EqualsOperator(), 'b') self.assertEqual(six.text_type(ss), 'SELECT COUNT(*) FROM table WHERE "a" = %(0)s LIMIT 10', six.text_type(ss)) self.assertIn('LIMIT', six.text_type(ss)) self.assertNotIn('ORDER', six.text_type(ss)) def test_distinct(self): ss = SelectStatement('table', distinct_fields=['field2']) - ss.add_where_clause(WhereClause('field1', EqualsOperator(), 'b')) + ss.add_where(Column(db_field='field1'), EqualsOperator(), 'b') self.assertEqual(six.text_type(ss), 'SELECT DISTINCT "field2" FROM table WHERE "field1" = %(0)s', six.text_type(ss)) ss = SelectStatement('table', distinct_fields=['field1', 'field2']) @@ -69,13 +70,13 @@ def test_distinct(self): def test_context(self): ss = SelectStatement('table') - ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) + ss.add_where(Column(db_field='a'), EqualsOperator(), 'b') self.assertEqual(ss.get_context(), {'0': 'b'}) def test_context_id_update(self): """ tests that the right things happen the the context id """ ss = SelectStatement('table') - ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) + ss.add_where(Column(db_field='a'), EqualsOperator(), 'b') self.assertEqual(ss.get_context(), {'0': 'b'}) self.assertEqual(str(ss), 'SELECT * FROM table WHERE "a" = %(0)s') diff --git a/tests/integration/cqlengine/statements/test_update_statement.py b/tests/integration/cqlengine/statements/test_update_statement.py index a8ff865e25..5679457fee 100644 --- a/tests/integration/cqlengine/statements/test_update_statement.py +++ b/tests/integration/cqlengine/statements/test_update_statement.py @@ -16,6 +16,7 @@ except ImportError: import unittest # noqa +from cassandra.cqlengine.columns import Column, Set, List, Text from cassandra.cqlengine.operators import * from cassandra.cqlengine.statements import (UpdateStatement, WhereClause, AssignmentClause, SetUpdateClause, @@ -33,54 +34,54 @@ def test_table_rendering(self): def test_rendering(self): us = UpdateStatement('table') - us.add_assignment_clause(AssignmentClause('a', 'b')) - us.add_assignment_clause(AssignmentClause('c', 'd')) - us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) + us.add_assignment(Column(db_field='a'), 'b') + us.add_assignment(Column(db_field='c'), 'd') + us.add_where(Column(db_field='a'), EqualsOperator(), 'x') self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(0)s, "c" = %(1)s WHERE "a" = %(2)s', six.text_type(us)) def test_context(self): us = UpdateStatement('table') - us.add_assignment_clause(AssignmentClause('a', 'b')) - us.add_assignment_clause(AssignmentClause('c', 'd')) - us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) + us.add_assignment(Column(db_field='a'), 'b') + us.add_assignment(Column(db_field='c'), 'd') + us.add_where(Column(db_field='a'), EqualsOperator(), 'x') self.assertEqual(us.get_context(), {'0': 'b', '1': 'd', '2': 'x'}) def test_context_update(self): us = UpdateStatement('table') - us.add_assignment_clause(AssignmentClause('a', 'b')) - us.add_assignment_clause(AssignmentClause('c', 'd')) - us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) + us.add_assignment(Column(db_field='a'), 'b') + us.add_assignment(Column(db_field='c'), 'd') + us.add_where(Column(db_field='a'), EqualsOperator(), 'x') us.update_context_id(3) self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(4)s, "c" = %(5)s WHERE "a" = %(3)s') self.assertEqual(us.get_context(), {'4': 'b', '5': 'd', '3': 'x'}) def test_additional_rendering(self): us = UpdateStatement('table', ttl=60) - us.add_assignment_clause(AssignmentClause('a', 'b')) - us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) + us.add_assignment(Column(db_field='a'), 'b') + us.add_where(Column(db_field='a'), EqualsOperator(), 'x') self.assertIn('USING TTL 60', six.text_type(us)) def test_update_set_add(self): us = UpdateStatement('table') - us.add_assignment_clause(SetUpdateClause('a', set((1,)), operation='add')) + us.add_update(Set(Text, db_field='a'), set((1,)), 'add') self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" + %(0)s') def test_update_empty_set_add_does_not_assign(self): us = UpdateStatement('table') - us.add_assignment_clause(SetUpdateClause('a', set(), operation='add')) - self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" + %(0)s') + us.add_update(Set(Text, db_field='a'), set(), 'add') + self.assertFalse(us.assignments) def test_update_empty_set_removal_does_not_assign(self): us = UpdateStatement('table') - us.add_assignment_clause(SetUpdateClause('a', set(), operation='remove')) - self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" - %(0)s') + us.add_update(Set(Text, db_field='a'), set(), 'remove') + self.assertFalse(us.assignments) def test_update_list_prepend_with_empty_list(self): us = UpdateStatement('table') - us.add_assignment_clause(ListUpdateClause('a', [], operation='prepend')) - self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(0)s + "a"') + us.add_update(List(Text, db_field='a'), [], 'prepend') + self.assertFalse(us.assignments) def test_update_list_append_with_empty_list(self): us = UpdateStatement('table') - us.add_assignment_clause(ListUpdateClause('a', [], operation='append')) - self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" + %(0)s') + us.add_update(List(Text, db_field='a'), [], 'append') + self.assertFalse(us.assignments) From f239392747006e451b2b95ec2440c439a6cf8ae0 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 1 Apr 2016 14:39:42 -0500 Subject: [PATCH 0476/2005] cqle: rework statement part. key building PYTHON-535 --- cassandra/cqlengine/query.py | 2 +- cassandra/cqlengine/statements.py | 33 ++++++++----------- .../statements/test_insert_statement.py | 11 ++----- 3 files changed, 18 insertions(+), 28 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 613fd112c7..7ac8f362f3 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -26,7 +26,7 @@ GreaterThanOrEqualOperator, LessThanOperator, LessThanOrEqualOperator, ContainsOperator, BaseWhereOperator) from cassandra.cqlengine.statements import (WhereClause, SelectStatement, DeleteStatement, - UpdateStatement, AssignmentClause, InsertStatement, + UpdateStatement, InsertStatement, BaseCQLStatement, MapDeleteClause, ConditionalClause) diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 2156751b2e..1df7ead109 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -13,6 +13,7 @@ # limitations under the License. from datetime import datetime, timedelta +from itertools import ifilter import time import six @@ -481,8 +482,6 @@ def __unicode__(self): class BaseCQLStatement(UnicodeMixin): """ The base cql statement class """ - parition_key_values = None - def __init__(self, table, consistency=None, timestamp=None, where=None, fetch_size=None, conditionals=None): super(BaseCQLStatement, self).__init__() self.table = table @@ -500,24 +499,19 @@ def __init__(self, table, consistency=None, timestamp=None, where=None, fetch_si for conditional in conditionals or []: self.add_conditional_clause(conditional) - def _update_partition_key(self, column, value): - if column.partition_key: - if self.parition_key_values: - self.parition_key_values.append(value) - else: - self.parition_key_values = [value] - # assert part keys are added in order - # this is an optimization based on the way statements are constructed in - # cqlengine.query (columns always iterated in order). If that assumption - # goes away we can preallocate the key values list and insert using - # self.partition_key_values - assert column._partition_key_index == len(self.parition_key_values) - 1 + def _update_part_key_values(self, field_index_map, clauses, parts): + for clause in ifilter(lambda c: c.field in field_index_map, clauses): + parts[field_index_map[clause.field]] = clause.value + + def partition_key_values(self, field_index_map): + parts = [None] * len(field_index_map) + self._update_part_key_values(field_index_map, self.where_clauses, parts) + return parts def add_where(self, column, operator, value, quote_field=True): value = column.to_database(value) clause = WhereClause(column.db_field_name, operator, value, quote_field) self._add_where_clause(clause) - self._update_partition_key(column, value) def _add_where_clause(self, clause): clause.set_context_id(self.context_counter) @@ -682,11 +676,15 @@ def update_context_id(self, i): assignment.set_context_id(self.context_counter) self.context_counter += assignment.get_context_size() + def partition_key_values(self, field_index_map): + parts = super(AssignmentStatement, self).partition_key_values(field_index_map) + self._update_part_key_values(field_index_map, self.assignments, parts) + return parts + def add_assignment(self, column, value): value = column.to_database(value) clause = AssignmentClause(column.db_field_name, value) self._add_assignment_clause(clause) - self._update_partition_key(column, value) def _add_assignment_clause(self, clause): clause.set_context_id(self.context_counter) @@ -724,9 +722,6 @@ def __init__(self, self.if_not_exists = if_not_exists - def add_where_clause(self, clause): - raise StatementException("Cannot add where clauses to insert statements") - def __unicode__(self): qs = ['INSERT INTO {0}'.format(self.table)] diff --git a/tests/integration/cqlengine/statements/test_insert_statement.py b/tests/integration/cqlengine/statements/test_insert_statement.py index df2aa0f594..7cde948a3c 100644 --- a/tests/integration/cqlengine/statements/test_insert_statement.py +++ b/tests/integration/cqlengine/statements/test_insert_statement.py @@ -16,19 +16,14 @@ except ImportError: import unittest # noqa +import six + from cassandra.cqlengine.columns import Column -from cassandra.cqlengine.statements import InsertStatement, StatementException, AssignmentClause +from cassandra.cqlengine.statements import InsertStatement -import six class InsertStatementTests(unittest.TestCase): - def test_where_clause_failure(self): - """ tests that where clauses cannot be added to Insert statements """ - ist = InsertStatement('table', None) - with self.assertRaises(StatementException): - ist.add_where_clause('s') - def test_statement(self): ist = InsertStatement('table', None) ist.add_assignment(Column(db_field='a'), 'b') From 90fdc4149e46aa35ff2fb48ef47cd7bc2f604bd4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 1 Apr 2016 15:00:07 -0500 Subject: [PATCH 0477/2005] remove superfluous conditional --- cassandra/cqlengine/connection.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index a737f29e65..cf9285e30f 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -157,10 +157,7 @@ def execute(query, params=None, consistency_level=None, timeout=NOT_SET): if not session: raise CQLEngineException("It is required to setup() cqlengine before executing queries") - if isinstance(query, Statement): - pass - - elif isinstance(query, BaseCQLStatement): + if isinstance(query, BaseCQLStatement): params = query.get_context() query = SimpleStatement(str(query), consistency_level=consistency_level, fetch_size=query.fetch_size) From 05ba3a10a2c311869ba4d8c52691bed57d60f7d6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 1 Apr 2016 16:04:43 -0500 Subject: [PATCH 0478/2005] fix SimpleStatement composite key packing, refactor packing to common base --- cassandra/query.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/cassandra/query.py b/cassandra/query.py index 01bf726979..8d2654db56 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -233,13 +233,17 @@ def __init__(self, retry_policy=None, consistency_level=None, routing_key=None, if custom_payload is not None: self.custom_payload = custom_payload + def _key_parts_packed(self, parts): + for p in parts: + l = len(p) + yield struct.pack(">H%dsB" % l, l, p, 0) + def _get_routing_key(self): return self._routing_key def _set_routing_key(self, key): if isinstance(key, (list, tuple)): - self._routing_key = b"".join(struct.pack("HsB", len(component), component, 0) - for component in key) + self._routing_key = b"".join(self._key_parts_packed(key)) else: self._routing_key = key @@ -565,13 +569,7 @@ def routing_key(self): if len(routing_indexes) == 1: self._routing_key = self.values[routing_indexes[0]] else: - components = [] - for statement_index in routing_indexes: - val = self.values[statement_index] - l = len(val) - components.append(struct.pack(">H%dsB" % l, l, val, 0)) - - self._routing_key = b"".join(components) + self._routing_key = b"".join(self._key_parts_packed(self.values[i] for i in routing_indexes)) return self._routing_key From 9dce60a745a91d6304c9320abc8a1c4e3ab89be7 Mon Sep 17 00:00:00 2001 From: Nikolay Fominykh Date: Sun, 3 Apr 2016 18:50:05 +0300 Subject: [PATCH 0479/2005] Fix for #Python-533. Secondary index on set field --- cassandra/cqlengine/management.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 19f8e9fa16..a071e0b7c8 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -113,7 +113,8 @@ def _get_index_name_by_column(table, column_name): """ for _, index_metadata in six.iteritems(table.indexes): options = dict(index_metadata.index_options) - if 'target' in options and options['target'] == column_name: + possible_index_values = [column_name, "values(%s)" % column_name] + if 'target' in options and options['target'] in possible_index_values: return index_metadata.name return None From 0f1b554a2b79a4af47eb4ae9748db151e217ff19 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 4 Apr 2016 10:36:28 -0500 Subject: [PATCH 0480/2005] handle single-element routing key as tuple, list --- cassandra/query.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cassandra/query.py b/cassandra/query.py index 8d2654db56..55d0230096 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -243,7 +243,10 @@ def _get_routing_key(self): def _set_routing_key(self, key): if isinstance(key, (list, tuple)): - self._routing_key = b"".join(self._key_parts_packed(key)) + if len(key) == 1: + self._routing_key = key[0] + else: + self._routing_key = b"".join(self._key_parts_packed(key)) else: self._routing_key = key From 2493023f2b393623e9aeeebc823dc968a423d484 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 4 Apr 2016 10:37:45 -0500 Subject: [PATCH 0481/2005] map cql type name to cassandra.cqltypes --- cassandra/cqltypes.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 26d7ebc0ff..23cafb03e3 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -77,6 +77,7 @@ def trim_if_startswith(s, prefix): _casstypes = {} +_cqltypes = {} cql_type_scanner = re.Scanner(( @@ -106,6 +107,8 @@ def __new__(metacls, name, bases, dct): cls = type.__new__(metacls, name, bases, dct) if not name.startswith('_'): _casstypes[name] = cls + if not cls.typename.startswith("'org"): + _cqltypes[cls.typename] = cls return cls From c4c0c839f1ad21811c4f7cfacb3ea2049a7c95e4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 4 Apr 2016 12:39:26 -0500 Subject: [PATCH 0482/2005] fix SimpleDateType serialization from offset int value --- cassandra/cqltypes.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 23cafb03e3..4a2a203286 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -623,6 +623,11 @@ def serialize(val, protocol_version): try: days = val.days_from_epoch except AttributeError: + if isinstance(val, int): + # the DB wants offset int values, but util.Date init takes days from epoch + # here we assume int values are offset, as they would appear in CQL + # short circuit to avoid subtracting just to add offset + return uint32_pack(val) days = util.Date(val).days_from_epoch return uint32_pack(days + SimpleDateType.EPOCH_OFFSET_DAYS) From 7a1fde1a691d4d754b331cb203dda777081585a9 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 4 Apr 2016 12:39:52 -0500 Subject: [PATCH 0483/2005] cqle: attach routing key to mapper statements PYTHON-535 --- cassandra/cqlengine/columns.py | 14 +++++++++- cassandra/cqlengine/models.py | 11 ++++++++ cassandra/cqlengine/named.py | 2 ++ cassandra/cqlengine/query.py | 28 +++++++++++++------ cassandra/cqlengine/statements.py | 4 +-- .../cqlengine/columns/test_value_io.py | 1 - .../cqlengine/query/test_queryset.py | 19 ++++++------- tests/integration/standard/test_routing.py | 2 +- 8 files changed, 58 insertions(+), 23 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index f6f843db90..1ee601373e 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -19,7 +19,7 @@ from uuid import UUID as _UUID from cassandra import util -from cassandra.cqltypes import SimpleDateType +from cassandra.cqltypes import SimpleDateType, _cqltypes, UserType from cassandra.cqlengine import ValidationError from cassandra.cqlengine.functions import get_total_seconds @@ -256,6 +256,10 @@ def _val_is_null(self, val): def sub_types(self): return [] + @property + def cql_type(self): + return _cqltypes[self.db_type] + class Blob(Column): """ @@ -666,6 +670,10 @@ def _freeze_db_type(self): def sub_types(self): return self.types + @property + def cql_type(self): + return _cqltypes[self.__class__.__name__.lower()].apply_parameters([c.cql_type for c in self.types]) + class Tuple(BaseCollectionColumn): """ @@ -877,6 +885,10 @@ def __init__(self, user_type, **kwargs): def sub_types(self): return list(self.user_type._fields.values()) + @property + def cql_type(self): + return UserType.apply_parameters([c.cql_type for c in self.user_type._fields.values()]) + def resolve_udts(col_def, out_list): for col in col_def.sub_types: diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index ff73207523..78d2990190 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -379,6 +379,10 @@ def __str__(self): return '{0} <{1}>'.format(self.__class__.__name__, ', '.join('{0}={1}'.format(k, getattr(self, k)) for k in self._primary_keys.keys())) + @classmethod + def _routing_key_from_values(cls, pk_values, protocol_version): + return cls._key_serializer(pk_values, protocol_version) + @classmethod def _discover_polymorphic_submodels(cls): if not cls._is_polymorphic_base: @@ -867,6 +871,11 @@ def _get_polymorphic_base(bases): partition_keys = OrderedDict(k for k in primary_keys.items() if k[1].partition_key) clustering_keys = OrderedDict(k for k in primary_keys.items() if not k[1].partition_key) + key_cols = [c for c in partition_keys.values()] + partition_key_index = dict((col.db_field_name, col._partition_key_index) for col in key_cols) + key_cql_types = [c.cql_type for c in key_cols] + key_serializer = staticmethod(lambda parts, proto_version: [t.to_binary(p, proto_version) for t, p in zip(key_cql_types, parts)]) + # setup partition key shortcut if len(partition_keys) == 0: if not is_abstract: @@ -910,6 +919,8 @@ def _get_polymorphic_base(bases): attrs['_dynamic_columns'] = {} attrs['_partition_keys'] = partition_keys + attrs['_partition_key_index'] = partition_key_index + attrs['_key_serializer'] = key_serializer attrs['_clustering_keys'] = clustering_keys attrs['_has_counter'] = len(counter_columns) > 0 diff --git a/cassandra/cqlengine/named.py b/cassandra/cqlengine/named.py index 90a0d8fdf2..07b4c50b61 100644 --- a/cassandra/cqlengine/named.py +++ b/cassandra/cqlengine/named.py @@ -84,6 +84,8 @@ class NamedTable(object): __partition_keys = None + _partition_key_index = None + class DoesNotExist(_DoesNotExist): pass diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 7ac8f362f3..f2856bf11e 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -19,6 +19,7 @@ import six from warnings import warn +from cassandra.query import SimpleStatement from cassandra.cqlengine import columns, CQLEngineException, ValidationError, UnicodeMixin from cassandra.cqlengine import connection from cassandra.cqlengine.functions import Token, BaseQueryFunction, QueryValue @@ -310,11 +311,11 @@ def __init__(self, model): def column_family_name(self): return self.model.column_family_name() - def _execute(self, q): + def _execute(self, statement): if self._batch: - return self._batch.add_query(q) + return self._batch.add_query(statement) else: - result = connection.execute(q, consistency_level=self._consistency, timeout=self._timeout) + result = _execute_statement(self.model, statement, self._consistency, None, self._timeout) if self._if_not_exists or self._if_exists or self._conditional: check_applied(result) return result @@ -1205,14 +1206,14 @@ def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, self._conditional = conditional self._timeout = timeout - def _execute(self, q): + def _execute(self, statement): if self._batch: - return self._batch.add_query(q) + return self._batch.add_query(statement) else: - tmp = connection.execute(q, consistency_level=self._consistency, timeout=self._timeout) + results = _execute_statement(self.model, statement, self._consistency, None, self._timeout) if self._if_not_exists or self._if_exists or self._conditional: - check_applied(tmp) - return tmp + check_applied(results) + return results def batch(self, batch_obj): if batch_obj is not None and not isinstance(batch_obj, BatchQuery): @@ -1338,3 +1339,14 @@ def delete(self): continue ds.add_where(col, EqualsOperator(), val) self._execute(ds) + + +def _execute_statement(model, statement, consistency_level, fetch_size, timeout): + params = statement.get_context() + s = SimpleStatement(str(statement), consistency_level=consistency_level, fetch_size=fetch_size) + if model._partition_key_index: + key_values = statement.partition_key_values(model._partition_key_index) + if not any(v is None for v in key_values): + parts = model._routing_key_from_values(key_values, connection.get_cluster().protocol_version) + s.routing_key = parts + return connection.execute(s, params, timeout=timeout) diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 1df7ead109..21044f8dd2 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -21,7 +21,7 @@ from cassandra.cqlengine import columns from cassandra.cqlengine import UnicodeMixin from cassandra.cqlengine.functions import QueryValue -from cassandra.cqlengine.operators import BaseWhereOperator, InOperator +from cassandra.cqlengine.operators import BaseWhereOperator, InOperator, EqualsOperator class StatementException(Exception): @@ -505,7 +505,7 @@ def _update_part_key_values(self, field_index_map, clauses, parts): def partition_key_values(self, field_index_map): parts = [None] * len(field_index_map) - self._update_part_key_values(field_index_map, self.where_clauses, parts) + self._update_part_key_values(field_index_map, (w for w in self.where_clauses if isinstance(w, EqualsOperator)), parts) return parts def add_where(self, column, operator, value, quote_field=True): diff --git a/tests/integration/cqlengine/columns/test_value_io.py b/tests/integration/cqlengine/columns/test_value_io.py index 455f80c1e4..42dc2420e7 100644 --- a/tests/integration/cqlengine/columns/test_value_io.py +++ b/tests/integration/cqlengine/columns/test_value_io.py @@ -61,7 +61,6 @@ def setUpClass(cls): # create a table with the given column class IOTestModel(Model): - table_name = cls.column.db_type + "_io_test_model_{0}".format(uuid4().hex[:8]) pkey = cls.column(primary_key=True) data = cls.column() diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 0097e60490..a7364f2afc 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -22,7 +22,6 @@ import time from uuid import uuid1, uuid4 import uuid -import sys from cassandra.cluster import Session from cassandra import InvalidRequest @@ -457,8 +456,8 @@ class NonEqualityFilteringModel(Model): NonEqualityFilteringModel.create(sequence_id=3, example_type=0, created_at=datetime.now()) NonEqualityFilteringModel.create(sequence_id=5, example_type=1, created_at=datetime.now()) - qA = NonEqualityFilteringModel.objects(NonEqualityFilteringModel.sequence_id > 3).allow_filtering() - num = qA.count() + qa = NonEqualityFilteringModel.objects(NonEqualityFilteringModel.sequence_id > 3).allow_filtering() + num = qa.count() assert num == 1, num @@ -473,7 +472,7 @@ def test_distinct_with_parameter(self): self.assertEqual(len(q), 3) def test_distinct_with_filter(self): - q = TestModel.objects.distinct(['test_id']).filter(test_id__in=[1,2]) + q = TestModel.objects.distinct(['test_id']).filter(test_id__in=[1, 2]) self.assertEqual(len(q), 2) def test_distinct_with_non_partition(self): @@ -510,19 +509,19 @@ def test_order_by_success_case(self): def test_ordering_by_non_second_primary_keys_fail(self): # kwarg filtering with self.assertRaises(query.QueryException): - q = TestModel.objects(test_id=0).order_by('test_id') + TestModel.objects(test_id=0).order_by('test_id') # kwarg filtering with self.assertRaises(query.QueryException): - q = TestModel.objects(TestModel.test_id == 0).order_by('test_id') + TestModel.objects(TestModel.test_id == 0).order_by('test_id') def test_ordering_by_non_primary_keys_fails(self): with self.assertRaises(query.QueryException): - q = TestModel.objects(test_id=0).order_by('description') + TestModel.objects(test_id=0).order_by('description') def test_ordering_on_indexed_columns_fails(self): with self.assertRaises(query.QueryException): - q = IndexedTestModel.objects(test_id=0).order_by('attempt_id') + IndexedTestModel.objects(test_id=0).order_by('attempt_id') def test_ordering_on_multiple_clustering_columns(self): TestMultiClusteringModel.create(one=1, two=1, three=4) @@ -673,7 +672,7 @@ def test_range_deletion(self): TestMultiClusteringModel.objects(one=1, two__gt=3, two__lt=5).delete() self.assertEqual(5, len(TestMultiClusteringModel.objects.all())) - TestMultiClusteringModel.objects(one=1, two__in=[8,9]).delete() + TestMultiClusteringModel.objects(one=1, two__in=[8, 9]).delete() self.assertEqual(3, len(TestMultiClusteringModel.objects.all())) TestMultiClusteringModel.objects(one__in=[1], two__gte=0).delete() @@ -878,7 +877,7 @@ def test_values_list(self): class TestObjectsProperty(BaseQuerySetUsage): def test_objects_property_returns_fresh_queryset(self): assert TestModel.objects._result_cache is None - len(TestModel.objects) # evaluate queryset + len(TestModel.objects) # evaluate queryset assert TestModel.objects._result_cache is None diff --git a/tests/integration/standard/test_routing.py b/tests/integration/standard/test_routing.py index 1ad0a2fa5d..b22184a044 100644 --- a/tests/integration/standard/test_routing.py +++ b/tests/integration/standard/test_routing.py @@ -74,7 +74,7 @@ def create_prepare(self, key_types): select = s.prepare("SELECT token(%s) FROM %s WHERE %s" % (primary_key, table_name, where_clause)) - return (insert, select) + return insert, select def test_singular_key(self): # string From b3e9ae308bd1f326e972c876ca66285cd2b8a653 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 4 Apr 2016 13:36:04 -0500 Subject: [PATCH 0484/2005] cqle: fix UserDefinteType cql_type PYTHON-535 --- cassandra/cqlengine/columns.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 1ee601373e..14b70915a7 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -887,7 +887,9 @@ def sub_types(self): @property def cql_type(self): - return UserType.apply_parameters([c.cql_type for c in self.user_type._fields.values()]) + return UserType.make_udt_class(keyspace='', udt_name=self.user_type.type_name(), + field_names=[c.db_field_name for c in self.user_type._fields.values()], + field_types=[c.cql_type for c in self.user_type._fields.values()]) def resolve_udts(col_def, out_list): From 13cbfe7b126094e3981b98899fa650e543a0f09b Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 4 Apr 2016 13:44:47 -0500 Subject: [PATCH 0485/2005] Reverting to use local ccm install for integration tests --- build.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/build.yaml b/build.yaml index 1d3c93915e..ee7dd4d94f 100644 --- a/build.yaml +++ b/build.yaml @@ -20,6 +20,7 @@ build: export JAVA_HOME=$CCM_JAVA_HOME export PATH=$JAVA_HOME/bin:$PATH + sudo python /home/jenkins/ccm/setup.py install # Install dependencies if [[ $EVENT_LOOP_MANAGER == 'libev' ]]; then sudo apt-get install -y libev4 libev-dev From 8b09c44126a9cad341d7224f6599f50b7949f2c5 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 4 Apr 2016 13:48:45 -0500 Subject: [PATCH 0486/2005] cqle: removed unused consistency attr from statements --- cassandra/cqlengine/query.py | 2 +- cassandra/cqlengine/statements.py | 14 ++------------ 2 files changed, 3 insertions(+), 13 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index f2856bf11e..88b9a37cd9 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -1344,7 +1344,7 @@ def delete(self): def _execute_statement(model, statement, consistency_level, fetch_size, timeout): params = statement.get_context() s = SimpleStatement(str(statement), consistency_level=consistency_level, fetch_size=fetch_size) - if model._partition_key_index: + if model._partition_key_index: # not available on NamedTable key_values = statement.partition_key_values(model._partition_key_index) if not any(v is None for v in key_values): parts = model._routing_key_from_values(key_values, connection.get_cluster().protocol_version) diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 21044f8dd2..503ebf9009 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -482,10 +482,9 @@ def __unicode__(self): class BaseCQLStatement(UnicodeMixin): """ The base cql statement class """ - def __init__(self, table, consistency=None, timestamp=None, where=None, fetch_size=None, conditionals=None): + def __init__(self, table, timestamp=None, where=None, fetch_size=None, conditionals=None): super(BaseCQLStatement, self).__init__() self.table = table - self.consistency = consistency self.context_id = 0 self.context_counter = self.context_id self.timestamp = timestamp @@ -591,7 +590,6 @@ def __init__(self, table, fields=None, count=False, - consistency=None, where=None, order_by=None, limit=None, @@ -605,7 +603,6 @@ def __init__(self, """ super(SelectStatement, self).__init__( table, - consistency=consistency, where=where, fetch_size=fetch_size ) @@ -651,14 +648,12 @@ class AssignmentStatement(BaseCQLStatement): def __init__(self, table, assignments=None, - consistency=None, where=None, ttl=None, timestamp=None, conditionals=None): super(AssignmentStatement, self).__init__( table, - consistency=consistency, where=where, conditionals=conditionals ) @@ -708,14 +703,12 @@ class InsertStatement(AssignmentStatement): def __init__(self, table, assignments=None, - consistency=None, where=None, ttl=None, timestamp=None, if_not_exists=False): super(InsertStatement, self).__init__(table, assignments=assignments, - consistency=consistency, where=where, ttl=ttl, timestamp=timestamp) @@ -751,7 +744,6 @@ class UpdateStatement(AssignmentStatement): def __init__(self, table, assignments=None, - consistency=None, where=None, ttl=None, timestamp=None, @@ -759,7 +751,6 @@ def __init__(self, if_exists=False): super(UpdateStatement, self). __init__(table, assignments=assignments, - consistency=consistency, where=where, ttl=ttl, timestamp=timestamp, @@ -825,10 +816,9 @@ def add_update(self, column, value, operation=None, previous=None): class DeleteStatement(BaseCQLStatement): """ a cql delete statement """ - def __init__(self, table, fields=None, consistency=None, where=None, timestamp=None, conditionals=None, if_exists=False): + def __init__(self, table, fields=None, where=None, timestamp=None, conditionals=None, if_exists=False): super(DeleteStatement, self).__init__( table, - consistency=consistency, where=where, timestamp=timestamp, conditionals=conditionals From a3012628a7b06071095c73d334337f34f4c93246 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 4 Apr 2016 13:59:18 -0500 Subject: [PATCH 0487/2005] remove unused fetch_size execute parameter in favor of statement attr PYTHON-535 --- cassandra/cqlengine/connection.py | 6 +++--- cassandra/cqlengine/query.py | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index cf9285e30f..8edcc76d54 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -157,16 +157,16 @@ def execute(query, params=None, consistency_level=None, timeout=NOT_SET): if not session: raise CQLEngineException("It is required to setup() cqlengine before executing queries") - if isinstance(query, BaseCQLStatement): + if isinstance(query, SimpleStatement): + pass # + elif isinstance(query, BaseCQLStatement): params = query.get_context() query = SimpleStatement(str(query), consistency_level=consistency_level, fetch_size=query.fetch_size) - elif isinstance(query, six.string_types): query = SimpleStatement(query, consistency_level=consistency_level) log.debug(query.query_string) - params = params or {} result = session.execute(query, params, timeout=timeout) return result diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 88b9a37cd9..a339f09c75 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -315,7 +315,7 @@ def _execute(self, statement): if self._batch: return self._batch.add_query(statement) else: - result = _execute_statement(self.model, statement, self._consistency, None, self._timeout) + result = _execute_statement(self.model, statement, self._consistency, self._timeout) if self._if_not_exists or self._if_exists or self._conditional: check_applied(result) return result @@ -1210,7 +1210,7 @@ def _execute(self, statement): if self._batch: return self._batch.add_query(statement) else: - results = _execute_statement(self.model, statement, self._consistency, None, self._timeout) + results = _execute_statement(self.model, statement, self._consistency, self._timeout) if self._if_not_exists or self._if_exists or self._conditional: check_applied(results) return results @@ -1341,9 +1341,9 @@ def delete(self): self._execute(ds) -def _execute_statement(model, statement, consistency_level, fetch_size, timeout): +def _execute_statement(model, statement, consistency_level, timeout): params = statement.get_context() - s = SimpleStatement(str(statement), consistency_level=consistency_level, fetch_size=fetch_size) + s = SimpleStatement(str(statement), consistency_level=consistency_level, fetch_size=statement.fetch_size) if model._partition_key_index: # not available on NamedTable key_values = statement.partition_key_values(model._partition_key_index) if not any(v is None for v in key_values): From 5c09da62faad824cc4f36638af3b20ddf7552b74 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 4 Apr 2016 14:56:54 -0500 Subject: [PATCH 0488/2005] cqle: model attribute to disable routing key computation PYTHON-535 --- cassandra/cqlengine/models.py | 19 +++++++++++++++---- cassandra/cqlengine/query.py | 3 ++- cassandra/cqlengine/statements.py | 2 +- docs/api/cassandra/cqlengine/models.rst | 2 ++ 4 files changed, 20 insertions(+), 6 deletions(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 78d2990190..e940955ed4 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -335,6 +335,8 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): __options__ = None + __compute_routing_key__ = True + # the queryset class used for this class __queryset__ = query.ModelQuerySet __dmlquery__ = query.DMLQuery @@ -871,10 +873,14 @@ def _get_polymorphic_base(bases): partition_keys = OrderedDict(k for k in primary_keys.items() if k[1].partition_key) clustering_keys = OrderedDict(k for k in primary_keys.items() if not k[1].partition_key) - key_cols = [c for c in partition_keys.values()] - partition_key_index = dict((col.db_field_name, col._partition_key_index) for col in key_cols) - key_cql_types = [c.cql_type for c in key_cols] - key_serializer = staticmethod(lambda parts, proto_version: [t.to_binary(p, proto_version) for t, p in zip(key_cql_types, parts)]) + if attrs.get('__compute_routing_key__', True): + key_cols = [c for c in partition_keys.values()] + partition_key_index = dict((col.db_field_name, col._partition_key_index) for col in key_cols) + key_cql_types = [c.cql_type for c in key_cols] + key_serializer = staticmethod(lambda parts, proto_version: [t.to_binary(p, proto_version) for t, p in zip(key_cql_types, parts)]) + else: + partition_key_index = {} + key_serializer = staticmethod(lambda parts, proto_version: None) # setup partition key shortcut if len(partition_keys) == 0: @@ -998,3 +1004,8 @@ class Model(BaseModel): """ *Optional* Specifies a value for the discriminator column when using model inheritance. """ + + __compute_routing_key__ = True + """ + *Optional* Setting False disables computing the routing key for TokenAwareRouting + """ diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index a339f09c75..1a2e52dbb1 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -1344,9 +1344,10 @@ def delete(self): def _execute_statement(model, statement, consistency_level, timeout): params = statement.get_context() s = SimpleStatement(str(statement), consistency_level=consistency_level, fetch_size=statement.fetch_size) - if model._partition_key_index: # not available on NamedTable + if model._partition_key_index: key_values = statement.partition_key_values(model._partition_key_index) if not any(v is None for v in key_values): parts = model._routing_key_from_values(key_values, connection.get_cluster().protocol_version) s.routing_key = parts + s.keyspace = model._get_keyspace() return connection.execute(s, params, timeout=timeout) diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 503ebf9009..28f4031635 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -504,7 +504,7 @@ def _update_part_key_values(self, field_index_map, clauses, parts): def partition_key_values(self, field_index_map): parts = [None] * len(field_index_map) - self._update_part_key_values(field_index_map, (w for w in self.where_clauses if isinstance(w, EqualsOperator)), parts) + self._update_part_key_values(field_index_map, (w for w in self.where_clauses if isinstance(w.operator, EqualsOperator)), parts) return parts def add_where(self, column, operator, value, quote_field=True): diff --git a/docs/api/cassandra/cqlengine/models.rst b/docs/api/cassandra/cqlengine/models.rst index c4529b3988..d6f3391974 100644 --- a/docs/api/cassandra/cqlengine/models.rst +++ b/docs/api/cassandra/cqlengine/models.rst @@ -79,6 +79,8 @@ Model 'tombstone_compaction_interval': '86400'}, 'gc_grace_seconds': '0'} + .. autoattribute:: __compute_routing_key__ + The base methods allow creating, storing, and querying modeled objects. From e62407bd1a4fa1aa9ad9c86ed21eb6a15b1a1771 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 4 Apr 2016 15:28:45 -0500 Subject: [PATCH 0489/2005] cqle: protect col name when looking for index meta PYTHON-533 --- cassandra/cqlengine/management.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index a071e0b7c8..6978964ad0 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -111,14 +111,13 @@ def _get_index_name_by_column(table, column_name): """ Find the index name for a given table and column. """ - for _, index_metadata in six.iteritems(table.indexes): + protected_name = metadata.protect_name(column_name) + possible_index_values = [protected_name, "values(%s)" % protected_name] + for index_metadata in table.indexes.values(): options = dict(index_metadata.index_options) - possible_index_values = [column_name, "values(%s)" % column_name] - if 'target' in options and options['target'] in possible_index_values: + if options.get('target') in possible_index_values: return index_metadata.name - return None - def sync_table(model): """ From 6c452f4104647fb78fa334277e6032463df13cf9 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 4 Apr 2016 15:37:06 -0500 Subject: [PATCH 0490/2005] Fix part key to exclude in, contains operators PYTHON-535 --- cassandra/cqlengine/statements.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 28f4031635..7ca74a850b 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -504,7 +504,7 @@ def _update_part_key_values(self, field_index_map, clauses, parts): def partition_key_values(self, field_index_map): parts = [None] * len(field_index_map) - self._update_part_key_values(field_index_map, (w for w in self.where_clauses if isinstance(w.operator, EqualsOperator)), parts) + self._update_part_key_values(field_index_map, (w for w in self.where_clauses if w.operator.__class__ == EqualsOperator), parts) return parts def add_where(self, column, operator, value, quote_field=True): From 19fb14bf15441411b08b1367c087f9a473e164c4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 4 Apr 2016 16:04:41 -0500 Subject: [PATCH 0491/2005] cqle: support more comparisons in LWT conditional also fixes an issue where iff did not use the column's db_field PYTHON-528 --- cassandra/cqlengine/operators.py | 5 +++++ cassandra/cqlengine/query.py | 7 +++++-- cassandra/cqlengine/statements.py | 2 -- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/cassandra/cqlengine/operators.py b/cassandra/cqlengine/operators.py index 0aa29d94ae..c4f9828f5b 100644 --- a/cassandra/cqlengine/operators.py +++ b/cassandra/cqlengine/operators.py @@ -60,6 +60,11 @@ class EqualsOperator(BaseWhereOperator): cql_symbol = '=' +class NotEqualsOperator(BaseWhereOperator): + symbol = 'NE' + cql_symbol = '!=' + + class InOperator(EqualsOperator): symbol = 'IN' cql_symbol = 'IN' diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index f9e1a75d42..3f9414a2c1 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -560,10 +560,11 @@ def iff(self, *args, **kwargs): raise QueryException('{0} is not a valid query operator'.format(operator)) clone._conditional.append(operator) - for col_name, val in kwargs.items(): + for arg, val in kwargs.items(): if isinstance(val, Token): raise QueryException("Token() values are not valid in conditionals") + col_name, col_op = self._parse_filter_arg(arg) try: column = self.model._get_column(col_name) except KeyError: @@ -574,7 +575,9 @@ def iff(self, *args, **kwargs): else: query_val = column.to_database(val) - clone._conditional.append(ConditionalClause(col_name, query_val)) + operator_class = BaseWhereOperator.get_operator(col_op or 'EQ') + operator = operator_class() + clone._conditional.append(WhereClause(column.db_field_name, operator, query_val)) return clone diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index f5f626a49b..aaa6d5bfaa 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -527,8 +527,6 @@ def add_conditional_clause(self, clause): :param clause: The clause that will be added to the iff statement :type clause: ConditionalClause """ - if not isinstance(clause, ConditionalClause): - raise StatementException('only instances of AssignmentClause can be added to statements') clause.set_context_id(self.context_counter) self.context_counter += clause.get_context_size() self.conditionals.append(clause) From c52cdba0e6f926dee08f5a169aba1a7326dd8a01 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 5 Apr 2016 09:59:59 -0500 Subject: [PATCH 0492/2005] Fixing bug with test_queryset test collision --- tests/integration/cqlengine/query/test_queryset.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 3c87d26caf..c325f7646c 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -978,15 +978,13 @@ class DBFieldModelMixed2(Model): class TestModelQueryWithDBField(BaseCassEngTestCase): - @classmethod - def setUpClass(cls): + def setUp(cls): super(TestModelQueryWithDBField, cls).setUpClass() cls.model_list = [DBFieldModel, DBFieldModelMixed1, DBFieldModelMixed2] for model in cls.model_list: sync_table(model) - @classmethod - def tearDownClass(cls): + def tearDown(cls): super(TestModelQueryWithDBField, cls).tearDownClass() for model in cls.model_list: drop_table(model) @@ -1120,8 +1118,6 @@ def test_db_field_names_used(self): self.assertTrue(value not in str(b2.queries[0])) - - class TestModelSmall(Model): test_id = columns.Integer(primary_key=True) From 78d3cc462560412e45232cf261d03ae1fc618644 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 5 Apr 2016 10:48:18 -0500 Subject: [PATCH 0493/2005] 3.2 changelog update --- CHANGELOG.rst | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index d002af7774..916dda5343 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,10 +1,29 @@ +3.2.0 +===== + +Features +-------- +* cqlengine: Warn on sync_schema type mismatch (PYTHON-260) +* cqlengine: Automatically defer fields with the '=' operator (and immutable values) in select queries (PYTHON-520) +* cqlengine: support non-equal conditions for LWT (PYTHON-528) +* cqlengine: sync_table should validate the primary key composition (PYTHON-532) +* cqlengine: token-aware routing for mapper statements (PYTHON-535) + +Bug Fixes +--------- +* Deleting a column in a lightweight transaction raises a SyntaxException #325 (PYTHON-249) +* cqlengine: make Token function works with named tables/columns #86 (PYTHON-272) +* comparing models with datetime fields fail #79 (PYTHON-273) +* cython date deserializer integer math should be aligned with CPython (PYTHON-480) +* db_field is not always respected with UpdateStatement (PYTHON-530) +* Sync_table fails on column.Set with secondary index (PYTHON-533) + 3.1.1 ===== March 14, 2016 Bug Fixes --------- - * cqlengine: Fix performance issue related to additional "COUNT" queries (PYTHON-522) 3.1.0 From 2eec974e104ea7ebcd06f2189cde012898c350e3 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 1 Apr 2016 17:14:56 -0500 Subject: [PATCH 0494/2005] PYTHON-532 Added tests for primary key validation --- .../cqlengine/management/test_management.py | 53 ++++++++++++++++++- 1 file changed, 52 insertions(+), 1 deletion(-) diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index e4b35e2136..e2ef180054 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -78,9 +78,45 @@ class CapitalizedKeyModel(Model): class PrimaryKeysOnlyModel(Model): + __table_name__ = "primary_keys_only" __options__ = {'compaction': {'class': 'LeveledCompactionStrategy'}} - first_ey = columns.Integer(primary_key=True) + first_key = columns.Integer(primary_key=True) + second_key = columns.Integer(primary_key=True) + + +class PrimaryKeysModelChanged(Model): + + __table_name__ = "primary_keys_only" + __options__ = {'compaction': {'class': 'LeveledCompactionStrategy'}} + + new_first_key = columns.Integer(primary_key=True) + second_key = columns.Integer(primary_key=True) + + +class PrimaryKeysModelTypeChanged(Model): + + __table_name__ = "primary_keys_only" + __options__ = {'compaction': {'class': 'LeveledCompactionStrategy'}} + + first_key = columns.Float(primary_key=True) + second_key = columns.Integer(primary_key=True) + + +class PrimaryKeysRemovedPk(Model): + + __table_name__ = "primary_keys_only" + __options__ = {'compaction': {'class': 'LeveledCompactionStrategy'}} + + second_key = columns.Integer(primary_key=True) + + +class PrimaryKeysAddedClusteringKey(Model): + + __table_name__ = "primary_keys_only" + __options__ = {'compaction': {'class': 'LeveledCompactionStrategy'}} + + new_first_key = columns.Float(primary_key=True) second_key = columns.Integer(primary_key=True) @@ -242,6 +278,21 @@ def test_sync_table_works_with_primary_keys_only_tables(self): table_meta = management._get_table_metadata(PrimaryKeysOnlyModel) self.assertIn('SizeTieredCompactionStrategy', table_meta.as_cql_query()) + def test_primary_key_validation(self): + """ + Test to ensure that changes to primary keys throw CQLEngineExceptions + + @since 3.2 + @jira_ticket PYTHON-532 + @expected_result Attempts to modify primary keys throw an exception + + @test_category object_mapper + """ + sync_table(PrimaryKeysOnlyModel) + self.assertRaises(CQLEngineException, sync_table, PrimaryKeysModelChanged) + self.assertRaises(CQLEngineException, sync_table, PrimaryKeysAddedClusteringKey) + self.assertRaises(CQLEngineException, sync_table, PrimaryKeysRemovedPk) + class IndexModel(Model): From 62434e5f9d2caf6d8bca6003f69ad112ba2c33c8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 5 Apr 2016 15:53:16 -0500 Subject: [PATCH 0495/2005] no ifilter in py3 --- cassandra/cqlengine/statements.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 75fe7faf0e..b923e38d34 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -13,9 +13,9 @@ # limitations under the License. from datetime import datetime, timedelta -from itertools import ifilter import time import six +from six.moves import filter from cassandra.query import FETCH_SIZE_UNSET from cassandra.cqlengine import columns @@ -499,7 +499,7 @@ def __init__(self, table, timestamp=None, where=None, fetch_size=None, condition self.add_conditional_clause(conditional) def _update_part_key_values(self, field_index_map, clauses, parts): - for clause in ifilter(lambda c: c.field in field_index_map, clauses): + for clause in filter(lambda c: c.field in field_index_map, clauses): parts[field_index_map[clause.field]] = clause.value def partition_key_values(self, field_index_map): From d9cb5419f1b02ddbd23c43da0cf04507ffb04ac1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 6 Apr 2016 09:26:09 -0500 Subject: [PATCH 0496/2005] richer error for ResponseFuture timeout callback PYTHON-444 --- cassandra/cluster.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index d8c1026e90..ce87205333 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2717,7 +2717,16 @@ def _cancel_timer(self): self._timer.cancel() def _on_timeout(self): - self._set_final_exception(OperationTimedOut(self._errors, self._current_host)) + errors = self._errors + if not errors: + if self.is_schema_agreed: + errors = {self._current_host.address: "Client request timeout. See Session.execute[_async](timeout)"} + else: + connection = getattr(self.session.cluster.control_connection, '_connection') + host = connection.host if connection else 'unknown' + errors = {host: "Request timed out while waiting for schema agreement. See Session.execute[_async](timeout) and Cluster.max_schema_agreement_wait."} + + self._set_final_exception(OperationTimedOut(errors, self._current_host)) def _make_query_plan(self): # convert the list/generator/etc to an iterator so that subsequent From ced919a32afaec2178767d116ccb5b5127830bd2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 6 Apr 2016 13:56:30 -0500 Subject: [PATCH 0497/2005] DNS resolve contact points removes duplicate hosts, also expands A records PYTHON-103 PYTHON-415 --- cassandra/cluster.py | 8 ++++++-- cassandra/policies.py | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index d8c1026e90..c1773267ad 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -532,6 +532,10 @@ def __init__(self, self.contact_points = contact_points self.port = port + + self.contact_points_resolved = [endpoint[4][0] for a in self.contact_points + for endpoint in socket.getaddrinfo(a, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM)] + self.compression = compression self.protocol_version = protocol_version self.auth_provider = auth_provider @@ -830,7 +834,7 @@ def connect(self, keyspace=None): self.contact_points, self.protocol_version) self.connection_class.initialize_reactor() atexit.register(partial(_shutdown_cluster, self)) - for address in self.contact_points: + for address in self.contact_points_resolved: host, new = self.add_host(address, signal=False) if new: host.set_up() @@ -2320,7 +2324,7 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, if old_host.address != connection.host and old_host.address not in found_hosts: should_rebuild_token_map = True if old_host.address not in self._cluster.contact_points: - log.debug("[control connection] Found host that has been removed: %r", old_host) + log.debug("[control connection] Removing host not found in peers metadata: %r", old_host) self._cluster.remove_host(old_host) log.debug("[control connection] Finished fetching ring info") diff --git a/cassandra/policies.py b/cassandra/policies.py index 595717ca5a..48c850db1c 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -235,7 +235,7 @@ def populate(self, cluster, hosts): self._dc_live_hosts[dc] = tuple(set(dc_hosts)) if not self.local_dc: - self._contact_points = cluster.contact_points + self._contact_points = cluster.contact_points_resolved self._position = randint(0, len(hosts) - 1) if hosts else 0 From e00b150ad7da935782a69cfe8f5eff02f10f1121 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 6 Apr 2016 15:20:33 -0500 Subject: [PATCH 0498/2005] check for no hosts on getaddrinfo not sure how this can happen without raising something else, but socket.create_connection checks it --- cassandra/connection.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cassandra/connection.py b/cassandra/connection.py index 0624ad1364..a762346155 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -125,6 +125,7 @@ def __str__(self): NONBLOCKING = (errno.EAGAIN, errno.EWOULDBLOCK) + class ConnectionException(Exception): """ An unrecoverable error was hit when attempting to use a connection, @@ -319,6 +320,8 @@ def factory(cls, host, timeout, *args, **kwargs): def _connect_socket(self): sockerr = None addresses = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM) + if not addresses: + raise ConnectionException("getaddrinfo returned empty list for %s" % (self.host,)) for (af, socktype, proto, canonname, sockaddr) in addresses: try: self._socket = self._socket_impl.socket(af, socktype, proto) From cdfb03e7b72e05b1ce6a7998436fb23fa1e2b692 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 6 Apr 2016 15:34:07 -0500 Subject: [PATCH 0499/2005] reject None in contact_points --- cassandra/cluster.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index c1773267ad..ef76ffca20 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -529,6 +529,8 @@ def __init__(self, if isinstance(contact_points, six.string_types): raise TypeError("contact_points should not be a string, it should be a sequence (e.g. list) of strings") + if None in contact_points: + raise ValueError("contact_points should not contain None (it can resolve to localhost)") self.contact_points = contact_points self.port = port From d6e20b4b3f73e8fbe944f139d66939a6b4f0b61c Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 6 Apr 2016 16:07:31 -0500 Subject: [PATCH 0500/2005] PYTHON-533 added tests for container types and indices --- .../cqlengine/management/test_management.py | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index e2ef180054..0a11a554df 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -366,6 +366,14 @@ def test_sync_warnings(self): logger.removeHandler(mock_handler) +class TestIndexSetModel(Model): + partition = columns.UUID(primary_key=True) + int_set = columns.Set(columns.Integer, index=True) + int_list = columns.List(columns.Integer, index=True) + text_map = columns.Map(columns.Text, columns.DateTime, index=True) + mixed_tuple = columns.Tuple(columns.Text, columns.Integer, columns.Text, index=True) + + class IndexTests(BaseCassEngTestCase): def setUp(self): @@ -412,6 +420,23 @@ def test_sync_index_case_sensitive(self): table_meta = management._get_table_metadata(IndexCaseSensitiveModel) self.assertIsNotNone(management._get_index_name_by_column(table_meta, 'second_key')) + def test_sync_indexed_set(self): + """ + Tests that models that have container types with indices can be synced. + + @since 3.2 + @jira_ticket PYTHON-533 + @expected_result table_sync should complete without a server error. + + @test_category object_mapper + """ + sync_table(TestIndexSetModel) + table_meta = management._get_table_metadata(TestIndexSetModel) + self.assertIsNotNone(management._get_index_name_by_column(table_meta, 'int_set')) + self.assertIsNotNone(management._get_index_name_by_column(table_meta, 'int_list')) + self.assertIsNotNone(management._get_index_name_by_column(table_meta, 'text_map')) + self.assertIsNotNone(management._get_index_name_by_column(table_meta, 'mixed_tuple')) + class NonModelFailureTest(BaseCassEngTestCase): class FakeModel(object): From 0574318f0ca3fd46b25b6cb0648b1d2f9e8693c3 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 7 Apr 2016 13:19:50 -0500 Subject: [PATCH 0501/2005] PYTHON-528 tests, Fix for set index test --- .../cqlengine/management/test_management.py | 1 + .../operators/test_where_operators.py | 3 + .../statements/test_delete_statement.py | 4 ++ .../statements/test_update_statement.py | 3 + .../cqlengine/test_lwt_conditional.py | 56 +++++++++++++++++++ 5 files changed, 67 insertions(+) diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 0a11a554df..f8b3f70316 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -420,6 +420,7 @@ def test_sync_index_case_sensitive(self): table_meta = management._get_table_metadata(IndexCaseSensitiveModel) self.assertIsNotNone(management._get_index_name_by_column(table_meta, 'second_key')) + @greaterthancass20 def test_sync_indexed_set(self): """ Tests that models that have container types with indices can be synced. diff --git a/tests/integration/cqlengine/operators/test_where_operators.py b/tests/integration/cqlengine/operators/test_where_operators.py index 72ff2f6263..f297d2482a 100644 --- a/tests/integration/cqlengine/operators/test_where_operators.py +++ b/tests/integration/cqlengine/operators/test_where_operators.py @@ -17,6 +17,7 @@ import six + class TestWhereOperators(TestCase): def test_symbol_lookup(self): @@ -27,6 +28,7 @@ def check_lookup(symbol, expected): self.assertEqual(op, expected) check_lookup('EQ', EqualsOperator) + check_lookup('NE', NotEqualsOperator) check_lookup('IN', InOperator) check_lookup('GT', GreaterThanOperator) check_lookup('GTE', GreaterThanOrEqualOperator) @@ -37,6 +39,7 @@ def check_lookup(symbol, expected): def test_operator_rendering(self): """ tests symbols are rendered properly """ self.assertEqual("=", six.text_type(EqualsOperator())) + self.assertEqual("=!", six.text_type(NotEqualsOperator())) self.assertEqual("IN", six.text_type(InOperator())) self.assertEqual(">", six.text_type(GreaterThanOperator())) self.assertEqual(">=", six.text_type(GreaterThanOrEqualOperator())) diff --git a/tests/integration/cqlengine/statements/test_delete_statement.py b/tests/integration/cqlengine/statements/test_delete_statement.py index 2767c7da33..aa449fc7c1 100644 --- a/tests/integration/cqlengine/statements/test_delete_statement.py +++ b/tests/integration/cqlengine/statements/test_delete_statement.py @@ -76,6 +76,10 @@ def test_range_deletion_rendering(self): ds.add_where(Column(db_field='created_at'), InOperator(), ['0', '10', '20']) self.assertEqual(six.text_type(ds), 'DELETE FROM table WHERE "a" = %(0)s AND "created_at" IN %(1)s', six.text_type(ds)) + ds = DeleteStatement('table', None) + ds.add_where(Column(db_field='a'), NotEqualsOperator(), 'b') + self.assertEqual(six.text_type(ds), 'DELETE FROM table WHERE "a" != %(0)s', six.text_type(ds)) + def test_delete_conditional(self): where = [WhereClause('id', EqualsOperator(), 1)] conditionals = [ConditionalClause('f0', 'value0'), ConditionalClause('f1', 'value1')] diff --git a/tests/integration/cqlengine/statements/test_update_statement.py b/tests/integration/cqlengine/statements/test_update_statement.py index 5679457fee..e81263ff4f 100644 --- a/tests/integration/cqlengine/statements/test_update_statement.py +++ b/tests/integration/cqlengine/statements/test_update_statement.py @@ -39,6 +39,9 @@ def test_rendering(self): us.add_where(Column(db_field='a'), EqualsOperator(), 'x') self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(0)s, "c" = %(1)s WHERE "a" = %(2)s', six.text_type(us)) + us.add_where(Column(db_field='a'), NotEqualsOperator(), 'y') + self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(0)s, "c" = %(1)s WHERE "a" = %(2)s AND "a" != %(3)s', six.text_type(us)) + def test_context(self): us = UpdateStatement('table') us.add_assignment(Column(db_field='a'), 'b') diff --git a/tests/integration/cqlengine/test_lwt_conditional.py b/tests/integration/cqlengine/test_lwt_conditional.py index 2d4e3181c7..2717415659 100644 --- a/tests/integration/cqlengine/test_lwt_conditional.py +++ b/tests/integration/cqlengine/test_lwt_conditional.py @@ -154,6 +154,62 @@ def test_delete_conditional(self): TestConditionalModel.objects(id=t.id).iff(count=5).delete() self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 0) + def test_delete_lwt_ne(self): + """ + Test to ensure that deletes using IF and not equals are honored correctly + + @since 3.2 + @jira_ticket PYTHON-328 + @expected_result Delete conditional with NE should be honored + + @test_category object_mapper + """ + + # DML path + t = TestConditionalModel.create(text='something', count=5) + self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1) + with self.assertRaises(LWTException): + t.iff(count__ne=5).delete() + t.iff(count__ne=2).delete() + self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 0) + + # QuerySet path + t = TestConditionalModel.create(text='something', count=5) + self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1) + with self.assertRaises(LWTException): + TestConditionalModel.objects(id=t.id).iff(count__ne=5).delete() + TestConditionalModel.objects(id=t.id).iff(count__ne=2).delete() + self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 0) + + def test_update_lwt_ne(self): + """ + Test to ensure that update using IF and not equals are honored correctly + + @since 3.2 + @jira_ticket PYTHON-328 + @expected_result update conditional with NE should be honored + + @test_category object_mapper + """ + + # DML path + t = TestConditionalModel.create(text='something', count=5) + self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1) + with self.assertRaises(LWTException): + t.iff(count__ne=5).update(text='nothing') + t.iff(count__ne=2).update(text='nothing') + self.assertEqual(TestConditionalModel.objects(id=t.id).first().text, 'nothing') + t.delete() + + # QuerySet path + t = TestConditionalModel.create(text='something', count=5) + self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1) + with self.assertRaises(LWTException): + TestConditionalModel.objects(id=t.id).iff(count__ne=5).update(text='nothing') + TestConditionalModel.objects(id=t.id).iff(count__ne=2).update(text='nothing') + self.assertEqual(TestConditionalModel.objects(id=t.id).first().text, 'nothing') + t.delete() + def test_update_to_none(self): # This test is done because updates to none are split into deletes # for old versions of cassandra. Can be removed when we drop that code From e8fda07c2f794463caf2ab245f15beaac17012d8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 7 Apr 2016 14:50:36 -0500 Subject: [PATCH 0502/2005] fix unit test following cluster/policy update --- tests/unit/test_policies.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index a9406cf790..c58515de5e 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -483,7 +483,7 @@ def test_default_dc(self): host_none = Host(1, SimpleConvictionPolicy) # contact point is '1' - cluster = Mock(contact_points=[1]) + cluster = Mock(contact_points_resolved=[1]) # contact DC first policy = DCAwareRoundRobinPolicy() From 85ef15870e476ccc60e9325097a72014ac35bb1d Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 7 Apr 2016 14:59:29 -0500 Subject: [PATCH 0503/2005] Fixing bug in test_where_operators, also adding C* 2.0 exclusion for NE tests --- tests/integration/cqlengine/operators/test_where_operators.py | 2 +- tests/integration/cqlengine/test_lwt_conditional.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/integration/cqlengine/operators/test_where_operators.py b/tests/integration/cqlengine/operators/test_where_operators.py index f297d2482a..0683d3901e 100644 --- a/tests/integration/cqlengine/operators/test_where_operators.py +++ b/tests/integration/cqlengine/operators/test_where_operators.py @@ -39,7 +39,7 @@ def check_lookup(symbol, expected): def test_operator_rendering(self): """ tests symbols are rendered properly """ self.assertEqual("=", six.text_type(EqualsOperator())) - self.assertEqual("=!", six.text_type(NotEqualsOperator())) + self.assertEqual("!=", six.text_type(NotEqualsOperator())) self.assertEqual("IN", six.text_type(InOperator())) self.assertEqual(">", six.text_type(GreaterThanOperator())) self.assertEqual(">=", six.text_type(GreaterThanOrEqualOperator())) diff --git a/tests/integration/cqlengine/test_lwt_conditional.py b/tests/integration/cqlengine/test_lwt_conditional.py index 2717415659..d273df9cc0 100644 --- a/tests/integration/cqlengine/test_lwt_conditional.py +++ b/tests/integration/cqlengine/test_lwt_conditional.py @@ -27,7 +27,7 @@ from cassandra.cqlengine.statements import ConditionalClause from tests.integration.cqlengine.base import BaseCassEngTestCase -from tests.integration import CASSANDRA_VERSION +from tests.integration import CASSANDRA_VERSION, greaterthancass20 class TestConditionalModel(Model): @@ -154,6 +154,7 @@ def test_delete_conditional(self): TestConditionalModel.objects(id=t.id).iff(count=5).delete() self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 0) + @greaterthancass20 def test_delete_lwt_ne(self): """ Test to ensure that deletes using IF and not equals are honored correctly @@ -181,6 +182,7 @@ def test_delete_lwt_ne(self): TestConditionalModel.objects(id=t.id).iff(count__ne=2).delete() self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 0) + @greaterthancass20 def test_update_lwt_ne(self): """ Test to ensure that update using IF and not equals are honored correctly From 0184d5158749671ed7182be83e2af9d87ce3379a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 7 Apr 2016 15:57:45 -0500 Subject: [PATCH 0504/2005] don't raise Exception base from the driver PYTHON-527 --- cassandra/__init__.py | 24 ++++++++++++++---------- cassandra/cluster.py | 26 +++++++++++++------------- cassandra/connection.py | 2 +- cassandra/policies.py | 2 +- cassandra/protocol.py | 6 +++--- cassandra/util.py | 2 +- 6 files changed, 33 insertions(+), 29 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 33fe14c384..de41f3907c 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -194,7 +194,11 @@ class UserAggregateDescriptor(SignatureDescriptor): """ -class Unavailable(Exception): +class DriverException(Exception): + pass + + +class Unavailable(DriverException): """ There were not enough live replicas to satisfy the requested consistency level, so the coordinator node immediately failed the request without @@ -220,7 +224,7 @@ def __init__(self, summary_message, consistency=None, required_replicas=None, al 'alive_replicas': alive_replicas})) -class Timeout(Exception): +class Timeout(DriverException): """ Replicas failed to respond to the coordinator node before timing out. """ @@ -289,7 +293,7 @@ def __init__(self, message, write_type=None, **kwargs): self.write_type = write_type -class CoordinationFailure(Exception): +class CoordinationFailure(DriverException): """ Replicas sent a failure to the coordinator. """ @@ -359,7 +363,7 @@ def __init__(self, message, write_type=None, **kwargs): self.write_type = write_type -class FunctionFailure(Exception): +class FunctionFailure(DriverException): """ User Defined Function failed during execution """ @@ -386,7 +390,7 @@ def __init__(self, summary_message, keyspace, function, arg_types): Exception.__init__(self, summary_message) -class AlreadyExists(Exception): +class AlreadyExists(DriverException): """ An attempt was made to create a keyspace or table that already exists. """ @@ -414,7 +418,7 @@ def __init__(self, keyspace=None, table=None): self.table = table -class InvalidRequest(Exception): +class InvalidRequest(DriverException): """ A query was made that was invalid for some reason, such as trying to set the keyspace for a connection to a nonexistent keyspace. @@ -422,21 +426,21 @@ class InvalidRequest(Exception): pass -class Unauthorized(Exception): +class Unauthorized(DriverException): """ The current user is not authorized to perfom the requested operation. """ pass -class AuthenticationFailed(Exception): +class AuthenticationFailed(DriverException): """ Failed to authenticate. """ pass -class OperationTimedOut(Exception): +class OperationTimedOut(DriverException): """ The operation took longer than the specified (client-side) timeout to complete. This is not an error generated by Cassandra, only @@ -460,7 +464,7 @@ def __init__(self, errors=None, last_host=None): Exception.__init__(self, message) -class UnsupportedOperation(Exception): +class UnsupportedOperation(DriverException): """ An attempt was made to use a feature that is not supported by the selected protocol version. See :attr:`Cluster.protocol_version` diff --git a/cassandra/cluster.py b/cassandra/cluster.py index cc816f9cbb..bb5eeb2542 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -45,7 +45,7 @@ from cassandra import (ConsistencyLevel, AuthenticationFailed, OperationTimedOut, UnsupportedOperation, - SchemaTargetType) + SchemaTargetType, DriverException) from cassandra.connection import (ConnectionException, ConnectionShutdown, ConnectionHeartbeat, ProtocolVersionUnsupported) from cassandra.cqltypes import UserType @@ -819,7 +819,7 @@ def protocol_downgrade(self, host_addr, previous_version): log.warning("Downgrading core protocol version from %d to %d for %s", self.protocol_version, new_version, host_addr) self.protocol_version = new_version else: - raise Exception("Cannot downgrade protocol version (%d) below minimum supported version: %d" % (new_version, MIN_SUPPORTED_VERSION)) + raise DriverException("Cannot downgrade protocol version (%d) below minimum supported version: %d" % (new_version, MIN_SUPPORTED_VERSION)) def connect(self, keyspace=None): """ @@ -829,7 +829,7 @@ def connect(self, keyspace=None): """ with self._lock: if self.is_shutdown: - raise Exception("Cluster is already shut down") + raise DriverException("Cluster is already shut down") if not self._is_setup: log.debug("Connecting to cluster, contact points: %s; protocol version: %s", @@ -1251,7 +1251,7 @@ def refresh_schema_metadata(self, max_schema_agreement_wait=None): An Exception is raised if schema refresh fails for any reason. """ if not self.control_connection.refresh_schema(schema_agreement_wait=max_schema_agreement_wait): - raise Exception("Schema metadata was not refreshed. See log for details.") + raise DriverException("Schema metadata was not refreshed. See log for details.") def refresh_keyspace_metadata(self, keyspace, max_schema_agreement_wait=None): """ @@ -1262,7 +1262,7 @@ def refresh_keyspace_metadata(self, keyspace, max_schema_agreement_wait=None): """ if not self.control_connection.refresh_schema(target_type=SchemaTargetType.KEYSPACE, keyspace=keyspace, schema_agreement_wait=max_schema_agreement_wait): - raise Exception("Keyspace metadata was not refreshed. See log for details.") + raise DriverException("Keyspace metadata was not refreshed. See log for details.") def refresh_table_metadata(self, keyspace, table, max_schema_agreement_wait=None): """ @@ -1272,7 +1272,7 @@ def refresh_table_metadata(self, keyspace, table, max_schema_agreement_wait=None See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior """ if not self.control_connection.refresh_schema(target_type=SchemaTargetType.TABLE, keyspace=keyspace, table=table, schema_agreement_wait=max_schema_agreement_wait): - raise Exception("Table metadata was not refreshed. See log for details.") + raise DriverException("Table metadata was not refreshed. See log for details.") def refresh_materialized_view_metadata(self, keyspace, view, max_schema_agreement_wait=None): """ @@ -1281,7 +1281,7 @@ def refresh_materialized_view_metadata(self, keyspace, view, max_schema_agreemen See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior """ if not self.control_connection.refresh_schema(target_type=SchemaTargetType.TABLE, keyspace=keyspace, table=view, schema_agreement_wait=max_schema_agreement_wait): - raise Exception("View metadata was not refreshed. See log for details.") + raise DriverException("View metadata was not refreshed. See log for details.") def refresh_user_type_metadata(self, keyspace, user_type, max_schema_agreement_wait=None): """ @@ -1290,7 +1290,7 @@ def refresh_user_type_metadata(self, keyspace, user_type, max_schema_agreement_w See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior """ if not self.control_connection.refresh_schema(target_type=SchemaTargetType.TYPE, keyspace=keyspace, type=user_type, schema_agreement_wait=max_schema_agreement_wait): - raise Exception("User Type metadata was not refreshed. See log for details.") + raise DriverException("User Type metadata was not refreshed. See log for details.") def refresh_user_function_metadata(self, keyspace, function, max_schema_agreement_wait=None): """ @@ -1301,7 +1301,7 @@ def refresh_user_function_metadata(self, keyspace, function, max_schema_agreemen See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior """ if not self.control_connection.refresh_schema(target_type=SchemaTargetType.FUNCTION, keyspace=keyspace, function=function, schema_agreement_wait=max_schema_agreement_wait): - raise Exception("User Function metadata was not refreshed. See log for details.") + raise DriverException("User Function metadata was not refreshed. See log for details.") def refresh_user_aggregate_metadata(self, keyspace, aggregate, max_schema_agreement_wait=None): """ @@ -1312,7 +1312,7 @@ def refresh_user_aggregate_metadata(self, keyspace, aggregate, max_schema_agreem See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior """ if not self.control_connection.refresh_schema(target_type=SchemaTargetType.AGGREGATE, keyspace=keyspace, aggregate=aggregate, schema_agreement_wait=max_schema_agreement_wait): - raise Exception("User Aggregate metadata was not refreshed. See log for details.") + raise DriverException("User Aggregate metadata was not refreshed. See log for details.") def refresh_nodes(self): """ @@ -1321,7 +1321,7 @@ def refresh_nodes(self): An Exception is raised if node refresh fails for any reason. """ if not self.control_connection.refresh_node_list_and_token_map(): - raise Exception("Node list was not refreshed. See log for details.") + raise DriverException("Node list was not refreshed. See log for details.") def set_meta_refresh_enabled(self, enabled): """ @@ -2824,7 +2824,7 @@ def warnings(self): """ # TODO: When timers are introduced, just make this wait if not self._event.is_set(): - raise Exception("warnings cannot be retrieved before ResponseFuture is finalized") + raise DriverException("warnings cannot be retrieved before ResponseFuture is finalized") return self._warnings @property @@ -2842,7 +2842,7 @@ def custom_payload(self): """ # TODO: When timers are introduced, just make this wait if not self._event.is_set(): - raise Exception("custom_payload cannot be retrieved before ResponseFuture is finalized") + raise DriverException("custom_payload cannot be retrieved before ResponseFuture is finalized") return self._custom_payload def start_fetching_next_page(self): diff --git a/cassandra/connection.py b/cassandra/connection.py index a762346155..f292f4fab9 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -327,7 +327,7 @@ def _connect_socket(self): self._socket = self._socket_impl.socket(af, socktype, proto) if self.ssl_options: if not self._ssl_impl: - raise Exception("This version of Python was not compiled with SSL support") + raise RuntimeError("This version of Python was not compiled with SSL support") self._socket = self._ssl_impl.wrap_socket(self._socket, **self.ssl_options) self._socket.settimeout(self.connect_timeout) self._socket.connect(sockaddr) diff --git a/cassandra/policies.py b/cassandra/policies.py index 48c850db1c..5050beff77 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -337,7 +337,7 @@ def populate(self, cluster, hosts): def check_supported(self): if not self._cluster_metadata.can_support_partitioner(): - raise Exception( + raise RuntimeError( '%s cannot be used with the cluster partitioner (%s) because ' 'the relevant C extension for this driver was not compiled. ' 'See the installation instructions for details on building ' diff --git a/cassandra/protocol.py b/cassandra/protocol.py index d2b9467348..ede6cc5869 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -22,7 +22,7 @@ from six.moves import range import io -from cassandra import type_codes +from cassandra import type_codes, DriverException from cassandra import (Unavailable, WriteTimeout, ReadTimeout, WriteFailure, ReadFailure, FunctionFailure, AlreadyExists, InvalidRequest, Unauthorized, @@ -589,7 +589,7 @@ def recv_body(cls, f, protocol_version, user_type_map): elif kind == RESULT_KIND_SCHEMA_CHANGE: results = cls.recv_results_schema_change(f, protocol_version) else: - raise Exception("Unknown RESULT kind: %d" % kind) + raise DriverException("Unknown RESULT kind: %d" % kind) return cls(kind, results, paging_state) @classmethod @@ -971,7 +971,7 @@ def decode_message(cls, protocol_version, user_type_map, stream_id, flags, opcod """ if flags & COMPRESSED_FLAG: if decompressor is None: - raise Exception("No de-compressor available for compressed frame!") + raise RuntimeError("No de-compressor available for compressed frame!") body = decompressor(body) flags ^= COMPRESSED_FLAG diff --git a/cassandra/util.py b/cassandra/util.py index ab6968e035..c4837af40d 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -1101,7 +1101,7 @@ class sockaddr(ctypes.Structure): WSAAddressToStringA = ctypes.windll.ws2_32.WSAAddressToStringA else: def not_windows(*args): - raise Exception("IPv6 addresses cannot be handled on Windows. " + raise OSError("IPv6 addresses cannot be handled on Windows. " "Missing ctypes.windll") WSAStringToAddressA = not_windows WSAAddressToStringA = not_windows From d4bf40263e0aef5ea6556b68d6ce493cc9c6fb78 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 7 Apr 2016 16:42:04 -0500 Subject: [PATCH 0505/2005] Cluster config to disable schema and token processing PYTHON-327 --- cassandra/cluster.py | 130 +++++++++++++++++++++++---------- cassandra/metadata.py | 6 +- docs/api/cassandra/cluster.rst | 6 ++ 3 files changed, 100 insertions(+), 42 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index d8c1026e90..101288f584 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -481,6 +481,37 @@ def auth_provider(self, value): establishment, options passing, and authentication. """ + @property + def schema_metadata_enabled(self): + """ + Flag indicating whether internal schema metadata is updated. + + When disabled, the driver does not populate Cluster.metadata.keyspaces on connect, or on schema change events. This + can be used to speed initial connection, and reduce load on client and server during operation. Turning this off + gives away token aware request routing, and programmatic inspection of the metadata model. + """ + return self.control_connection._schema_meta_enabled + + @schema_metadata_enabled.setter + def schema_metadata_enabled(self, enabled): + self.control_connection._schema_meta_enabled = bool(enabled) + + @property + def token_metadata_enabled(self): + """ + Flag indicating whether internal token metadata is updated. + + When disabled, the driver does not query node token information on connect, or on topology change events. This + can be used to speed initial connection, and reduce load on client and server during operation. It is most useful + in large clusters using vnodes, where the token map can be expensive to compute. Turning this off + gives away token aware request routing, and programmatic inspection of the token ring. + """ + return self.control_connection._token_meta_enabled + + @token_metadata_enabled.setter + def token_metadata_enabled(self, enabled): + self.control_connection._token_meta_enabled = bool(enabled) + sessions = None control_connection = None scheduler = None @@ -520,7 +551,9 @@ def __init__(self, idle_heartbeat_interval=30, schema_event_refresh_window=2, topology_event_refresh_window=10, - connect_timeout=5): + connect_timeout=5, + schema_metadata_enabled=True, + token_metadata_enabled=True): """ Any of the mutable Cluster attributes may be set as keyword arguments to the constructor. @@ -619,7 +652,9 @@ def __init__(self, self.control_connection = ControlConnection( self, self.control_connection_timeout, - self.schema_event_refresh_window, self.topology_event_refresh_window) + self.schema_event_refresh_window, self.topology_event_refresh_window, + schema_metadata_enabled, token_metadata_enabled) + def register_user_type(self, keyspace, user_type, klass): """ @@ -1244,7 +1279,7 @@ def refresh_schema_metadata(self, max_schema_agreement_wait=None): An Exception is raised if schema refresh fails for any reason. """ - if not self.control_connection.refresh_schema(schema_agreement_wait=max_schema_agreement_wait): + if not self.control_connection.refresh_schema(schema_agreement_wait=max_schema_agreement_wait, force=True): raise Exception("Schema metadata was not refreshed. See log for details.") def refresh_keyspace_metadata(self, keyspace, max_schema_agreement_wait=None): @@ -1255,7 +1290,7 @@ def refresh_keyspace_metadata(self, keyspace, max_schema_agreement_wait=None): See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior """ if not self.control_connection.refresh_schema(target_type=SchemaTargetType.KEYSPACE, keyspace=keyspace, - schema_agreement_wait=max_schema_agreement_wait): + schema_agreement_wait=max_schema_agreement_wait, force=True): raise Exception("Keyspace metadata was not refreshed. See log for details.") def refresh_table_metadata(self, keyspace, table, max_schema_agreement_wait=None): @@ -1265,7 +1300,8 @@ def refresh_table_metadata(self, keyspace, table, max_schema_agreement_wait=None See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior """ - if not self.control_connection.refresh_schema(target_type=SchemaTargetType.TABLE, keyspace=keyspace, table=table, schema_agreement_wait=max_schema_agreement_wait): + if not self.control_connection.refresh_schema(target_type=SchemaTargetType.TABLE, keyspace=keyspace, table=table, + schema_agreement_wait=max_schema_agreement_wait, force=True): raise Exception("Table metadata was not refreshed. See log for details.") def refresh_materialized_view_metadata(self, keyspace, view, max_schema_agreement_wait=None): @@ -1274,7 +1310,8 @@ def refresh_materialized_view_metadata(self, keyspace, view, max_schema_agreemen See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior """ - if not self.control_connection.refresh_schema(target_type=SchemaTargetType.TABLE, keyspace=keyspace, table=view, schema_agreement_wait=max_schema_agreement_wait): + if not self.control_connection.refresh_schema(target_type=SchemaTargetType.TABLE, keyspace=keyspace, table=view, + schema_agreement_wait=max_schema_agreement_wait, force=True): raise Exception("View metadata was not refreshed. See log for details.") def refresh_user_type_metadata(self, keyspace, user_type, max_schema_agreement_wait=None): @@ -1283,7 +1320,8 @@ def refresh_user_type_metadata(self, keyspace, user_type, max_schema_agreement_w See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior """ - if not self.control_connection.refresh_schema(target_type=SchemaTargetType.TYPE, keyspace=keyspace, type=user_type, schema_agreement_wait=max_schema_agreement_wait): + if not self.control_connection.refresh_schema(target_type=SchemaTargetType.TYPE, keyspace=keyspace, type=user_type, + schema_agreement_wait=max_schema_agreement_wait, force=True): raise Exception("User Type metadata was not refreshed. See log for details.") def refresh_user_function_metadata(self, keyspace, function, max_schema_agreement_wait=None): @@ -1294,7 +1332,8 @@ def refresh_user_function_metadata(self, keyspace, function, max_schema_agreemen See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior """ - if not self.control_connection.refresh_schema(target_type=SchemaTargetType.FUNCTION, keyspace=keyspace, function=function, schema_agreement_wait=max_schema_agreement_wait): + if not self.control_connection.refresh_schema(target_type=SchemaTargetType.FUNCTION, keyspace=keyspace, function=function, + schema_agreement_wait=max_schema_agreement_wait, force=True): raise Exception("User Function metadata was not refreshed. See log for details.") def refresh_user_aggregate_metadata(self, keyspace, aggregate, max_schema_agreement_wait=None): @@ -1305,7 +1344,8 @@ def refresh_user_aggregate_metadata(self, keyspace, aggregate, max_schema_agreem See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior """ - if not self.control_connection.refresh_schema(target_type=SchemaTargetType.AGGREGATE, keyspace=keyspace, aggregate=aggregate, schema_agreement_wait=max_schema_agreement_wait): + if not self.control_connection.refresh_schema(target_type=SchemaTargetType.AGGREGATE, keyspace=keyspace, aggregate=aggregate, + schema_agreement_wait=max_schema_agreement_wait, force=True): raise Exception("User Aggregate metadata was not refreshed. See log for details.") def refresh_nodes(self): @@ -1319,6 +1359,8 @@ def refresh_nodes(self): def set_meta_refresh_enabled(self, enabled): """ + *Deprecated:* set :attr:`~.Cluster.schema_metadata_enabled` :attr:`~.Cluster.token_metadata_enabled` instead + Sets a flag to enable (True) or disable (False) all metadata refresh queries. This applies to both schema and node topology. @@ -1327,7 +1369,8 @@ def set_meta_refresh_enabled(self, enabled): Meta refresh must be enabled for the driver to become aware of any cluster topology changes or schema updates. """ - self.control_connection.set_meta_refresh_enabled(bool(enabled)) + self.schema_metadata_enabled = enabled + self.token_metadata_enabled = enabled def _prepare_all_queries(self, host): if not self._prepared_statements: @@ -2010,7 +2053,10 @@ class ControlConnection(object): """ _SELECT_PEERS = "SELECT peer, data_center, rack, tokens, rpc_address, schema_version FROM system.peers" + _SELECT_PEERS_NO_TOKENS = "SELECT peer, data_center, rack, rpc_address, schema_version FROM system.peers" _SELECT_LOCAL = "SELECT cluster_name, data_center, rack, tokens, partitioner, release_version, schema_version FROM system.local WHERE key='local'" + _SELECT_LOCAL_NO_TOKENS = "SELECT cluster_name, data_center, rack, partitioner, release_version, schema_version FROM system.local WHERE key='local'" + _SELECT_SCHEMA_PEERS = "SELECT peer, rpc_address, schema_version FROM system.peers" _SELECT_SCHEMA_LOCAL = "SELECT schema_version FROM system.local WHERE key='local'" @@ -2022,14 +2068,17 @@ class ControlConnection(object): _schema_event_refresh_window = None _topology_event_refresh_window = None - _meta_refresh_enabled = True + _schema_meta_enabled = True + _token_meta_enabled = True # for testing purposes _time = time def __init__(self, cluster, timeout, schema_event_refresh_window, - topology_event_refresh_window): + topology_event_refresh_window, + schema_meta_enabled=True, + token_meta_enabled=True): # use a weak reference to allow the Cluster instance to be GC'ed (and # shutdown) since implementing __del__ disables the cycle detector self._cluster = weakref.proxy(cluster) @@ -2038,6 +2087,8 @@ def __init__(self, cluster, timeout, self._schema_event_refresh_window = schema_event_refresh_window self._topology_event_refresh_window = topology_event_refresh_window + self._schema_meta_enabled = schema_meta_enabled + self._token_meta_enabled = token_meta_enabled self._lock = RLock() self._schema_agreement_lock = Lock() @@ -2119,8 +2170,10 @@ def _try_connect(self, host): "SCHEMA_CHANGE": partial(_watch_callback, self_weakref, '_handle_schema_change') }, register_timeout=self._timeout) - peers_query = QueryMessage(query=self._SELECT_PEERS, consistency_level=ConsistencyLevel.ONE) - local_query = QueryMessage(query=self._SELECT_LOCAL, consistency_level=ConsistencyLevel.ONE) + sel_peers = self._SELECT_PEERS if self._token_meta_enabled else self._SELECT_PEERS_NO_TOKENS + sel_local = self._SELECT_LOCAL if self._token_meta_enabled else self._SELECT_LOCAL_NO_TOKENS + peers_query = QueryMessage(query=sel_peers, consistency_level=ConsistencyLevel.ONE) + local_query = QueryMessage(query=sel_local, consistency_level=ConsistencyLevel.ONE) shared_results = connection.wait_for_responses( peers_query, local_query, timeout=self._timeout) @@ -2200,14 +2253,10 @@ def shutdown(self): self._connection.close() del self._connection - def refresh_schema(self, **kwargs): - if not self._meta_refresh_enabled: - log.debug("[control connection] Skipping schema refresh because meta refresh is disabled") - return False - + def refresh_schema(self, force=False, **kwargs): try: if self._connection: - return self._refresh_schema(self._connection, **kwargs) + return self._refresh_schema(self._connection, force=force, **kwargs) except ReferenceError: pass # our weak reference to the Cluster is no good except Exception: @@ -2215,13 +2264,18 @@ def refresh_schema(self, **kwargs): self._signal_error() return False - def _refresh_schema(self, connection, preloaded_results=None, schema_agreement_wait=None, **kwargs): + def _refresh_schema(self, connection, preloaded_results=None, schema_agreement_wait=None, force=False, **kwargs): if self._cluster.is_shutdown: return False agreed = self.wait_for_schema_agreement(connection, preloaded_results=preloaded_results, wait_time=schema_agreement_wait) + + if not self._schema_meta_enabled and not force: + log.debug("[control connection] Skipping schema refresh because schema metadata is disabled") + return False + if not agreed: log.debug("Skipping schema refresh due to lack of schema agreement") return False @@ -2231,10 +2285,6 @@ def _refresh_schema(self, connection, preloaded_results=None, schema_agreement_w return True def refresh_node_list_and_token_map(self, force_token_rebuild=False): - if not self._meta_refresh_enabled: - log.debug("[control connection] Skipping node list refresh because meta refresh is disabled") - return False - try: if self._connection: self._refresh_node_list_and_token_map(self._connection, force_token_rebuild=force_token_rebuild) @@ -2254,10 +2304,17 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, peers_result = preloaded_results[0] local_result = preloaded_results[1] else: - log.debug("[control connection] Refreshing node list and token map") cl = ConsistencyLevel.ONE - peers_query = QueryMessage(query=self._SELECT_PEERS, consistency_level=cl) - local_query = QueryMessage(query=self._SELECT_LOCAL, consistency_level=cl) + if not self._token_meta_enabled: + log.debug("[control connection] Refreshing node list without token map") + sel_peers = self._SELECT_PEERS_NO_TOKENS + sel_local = self._SELECT_LOCAL_NO_TOKENS + else: + log.debug("[control connection] Refreshing node list and token map") + sel_peers = self._SELECT_PEERS + sel_local = self._SELECT_LOCAL + peers_query = QueryMessage(query=sel_peers, consistency_level=cl) + local_query = QueryMessage(query=sel_local, consistency_level=cl) peers_result, local_result = connection.wait_for_responses( peers_query, local_query, timeout=self._timeout) @@ -2296,8 +2353,8 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, if not addr or addr in ["0.0.0.0", "::"]: addr = row.get("peer") - tokens = row.get("tokens") - if not tokens: + tokens = row.get("tokens", None) + if 'tokens' in row and not tokens: # it was selected, but empty log.warning("Excluding host (%s) with no tokens in system.peers table of %s." % (addr, connection.host)) continue @@ -2529,9 +2586,6 @@ def return_connection(self, connection): if connection is self._connection and (connection.is_defunct or connection.is_closed): self.reconnect() - def set_meta_refresh_enabled(self, enabled): - self._meta_refresh_enabled = enabled - def _stop_scheduler(scheduler, thread): try: @@ -2626,13 +2680,9 @@ def _log_if_failed(self, future): def refresh_schema_and_set_result(control_conn, response_future, **kwargs): try: - if control_conn._meta_refresh_enabled: - log.debug("Refreshing schema in response to schema change. " - "%s", kwargs) - response_future.is_schema_agreed = control_conn._refresh_schema(response_future._connection, **kwargs) - else: - log.debug("Skipping schema refresh in response to schema change because meta refresh is disabled; " - "%s", kwargs) + log.debug("Refreshing schema in response to schema change. " + "%s", kwargs) + response_future.is_schema_agreed = control_conn._refresh_schema(response_future._connection, **kwargs) except Exception: log.exception("Exception refreshing schema in response to schema change:") response_future.session.submit(control_conn.refresh_schema, **kwargs) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 1d04b4c964..28e19decf3 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1402,8 +1402,10 @@ def rebuild_keyspace(self, keyspace, build_if_absent=False): with self._rebuild_lock: current = self.tokens_to_hosts_by_ks.get(keyspace, None) if (build_if_absent and current is None) or (not build_if_absent and current is not None): - replica_map = self.replica_map_for_keyspace(self._metadata.keyspaces[keyspace]) - self.tokens_to_hosts_by_ks[keyspace] = replica_map + ks_meta = self._metadata.keyspaces.get(keyspace) + if ks_meta: + replica_map = self.replica_map_for_keyspace(self._metadata.keyspaces[keyspace]) + self.tokens_to_hosts_by_ks[keyspace] = replica_map def replica_map_for_keyspace(self, ks_metadata): strategy = ks_metadata.replication_strategy diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 9c546d3be8..e82982806a 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -49,6 +49,12 @@ .. autoattribute:: connect_timeout + .. autoattribute:: schema_metadata_enabled + :annotation: = True + + .. autoattribute:: token_metadata_enabled + :annotation: = True + .. automethod:: connect .. automethod:: shutdown From 940166863af189977b5f5455bcf3053cfd225ed2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 8 Apr 2016 13:11:41 -0500 Subject: [PATCH 0506/2005] added AddressTranlator interface PYTHON-69 --- cassandra/cluster.py | 43 +++++++++++++++++++++++++++---------------- cassandra/policies.py | 31 ++++++++++++++++++++++++++++--- 2 files changed, 55 insertions(+), 19 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 101288f584..bf499163b3 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -65,7 +65,7 @@ from cassandra.metadata import Metadata, protect_name, murmur3 from cassandra.policies import (TokenAwarePolicy, DCAwareRoundRobinPolicy, SimpleConvictionPolicy, ExponentialReconnectionPolicy, HostDistance, - RetryPolicy) + RetryPolicy, IdentityTranslator) from cassandra.pool import (Host, _ReconnectionHandler, _HostReconnectionHandler, HostConnectionPool, HostConnection, NoConnectionsAvailable) @@ -347,6 +347,12 @@ def auth_provider(self, value): :class:`.policies.SimpleConvictionPolicy`. """ + address_translator = IdentityTranslator() + """ + :class:`.policies.AddressTranslator` instance to be used in translating server node addresses + to driver connection addresses. + """ + connect_to_remote_hosts = True """ If left as :const:`True`, hosts that are considered :attr:`~.HostDistance.REMOTE` @@ -553,7 +559,8 @@ def __init__(self, topology_event_refresh_window=10, connect_timeout=5, schema_metadata_enabled=True, - token_metadata_enabled=True): + token_metadata_enabled=True, + address_translator=None): """ Any of the mutable Cluster attributes may be set as keyword arguments to the constructor. @@ -572,7 +579,6 @@ def __init__(self, if load_balancing_policy is not None: if isinstance(load_balancing_policy, type): raise TypeError("load_balancing_policy should not be a class, it should be an instance of that class") - self.load_balancing_policy = load_balancing_policy else: self.load_balancing_policy = default_lbp_factory() @@ -580,13 +586,11 @@ def __init__(self, if reconnection_policy is not None: if isinstance(reconnection_policy, type): raise TypeError("reconnection_policy should not be a class, it should be an instance of that class") - self.reconnection_policy = reconnection_policy if default_retry_policy is not None: if isinstance(default_retry_policy, type): raise TypeError("default_retry_policy should not be a class, it should be an instance of that class") - self.default_retry_policy = default_retry_policy if conviction_policy_factory is not None: @@ -594,6 +598,11 @@ def __init__(self, raise ValueError("conviction_policy_factory must be callable") self.conviction_policy_factory = conviction_policy_factory + if address_translator is not None: + if isinstance(address_translator, type): + raise TypeError("address_translator should not be a class, it should be an instance of that class") + self.address_translator = address_translator + if connection_class is not None: self.connection_class = connection_class @@ -2348,10 +2357,7 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, should_rebuild_token_map = force_token_rebuild or self._cluster.metadata.partitioner is None found_hosts = set() for row in peers_result: - addr = row.get("rpc_address") - - if not addr or addr in ["0.0.0.0", "::"]: - addr = row.get("peer") + addr = self._rpc_from_peer_row(row) tokens = row.get("tokens", None) if 'tokens' in row and not tokens: # it was selected, but empty @@ -2413,7 +2419,7 @@ def _delay_for_event_type(self, event_type, delay_window): def _handle_topology_change(self, event): change_type = event["change_type"] - addr, port = event["address"] + addr = self._translate_address(event["address"][0]) if change_type == "NEW_NODE" or change_type == "MOVED_NODE": if self._topology_event_refresh_window >= 0: delay = self._delay_for_event_type('topology_change', self._topology_event_refresh_window) @@ -2424,7 +2430,7 @@ def _handle_topology_change(self, event): def _handle_status_change(self, event): change_type = event["change_type"] - addr, port = event["address"] + addr = self._translate_address(event["address"][0]) host = self._cluster.metadata.get_host(addr) if change_type == "UP": delay = 1 + self._delay_for_event_type('status_change', 0.5) # randomness to avoid thundering herd problem on events @@ -2442,6 +2448,9 @@ def _handle_status_change(self, event): # this will be run by the scheduler self._cluster.on_down(host, is_host_addition=False) + def _translate_address(self, addr): + return self._cluster.address_translator.translate(addr) + def _handle_schema_change(self, event): if self._schema_event_refresh_window < 0: return @@ -2523,11 +2532,7 @@ def _get_schema_mismatches(self, peers_result, local_result, local_address): schema_ver = row.get('schema_version') if not schema_ver: continue - - addr = row.get("rpc_address") - if not addr or addr in ["0.0.0.0", "::"]: - addr = row.get("peer") - + addr = self._rpc_from_peer_row(row) peer = self._cluster.metadata.get_host(addr) if peer and peer.is_up: versions[schema_ver].add(addr) @@ -2538,6 +2543,12 @@ def _get_schema_mismatches(self, peers_result, local_result, local_address): return dict((version, list(nodes)) for version, nodes in six.iteritems(versions)) + def _rpc_from_peer_row(self, row): + addr = row.get("rpc_address") + if not addr or addr in ["0.0.0.0", "::"]: + addr = row.get("peer") + return self._translate_address(addr) + def _signal_error(self): with self._lock: if self._is_shutdown: diff --git a/cassandra/policies.py b/cassandra/policies.py index 595717ca5a..e47f62a96b 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -16,12 +16,9 @@ import logging from random import randint from threading import Lock -import six from cassandra import ConsistencyLevel -from six.moves import range - log = logging.getLogger(__name__) @@ -849,3 +846,31 @@ def on_unavailable(self, query, consistency, required_replicas, alive_replicas, return (self.RETHROW, None) else: return self._pick_consistency(alive_replicas) + + +class AddressTranslator(object): + """ + Interface for translating cluster-defined endpoints. + + The driver discovers nodes using server metadata and topology change events. Normally, + the endpoint defined by the server is the right way to connect to a node. In some environments, + these addresses may not be reachable, or not preferred (public vs. private IPs in cloud environments, + suboptimal routing, etc). This interface allows for translating from server defined endpoints to + preferred addresses for driver connections. + + *Note:* :attr:`~Cluster.contact_points` provided while creating the :class:`~.Cluster` instance are not + translated using this mechanism -- only addresses received from Cassandra nodes are. + """ + def translate(self, addr): + """ + Accepts the node ip address, and returns a translated address to be used connecting to this node. + """ + raise NotImplementedError + + +class IdentityTranslator(AddressTranslator): + """ + Returns the endpoint with no translation + """ + def translate(self, addr): + return addr From bd6dae2fbb01c4d96dc1c81bd5dc01a0a8c0db01 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 8 Apr 2016 13:16:04 -0500 Subject: [PATCH 0507/2005] address translator docs PYTHON-69 --- docs/api/cassandra/cluster.rst | 2 ++ docs/api/cassandra/policies.rst | 9 +++++++++ 2 files changed, 11 insertions(+) diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index e82982806a..fdea3e6789 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -25,6 +25,8 @@ .. autoattribute:: conviction_policy_factory + .. autoattribute:: address_translator + .. autoattribute:: connection_class .. autoattribute:: metrics_enabled diff --git a/docs/api/cassandra/policies.rst b/docs/api/cassandra/policies.rst index 44346c4bd4..c96e491a03 100644 --- a/docs/api/cassandra/policies.rst +++ b/docs/api/cassandra/policies.rst @@ -24,6 +24,15 @@ Load Balancing .. autoclass:: TokenAwarePolicy :members: +Translating Server Node Addresses +--------------------------------- + +.. autoclass:: AddressTranslator + :members: + +.. autoclass:: IdentityTranslator + :members: + Marking Hosts Up or Down ------------------------ From 1f596b493546e1540e9afba4c12e7835a0fff1f1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 8 Apr 2016 14:42:47 -0500 Subject: [PATCH 0508/2005] fix gevent connection to use only blocking semantics PYTHON-429 PYTHON-536 --- cassandra/io/geventreactor.py | 39 +++++++---------------------------- 1 file changed, 8 insertions(+), 31 deletions(-) diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index 6e62a38b0e..c825a8c1a2 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -14,17 +14,16 @@ import gevent import gevent.event from gevent.queue import Queue -from gevent import select, socket +from gevent import socket import gevent.ssl -from functools import partial import logging import os import time from six.moves import range -from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, EINVAL +from errno import EINVAL from cassandra.connection import Connection, ConnectionShutdown, Timer, TimerManager @@ -34,9 +33,8 @@ def is_timeout(err): return ( - err in (EINPROGRESS, EALREADY, EWOULDBLOCK) or (err == EINVAL and os.name in ('nt', 'ce')) or - isinstance(err, socket.timeout) + isinstance(err, socket.timeout) ) @@ -118,44 +116,23 @@ def handle_close(self): self.close() def handle_write(self): - run_select = partial(select.select, (), (self._socket,), ()) while True: try: next_msg = self._write_queue.get() - run_select() - except Exception as exc: - if not self.is_closed: - log.debug("Exception during write select() for %s: %s", self, exc) - self.defunct(exc) - return - - try: self._socket.sendall(next_msg) except socket.error as err: - log.debug("Exception during socket sendall for %s: %s", self, err) + log.debug("Exception in send for %s: %s", self, err) self.defunct(err) - return # Leave the write loop + return def handle_read(self): - run_select = partial(select.select, (self._socket,), (), ()) while True: try: - run_select() - except Exception as exc: - if not self.is_closed: - log.debug("Exception during read select() for %s: %s", self, exc) - self.defunct(exc) - return - - try: - while True: - buf = self._socket.recv(self.in_buffer_size) - self._iobuf.write(buf) - if len(buf) < self.in_buffer_size: - break + buf = self._socket.recv(self.in_buffer_size) + self._iobuf.write(buf) except socket.error as err: if not is_timeout(err): - log.debug("Exception during socket recv for %s: %s", self, err) + log.debug("Exception in read for %s: %s", self, err) self.defunct(err) return # leave the read loop From 48467bb7469c72a74fb1fbb996c9c13f7ce7d781 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 8 Apr 2016 15:09:07 -0500 Subject: [PATCH 0509/2005] PYTHON-535 Tests for cqlengine routing key generation --- tests/integration/cqlengine/__init__.py | 1 + .../cqlengine/model/test_model_io.py | 161 ++++++++++++++++++ 2 files changed, 162 insertions(+) diff --git a/tests/integration/cqlengine/__init__.py b/tests/integration/cqlengine/__init__.py index 2950984f58..c54dd95abe 100644 --- a/tests/integration/cqlengine/__init__.py +++ b/tests/integration/cqlengine/__init__.py @@ -38,6 +38,7 @@ def is_prepend_reversed(): ver, _ = get_server_versions() return not (ver >= (2, 0, 13) or ver >= (2, 1, 3)) + def setup_connection(keyspace_name): connection.setup(['127.0.0.1'], consistency=ConsistencyLevel.ONE, diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index b05c8b7cb4..a2cc173962 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -27,10 +27,17 @@ from cassandra.cqlengine.management import sync_table from cassandra.cqlengine.management import drop_table from cassandra.cqlengine.models import Model +from cassandra.query import SimpleStatement from cassandra.util import Date, Time +from cassandra.cqltypes import Int32Type +from cassandra.cqlengine.statements import SelectStatement, DeleteStatement, WhereClause +from cassandra.cqlengine.operators import EqualsOperator from tests.integration import PROTOCOL_VERSION from tests.integration.cqlengine.base import BaseCassEngTestCase +from tests.integration.cqlengine import DEFAULT_KEYSPACE + + class TestModel(Model): @@ -638,6 +645,160 @@ def test_query_with_date(self): self.assertTrue(inst.date == day) +class BasicModel(Model): + __table_name__ = 'basic_model_routing' + k = columns.Integer(primary_key=True) + v = columns.Integer() + + +class BasicModelMulti(Model): + __table_name__ = 'basic_model_routing_multi' + k = columns.Integer(partition_key=True) + v = columns.Integer(partition_key=True) + + +class ComplexModelRouting(Model): + __table_name__ = 'complex_model_routing' + partition = columns.UUID(partition_key=True, default=uuid4) + cluster = columns.Integer(partition_key=True) + count = columns.Integer() + text = columns.Text(partition_key=True) + float = columns.Float(partition_key=True) + text_2 = columns.Text() + + +class TestModelRoutingKeys(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(TestModelRoutingKeys, cls).setUpClass() + sync_table(BasicModel) + sync_table(BasicModelMulti) + sync_table(ComplexModelRouting) + + @classmethod + def tearDownClass(cls): + super(TestModelRoutingKeys, cls).tearDownClass() + drop_table(BasicModel) + drop_table(BasicModelMulti) + drop_table(ComplexModelRouting) + + def test_routing_key_generation_basic(self): + """ + Compares the routing key generated by simple partition key using the model with the one generated by the equivalent + bound statement + @since 3.2 + @jira_ticket PYTHON-535 + @expected_result they should match + + @test_category object_mapper + """ + + prepared = self.session.prepare( + """ + INSERT INTO {0}.basic_model_routing (k, v) VALUES (?, ?) + """.format(DEFAULT_KEYSPACE)) + bound = prepared.bind((1, 2)) + + mrk = BasicModel._routing_key_from_values([1], self.session.cluster.protocol_version) + simple = SimpleStatement("") + simple.routing_key = mrk + self.assertEqual(bound.routing_key, simple.routing_key) + + def test_routing_key_generation_multi(self): + """ + Compares the routing key generated by composite partition key using the model with the one generated by the equivalent + bound statement + @since 3.2 + @jira_ticket PYTHON-535 + @expected_result they should match + + @test_category object_mapper + """ + + prepared = self.session.prepare( + """ + INSERT INTO {0}.basic_model_routing_multi (k, v) VALUES (?, ?) + """.format(DEFAULT_KEYSPACE)) + bound = prepared.bind((1, 2)) + mrk = BasicModelMulti._routing_key_from_values([1, 2], self.session.cluster.protocol_version) + simple = SimpleStatement("") + simple.routing_key = mrk + self.assertEqual(bound.routing_key, simple.routing_key) + + def test_routing_key_generation_complex(self): + """ + Compares the routing key generated by complex composite partition key using the model with the one generated by the equivalent + bound statement + @since 3.2 + @jira_ticket PYTHON-535 + @expected_result they should match + + @test_category object_mapper + """ + prepared = self.session.prepare( + """ + INSERT INTO {0}.complex_model_routing (partition, cluster, count, text, float, text_2) VALUES (?, ?, ?, ?, ?, ?) + """.format(DEFAULT_KEYSPACE)) + partition = uuid4() + cluster = 1 + count = 2 + text = "text" + float = 1.2 + text_2 = "text_2" + bound = prepared.bind((partition, cluster, count, text, float, text_2)) + mrk = ComplexModelRouting._routing_key_from_values([partition, cluster, text, float], self.session.cluster.protocol_version) + simple = SimpleStatement("") + simple.routing_key = mrk + self.assertEqual(bound.routing_key, simple.routing_key) + + def test_partition_key_index(self): + """ + Test to ensure that statement partition key generation is in the correct order + @since 3.2 + @jira_ticket PYTHON-535 + @expected_result . + + @test_category object_mapper + """ + self._check_partition_value_generation(BasicModel, SelectStatement(BasicModel.__table_name__)) + self._check_partition_value_generation(BasicModel, DeleteStatement(BasicModel.__table_name__)) + self._check_partition_value_generation(BasicModelMulti, SelectStatement(BasicModelMulti.__table_name__)) + self._check_partition_value_generation(BasicModelMulti, DeleteStatement(BasicModelMulti.__table_name__)) + self._check_partition_value_generation(ComplexModelRouting, SelectStatement(ComplexModelRouting.__table_name__)) + self._check_partition_value_generation(ComplexModelRouting, DeleteStatement(ComplexModelRouting.__table_name__)) + self._check_partition_value_generation(BasicModel, SelectStatement(BasicModel.__table_name__), reverse=True) + self._check_partition_value_generation(BasicModel, DeleteStatement(BasicModel.__table_name__), reverse=True) + self._check_partition_value_generation(BasicModelMulti, SelectStatement(BasicModelMulti.__table_name__), reverse=True) + self._check_partition_value_generation(BasicModelMulti, DeleteStatement(BasicModelMulti.__table_name__), reverse=True) + self._check_partition_value_generation(ComplexModelRouting, SelectStatement(ComplexModelRouting.__table_name__), reverse=True) + self._check_partition_value_generation(ComplexModelRouting, DeleteStatement(ComplexModelRouting.__table_name__), reverse=True) + + def _check_partition_value_generation(self, model, state, reverse=False): + """ + This generates a some statements based on the partition_key_index of the model. + It then validates that order of the partition key values in the statement matches the index + specified in the models partition_key_index + """ + # Setup some unique values for statement generation + uuid = uuid4() + values = {'k': 5, 'v': 3, 'partition': uuid, 'cluster': 6, 'count': 42, 'text': 'text', 'float': 3.1415, 'text_2': 'text_2'} + res = dict((v, k) for k, v in values.items()) + items = model._partition_key_index.items() + if(reverse): + items.reverse() + # Add where clauses for each partition key + for partition_key, position in items: + wc = WhereClause(partition_key, EqualsOperator(), values.get(partition_key)) + state._add_where_clause(wc) + + # Iterate over the partition key values check to see that their index matches + # Those specified in the models partition field + for indx, value in enumerate(state.partition_key_values(model._partition_key_index)): + name = res.get(value) + self.assertEqual(indx, model._partition_key_index.get(name)) + + def test_none_filter_fails(): class NoneFilterModel(Model): From 613c0052825cc13a5b43caa2234042d14cdb27be Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 8 Apr 2016 16:31:19 -0500 Subject: [PATCH 0510/2005] fix unit test following addition of address_translator --- tests/unit/test_control_connection.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index 9fac7e2f46..e109a76fd0 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -25,7 +25,7 @@ from cassandra.cluster import ControlConnection, _Scheduler from cassandra.pool import Host from cassandra.policies import (SimpleConvictionPolicy, RoundRobinPolicy, - ConstantReconnectionPolicy) + ConstantReconnectionPolicy, IdentityTranslator) PEER_IP = "foobar" @@ -61,6 +61,7 @@ class MockCluster(object): max_schema_agreement_wait = 5 load_balancing_policy = RoundRobinPolicy() reconnection_policy = ConstantReconnectionPolicy(2) + address_translator = IdentityTranslator() down_host = None contact_points = [] is_shutdown = False From edacf56353c3519a4e42327d7a058313e3775b49 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 11 Apr 2016 12:19:59 -0500 Subject: [PATCH 0511/2005] fix SimpleDateType slz from long (same as int) --- cassandra/cqltypes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 4a2a203286..7eb0a2df58 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -623,7 +623,7 @@ def serialize(val, protocol_version): try: days = val.days_from_epoch except AttributeError: - if isinstance(val, int): + if isinstance(val, six.integer_types): # the DB wants offset int values, but util.Date init takes days from epoch # here we assume int values are offset, as they would appear in CQL # short circuit to avoid subtracting just to add offset From c7f15a2165b0ddacda2a8ffba8e25e291752d134 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 11 Apr 2016 12:20:23 -0500 Subject: [PATCH 0512/2005] make cqlengine routing key test work in py3 --- tests/integration/cqlengine/model/test_model_io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index a2cc173962..3faf62febc 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -784,7 +784,7 @@ def _check_partition_value_generation(self, model, state, reverse=False): uuid = uuid4() values = {'k': 5, 'v': 3, 'partition': uuid, 'cluster': 6, 'count': 42, 'text': 'text', 'float': 3.1415, 'text_2': 'text_2'} res = dict((v, k) for k, v in values.items()) - items = model._partition_key_index.items() + items = list(model._partition_key_index.items()) if(reverse): items.reverse() # Add where clauses for each partition key From e2ce469800d6db752c220c4011a14bbc0a036982 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 11 Apr 2016 14:12:07 -0500 Subject: [PATCH 0513/2005] expose broadcast and listen addresses on Host PYTHON-332 --- cassandra/cluster.py | 8 ++++++-- cassandra/pool.py | 16 +++++++++++++++- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 0975e6bad8..7e79b31c2a 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2067,9 +2067,9 @@ class ControlConnection(object): Internal """ - _SELECT_PEERS = "SELECT peer, data_center, rack, tokens, rpc_address, schema_version FROM system.peers" + _SELECT_PEERS = "SELECT * FROM system.peers" _SELECT_PEERS_NO_TOKENS = "SELECT peer, data_center, rack, rpc_address, schema_version FROM system.peers" - _SELECT_LOCAL = "SELECT cluster_name, data_center, rack, tokens, partitioner, release_version, schema_version FROM system.local WHERE key='local'" + _SELECT_LOCAL = "SELECT * FROM system.local WHERE key='local'" _SELECT_LOCAL_NO_TOKENS = "SELECT cluster_name, data_center, rack, partitioner, release_version, schema_version FROM system.local WHERE key='local'" @@ -2349,6 +2349,8 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, datacenter = local_row.get("data_center") rack = local_row.get("rack") self._update_location_info(host, datacenter, rack) + host.listen_address = local_row.get("listen_address") + host.broadcast_address = local_row.get("broadcast_address") partitioner = local_row.get("partitioner") tokens = local_row.get("tokens") @@ -2382,6 +2384,8 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, else: should_rebuild_token_map |= self._update_location_info(host, datacenter, rack) + host.broadcast_address = row.get("peer") + if partitioner and tokens: token_map[host] = tokens diff --git a/cassandra/pool.py b/cassandra/pool.py index 08cfd37271..3cb14e91bd 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -48,7 +48,21 @@ class Host(object): address = None """ - The IP address or hostname of the node. + The IP address of the node. This is the RPC address the driver uses when connecting to the node + """ + + broadcast_address = None + """ + broadcast address configured for the node, *if available* ('peer' in system.peers table). + This is not present in the ``system.local`` table for older versions of Cassandra. It is also not queried if + :attr:`~.Cluster.token_metadata_enabled` is ``False``. + """ + + listen_address = None + """ + listen address configured for the node, *if available*. This is only available in the ``system.local`` table for newer + versions of Cassandra. It is also not queried if :attr:`~.Cluster.token_metadata_enabled` is ``False``. + Usually the same as ``broadcast_address`` unless configured differently in cassandra.yaml. """ conviction_policy = None From 0b21341a2fcac08128bd079bd5646993b9ef8c03 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 11 Apr 2016 15:49:00 -0500 Subject: [PATCH 0514/2005] add RETRY_NEXT_HOST decision, default to this on unavailable PYTHON-285 --- cassandra/cluster.py | 6 ++- cassandra/policies.py | 56 ++++++++++++---------- tests/integration/standard/test_metrics.py | 4 +- tests/unit/test_policies.py | 8 ++-- 4 files changed, 41 insertions(+), 33 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 0975e6bad8..3fc2f9109a 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3058,15 +3058,17 @@ def _set_result(self, response): return retry_type, consistency = retry - if retry_type is RetryPolicy.RETRY: + if retry_type in (RetryPolicy.RETRY, RetryPolicy.RETRY_NEXT_HOST): self._query_retries += 1 - self._retry(reuse_connection=True, consistency_level=consistency) + reuse = retry_type == RetryPolicy.RETRY + self._retry(reuse_connection=reuse, consistency_level=consistency) elif retry_type is RetryPolicy.RETHROW: self._set_final_exception(response.to_exception()) else: # IGNORE if self._metrics is not None: self._metrics.on_ignore() self._set_final_result(None) + self._errors[self._current_host] = response.to_exception() elif isinstance(response, ConnectionException): if self._metrics is not None: self._metrics.on_connection_error() diff --git a/cassandra/policies.py b/cassandra/policies.py index 45a7a1c5b8..8eb97e2036 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -551,8 +551,8 @@ def __init__(self, base_delay, max_delay, max_attempts=64): self.max_attempts = max_attempts def new_schedule(self): - i=0 - while self.max_attempts == None or i < self.max_attempts: + i = 0 + while self.max_attempts is None or i < self.max_attempts: yield min(self.base_delay * (2 ** i), self.max_delay) i += 1 @@ -647,6 +647,12 @@ class or one of its subclasses. should be ignored but no more retries should be attempted. """ + RETRY_NEXT_HOST = 3 + """ + This should be returned from the below methods if the operation + should be retried on another connection. + """ + def on_read_timeout(self, query, consistency, required_responses, received_responses, data_retrieved, retry_num): """ @@ -674,11 +680,11 @@ def on_read_timeout(self, query, consistency, required_responses, a sufficient number of replicas responded (with data digests). """ if retry_num != 0: - return (self.RETHROW, None) + return self.RETHROW, None elif received_responses >= required_responses and not data_retrieved: - return (self.RETRY, consistency) + return self.RETRY, consistency else: - return (self.RETHROW, None) + return self.RETHROW, None def on_write_timeout(self, query, consistency, write_type, required_responses, received_responses, retry_num): @@ -707,11 +713,11 @@ def on_write_timeout(self, query, consistency, write_type, :attr:`~.WriteType.BATCH_LOG`. """ if retry_num != 0: - return (self.RETHROW, None) + return self.RETHROW, None elif write_type == WriteType.BATCH_LOG: - return (self.RETRY, consistency) + return self.RETRY, consistency else: - return (self.RETHROW, None) + return self.RETHROW, None def on_unavailable(self, query, consistency, required_replicas, alive_replicas, retry_num): """ @@ -736,7 +742,7 @@ def on_unavailable(self, query, consistency, required_replicas, alive_replicas, By default, no retries will be attempted and the error will be re-raised. """ - return (self.RETHROW, None) + return (self.RETRY_NEXT_HOST, consistency) if retry_num == 0 else (self.RETHROW, None) class FallthroughRetryPolicy(RetryPolicy): @@ -746,13 +752,13 @@ class FallthroughRetryPolicy(RetryPolicy): """ def on_read_timeout(self, *args, **kwargs): - return (self.RETHROW, None) + return self.RETHROW, None def on_write_timeout(self, *args, **kwargs): - return (self.RETHROW, None) + return self.RETHROW, None def on_unavailable(self, *args, **kwargs): - return (self.RETHROW, None) + return self.RETHROW, None class DowngradingConsistencyRetryPolicy(RetryPolicy): @@ -804,46 +810,46 @@ class DowngradingConsistencyRetryPolicy(RetryPolicy): """ def _pick_consistency(self, num_responses): if num_responses >= 3: - return (self.RETRY, ConsistencyLevel.THREE) + return self.RETRY, ConsistencyLevel.THREE elif num_responses >= 2: - return (self.RETRY, ConsistencyLevel.TWO) + return self.RETRY, ConsistencyLevel.TWO elif num_responses >= 1: - return (self.RETRY, ConsistencyLevel.ONE) + return self.RETRY, ConsistencyLevel.ONE else: - return (self.RETHROW, None) + return self.RETHROW, None def on_read_timeout(self, query, consistency, required_responses, received_responses, data_retrieved, retry_num): if retry_num != 0: - return (self.RETHROW, None) + return self.RETHROW, None elif received_responses < required_responses: return self._pick_consistency(received_responses) elif not data_retrieved: - return (self.RETRY, consistency) + return self.RETRY, consistency else: - return (self.RETHROW, None) + return self.RETHROW, None def on_write_timeout(self, query, consistency, write_type, required_responses, received_responses, retry_num): if retry_num != 0: - return (self.RETHROW, None) + return self.RETHROW, None if write_type in (WriteType.SIMPLE, WriteType.BATCH, WriteType.COUNTER): if received_responses > 0: # persisted on at least one replica - return (self.IGNORE, None) + return self.IGNORE, None else: - return (self.RETHROW, None) + return self.RETHROW, None elif write_type == WriteType.UNLOGGED_BATCH: return self._pick_consistency(received_responses) elif write_type == WriteType.BATCH_LOG: - return (self.RETRY, consistency) + return self.RETRY, consistency - return (self.RETHROW, None) + return self.RETHROW, None def on_unavailable(self, query, consistency, required_replicas, alive_replicas, retry_num): if retry_num != 0: - return (self.RETHROW, None) + return self.RETHROW, None else: return self._pick_consistency(alive_replicas) diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 48627b3f01..13758b65ad 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -145,13 +145,13 @@ def test_unavailable(self): query = SimpleStatement("INSERT INTO test (k, v) VALUES (2, 2)", consistency_level=ConsistencyLevel.ALL) with self.assertRaises(Unavailable): self.session.execute(query) - self.assertEqual(1, self.cluster.metrics.stats.unavailables) + self.assertEqual(2, self.cluster.metrics.stats.unavailables) # Test write query = SimpleStatement("SELECT * FROM test", consistency_level=ConsistencyLevel.ALL) with self.assertRaises(Unavailable): self.session.execute(query, timeout=None) - self.assertEqual(2, self.cluster.metrics.stats.unavailables) + self.assertEqual(4, self.cluster.metrics.stats.unavailables) finally: get_node(1).start(wait_other_notice=True, wait_for_binary_proto=True) # Give some time for the cluster to come back up, for the next test diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index c58515de5e..6640ccf1fa 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -916,14 +916,14 @@ def test_unavailable(self): retry, consistency = policy.on_unavailable( query=None, consistency=ONE, required_replicas=1, alive_replicas=2, retry_num=0) - self.assertEqual(retry, RetryPolicy.RETHROW) - self.assertEqual(consistency, None) + self.assertEqual(retry, RetryPolicy.RETRY_NEXT_HOST) + self.assertEqual(consistency, ONE) retry, consistency = policy.on_unavailable( query=None, consistency=ONE, required_replicas=10000, alive_replicas=1, retry_num=0) - self.assertEqual(retry, RetryPolicy.RETHROW) - self.assertEqual(consistency, None) + self.assertEqual(retry, RetryPolicy.RETRY_NEXT_HOST) + self.assertEqual(consistency, ONE) class FallthroughRetryPolicyTest(unittest.TestCase): From 47cb3e90154a46a34cc43b288ce0d727d15373bf Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 11 Apr 2016 16:49:00 -0500 Subject: [PATCH 0515/2005] Adding decorator for counting underlying query execution, updating some tests to add expected query counts --- tests/integration/cqlengine/__init__.py | 62 +++++++++++++++++++ .../cqlengine/query/test_batch_query.py | 12 ++++ .../cqlengine/query/test_datetime_queries.py | 15 +++-- .../integration/cqlengine/query/test_named.py | 12 +++- .../cqlengine/query/test_queryoperators.py | 3 + .../cqlengine/query/test_queryset.py | 55 +++++++++++++++- .../cqlengine/query/test_updates.py | 16 ++++- 7 files changed, 162 insertions(+), 13 deletions(-) diff --git a/tests/integration/cqlengine/__init__.py b/tests/integration/cqlengine/__init__.py index c54dd95abe..381839b88a 100644 --- a/tests/integration/cqlengine/__init__.py +++ b/tests/integration/cqlengine/__init__.py @@ -14,15 +14,23 @@ import os import warnings +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa from cassandra import ConsistencyLevel from cassandra.cqlengine import connection from cassandra.cqlengine.management import create_keyspace_simple, CQLENG_ALLOW_SCHEMA_MANAGEMENT +import cassandra from tests.integration import get_server_versions, use_single_node, PROTOCOL_VERSION DEFAULT_KEYSPACE = 'cqlengine_test' +CQL_SKIP_EXECUTE = bool(os.getenv('CQL_SKIP_EXECUTE', False)) + + def setup_package(): warnings.simplefilter('always') # for testing warnings, make sure all are let through os.environ[CQLENG_ALLOW_SCHEMA_MANAGEMENT] = '1' @@ -44,3 +52,57 @@ def setup_connection(keyspace_name): consistency=ConsistencyLevel.ONE, protocol_version=PROTOCOL_VERSION, default_keyspace=keyspace_name) + + +class StatementCounter(object): + """ + Simple python object used to hold a count of the number of times + the wrapped method has been invoked + """ + def __init__(self, patched_func): + self.func = patched_func + self.counter = 0 + + def wrapped_execute(self, *args, **kwargs): + self.counter += 1 + return self.func(*args, **kwargs) + + def get_counter(self): + return self.counter + + +def execute_count(expected): + """ + A decorator used wrap cassandra.cqlengine.connection.execute. It counts the number of times this method is invoked + then compares it to the number expected. If they don't match it throws an assertion error. + This function can be disabled by running the test harness with the env variable CQL_SKIP_EXECUTE=1 set + """ + def innerCounter(fn): + def wrapped_function(*args, **kwargs): + # Create a counter monkey patch into cassandra.cqlengine.connection.execute + count = StatementCounter(cassandra.cqlengine.connection.execute) + original_function = cassandra.cqlengine.connection.execute + # Monkey patch in our StatementCounter wrapper + cassandra.cqlengine.connection.execute = count.wrapped_execute + # Invoked the underlying unit test + to_return = fn(*args, **kwargs) + # Get the count from our monkey patched counter + count.get_counter() + # DeMonkey Patch our code + cassandra.cqlengine.connection.execute = original_function + # Check to see if the count is what you expect + tc = unittest.TestCase("__init__") + tc.assertEquals(count.get_counter(), expected, "Expected number of cassandra.cqlengine.connection.execute calls doesn't match actual number invoked Expected: {0}, Invoked {1}".format(count.get_counter(), expected)) + return to_return + # Name of the wrapped function must match the original or unittest will error out. + wrapped_function.__name__ = fn.__name__ + wrapped_function.__doc__ = fn.__doc__ + # Escape hatch + if(CQL_SKIP_EXECUTE): + return fn + else: + return wrapped_function + + return innerCounter + + diff --git a/tests/integration/cqlengine/query/test_batch_query.py b/tests/integration/cqlengine/query/test_batch_query.py index 126ad21e36..dafad04a1e 100644 --- a/tests/integration/cqlengine/query/test_batch_query.py +++ b/tests/integration/cqlengine/query/test_batch_query.py @@ -20,6 +20,7 @@ from cassandra.cqlengine.models import Model from cassandra.cqlengine.query import BatchQuery, DMLQuery from tests.integration.cqlengine.base import BaseCassEngTestCase +from tests.integration.cqlengine import execute_count from cassandra.cluster import Session @@ -55,6 +56,7 @@ def setUp(self): for obj in TestMultiKeyModel.filter(partition=self.pkey): obj.delete() + @execute_count(3) def test_insert_success_case(self): b = BatchQuery() @@ -67,6 +69,7 @@ def test_insert_success_case(self): TestMultiKeyModel.get(partition=self.pkey, cluster=2) + @execute_count(4) def test_update_success_case(self): inst = TestMultiKeyModel.create(partition=self.pkey, cluster=2, count=3, text='4') @@ -84,6 +87,7 @@ def test_update_success_case(self): inst3 = TestMultiKeyModel.get(partition=self.pkey, cluster=2) self.assertEqual(inst3.count, 4) + @execute_count(4) def test_delete_success_case(self): inst = TestMultiKeyModel.create(partition=self.pkey, cluster=2, count=3, text='4') @@ -99,6 +103,7 @@ def test_delete_success_case(self): with self.assertRaises(TestMultiKeyModel.DoesNotExist): TestMultiKeyModel.get(partition=self.pkey, cluster=2) + @execute_count(11) def test_context_manager(self): with BatchQuery() as b: @@ -112,6 +117,7 @@ def test_context_manager(self): for i in range(5): TestMultiKeyModel.get(partition=self.pkey, cluster=i) + @execute_count(9) def test_bulk_delete_success_case(self): for i in range(1): @@ -127,6 +133,7 @@ def test_bulk_delete_success_case(self): for m in TestMultiKeyModel.all(): m.delete() + @execute_count(0) def test_none_success_case(self): """ Tests that passing None into the batch call clears any batch object """ b = BatchQuery() @@ -137,6 +144,7 @@ def test_none_success_case(self): q = q.batch(None) self.assertIsNone(q._batch) + @execute_count(0) def test_dml_none_success_case(self): """ Tests that passing None into the batch call clears any batch object """ b = BatchQuery() @@ -147,6 +155,7 @@ def test_dml_none_success_case(self): q.batch(None) self.assertIsNone(q._batch) + @execute_count(3) def test_batch_execute_on_exception_succeeds(self): # makes sure if execute_on_exception == True we still apply the batch drop_table(BatchQueryLogModel) @@ -166,6 +175,7 @@ def test_batch_execute_on_exception_succeeds(self): # should be 1 because the batch should execute self.assertEqual(1, len(obj)) + @execute_count(2) def test_batch_execute_on_exception_skips_if_not_specified(self): # makes sure if execute_on_exception == True we still apply the batch drop_table(BatchQueryLogModel) @@ -186,12 +196,14 @@ def test_batch_execute_on_exception_skips_if_not_specified(self): # should be 0 because the batch should not execute self.assertEqual(0, len(obj)) + @execute_count(1) def test_batch_execute_timeout(self): with mock.patch.object(Session, 'execute') as mock_execute: with BatchQuery(timeout=1) as b: BatchQueryLogModel.batch(b).create(k=2, v=2) self.assertEqual(mock_execute.call_args[-1]['timeout'], 1) + @execute_count(1) def test_batch_execute_no_timeout(self): with mock.patch.object(Session, 'execute') as mock_execute: with BatchQuery() as b: diff --git a/tests/integration/cqlengine/query/test_datetime_queries.py b/tests/integration/cqlengine/query/test_datetime_queries.py index 118d74ddcf..813dcad07a 100644 --- a/tests/integration/cqlengine/query/test_datetime_queries.py +++ b/tests/integration/cqlengine/query/test_datetime_queries.py @@ -20,15 +20,17 @@ from cassandra.cqlengine.management import sync_table from cassandra.cqlengine.management import drop_table -from cassandra.cqlengine.models import Model, ModelException +from cassandra.cqlengine.models import Model from cassandra.cqlengine import columns -from cassandra.cqlengine import query +from tests.integration.cqlengine import execute_count + class DateTimeQueryTestModel(Model): - user = columns.Integer(primary_key=True) - day = columns.DateTime(primary_key=True) - data = columns.Text() + user = columns.Integer(primary_key=True) + day = columns.DateTime(primary_key=True) + data = columns.Text() + class TestDateTimeQueries(BaseCassEngTestCase): @@ -46,12 +48,12 @@ def setUpClass(cls): data=str(uuid4()) ) - @classmethod def tearDownClass(cls): super(TestDateTimeQueries, cls).tearDownClass() drop_table(DateTimeQueryTestModel) + @execute_count(1) def test_range_query(self): """ Tests that loading from a range of dates works properly """ start = datetime(*self.base_date.timetuple()[:3]) @@ -60,6 +62,7 @@ def test_range_query(self): results = DateTimeQueryTestModel.filter(user=0, day__gte=start, day__lt=end) assert len(results) == 3 + @execute_count(3) def test_datetime_precision(self): """ Tests that millisecond resolution is preserved when saving datetime objects """ now = datetime.now() diff --git a/tests/integration/cqlengine/query/test_named.py b/tests/integration/cqlengine/query/test_named.py index 3b51d1d216..9cddbece17 100644 --- a/tests/integration/cqlengine/query/test_named.py +++ b/tests/integration/cqlengine/query/test_named.py @@ -25,7 +25,7 @@ from cassandra.concurrent import execute_concurrent_with_args from cassandra.cqlengine import models -from tests.integration.cqlengine import setup_connection +from tests.integration.cqlengine import setup_connection, execute_count from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration.cqlengine.query.test_queryset import BaseQuerySetUsage @@ -134,6 +134,7 @@ def setUpClass(cls): cls.keyspace = NamedKeyspace(ks) cls.table = cls.keyspace.table(tn) + @execute_count(2) def test_count(self): """ Tests that adding filtering statements affects the count query as expected """ assert self.table.objects.count() == 12 @@ -141,6 +142,7 @@ def test_count(self): q = self.table.objects(test_id=0) assert q.count() == 4 + @execute_count(2) def test_query_expression_count(self): """ Tests that adding query statements affects the count query as expected """ assert self.table.objects.count() == 12 @@ -148,6 +150,7 @@ def test_query_expression_count(self): q = self.table.objects(self.table.column('test_id') == 0) assert q.count() == 4 + @execute_count(3) def test_iteration(self): """ Tests that iterating over a query set pulls back all of the expected results """ q = self.table.objects(test_id=0) @@ -181,6 +184,7 @@ def test_iteration(self): compare_set.remove(val) assert len(compare_set) == 0 + @execute_count(2) def test_multiple_iterations_work_properly(self): """ Tests that iterating over a query set more than once works """ # test with both the filtering method and the query method @@ -201,6 +205,7 @@ def test_multiple_iterations_work_properly(self): compare_set.remove(val) assert len(compare_set) == 0 + @execute_count(2) def test_multiple_iterators_are_isolated(self): """ tests that the use of one iterator does not affect the behavior of another @@ -214,6 +219,7 @@ def test_multiple_iterators_are_isolated(self): assert next(iter1).attempt_id == attempt_id assert next(iter2).attempt_id == attempt_id + @execute_count(3) def test_get_success_case(self): """ Tests that the .get() method works on new and existing querysets @@ -235,6 +241,7 @@ def test_get_success_case(self): assert m.test_id == 0 assert m.attempt_id == 0 + @execute_count(3) def test_query_expression_get_success_case(self): """ Tests that the .get() method works on new and existing querysets @@ -256,6 +263,7 @@ def test_query_expression_get_success_case(self): assert m.test_id == 0 assert m.attempt_id == 0 + @execute_count(1) def test_get_doesnotexist_exception(self): """ Tests that get calls that don't return a result raises a DoesNotExist error @@ -263,6 +271,7 @@ def test_get_doesnotexist_exception(self): with self.assertRaises(self.table.DoesNotExist): self.table.objects.get(test_id=100) + @execute_count(1) def test_get_multipleobjects_exception(self): """ Tests that get calls that return multiple results raise a MultipleObjectsReturned error @@ -286,6 +295,7 @@ def tearDownClass(cls): super(TestNamedWithMV, cls).tearDownClass() @greaterthanorequalcass30 + @execute_count(5) def test_named_table_with_mv(self): """ Test NamedTable access to materialized views diff --git a/tests/integration/cqlengine/query/test_queryoperators.py b/tests/integration/cqlengine/query/test_queryoperators.py index ae19f2498e..c2a2a74206 100644 --- a/tests/integration/cqlengine/query/test_queryoperators.py +++ b/tests/integration/cqlengine/query/test_queryoperators.py @@ -24,6 +24,7 @@ from cassandra.cqlengine.statements import WhereClause from tests.integration.cqlengine import DEFAULT_KEYSPACE from tests.integration.cqlengine.base import BaseCassEngTestCase +from tests.integration.cqlengine import execute_count class TestQuerySetOperation(BaseCassEngTestCase): @@ -71,6 +72,7 @@ def tearDown(self): super(TestTokenFunction, self).tearDown() drop_table(TokenTestModel) + @execute_count(14) def test_token_function(self): """ Tests that token functions work properly """ assert TokenTestModel.objects().count() == 0 @@ -127,6 +129,7 @@ class TestModel(Model): func = functions.Token('a') self.assertRaises(query.QueryException, TestModel.objects.filter, pk__token__gt=func) + @execute_count(7) def test_named_table_pk_token_function(self): """ Test to ensure that token function work with named tables. diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 9cc8aa3475..9d01cd5628 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -40,9 +40,9 @@ from cassandra.cqlengine import statements from cassandra.cqlengine import operators from cassandra.util import uuid_from_time - from cassandra.cqlengine.connection import get_session from tests.integration import PROTOCOL_VERSION, CASSANDRA_VERSION, greaterthancass20, greaterthancass21 +from tests.integration.cqlengine import execute_count class TzOffset(tzinfo): @@ -105,6 +105,7 @@ class TestMultiClusteringModel(Model): class TestQuerySetOperation(BaseCassEngTestCase): + def test_query_filter_parsing(self): """ Tests the queryset filter method parses it's kwargs properly @@ -229,6 +230,7 @@ def test_defining_only_or_defer_on_nonexistant_fields_fails(self): class BaseQuerySetUsage(BaseCassEngTestCase): + @classmethod def setUpClass(cls): super(BaseQuerySetUsage, cls).setUpClass() @@ -298,6 +300,8 @@ def tearDownClass(cls): class TestQuerySetCountSelectionAndIteration(BaseQuerySetUsage): + + @execute_count(2) def test_count(self): """ Tests that adding filtering statements affects the count query as expected """ assert TestModel.objects.count() == 12 @@ -305,6 +309,7 @@ def test_count(self): q = TestModel.objects(test_id=0) assert q.count() == 4 + @execute_count(2) def test_query_expression_count(self): """ Tests that adding query statements affects the count query as expected """ assert TestModel.objects.count() == 12 @@ -312,6 +317,7 @@ def test_query_expression_count(self): q = TestModel.objects(TestModel.test_id == 0) assert q.count() == 4 + @execute_count(3) def test_iteration(self): """ Tests that iterating over a query set pulls back all of the expected results """ q = TestModel.objects(test_id=0) @@ -345,6 +351,7 @@ def test_iteration(self): compare_set.remove(val) assert len(compare_set) == 0 + @execute_count(2) def test_multiple_iterations_work_properly(self): """ Tests that iterating over a query set more than once works """ # test with both the filtering method and the query method @@ -365,6 +372,7 @@ def test_multiple_iterations_work_properly(self): compare_set.remove(val) assert len(compare_set) == 0 + @execute_count(2) def test_multiple_iterators_are_isolated(self): """ tests that the use of one iterator does not affect the behavior of another @@ -378,6 +386,7 @@ def test_multiple_iterators_are_isolated(self): assert next(iter1).attempt_id == attempt_id assert next(iter2).attempt_id == attempt_id + @execute_count(3) def test_get_success_case(self): """ Tests that the .get() method works on new and existing querysets @@ -399,6 +408,7 @@ def test_get_success_case(self): assert m.test_id == 0 assert m.attempt_id == 0 + @execute_count(3) def test_query_expression_get_success_case(self): """ Tests that the .get() method works on new and existing querysets @@ -420,6 +430,7 @@ def test_query_expression_get_success_case(self): assert m.test_id == 0 assert m.attempt_id == 0 + @execute_count(1) def test_get_doesnotexist_exception(self): """ Tests that get calls that don't return a result raises a DoesNotExist error @@ -427,6 +438,7 @@ def test_get_doesnotexist_exception(self): with self.assertRaises(TestModel.DoesNotExist): TestModel.objects.get(test_id=100) + @execute_count(1) def test_get_multipleobjects_exception(self): """ Tests that get calls that return multiple results raise a MultipleObjectsReturned error @@ -438,7 +450,7 @@ def test_allow_filtering_flag(self): """ """ - +@execute_count(4) def test_non_quality_filtering(): class NonEqualityFilteringModel(Model): @@ -463,28 +475,34 @@ class NonEqualityFilteringModel(Model): class TestQuerySetDistinct(BaseQuerySetUsage): + @execute_count(1) def test_distinct_without_parameter(self): q = TestModel.objects.distinct() self.assertEqual(len(q), 3) + @execute_count(1) def test_distinct_with_parameter(self): q = TestModel.objects.distinct(['test_id']) self.assertEqual(len(q), 3) + @execute_count(1) def test_distinct_with_filter(self): q = TestModel.objects.distinct(['test_id']).filter(test_id__in=[1, 2]) self.assertEqual(len(q), 2) + @execute_count(1) def test_distinct_with_non_partition(self): with self.assertRaises(InvalidRequest): q = TestModel.objects.distinct(['description']).filter(test_id__in=[1, 2]) len(q) + @execute_count(1) def test_zero_result(self): q = TestModel.objects.distinct(['test_id']).filter(test_id__in=[52]) self.assertEqual(len(q), 0) @greaterthancass21 + @execute_count(2) def test_distinct_with_explicit_count(self): q = TestModel.objects.distinct(['test_id']) self.assertEqual(q.count(), 3) @@ -494,7 +512,7 @@ def test_distinct_with_explicit_count(self): class TestQuerySetOrdering(BaseQuerySetUsage): - + @execute_count(2) def test_order_by_success_case(self): q = TestModel.objects(test_id=0).order_by('attempt_id') expected_order = [0, 1, 2, 3] @@ -523,6 +541,7 @@ def test_ordering_on_indexed_columns_fails(self): with self.assertRaises(query.QueryException): IndexedTestModel.objects(test_id=0).order_by('attempt_id') + @execute_count(8) def test_ordering_on_multiple_clustering_columns(self): TestMultiClusteringModel.create(one=1, two=1, three=4) TestMultiClusteringModel.create(one=1, two=1, three=2) @@ -541,23 +560,28 @@ def test_ordering_on_multiple_clustering_columns(self): class TestQuerySetSlicing(BaseQuerySetUsage): + + @execute_count(1) def test_out_of_range_index_raises_error(self): q = TestModel.objects(test_id=0).order_by('attempt_id') with self.assertRaises(IndexError): q[10] + @execute_count(1) def test_array_indexing_works_properly(self): q = TestModel.objects(test_id=0).order_by('attempt_id') expected_order = [0, 1, 2, 3] for i in range(len(q)): assert q[i].attempt_id == expected_order[i] + @execute_count(1) def test_negative_indexing_works_properly(self): q = TestModel.objects(test_id=0).order_by('attempt_id') expected_order = [0, 1, 2, 3] assert q[-1].attempt_id == expected_order[-1] assert q[-2].attempt_id == expected_order[-2] + @execute_count(1) def test_slicing_works_properly(self): q = TestModel.objects(test_id=0).order_by('attempt_id') expected_order = [0, 1, 2, 3] @@ -568,6 +592,7 @@ def test_slicing_works_properly(self): for model, expect in zip(q[0:3:2], expected_order[0:3:2]): self.assertEqual(model.attempt_id, expect) + @execute_count(1) def test_negative_slicing(self): q = TestModel.objects(test_id=0).order_by('attempt_id') expected_order = [0, 1, 2, 3] @@ -589,6 +614,7 @@ def test_negative_slicing(self): class TestQuerySetValidation(BaseQuerySetUsage): + def test_primary_key_or_index_must_be_specified(self): """ Tests that queries that don't have an equals relation to a primary key or indexed field fail @@ -606,6 +632,7 @@ def test_primary_key_or_index_must_have_equal_relation_filter(self): list([i for i in q]) @greaterthancass20 + @execute_count(7) def test_indexed_field_can_be_queried(self): """ Tests that queries on an indexed field will work without any primary key relations specified @@ -633,6 +660,8 @@ def test_indexed_field_can_be_queried(self): class TestQuerySetDelete(BaseQuerySetUsage): + + @execute_count(9) def test_delete(self): TestModel.objects.create(test_id=3, attempt_id=0, description='try9', expected_result=50, test_result=40) TestModel.objects.create(test_id=3, attempt_id=1, description='try10', expected_result=60, test_result=40) @@ -658,6 +687,7 @@ def test_delete_without_any_where_args(self): TestModel.objects(attempt_id=0).delete() @unittest.skipIf(CASSANDRA_VERSION < '3.0', "range deletion was introduce in C* 3.0, currently running {0}".format(CASSANDRA_VERSION)) + @execute_count(18) def test_range_deletion(self): """ Tests that range deletion work as expected @@ -697,6 +727,7 @@ def tearDownClass(cls): super(TestMinMaxTimeUUIDFunctions, cls).tearDownClass() drop_table(TimeUUIDQueryModel) + @execute_count(7) def test_tzaware_datetime_support(self): """Test that using timezone aware datetime instances works with the MinTimeUUID/MaxTimeUUID functions. @@ -740,6 +771,7 @@ def test_tzaware_datetime_support(self): TimeUUIDQueryModel.partition == pk, TimeUUIDQueryModel.time >= functions.MinTimeUUID(midpoint_helsinki))] + @execute_count(8) def test_success_case(self): """ Test that the min and max time uuid functions work as expected """ pk = uuid4() @@ -790,11 +822,14 @@ def test_success_case(self): class TestInOperator(BaseQuerySetUsage): + + @execute_count(1) def test_kwarg_success_case(self): """ Tests the in operator works with the kwarg query method """ q = TestModel.filter(test_id__in=[0, 1]) assert q.count() == 8 + @execute_count(1) def test_query_expression_success_case(self): """ Tests the in operator works with the query expression query method """ q = TestModel.filter(TestModel.test_id.in_([0, 1])) @@ -803,6 +838,8 @@ def test_query_expression_success_case(self): @greaterthancass20 class TestContainsOperator(BaseQuerySetUsage): + + @execute_count(6) def test_kwarg_success_case(self): """ Tests the CONTAINS operator works with the kwarg query method """ q = IndexedCollectionsTestModel.filter(test_list__contains=1) @@ -833,6 +870,7 @@ def test_kwarg_success_case(self): q = IndexedCollectionsTestModel.filter(test_map_no_index__contains=1) self.assertEqual(q.count(), 0) + @execute_count(6) def test_query_expression_success_case(self): """ Tests the CONTAINS operator works with the query expression query method """ q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_list.contains_(1)) @@ -865,6 +903,8 @@ def test_query_expression_success_case(self): class TestValuesList(BaseQuerySetUsage): + + @execute_count(2) def test_values_list(self): q = TestModel.objects.filter(test_id=0, attempt_id=1) item = q.values_list('test_id', 'attempt_id', 'description', 'expected_result', 'test_result').first() @@ -875,6 +915,7 @@ def test_values_list(self): class TestObjectsProperty(BaseQuerySetUsage): + @execute_count(1) def test_objects_property_returns_fresh_queryset(self): assert TestModel.objects._result_cache is None len(TestModel.objects) # evaluate queryset @@ -882,6 +923,7 @@ def test_objects_property_returns_fresh_queryset(self): class PageQueryTests(BaseCassEngTestCase): + @execute_count(3) def test_paged_result_handling(self): if PROTOCOL_VERSION < 2: raise unittest.SkipTest("Paging requires native protocol 2+, currently using: {0}".format(PROTOCOL_VERSION)) @@ -989,6 +1031,7 @@ def tearDown(cls): for model in cls.model_list: drop_table(model) + @execute_count(33) def test_basic_crud(self): """ Tests creation update and delete of object model queries that are using db_field mappings. @@ -1025,6 +1068,7 @@ def test_basic_crud(self): i = model.objects(k0=i.k0, k1=i.k1).first() self.assertIsNone(i) + @execute_count(21) def test_slice(self): """ Tests slice queries for object models that are using db_field mapping @@ -1047,6 +1091,7 @@ def test_slice(self): self.assertEqual(model.objects(k0=i.k0, k1=i.k1, c0__lt=i.c0).count(), len(clustering_values[:-1])) self.assertEqual(model.objects(k0=i.k0, k1=i.k1, c0__gt=0).count(), len(clustering_values[1:])) + @execute_count(15) def test_order(self): """ Tests order by queries for object models that are using db_field mapping @@ -1066,6 +1111,7 @@ def test_order(self): self.assertEqual(model.objects(k0=i.k0, k1=i.k1).order_by('c0').first().c0, clustering_values[0]) self.assertEqual(model.objects(k0=i.k0, k1=i.k1).order_by('-c0').first().c0, clustering_values[-1]) + @execute_count(15) def test_index(self): """ Tests queries using index fields for object models using db_field mapping @@ -1086,6 +1132,7 @@ def test_index(self): self.assertEqual(model.objects(k0=i.k0, k1=i.k1).count(), len(clustering_values)) self.assertEqual(model.objects(k0=i.k0, k1=i.k1, v1=0).count(), 1) + @execute_count(1) def test_db_field_names_used(self): """ Tests to ensure that with generated cql update statements correctly utilize the db_field values. @@ -1145,6 +1192,7 @@ def tearDownClass(cls): super(TestModelQueryWithFetchSize, cls).tearDownClass() drop_table(TestModelSmall) + @execute_count(9) def test_defaultFetchSize(self): with BatchQuery() as b: for i in range(5100): @@ -1202,6 +1250,7 @@ def tearDownClass(cls): super(TestModelQueryWithDifferedFeld, cls).tearDownClass() drop_table(People) + @execute_count(8) def test_defaultFetchSize(self): # Populate Table People.objects.create(last_name="Smith", first_name="John", birthday=datetime.now()) diff --git a/tests/integration/cqlengine/query/test_updates.py b/tests/integration/cqlengine/query/test_updates.py index afa3a7f096..a3244d210b 100644 --- a/tests/integration/cqlengine/query/test_updates.py +++ b/tests/integration/cqlengine/query/test_updates.py @@ -20,6 +20,7 @@ from cassandra.cqlengine import columns from tests.integration.cqlengine import is_prepend_reversed from tests.integration.cqlengine.base import BaseCassEngTestCase +from tests.integration.cqlengine import execute_count class TestQueryUpdateModel(Model): @@ -45,6 +46,7 @@ def tearDownClass(cls): super(QueryUpdateTests, cls).tearDownClass() drop_table(TestQueryUpdateModel) + @execute_count(8) def test_update_values(self): """ tests calling udpate on a queryset """ partition = uuid4() @@ -65,6 +67,7 @@ def test_update_values(self): self.assertEqual(row.count, 6 if i == 3 else i) self.assertEqual(row.text, str(i)) + @execute_count(6) def test_update_values_validation(self): """ tests calling udpate on models with values passed in """ partition = uuid4() @@ -91,6 +94,7 @@ def test_primary_key_update_failure(self): with self.assertRaises(ValidationError): TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(cluster=5000) + @execute_count(8) def test_null_update_deletes_column(self): """ setting a field to null in the update should issue a delete statement """ partition = uuid4() @@ -111,6 +115,7 @@ def test_null_update_deletes_column(self): self.assertEqual(row.count, i) self.assertEqual(row.text, None if i == 3 else str(i)) + @execute_count(9) def test_mixed_value_and_null_update(self): """ tests that updating a columns value, and removing another works properly """ partition = uuid4() @@ -131,9 +136,7 @@ def test_mixed_value_and_null_update(self): self.assertEqual(row.count, 6 if i == 3 else i) self.assertEqual(row.text, None if i == 3 else str(i)) - def test_counter_updates(self): - pass - + @execute_count(3) def test_set_add_updates(self): partition = uuid4() cluster = 1 @@ -144,6 +147,7 @@ def test_set_add_updates(self): obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) self.assertEqual(obj.text_set, set(("foo", "bar"))) + @execute_count(2) def test_set_add_updates_new_record(self): """ If the key doesn't exist yet, an update creates the record """ @@ -154,6 +158,7 @@ def test_set_add_updates_new_record(self): obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) self.assertEqual(obj.text_set, set(("bar",))) + @execute_count(3) def test_set_remove_updates(self): partition = uuid4() cluster = 1 @@ -165,6 +170,7 @@ def test_set_remove_updates(self): obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) self.assertEqual(obj.text_set, set(("baz",))) + @execute_count(3) def test_set_remove_new_record(self): """ Removing something not in the set should silently do nothing """ @@ -178,6 +184,7 @@ def test_set_remove_new_record(self): obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) self.assertEqual(obj.text_set, set(("foo",))) + @execute_count(3) def test_list_append_updates(self): partition = uuid4() cluster = 1 @@ -189,6 +196,7 @@ def test_list_append_updates(self): obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) self.assertEqual(obj.text_list, ["foo", "bar"]) + @execute_count(3) def test_list_prepend_updates(self): """ Prepend two things since order is reversed by default by CQL """ partition = uuid4() @@ -204,6 +212,7 @@ def test_list_prepend_updates(self): expected = (prepended[::-1] if is_prepend_reversed() else prepended) + original self.assertEqual(obj.text_list, expected) + @execute_count(3) def test_map_update_updates(self): """ Merge a dictionary into existing value """ partition = uuid4() @@ -217,6 +226,7 @@ def test_map_update_updates(self): obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) self.assertEqual(obj.text_map, {"foo": '1', "bar": '3', "baz": '4'}) + @execute_count(3) def test_map_update_none_deletes_key(self): """ The CQL behavior is if you set a key in a map to null it deletes that key from the map. Test that this works with __update. From 4ae307fdec4a26bc2bbbd61a1960e32cf14ba43d Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 12 Apr 2016 11:27:26 -0500 Subject: [PATCH 0516/2005] Making a few fixes based on PR feedback --- tests/integration/cqlengine/__init__.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/integration/cqlengine/__init__.py b/tests/integration/cqlengine/__init__.py index 381839b88a..e61698e82c 100644 --- a/tests/integration/cqlengine/__init__.py +++ b/tests/integration/cqlengine/__init__.py @@ -90,9 +90,13 @@ def wrapped_function(*args, **kwargs): count.get_counter() # DeMonkey Patch our code cassandra.cqlengine.connection.execute = original_function + # Check to see if we have a pre-existing test case to work from. + if len(args) is 0: + test_case = unittest.TestCase("__init__") + else: + test_case = args[0] # Check to see if the count is what you expect - tc = unittest.TestCase("__init__") - tc.assertEquals(count.get_counter(), expected, "Expected number of cassandra.cqlengine.connection.execute calls doesn't match actual number invoked Expected: {0}, Invoked {1}".format(count.get_counter(), expected)) + test_case.assertEqual(count.get_counter(), expected, msg="Expected number of cassandra.cqlengine.connection.execute calls doesn't match actual number invoked Expected: {0}, Invoked {1}".format(count.get_counter(), expected)) return to_return # Name of the wrapped function must match the original or unittest will error out. wrapped_function.__name__ = fn.__name__ From 27d1559b6d4bd8ce466e1d6bf491b926a898ee43 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 12 Apr 2016 14:46:53 -0400 Subject: [PATCH 0517/2005] Prepare 3.2.0 release --- CHANGELOG.rst | 1 + cassandra/__init__.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 916dda5343..694fc12493 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,5 +1,6 @@ 3.2.0 ===== +April 12, 2016 Features -------- diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 33fe14c384..54cca0ad34 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 2, 0, 'a1', 'post0') +__version_info__ = (3, 2, 0) __version__ = '.'.join(map(str, __version_info__)) From d79255bf80934abb401b5da461a2be04d06421ed Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 12 Apr 2016 14:52:12 -0400 Subject: [PATCH 0518/2005] post-release version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 54cca0ad34..23cc1f1134 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 2, 0) +__version_info__ = (3, 2, 0, 'post0') __version__ = '.'.join(map(str, __version_info__)) From 0c17ba9f81dc20903997b7a5e147a0cfc45804d8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 12 Apr 2016 14:13:16 -0500 Subject: [PATCH 0519/2005] better timeout message for connection heartbeat --- cassandra/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index f292f4fab9..0398ab3074 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -924,7 +924,7 @@ def wait(self, timeout): if self._exception: raise self._exception else: - raise OperationTimedOut() + raise OperationTimedOut("Connection heartbeat timeout after %s seconds" % (timeout,), self.connection.host) def _options_callback(self, response): if not isinstance(response, SupportedMessage): From 17e9ced7efd4490d9a91df48e5ceb7645d964c23 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 12 Apr 2016 14:13:48 -0500 Subject: [PATCH 0520/2005] don't log heartbeat options message on error path --- cassandra/connection.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index 0398ab3074..8abbf7bc22 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -927,15 +927,15 @@ def wait(self, timeout): raise OperationTimedOut("Connection heartbeat timeout after %s seconds" % (timeout,), self.connection.host) def _options_callback(self, response): - if not isinstance(response, SupportedMessage): + if isinstance(response, SupportedMessage): + log.debug("Received options response on connection (%s) from %s", + id(self.connection), self.connection.host) + else: if isinstance(response, ConnectionException): self._exception = response else: self._exception = ConnectionException("Received unexpected response to OptionsMessage: %s" % (response,)) - - log.debug("Received options response on connection (%s) from %s", - id(self.connection), self.connection.host) self._event.set() From 0a29c65360f5c660b9fa87641104d9ea64fe48e3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 12 Apr 2016 14:14:48 -0500 Subject: [PATCH 0521/2005] keep heartbeat exceptions and use when defuncting provides a better error log --- cassandra/connection.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index 8abbf7bc22..efafd21df7 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -967,10 +967,10 @@ def run(self): if connection.is_idle: try: futures.append(HeartbeatFuture(connection, owner)) - except Exception: + except Exception as e: log.warning("Failed sending heartbeat message on connection (%s) to %s", id(connection), connection.host, exc_info=True) - failed_connections.append((connection, owner)) + failed_connections.append((connection, owner, e)) else: connection.reset_idle() else: @@ -987,14 +987,14 @@ def run(self): with connection.lock: connection.in_flight -= 1 connection.reset_idle() - except Exception: + except Exception as e: log.warning("Heartbeat failed for connection (%s) to %s", id(connection), connection.host, exc_info=True) - failed_connections.append((f.connection, f.owner)) + failed_connections.append((f.connection, f.owner, e)) - for connection, owner in failed_connections: + for connection, owner, exc in failed_connections: self._raise_if_stopped() - connection.defunct(Exception('Connection heartbeat failure')) + connection.defunct(exc) owner.return_connection(connection) except self.ShutdownException: pass From 604b691019b8fa5c64d34f10595ec7dc8d15aa2d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 12 Apr 2016 16:16:41 -0500 Subject: [PATCH 0522/2005] don't log exc_info in heartbeat; it's logged while defuncting --- cassandra/connection.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index efafd21df7..2e693e4870 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -969,7 +969,7 @@ def run(self): futures.append(HeartbeatFuture(connection, owner)) except Exception as e: log.warning("Failed sending heartbeat message on connection (%s) to %s", - id(connection), connection.host, exc_info=True) + id(connection), connection.host) failed_connections.append((connection, owner, e)) else: connection.reset_idle() @@ -989,7 +989,7 @@ def run(self): connection.reset_idle() except Exception as e: log.warning("Heartbeat failed for connection (%s) to %s", - id(connection), connection.host, exc_info=True) + id(connection), connection.host) failed_connections.append((f.connection, f.owner, e)) for connection, owner, exc in failed_connections: From d1967db4f7fbdac694f054d09a1a3b9b1b4e6acf Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 12 Apr 2016 16:17:13 -0500 Subject: [PATCH 0523/2005] by default, don't convict a host based on heartbeat timeouts PYTHON-286 --- cassandra/policies.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/policies.py b/cassandra/policies.py index 45a7a1c5b8..5b740b75cc 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -17,7 +17,7 @@ from random import randint from threading import Lock -from cassandra import ConsistencyLevel +from cassandra import ConsistencyLevel, OperationTimedOut log = logging.getLogger(__name__) @@ -465,7 +465,7 @@ class SimpleConvictionPolicy(ConvictionPolicy): """ def add_failure(self, connection_exc): - return True + return not isinstance(connection_exc, OperationTimedOut) def reset(self): pass From 5b7627a84866cbebd8d74cf7431d0d6dd4752559 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 12 Apr 2016 16:17:52 -0500 Subject: [PATCH 0524/2005] Make sure pools keep trying to connect now This follows from changing the default conviction (we now keep pools up, and failures should keep trying to connect) PYTHON-286 --- cassandra/pool.py | 40 +++++++++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/cassandra/pool.py b/cassandra/pool.py index 08cfd37271..4829927450 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -335,12 +335,17 @@ def return_connection(self, connection): def _replace(self, connection): log.debug("Replacing connection (%s) to %s", id(connection), self.host) - conn = self._session.cluster.connection_factory(self.host.address) - if self._session.keyspace: - conn.set_keyspace_blocking(self._session.keyspace) - self._connection = conn - with self._lock: - self._is_replacing = False + try: + conn = self._session.cluster.connection_factory(self.host.address) + if self._session.keyspace: + conn.set_keyspace_blocking(self._session.keyspace) + self._connection = conn + except Exception: + log.warning("Failed reconnecting %s. Retrying." % (self.host.address,)) + self._session.submit(self._replace, connection) + else: + with self._lock: + self._is_replacing = False def shutdown(self): with self._lock: @@ -499,10 +504,10 @@ def _add_conn_if_under_max(self): max_conns = self._session.cluster.get_max_connections_per_host(self.host_distance) with self._lock: if self.is_shutdown: - return False + return True if self.open_count >= max_conns: - return False + return True self.open_count += 1 @@ -646,17 +651,22 @@ def _replace(self, connection): if should_replace: log.debug("Replacing connection (%s) to %s", id(connection), self.host) - - def close_and_replace(): - connection.close() - self._add_conn_if_under_max() - - self._session.submit(close_and_replace) + connection.close() + self._session.submit(self._retrying_replace) else: - # just close it log.debug("Closing connection (%s) to %s", id(connection), self.host) connection.close() + def _retrying_replace(self): + replaced = False + try: + replaced = self._add_conn_if_under_max() + except Exception: + log.exception("Failed replacing connection to %s", self.host) + if not replaced: + log.debug("Failed replacing connection to %s. Retrying.", self.host) + self._session.submit(self._retrying_replace) + def shutdown(self): with self._lock: if self.is_shutdown: From c030d3e2e720ab2c8fd5cc6fe40a81daed9f2f3e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 12 Apr 2016 16:51:25 -0500 Subject: [PATCH 0525/2005] fix unit tests following heartbeat update --- tests/unit/test_connection.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index 15fa6e722b..1872bbe0c4 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -22,9 +22,11 @@ import time from threading import Lock -from cassandra.cluster import Cluster, Session +from cassandra import OperationTimedOut +from cassandra.cluster import Cluster from cassandra.connection import (Connection, HEADER_DIRECTION_TO_CLIENT, ProtocolError, - locally_supported_compressions, ConnectionHeartbeat, _Frame, Timer, TimerManager) + locally_supported_compressions, ConnectionHeartbeat, _Frame, Timer, TimerManager, + ConnectionException) from cassandra.marshal import uint8_pack, uint32_pack, int32_pack from cassandra.protocol import (write_stringmultimap, write_int, write_string, SupportedMessage, ProtocolHandler) @@ -382,8 +384,8 @@ def send_msg(msg, req_id, msg_callback): connection.send_msg.assert_has_calls([call(ANY, request_id, ANY)] * get_holders.call_count) connection.defunct.assert_has_calls([call(ANY)] * get_holders.call_count) exc = connection.defunct.call_args_list[0][0][0] - self.assertIsInstance(exc, Exception) - self.assertEqual(exc.args, Exception('Connection heartbeat failure').args) + self.assertIsInstance(exc, ConnectionException) + self.assertRegex(exc.args[0], r'^Received unexpected response to OptionsMessage.*') holder.return_connection.assert_has_calls([call(connection)] * get_holders.call_count) def test_timeout(self, *args): @@ -410,8 +412,9 @@ def send_msg(msg, req_id, msg_callback): connection.send_msg.assert_has_calls([call(ANY, request_id, ANY)] * get_holders.call_count) connection.defunct.assert_has_calls([call(ANY)] * get_holders.call_count) exc = connection.defunct.call_args_list[0][0][0] - self.assertIsInstance(exc, Exception) - self.assertEqual(exc.args, Exception('Connection heartbeat failure').args) + self.assertIsInstance(exc, OperationTimedOut) + self.assertEqual(exc.errors, 'Connection heartbeat timeout after 0.05 seconds') + self.assertEqual(exc.last_host, 'localhost') holder.return_connection.assert_has_calls([call(connection)] * get_holders.call_count) From d89b5688052d337e54f81bd05115900bd6f35d0b Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 12 Apr 2016 19:39:39 -0500 Subject: [PATCH 0526/2005] PYTHON-103, PYTHON-415, Adding tests for host resolution, and duplication --- tests/integration/standard/test_cluster.py | 32 ++++++++++++++++++++++ tests/unit/test_cluster.py | 14 +++++++--- 2 files changed, 42 insertions(+), 4 deletions(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 003c78a913..77cd666c25 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -41,6 +41,38 @@ def setup_module(): class ClusterTests(unittest.TestCase): + def test_host_resolution(self): + """ + Test to insure A records are resolved appropriately. + + @since 3.3 + @jira_ticket PYTHON-415 + @expected_result hostname will be transformed into IP + + @test_category connection + """ + cluster = Cluster(contact_points=["localhost"], protocol_version=PROTOCOL_VERSION, connect_timeout=1) + self.assertTrue('127.0.0.1' in cluster.contact_points_resolved) + + def test_host_duplication(self): + """ + Ensure that duplicate hosts in the contact points are surfaced in the cluster metadata + + @since 3.3 + @jira_ticket PYTHON-103 + @expected_result duplicate hosts aren't surfaced in cluster.metadata + + @test_category connection + """ + cluster = Cluster(contact_points=["localhost", "127.0.0.1", "localhost", "localhost", "localhost"], protocol_version=PROTOCOL_VERSION, connect_timeout=1) + cluster.connect() + self.assertEqual(len(cluster.metadata.all_hosts()), 3) + cluster.shutdown() + cluster = Cluster(contact_points=["127.0.0.1", "localhost"], protocol_version=PROTOCOL_VERSION, connect_timeout=1) + cluster.connect() + self.assertEqual(len(cluster.metadata.all_hosts()), 3) + cluster.shutdown() + def test_raise_error_on_control_connection_timeout(self): """ Test for initial control connection timeout diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 763875c9f8..ec3516ca66 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -20,10 +20,19 @@ from mock import patch, Mock from cassandra import ConsistencyLevel -from cassandra.cluster import _Scheduler, Session +from cassandra.cluster import _Scheduler, Session, Cluster from cassandra.query import SimpleStatement +class ContactListTest(unittest.TestCase): + + def test_invalid_types(self, *args): + with self.assertRaises(ValueError): + Cluster(contact_points=[None], protocol_version=4, connect_timeout=1) + with self.assertRaises(TypeError): + Cluster(contact_points="not a sequence", protocol_version=4, connect_timeout=1) + + class SchedulerTest(unittest.TestCase): # TODO: this suite could be expanded; for now just adding a test covering a ticket @@ -35,9 +44,6 @@ def test_event_delay_timing(self, *args): PYTHON-473 """ - sched = _Scheduler(None) - sched.schedule(0, lambda: None) - sched.schedule(0, lambda: None) # pre-473: "TypeError: unorderable types: function() < function()"t class SessionTest(unittest.TestCase): From 6684e1ae59c420b3c99e20b64e70ed18acecc1f8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 13 Apr 2016 09:34:17 -0500 Subject: [PATCH 0527/2005] assertRegex is only python3 or unittest2 --- tests/unit/test_connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index 1872bbe0c4..dcec9cd759 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -385,7 +385,7 @@ def send_msg(msg, req_id, msg_callback): connection.defunct.assert_has_calls([call(ANY)] * get_holders.call_count) exc = connection.defunct.call_args_list[0][0][0] self.assertIsInstance(exc, ConnectionException) - self.assertRegex(exc.args[0], r'^Received unexpected response to OptionsMessage.*') + self.assertRegexpMatches(exc.args[0], r'^Received unexpected response to OptionsMessage.*') holder.return_connection.assert_has_calls([call(connection)] * get_holders.call_count) def test_timeout(self, *args): From 86b804306d21c0773ef5dc2f663280b308073fa7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 13 Apr 2016 13:40:37 -0500 Subject: [PATCH 0528/2005] Explicit type for scale in cython deserializer Fixes broken compilation that showed up in in Cython 0.24 PYTHON-542 --- cassandra/deserializers.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/deserializers.pyx b/cassandra/deserializers.pyx index 3967ea1431..9ffcdd5daa 100644 --- a/cassandra/deserializers.pyx +++ b/cassandra/deserializers.pyx @@ -59,7 +59,7 @@ cdef class DesDecimalType(Deserializer): cdef Buffer varint_buf slice_buffer(buf, &varint_buf, 4, buf.size - 4) - scale = unpack_num[int32_t](buf) + cdef int32_t scale = unpack_num[int32_t](buf) unscaled = varint_unpack(&varint_buf) return Decimal('%de%d' % (unscaled, -scale)) From 298785b3472466b83a9faff4c149609f197492d4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 13 Apr 2016 14:33:09 -0500 Subject: [PATCH 0529/2005] 3.2.1 patch version and changelog --- CHANGELOG.rst | 6 ++++++ cassandra/__init__.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 694fc12493..e0ba17fd3d 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,9 @@ +3.2.1 +===== +April 13, 2016 + +* Introduced an update to allow deserializer compilation with recently released Cython 0.24 + 3.2.0 ===== April 12, 2016 diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 23cc1f1134..ac4b6fb535 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 2, 0, 'post0') +__version_info__ = (3, 2, 1) __version__ = '.'.join(map(str, __version_info__)) From b7d329218ef1a5d1b0fcfcee307b437054288b17 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 13 Apr 2016 14:36:27 -0500 Subject: [PATCH 0530/2005] post-release version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index ac4b6fb535..28c20c5df3 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 2, 1) +__version_info__ = (3, 2, 1, 'post0') __version__ = '.'.join(map(str, __version_info__)) From 2a507e7799ea15f93f26d25334f5bd8620bf91ab Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 14 Apr 2016 11:57:11 -0500 Subject: [PATCH 0531/2005] make pool.HostConnection block on a number of concurrent requests This allows basic rate limiting for low-level integrations. PYTHON-514 --- cassandra/connection.py | 21 ++++++++++++++------- cassandra/pool.py | 23 +++++++++++++++++++---- tests/unit/test_connection.py | 2 +- 3 files changed, 34 insertions(+), 12 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index f292f4fab9..5843622f40 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -211,6 +211,12 @@ class Connection(object): # the number of request IDs that are currently in use. in_flight = 0 + # Max concurrent requests allowed per connection. This is set optimistically high, allowing + # all request ids to be used in protocol version 3+. Normally concurrency would be controlled + # at a higher level by the application or concurrent.execute_concurrent. This attribute + # is for lower-level integrations that want some upper bound without reimplementing. + max_in_flight = 2 ** 15 + # A set of available request IDs. When using the v3 protocol or higher, # this will not initially include all request IDs in order to save memory, # but the set will grow if it is exhausted. @@ -262,13 +268,14 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self._iobuf = io.BytesIO() if protocol_version >= 3: - self.max_request_id = (2 ** 15) - 1 - # Don't fill the deque with 2**15 items right away. Start with 300 and add + self.max_request_id = min(self.max_in_flight - 1, (2 ** 15) - 1) + # Don't fill the deque with 2**15 items right away. Start with some and add # more if needed. - self.request_ids = deque(range(300)) - self.highest_request_id = 299 + initial_size = min(300, self.max_in_flight) + self.request_ids = deque(range(initial_size)) + self.highest_request_id = initial_size - 1 else: - self.max_request_id = (2 ** 7) - 1 + self.max_request_id = min(self.max_in_flight, (2 ** 7) - 1) self.request_ids = deque(range(self.max_request_id + 1)) self.highest_request_id = self.max_request_id @@ -464,7 +471,7 @@ def wait_for_responses(self, *msgs, **kwargs): while True: needed = len(msgs) - messages_sent with self.lock: - available = min(needed, self.max_request_id - self.in_flight) + available = min(needed, self.max_request_id - self.in_flight + 1) request_ids = [self.get_request_id() for _ in range(available)] self.in_flight += available @@ -911,7 +918,7 @@ def __init__(self, connection, owner): log.debug("Sending options message heartbeat on idle connection (%s) %s", id(connection), connection.host) with connection.lock: - if connection.in_flight < connection.max_request_id: + if connection.in_flight <= connection.max_request_id: connection.in_flight += 1 connection.send_msg(OptionsMessage(), connection.get_request_id(), self._options_callback) else: diff --git a/cassandra/pool.py b/cassandra/pool.py index 3cb14e91bd..73c59211e4 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -296,6 +296,8 @@ def __init__(self, host, host_distance, session): self.host_distance = host_distance self._session = weakref.proxy(session) self._lock = Lock() + # this is used in conjunction with the connection streams. Not using the connection lock because the connection can be replaced in the lifetime of the pool. + self._stream_available_condition = Condition(self._lock) self._is_replacing = False if host_distance == HostDistance.IGNORED: @@ -320,16 +322,27 @@ def borrow_connection(self, timeout): if not conn: raise NoConnectionsAvailable() - with conn.lock: - if conn.in_flight < conn.max_request_id: - conn.in_flight += 1 - return conn, conn.get_request_id() + start = time.time() + remaining = timeout + while True: + with conn.lock: + if conn.in_flight <= conn.max_request_id: + conn.in_flight += 1 + return conn, conn.get_request_id() + if timeout is not None: + remaining = timeout - time.time() + start + if remaining < 0: + break + with self._stream_available_condition: + self._stream_available_condition.wait(remaining) raise NoConnectionsAvailable("All request IDs are currently in use") def return_connection(self, connection): with connection.lock: connection.in_flight -= 1 + with self._stream_available_condition: + self._stream_available_condition.notify() if (connection.is_defunct or connection.is_closed) and not connection.signaled_error: log.debug("Defunct or closed connection (%s) returned to pool, potentially " @@ -355,6 +368,7 @@ def _replace(self, connection): self._connection = conn with self._lock: self._is_replacing = False + self._stream_available_condition.notify() def shutdown(self): with self._lock: @@ -362,6 +376,7 @@ def shutdown(self): return else: self.is_shutdown = True + self._stream_available_condition.notify_all() if self._connection: self._connection.close() diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index 15fa6e722b..843beda555 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -344,7 +344,7 @@ def test_no_req_ids(self, *args): get_holders = self.make_get_holders(1) max_connection = Mock(spec=Connection, host='localhost', lock=Lock(), - max_request_id=in_flight, in_flight=in_flight, + max_request_id=in_flight - 1, in_flight=in_flight, is_idle=True, is_defunct=False, is_closed=False) holder = get_holders.return_value[0] holder.get_connections.return_value.append(max_connection) From 023a7a876d5068744d3f26b0812d09ca9f626d35 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 14 Apr 2016 11:58:09 -0500 Subject: [PATCH 0532/2005] raise if timeout during query plan --- cassandra/cluster.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index a9dacff457..9f1ee60c78 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2810,6 +2810,7 @@ def send_request(self): """ Internal """ # query_plan is an iterator, so this will resume where we last left # off if send_request() is called multiple times + start = time.time() for host in self.query_plan: req_id = self._query(host) if req_id is not None: @@ -2821,6 +2822,9 @@ def send_request(self): if self._timer is None: self._start_timer() return + if self.timeout is not None and time.time() - start > self.timeout: + self._on_timeout() + return self._set_final_exception(NoHostAvailable( "Unable to complete the operation against any hosts", self._errors)) From 995e7a4f1a0422aa94d192c41e568755090feb65 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 14 Apr 2016 13:09:14 -0500 Subject: [PATCH 0533/2005] make server exception hierarchy match server definition PYTHON-443 --- cassandra/__init__.py | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index b9183c945f..dbc155aca5 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -198,7 +198,11 @@ class DriverException(Exception): pass -class Unavailable(DriverException): +class RequestExecutionException(DriverException): + pass + + +class Unavailable(RequestExecutionException): """ There were not enough live replicas to satisfy the requested consistency level, so the coordinator node immediately failed the request without @@ -224,7 +228,7 @@ def __init__(self, summary_message, consistency=None, required_replicas=None, al 'alive_replicas': alive_replicas})) -class Timeout(DriverException): +class Timeout(RequestExecutionException): """ Replicas failed to respond to the coordinator node before timing out. """ @@ -293,7 +297,7 @@ def __init__(self, message, write_type=None, **kwargs): self.write_type = write_type -class CoordinationFailure(DriverException): +class CoordinationFailure(RequestExecutionException): """ Replicas sent a failure to the coordinator. """ @@ -363,7 +367,7 @@ def __init__(self, message, write_type=None, **kwargs): self.write_type = write_type -class FunctionFailure(DriverException): +class FunctionFailure(RequestExecutionException): """ User Defined Function failed during execution """ @@ -390,7 +394,15 @@ def __init__(self, summary_message, keyspace, function, arg_types): Exception.__init__(self, summary_message) -class AlreadyExists(DriverException): +class RequestValidationException(DriverException): + pass + + +class ConfigurationException(RequestValidationException): + pass + + +class AlreadyExists(ConfigurationException): """ An attempt was made to create a keyspace or table that already exists. """ @@ -418,7 +430,7 @@ def __init__(self, keyspace=None, table=None): self.table = table -class InvalidRequest(DriverException): +class InvalidRequest(RequestValidationException): """ A query was made that was invalid for some reason, such as trying to set the keyspace for a connection to a nonexistent keyspace. @@ -426,9 +438,9 @@ class InvalidRequest(DriverException): pass -class Unauthorized(DriverException): +class Unauthorized(RequestValidationException): """ - The current user is not authorized to perfom the requested operation. + The current user is not authorized to perform the requested operation. """ pass From fb5d62ab05c482536bdde334d7c8ca0f0b0ccb03 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 14 Apr 2016 14:12:59 -0500 Subject: [PATCH 0534/2005] deduplicate column names to avoid blowing up named_tuple results PYTHON-467 --- cassandra/query.py | 4 ++-- cassandra/util.py | 11 +++++++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/cassandra/query.py b/cassandra/query.py index 55d0230096..bca0745bdc 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -31,7 +31,7 @@ from cassandra.encoder import Encoder import cassandra.encoder from cassandra.protocol import _UNSET_VALUE -from cassandra.util import OrderedDict, _positional_rename_invalid_identifiers +from cassandra.util import OrderedDict, _sanitize_identifiers import logging log = logging.getLogger(__name__) @@ -123,7 +123,7 @@ def named_tuple_factory(colnames, rows): "Avoid this by choosing different names, using SELECT \"\" AS aliases, " "or specifying a different row_factory on your Session" % (colnames, clean_column_names)) - Row = namedtuple('Row', _positional_rename_invalid_identifiers(clean_column_names)) + Row = namedtuple('Row', _sanitize_identifiers(clean_column_names)) return [Row(*row) for row in rows] diff --git a/cassandra/util.py b/cassandra/util.py index c4837af40d..ec590e4b5d 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -1171,3 +1171,14 @@ def _positional_rename_invalid_identifiers(field_names): or name.startswith('_')): names_out[index] = 'field_%d_' % index return names_out + + +def _sanitize_identifiers(field_names): + names_out = _positional_rename_invalid_identifiers(field_names) + if len(names_out) != len(set(names_out)): + observed_names = set() + for index, name in enumerate(names_out): + while names_out[index] in observed_names: + names_out[index] = "%s_" % (names_out[index],) + observed_names.add(names_out[index]) + return names_out From 8077388432037f9326f96e17462d4ab7c78f25b4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 14 Apr 2016 15:16:22 -0500 Subject: [PATCH 0535/2005] remove connection leak when control conn reconnect during shutdown PYTHON-482 --- cassandra/cluster.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index a9dacff457..d9817eefa8 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2152,6 +2152,8 @@ def _reconnect_internal(self): except Exception as exc: errors[host.address] = exc log.warning("[control connection] Error connecting to %s:", host, exc_info=True) + if self._is_shutdown: + raise DriverException("[control connection] Reconnection in progress during shutdown") raise NoHostAvailable("Unable to connect to any servers", errors) @@ -2165,6 +2167,9 @@ def _try_connect(self, host): while True: try: connection = self._cluster.connection_factory(host.address, is_control_connection=True) + if self._is_shutdown: + connection.close() + raise DriverException("Reconnecting during shutdown") break except ProtocolVersionUnsupported as e: self._cluster.protocol_downgrade(host.address, e.startup_version) @@ -2266,7 +2271,7 @@ def shutdown(self): log.debug("Shutting down control connection") if self._connection: self._connection.close() - del self._connection + self._connection = None def refresh_schema(self, force=False, **kwargs): try: From d4d29cc523ab715d92f8cadff28f60996af39ffa Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 14 Apr 2016 16:27:39 -0500 Subject: [PATCH 0536/2005] pool.Host.release_version PYTHON-301 --- cassandra/cluster.py | 5 +++-- cassandra/pool.py | 5 +++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index a9dacff457..a067cff6a9 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2068,11 +2068,10 @@ class ControlConnection(object): """ _SELECT_PEERS = "SELECT * FROM system.peers" - _SELECT_PEERS_NO_TOKENS = "SELECT peer, data_center, rack, rpc_address, schema_version FROM system.peers" + _SELECT_PEERS_NO_TOKENS = "SELECT peer, data_center, rack, rpc_address, release_version, schema_version FROM system.peers" _SELECT_LOCAL = "SELECT * FROM system.local WHERE key='local'" _SELECT_LOCAL_NO_TOKENS = "SELECT cluster_name, data_center, rack, partitioner, release_version, schema_version FROM system.local WHERE key='local'" - _SELECT_SCHEMA_PEERS = "SELECT peer, rpc_address, schema_version FROM system.peers" _SELECT_SCHEMA_LOCAL = "SELECT schema_version FROM system.local WHERE key='local'" @@ -2351,6 +2350,7 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, self._update_location_info(host, datacenter, rack) host.listen_address = local_row.get("listen_address") host.broadcast_address = local_row.get("broadcast_address") + host.release_version = local_row.get("release_version") partitioner = local_row.get("partitioner") tokens = local_row.get("tokens") @@ -2385,6 +2385,7 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, should_rebuild_token_map |= self._update_location_info(host, datacenter, rack) host.broadcast_address = row.get("peer") + host.release_version = row.get("release_version") if partitioner and tokens: token_map[host] = tokens diff --git a/cassandra/pool.py b/cassandra/pool.py index 3cb14e91bd..25e6b820b9 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -78,6 +78,11 @@ class Host(object): up or down. """ + release_version = None + """ + release_version as queried from the control connection system tables + """ + _datacenter = None _rack = None _reconnection_handler = None From 71e37957826824662aec6cde39f838bcf3a8094f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 15 Apr 2016 10:08:45 -0500 Subject: [PATCH 0537/2005] remove Connection.server_version and use host metadata --- cassandra/cluster.py | 2 -- cassandra/connection.py | 14 -------------- cassandra/metadata.py | 16 ++++++---------- tests/unit/test_metadata.py | 4 +--- 4 files changed, 7 insertions(+), 29 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index a067cff6a9..b2dd3ed516 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2357,8 +2357,6 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, if partitioner and tokens: token_map[host] = tokens - connection.server_version = local_row['release_version'] - # Check metadata.partitioner to see if we haven't built anything yet. If # every node in the cluster was in the contact points, we won't discover # any new nodes, so we need this additional check. (See PYTHON-90) diff --git a/cassandra/connection.py b/cassandra/connection.py index f292f4fab9..ece12f4045 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -232,8 +232,6 @@ class Connection(object): is_control_connection = False signaled_error = False # used for flagging at the pool level - _server_version = None - _iobuf = None _current_frame = None @@ -829,18 +827,6 @@ def is_idle(self): def reset_idle(self): self.msg_received = False - @property - def server_version(self): - if self._server_version is None: - query_message = QueryMessage(query="SELECT release_version FROM system.local", consistency_level=ConsistencyLevel.ONE) - message = self.wait_for_response(query_message) - self._server_version = message.results[1][0][0] # (col names, rows)[rows][first row][only item] - return self._server_version - - @server_version.setter - def server_version(self, version): - self._server_version = version - def __str__(self): status = "" if self.is_defunct: diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 28e19decf3..e9bf0b4b6e 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -117,13 +117,15 @@ def export_schema_as_string(self): def refresh(self, connection, timeout, target_type=None, change_type=None, **kwargs): + server_version = self.get_host(connection.host).release_version + parser = get_schema_parser(connection, server_version, timeout) + if not target_type: - self._rebuild_all(connection, timeout) + self._rebuild_all(parser) return tt_lower = target_type.lower() try: - parser = get_schema_parser(connection, timeout) parse_method = getattr(parser, 'get_' + tt_lower) meta = parse_method(self.keyspaces, **kwargs) if meta: @@ -135,12 +137,7 @@ def refresh(self, connection, timeout, target_type=None, change_type=None, **kwa except AttributeError: raise ValueError("Unknown schema target_type: '%s'" % target_type) - def _rebuild_all(self, connection, timeout): - """ - For internal use only. - """ - parser = get_schema_parser(connection, timeout) - + def _rebuild_all(self, parser): current_keyspaces = set() for keyspace_meta in parser.get_all_keyspaces(): current_keyspaces.add(keyspace_meta.name) @@ -2455,8 +2452,7 @@ def export_as_string(self): return self.as_cql_query(formatted=True) + ";" -def get_schema_parser(connection, timeout): - server_version = connection.server_version +def get_schema_parser(connection, server_version, timeout): if server_version.startswith('3'): return SchemaParserV3(connection, timeout) else: diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index c2e3513fd1..0b86c48789 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -380,9 +380,7 @@ def test_build_index_as_cql(self): column_meta.table.name = 'table_name_here' column_meta.table.keyspace_name = 'keyspace_name_here' column_meta.table.columns = {column_meta.name: column_meta} - connection = Mock() - connection.server_version = '2.1.0' - parser = get_schema_parser(connection, 0.1) + parser = get_schema_parser(Mock(), '2.1.0', 0.1) row = {'index_name': 'index_name_here', 'index_type': 'index_type_here'} index_meta = parser._build_index_metadata(column_meta, row) From 9ff3d13a2416e5a6e14565f6185f7cd7ddc8b3c4 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 15 Apr 2016 10:29:50 -0500 Subject: [PATCH 0538/2005] PYTHON-332 Test for broadcast/listen address --- tests/integration/standard/test_metadata.py | 24 ++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 583943bcc3..00db06024a 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -34,16 +34,38 @@ from cassandra.pool import Host from tests.integration import get_cluster, use_singledc, PROTOCOL_VERSION, get_server_versions, execute_until_pass, \ - BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, drop_keyspace_shutdown_cluster + BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, BasicExistingKeyspaceUnitTestCase, drop_keyspace_shutdown_cluster from tests.unit.cython.utils import notcython + def setup_module(): use_singledc() global CASS_SERVER_VERSION CASS_SERVER_VERSION = get_server_versions()[0] +class HostMetatDataTests(BasicExistingKeyspaceUnitTestCase): + def test_broadcast_listen_address(self): + """ + Check to ensure that the broadcast and listen adresss is populated correctly + + @since 3.3 + @jira_ticket PYTHON-332 + @expected_result They are populated for C*> 2.0.16, 2.1.6, 2.2.0 + + @test_category metadata + """ + # All nodes should have the broadcast_address set + for host in self.cluster.metadata.all_hosts(): + self.assertIsNotNone(host.broadcast_address) + con = self.cluster.control_connection.get_connections()[0] + local_host = con.host + # The control connection node should have the listen address set. + listen_addrs = [host.listen_address for host in self.cluster.metadata.all_hosts()] + self.assertTrue(local_host in listen_addrs) + + class SchemaMetadataTests(BasicSegregatedKeyspaceUnitTestCase): def make_create_statement(self, partition_cols, clustering_cols=None, other_cols=None, compact=False): From 38bba9cb678866668697ebcccb9164c0aed24eec Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 15 Apr 2016 12:02:40 -0500 Subject: [PATCH 0539/2005] fix metadata tests following get_schema_parser update --- tests/integration/standard/test_metadata.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 583943bcc3..4d212f615a 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -120,7 +120,8 @@ def test_basic_table_meta_properties(self): self.assertEqual([], tablemeta.clustering_key) self.assertEqual([u'a', u'b', u'c'], sorted(tablemeta.columns.keys())) - parser = get_schema_parser(self.cluster.control_connection._connection, 1) + cc = self.cluster.control_connection._connection + parser = get_schema_parser(cc, str(CASS_SERVER_VERSION[0]), 1) for option in tablemeta.options: self.assertIn(option, parser.recognized_table_options) @@ -1897,7 +1898,7 @@ def setup_class(cls): cls.session.execute("CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}" % cls.keyspace_name) cls.session.set_keyspace(cls.keyspace_name) connection = cls.cluster.control_connection._connection - cls.parser_class = get_schema_parser(connection, timeout=20).__class__ + cls.parser_class = get_schema_parser(connection, str(CASS_SERVER_VERSION[0]), timeout=20).__class__ @classmethod def teardown_class(cls): From d0beb8ee922f2e9f45dab30a92eddd40c61b4ef6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 15 Apr 2016 13:23:49 -0500 Subject: [PATCH 0540/2005] client-settable CL for trace queries PYTHON-435 --- cassandra/cluster.py | 16 +++++++++------- cassandra/query.py | 9 ++++++--- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index a9dacff457..730d4707e5 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3232,32 +3232,34 @@ def get_query_trace_ids(self): """ return [trace.trace_id for trace in self._query_traces] - def get_query_trace(self, max_wait=None): + def get_query_trace(self, max_wait=None, query_cl=ConsistencyLevel.LOCAL_ONE): """ Fetches and returns the query trace of the last response, or `None` if tracing was not enabled. Note that this may raise an exception if there are problems retrieving the trace - details from Cassandra. If the trace is not available after `max_wait_sec`, + details from Cassandra. If the trace is not available after `max_wait`, :exc:`cassandra.query.TraceUnavailable` will be raised. + + `query_cl` is the consistency level used to poll the trace tables. """ if self._query_traces: - return self._get_query_trace(len(self._query_traces) - 1, max_wait) + return self._get_query_trace(len(self._query_traces) - 1, max_wait, query_cl) - def get_all_query_traces(self, max_wait_per=None): + def get_all_query_traces(self, max_wait_per=None, query_cl=ConsistencyLevel.LOCAL_ONE): """ Fetches and returns the query traces for all query pages, if tracing was enabled. See note in :meth:`~.get_query_trace` regarding possible exceptions. """ if self._query_traces: - return [self._get_query_trace(i, max_wait_per) for i in range(len(self._query_traces))] + return [self._get_query_trace(i, max_wait_per, query_cl) for i in range(len(self._query_traces))] return [] - def _get_query_trace(self, i, max_wait): + def _get_query_trace(self, i, max_wait, query_cl): trace = self._query_traces[i] if not trace.events: - trace.populate(max_wait=max_wait) + trace.populate(max_wait=max_wait, query_cl=query_cl) return trace def add_callback(self, fn, *args, **kwargs): diff --git a/cassandra/query.py b/cassandra/query.py index 55d0230096..453592e614 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -864,7 +864,7 @@ def __init__(self, trace_id, session): self.trace_id = trace_id self._session = session - def populate(self, max_wait=2.0, wait_for_complete=True): + def populate(self, max_wait=2.0, wait_for_complete=True, query_cl=None): """ Retrieves the actual tracing details from Cassandra and populates the attributes of this instance. Because tracing details are stored @@ -875,6 +875,9 @@ def populate(self, max_wait=2.0, wait_for_complete=True): `wait_for_complete=False` bypasses the wait for duration to be populated. This can be used to query events from partial sessions. + + `query_cl` specifies a consistency level to use for polling the trace tables, + if it should be different than the session default. """ attempt = 0 start = time.time() @@ -886,7 +889,7 @@ def populate(self, max_wait=2.0, wait_for_complete=True): log.debug("Attempting to fetch trace info for trace ID: %s", self.trace_id) session_results = self._execute( - self._SELECT_SESSIONS_FORMAT, (self.trace_id,), time_spent, max_wait) + SimpleStatement(self._SELECT_SESSIONS_FORMAT, consistency_level=query_cl), (self.trace_id,), time_spent, max_wait) is_complete = session_results and session_results[0].duration is not None if not session_results or (wait_for_complete and not is_complete): @@ -910,7 +913,7 @@ def populate(self, max_wait=2.0, wait_for_complete=True): log.debug("Attempting to fetch trace events for trace ID: %s", self.trace_id) time_spent = time.time() - start event_results = self._execute( - self._SELECT_EVENTS_FORMAT, (self.trace_id,), time_spent, max_wait) + SimpleStatement(self._SELECT_EVENTS_FORMAT, consistency_level=query_cl), (self.trace_id,), time_spent, max_wait) log.debug("Fetched trace events for trace ID: %s", self.trace_id) self.events = tuple(TraceEvent(r.activity, r.event_id, r.source, r.source_elapsed, r.thread) for r in event_results) From 619b9f117760f81a6024738218f0706e9d821cf4 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 15 Apr 2016 16:10:31 -0500 Subject: [PATCH 0541/2005] PYTHON-69 Simple litmus tests for AddressTranslation --- tests/integration/standard/test_cluster.py | 56 +++++++++++++++++++++- 1 file changed, 54 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 77cd666c25..d6650a4796 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -27,11 +27,11 @@ from cassandra.concurrent import execute_concurrent from cassandra.policies import (RoundRobinPolicy, ExponentialReconnectionPolicy, RetryPolicy, SimpleConvictionPolicy, HostDistance, - WhiteListRoundRobinPolicy) + WhiteListRoundRobinPolicy, AddressTranslator) from cassandra.protocol import MAX_SUPPORTED_VERSION from cassandra.query import SimpleStatement, TraceUnavailable -from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, get_node, CASSANDRA_VERSION, execute_until_pass, execute_with_long_wait_retry +from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, get_node, CASSANDRA_VERSION, execute_until_pass, execute_with_long_wait_retry, BasicExistingKeyspaceUnitTestCase, get_node from tests.integration.util import assert_quiescent_pool_state @@ -41,6 +41,7 @@ def setup_module(): class ClusterTests(unittest.TestCase): + def test_host_resolution(self): """ Test to insure A records are resolved appropriately. @@ -617,3 +618,54 @@ def test_pool_management(self): cluster.shutdown() +class LocalHostAdressTranslator(AddressTranslator): + + def __init__(self, addr_map=None): + self.addr_map = addr_map + + def translate(self, addr): + new_addr = self.addr_map.get(addr) + return new_addr + + +class TestAddressTranslation(unittest.TestCase): + + def test_address_translator_basic(self): + """ + Test host address translation + + Uses a custom Address Translator to map all ip back to one. + Validates AddressTranslator invocation by ensuring that only meta data associated with single + host is populated + + @since 3.3 + @jira_ticket PYTHON-69 + @expected_result only one hosts' metadata will be populeated + + @test_category metadata + """ + lh_ad = LocalHostAdressTranslator({'127.0.0.1': '127.0.0.1', '127.0.0.2': '127.0.0.1', '127.0.0.3': '127.0.0.1'}) + c = Cluster(address_translator=lh_ad) + c.connect() + self.assertEqual(len(c.metadata.all_hosts()), 1) + c.shutdown() + + def test_address_translator_with_mixed_nodes(self): + """ + Test host address translation + + Uses a custom Address Translator to map ip's of non control_connection nodes to each other + Validates AddressTranslator invocation by ensuring that metadata for mapped hosts is also mapped + + @since 3.3 + @jira_ticket PYTHON-69 + @expected_result metadata for crossed hosts will also be crossed + + @test_category metadata + """ + adder_map = {'127.0.0.1': '127.0.0.1', '127.0.0.2': '127.0.0.3', '127.0.0.3': '127.0.0.2'} + lh_ad = LocalHostAdressTranslator(adder_map) + c = Cluster(address_translator=lh_ad) + c.connect() + for host in c.metadata.all_hosts(): + self.assertEqual(adder_map.get(str(host)), host.broadcast_address) From 97fcf03f91183be5f06fb0b2e1143dcc68c4be31 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 15 Apr 2016 16:31:57 -0500 Subject: [PATCH 0542/2005] PYTHON-301 Adding test for host release version --- tests/integration/standard/test_metadata.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 0c937f51bd..98c146e3a1 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -34,7 +34,7 @@ from cassandra.pool import Host from tests.integration import get_cluster, use_singledc, PROTOCOL_VERSION, get_server_versions, execute_until_pass, \ - BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, BasicExistingKeyspaceUnitTestCase, drop_keyspace_shutdown_cluster + BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, BasicExistingKeyspaceUnitTestCase, drop_keyspace_shutdown_cluster, CASSANDRA_VERSION from tests.unit.cython.utils import notcython @@ -65,6 +65,20 @@ def test_broadcast_listen_address(self): listen_addrs = [host.listen_address for host in self.cluster.metadata.all_hosts()] self.assertTrue(local_host in listen_addrs) + def test_host_release_version(self): + """ + Checks the hosts release version and validates that it is equal to the + Cassandra version we are using in our test harness. + + @since 3.3 + @jira_ticket PYTHON-301 + @expected_result host.release version should match our specified Cassandra version. + + @test_category metadata + """ + for host in self.cluster.metadata.all_hosts(): + self.assertEqual(host.release_version, CASSANDRA_VERSION) + class SchemaMetadataTests(BasicSegregatedKeyspaceUnitTestCase): From 98ba9be9bb83286d3915ce3d1d9ceb00f9f8c0ae Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 18 Apr 2016 14:17:22 -0500 Subject: [PATCH 0543/2005] 'match_hostname' ssl option PYTHON-296 --- cassandra/cluster.py | 8 ++++++++ cassandra/connection.py | 11 ++++++++++- 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index a9dacff457..672b258ae6 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -386,6 +386,14 @@ def auth_provider(self, value): a string pointing to the location of the CA certs file), and you probably want to specify ``ssl_version`` as ``ssl.PROTOCOL_TLSv1`` to match Cassandra's default protocol. + + .. versionchanged:: 3.3.0 + + In addition to ``wrap_socket`` kwargs, clients may also specify ``'match_hostname': True`` to verify the cert hostname + as outlined in RFC 2818 and RFC 6125. Note that this requires the certificate to be transferred, so + should almost always require the option ``'cert_reqs': ssl.CERT_REQUIRED``. Note also that this functionality was not built into + Python standard library until (2.7.9, 3.2). To enable this mechanism in earlier versions, patch ``ssl.match_hostname`` + with a custom or `back-ported function `_. """ sockopts = None diff --git a/cassandra/connection.py b/cassandra/connection.py index f292f4fab9..6a691cf732 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -249,7 +249,7 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self.host = host self.port = port self.authenticator = authenticator - self.ssl_options = ssl_options + self.ssl_options = ssl_options.copy() self.sockopts = sockopts self.compression = compression self.cql_version = cql_version @@ -261,6 +261,13 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self._requests = {} self._iobuf = io.BytesIO() + if ssl_options: + self._match_hostname = bool(self.ssl_options.pop('match_hostname', False)) + if self._match_hostname: + if not getattr(ssl, 'match_hostname', None): + raise RuntimeError("ssl_options specify 'match_hostname', but ssl.match_hostname is not provided. " + "Patch or upgrade Python to use this option.") + if protocol_version >= 3: self.max_request_id = (2 ** 15) - 1 # Don't fill the deque with 2**15 items right away. Start with 300 and add @@ -331,6 +338,8 @@ def _connect_socket(self): self._socket = self._ssl_impl.wrap_socket(self._socket, **self.ssl_options) self._socket.settimeout(self.connect_timeout) self._socket.connect(sockaddr) + if self._match_hostname: + ssl.match_hostname(self._socket.getpeercert(), self.host) sockerr = None break except socket.error as err: From ca28ae0ac26791a6698823a31d93d0c5d45b7f75 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 18 Apr 2016 14:36:19 -0500 Subject: [PATCH 0544/2005] fix connection for non-ssl paths PYTHON0-296 --- cassandra/connection.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index 6a691cf732..a4cddd21e4 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -242,6 +242,8 @@ class Connection(object): _socket_impl = socket _ssl_impl = ssl + _match_hostname = False + def __init__(self, host='127.0.0.1', port=9042, authenticator=None, ssl_options=None, sockopts=None, compression=True, cql_version=None, protocol_version=MAX_SUPPORTED_VERSION, is_control_connection=False, @@ -249,7 +251,7 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self.host = host self.port = port self.authenticator = authenticator - self.ssl_options = ssl_options.copy() + self.ssl_options = ssl_options.copy() if ssl_options else None self.sockopts = sockopts self.compression = compression self.cql_version = cql_version From 5ecb8b167c55b6fd94ed29560f7ecb22248e1647 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 19 Apr 2016 09:52:32 -0500 Subject: [PATCH 0545/2005] pass prev value to counter update clause PYTHON-547 --- cassandra/cqlengine/statements.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index b923e38d34..3867704a77 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -804,7 +804,7 @@ def add_update(self, column, value, operation=None, previous=None): previous = column.to_database(previous) clause = container_update_type(column.db_field_name, value, operation, previous) elif col_type == columns.Counter: - clause = CounterUpdateClause(column.db_field_name, value) + clause = CounterUpdateClause(column.db_field_name, value, previous) else: clause = AssignmentClause(column.db_field_name, value) if clause.get_context_size(): # this is to exclude map removals from updates. Can go away if we drop support for C* < 1.2.4 and remove two-phase updates From e39c62a6731e5bd245254e81bffd6c7607799f39 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 19 Apr 2016 10:02:09 -0500 Subject: [PATCH 0546/2005] test for counter save after no update PYTHON-547 --- .../cqlengine/columns/test_counter_column.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tests/integration/cqlengine/columns/test_counter_column.py b/tests/integration/cqlengine/columns/test_counter_column.py index 11e5c1bd91..addfd95a1f 100644 --- a/tests/integration/cqlengine/columns/test_counter_column.py +++ b/tests/integration/cqlengine/columns/test_counter_column.py @@ -112,3 +112,19 @@ def test_new_instance_defaults_to_zero(self): instance = TestCounterModel() assert instance.counter == 0 + def test_save_after_no_update(self): + expected_value = 15 + instance = TestCounterModel.create() + instance.update(counter=expected_value) + + # read back + instance = TestCounterModel.get(partition=instance.partition) + self.assertEqual(instance.counter, expected_value) + + # save after doing nothing + instance.save() + self.assertEqual(instance.counter, expected_value) + + # make sure there was no increment + instance = TestCounterModel.get(partition=instance.partition) + self.assertEqual(instance.counter, expected_value) From 000d239e51f37fc948522d63177f693b651f3e04 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 19 Apr 2016 10:05:25 -0500 Subject: [PATCH 0547/2005] 3.2.2 patch release --- CHANGELOG.rst | 8 +++++++- cassandra/__init__.py | 2 +- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index e0ba17fd3d..a3618ca1db 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,8 +1,14 @@ +3.2.2 +===== +April 19, 2016 + +* Fix counter save-after-no-update (PYTHON-547) + 3.2.1 ===== April 13, 2016 -* Introduced an update to allow deserializer compilation with recently released Cython 0.24 +* Introduced an update to allow deserializer compilation with recently released Cython 0.24 (PYTHON-542) 3.2.0 ===== diff --git a/cassandra/__init__.py b/cassandra/__init__.py index ac4b6fb535..c93058b049 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 2, 1) +__version_info__ = (3, 2, 2) __version__ = '.'.join(map(str, __version_info__)) From f42c26d842f3a9c5980f1a992597d94d56173789 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 19 Apr 2016 16:18:48 -0500 Subject: [PATCH 0548/2005] post-release version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index c93058b049..fba9cd9efc 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 2, 2) +__version_info__ = (3, 2, 2, 'post0') __version__ = '.'.join(map(str, __version_info__)) From 36028790d0d69e3ea52d55d87442e3e68282134f Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 20 Apr 2016 15:29:46 -0500 Subject: [PATCH 0549/2005] PYTHON-435 adding test for trace consistency levels --- tests/integration/standard/test_query.py | 34 +++++++++++++++++++++--- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 51ab1f514d..dff9715a4f 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -20,13 +20,13 @@ except ImportError: import unittest # noqa -from cassandra import ConsistencyLevel +from cassandra import ConsistencyLevel, Unavailable, InvalidRequest from cassandra.query import (PreparedStatement, BoundStatement, SimpleStatement, BatchStatement, BatchType, dict_factory, TraceUnavailable) -from cassandra.cluster import Cluster +from cassandra.cluster import Cluster, NoHostAvailable from cassandra.policies import HostDistance -from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, get_server_versions, greaterthanprotocolv3 +from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, get_server_versions, greaterthanprotocolv3, get_node import time import re @@ -109,7 +109,7 @@ def test_client_ip_in_trace(self): only be the case if the c* version is 2.2 or greater @since 2.6.0 - @jira_ticket PYTHON-235 + @jira_ticket PYTHON-435 @expected_result client address should be present in C* >= 2.2, otherwise should be none. @test_category tracing @@ -138,6 +138,32 @@ def test_client_ip_in_trace(self): self.assertIsNotNone(client_ip, "Client IP was not set in trace with C* >= 2.2") self.assertTrue(pat.match(client_ip), "Client IP from trace did not match the expected value") + def test_trace_cl(self): + """ + Test to ensure that CL is set correctly honored when executing trace queries. + + @since 3.3 + @jira_ticket PYTHON-435 + @expected_result Consistency Levels set on get_query_trace should be honored + """ + # Execute a query + query = "SELECT * FROM system.local" + statement = SimpleStatement(query) + response_future = self.session.execute_async(statement, trace=True) + response_future.result() + with self.assertRaises(Unavailable): + response_future.get_query_trace(query_cl=ConsistencyLevel.THREE) + # Try again with a smattering of other CL's + self.assertIsNotNone(response_future.get_query_trace(max_wait=2.0, query_cl=ConsistencyLevel.TWO).trace_id) + response_future = self.session.execute_async(statement, trace=True) + response_future.result() + self.assertIsNotNone(response_future.get_query_trace(max_wait=2.0, query_cl=ConsistencyLevel.ONE).trace_id) + response_future = self.session.execute_async(statement, trace=True) + response_future.result() + with self.assertRaises(InvalidRequest): + self.assertIsNotNone(response_future.get_query_trace(max_wait=2.0, query_cl=ConsistencyLevel.ANY).trace_id) + self.assertIsNotNone(response_future.get_query_trace(max_wait=2.0, query_cl=ConsistencyLevel.QUORUM).trace_id) + def test_incomplete_query_trace(self): """ Tests to ensure that partial tracing works. From 90134e142845352092c4bc41e28851db1bdee95e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 20 Apr 2016 15:30:52 -0500 Subject: [PATCH 0550/2005] only add local token to the map if it's still in the ring addresses an issue which would cause a None key in the map if the topo change triggering this event was removing the control connection host PYTHON-548 --- cassandra/cluster.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index d7bc95dbf2..641528ffc3 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2356,6 +2356,9 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, cluster_name = local_row["cluster_name"] self._cluster.metadata.cluster_name = cluster_name + partitioner = local_row.get("partitioner") + tokens = local_row.get("tokens") + host = self._cluster.metadata.get_host(connection.host) if host: datacenter = local_row.get("data_center") @@ -2365,10 +2368,8 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, host.broadcast_address = local_row.get("broadcast_address") host.release_version = local_row.get("release_version") - partitioner = local_row.get("partitioner") - tokens = local_row.get("tokens") - if partitioner and tokens: - token_map[host] = tokens + if partitioner and tokens: + token_map[host] = tokens # Check metadata.partitioner to see if we haven't built anything yet. If # every node in the cluster was in the contact points, we won't discover From b3149e69e3764c428206d6a97381230bec1c6e66 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 20 Apr 2016 15:33:51 -0500 Subject: [PATCH 0551/2005] proactively reconnect when the control conn host is removed --- cassandra/cluster.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 641528ffc3..b84fa59055 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2610,7 +2610,13 @@ def on_add(self, host, refresh_nodes=True): self.refresh_node_list_and_token_map(force_token_rebuild=True) def on_remove(self, host): - self.refresh_node_list_and_token_map(force_token_rebuild=True) + c = self._connection + if c and c.host == host.address: + log.debug("[control connection] Control connection host (%s) is being removed. Reconnecting", host) + # refresh will be done on reconnect + self.reconnect() + else: + self.refresh_node_list_and_token_map(force_token_rebuild=True) def get_connections(self): c = getattr(self, '_connection', None) From 876965a877c0a3a222bbc401e3bb4811b9b75469 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 20 Apr 2016 16:00:28 -0500 Subject: [PATCH 0552/2005] PYTHON-443 unit test to verify exception types --- tests/unit/test_cluster.py | 57 +++++++++++++++++++++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-) diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index ec3516ca66..3c4f477f72 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -19,11 +19,66 @@ from mock import patch, Mock -from cassandra import ConsistencyLevel +from cassandra import ConsistencyLevel, DriverException, Timeout, Unavailable, RequestExecutionException, ReadTimeout, WriteTimeout, CoordinationFailure, ReadFailure, WriteFailure, FunctionFailure, AlreadyExists,\ + InvalidRequest, Unauthorized, AuthenticationFailed, OperationTimedOut, UnsupportedOperation, RequestValidationException, ConfigurationException from cassandra.cluster import _Scheduler, Session, Cluster from cassandra.query import SimpleStatement +class ExceptionTypeTest(unittest.TestCase): + + def test_exception_types(self): + """ + PYTHON-443 + Sanity check to ensure we don't unintentionally change class hierarchy of exception types + """ + self.assertTrue(issubclass(Unavailable, DriverException)) + self.assertTrue(issubclass(Unavailable, RequestExecutionException)) + + self.assertTrue(issubclass(ReadTimeout, DriverException)) + self.assertTrue(issubclass(ReadTimeout, RequestExecutionException)) + self.assertTrue(issubclass(ReadTimeout, Timeout)) + + self.assertTrue(issubclass(WriteTimeout, DriverException)) + self.assertTrue(issubclass(WriteTimeout, RequestExecutionException)) + self.assertTrue(issubclass(WriteTimeout, Timeout)) + + self.assertTrue(issubclass(CoordinationFailure, DriverException)) + self.assertTrue(issubclass(CoordinationFailure, RequestExecutionException)) + + self.assertTrue(issubclass(ReadFailure, DriverException)) + self.assertTrue(issubclass(ReadFailure, RequestExecutionException)) + self.assertTrue(issubclass(ReadFailure, CoordinationFailure)) + + self.assertTrue(issubclass(WriteFailure, DriverException)) + self.assertTrue(issubclass(WriteFailure, RequestExecutionException)) + self.assertTrue(issubclass(WriteFailure, CoordinationFailure)) + + self.assertTrue(issubclass(FunctionFailure, DriverException)) + self.assertTrue(issubclass(FunctionFailure, RequestExecutionException)) + + self.assertTrue(issubclass(RequestValidationException, DriverException)) + + self.assertTrue(issubclass(ConfigurationException, DriverException)) + self.assertTrue(issubclass(ConfigurationException, RequestValidationException)) + + self.assertTrue(issubclass(AlreadyExists, DriverException)) + self.assertTrue(issubclass(AlreadyExists, RequestValidationException)) + self.assertTrue(issubclass(AlreadyExists, ConfigurationException)) + + self.assertTrue(issubclass(InvalidRequest, DriverException)) + self.assertTrue(issubclass(InvalidRequest, RequestValidationException)) + + self.assertTrue(issubclass(Unauthorized, DriverException)) + self.assertTrue(issubclass(Unauthorized, RequestValidationException)) + + self.assertTrue(issubclass(AuthenticationFailed, DriverException)) + + self.assertTrue(issubclass(OperationTimedOut, DriverException)) + + self.assertTrue(issubclass(UnsupportedOperation, DriverException)) + + class ContactListTest(unittest.TestCase): def test_invalid_types(self, *args): From 98074db5b786fef8b97b440c41c392299e2a1a31 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 20 Apr 2016 16:35:58 -0500 Subject: [PATCH 0553/2005] make token mapping robust against unexpected state --- cassandra/metadata.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index e9bf0b4b6e..f7337d8706 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1397,12 +1397,18 @@ def __init__(self, token_class, token_to_host_owner, all_tokens, metadata): def rebuild_keyspace(self, keyspace, build_if_absent=False): with self._rebuild_lock: - current = self.tokens_to_hosts_by_ks.get(keyspace, None) - if (build_if_absent and current is None) or (not build_if_absent and current is not None): - ks_meta = self._metadata.keyspaces.get(keyspace) - if ks_meta: - replica_map = self.replica_map_for_keyspace(self._metadata.keyspaces[keyspace]) - self.tokens_to_hosts_by_ks[keyspace] = replica_map + try: + current = self.tokens_to_hosts_by_ks.get(keyspace, None) + if (build_if_absent and current is None) or (not build_if_absent and current is not None): + ks_meta = self._metadata.keyspaces.get(keyspace) + if ks_meta: + replica_map = self.replica_map_for_keyspace(self._metadata.keyspaces[keyspace]) + self.tokens_to_hosts_by_ks[keyspace] = replica_map + except Exception: + # should not happen normally, but we don't want to blow up queries because of unexpected meta state + # bypass until new map is generated + self.tokens_to_hosts_by_ks[keyspace] = {} + log.exception("Failed creating a token map for keyspace '%s' with %s. PLEASE REPORT THIS: https://datastax-oss.atlassian.net/projects/PYTHON", keyspace, self.token_to_host_owner) def replica_map_for_keyspace(self, ks_metadata): strategy = ks_metadata.replication_strategy From 097e3efbed216ababb5b2b5b36c7bc4fa1c9074e Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 14 Apr 2016 10:51:57 -0500 Subject: [PATCH 0554/2005] PYTHON-543 Forced cython validation --- build.yaml | 9 +++++---- tests/integration/__init__.py | 9 +++++++++ .../standard/test_cython_protocol_handlers.py | 18 ++++++++++++++++-- tests/unit/cython/utils.py | 5 +++-- 4 files changed, 33 insertions(+), 8 deletions(-) diff --git a/build.yaml b/build.yaml index ee7dd4d94f..e9f4b0c863 100644 --- a/build.yaml +++ b/build.yaml @@ -27,8 +27,9 @@ build: fi pip install -r test-requirements.txt pip install nose-ignore-docstring - + FORCE_CYTHON=False if [[ $CYTHON == 'CYTHON' ]]; then + FORCE_CYTHON=True pip install cython pip install numpy # Install the driver & compile C extensions @@ -39,12 +40,12 @@ build: fi echo "==========RUNNING CQLENGINE TESTS==========" - CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true + CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true echo "==========RUNNING INTEGRATION TESTS==========" - CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true + CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true echo "==========RUNNING LONG INTEGRATION TESTS==========" - CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true + CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true - xunit: - "*_results.xml" diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 4b3afaa045..178c1a5a82 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -86,6 +86,15 @@ def _tuple_version(version_string): USE_CASS_EXTERNAL = bool(os.getenv('USE_CASS_EXTERNAL', False)) +# If set to to true this will force the Cython tests to run regardless of whether they are installed +cython_env = os.getenv('VERIFY_CYTHON', "False") + + +VERIFY_CYTHON = False + +if(cython_env == 'True'): + VERIFY_CYTHON = True + default_cassandra_version = '2.2.0' diff --git a/tests/integration/standard/test_cython_protocol_handlers.py b/tests/integration/standard/test_cython_protocol_handlers.py index dc24d0a3e6..53457c21da 100644 --- a/tests/integration/standard/test_cython_protocol_handlers.py +++ b/tests/integration/standard/test_cython_protocol_handlers.py @@ -10,8 +10,8 @@ from cassandra.query import tuple_factory from cassandra.cluster import Cluster from cassandra.protocol import ProtocolHandler, LazyProtocolHandler, NumpyProtocolHandler - -from tests.integration import use_singledc, PROTOCOL_VERSION, notprotocolv1, drop_keyspace_shutdown_cluster +from cassandra.cython_deps import HAVE_CYTHON, HAVE_NUMPY +from tests.integration import use_singledc, PROTOCOL_VERSION, notprotocolv1, drop_keyspace_shutdown_cluster, VERIFY_CYTHON from tests.integration.datatype_utils import update_datatypes from tests.integration.standard.utils import ( create_table_with_all_types, get_all_primitive_params, get_primitive_datatypes) @@ -123,6 +123,20 @@ def test_numpy_results_paged(self): cluster.shutdown() + @numpytest + def test_cython_numpy_are_installed_valid(self): + """ + Test to validate that cython and numpy are installed correctly + @since 3.3.0 + @jira_ticket PYTHON-543 + @expected_result Cython and Numpy should be present + + @test_category configuration + """ + if VERIFY_CYTHON: + self.assertTrue(HAVE_CYTHON) + self.assertTrue(HAVE_NUMPY) + def _verify_numpy_page(self, page): colnames = self.colnames datatypes = get_primitive_datatypes() diff --git a/tests/unit/cython/utils.py b/tests/unit/cython/utils.py index 6dbeb15c06..916de13763 100644 --- a/tests/unit/cython/utils.py +++ b/tests/unit/cython/utils.py @@ -13,6 +13,7 @@ # limitations under the License. from cassandra.cython_deps import HAVE_CYTHON, HAVE_NUMPY +from tests.integration import VERIFY_CYTHON try: import unittest2 as unittest @@ -34,6 +35,6 @@ def cyimport(import_path): # @cythontest # def test_something(self): ... -cythontest = unittest.skipUnless(HAVE_CYTHON, 'Cython is not available') +cythontest = unittest.skipUnless((HAVE_CYTHON or VERIFY_CYTHON) or VERIFY_CYTHON, 'Cython is not available') notcython = unittest.skipIf(HAVE_CYTHON, 'Cython not supported') -numpytest = unittest.skipUnless(HAVE_CYTHON and HAVE_NUMPY, 'NumPy is not available') +numpytest = unittest.skipUnless((HAVE_CYTHON and HAVE_NUMPY) or VERIFY_CYTHON, 'NumPy is not available') From d33376fde98cbf5ff0304a179852f1c9753d5719 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 21 Apr 2016 10:20:49 -0500 Subject: [PATCH 0555/2005] Allow for snapshot server versions in host metadata test --- tests/integration/standard/test_metadata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 98c146e3a1..5877842f5c 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -77,7 +77,7 @@ def test_host_release_version(self): @test_category metadata """ for host in self.cluster.metadata.all_hosts(): - self.assertEqual(host.release_version, CASSANDRA_VERSION) + self.assertTrue(host.release_version.startswith(CASSANDRA_VERSION)) class SchemaMetadataTests(BasicSegregatedKeyspaceUnitTestCase): From 4fa2312169860cf1aac8a64ab152a86dfffa503e Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 20 Apr 2016 12:21:11 -0500 Subject: [PATCH 0556/2005] PYTHON-327 Adding tests for skipping metadata population --- tests/integration/standard/test_metadata.py | 30 +++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 5877842f5c..fdcb9b862b 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -82,6 +82,36 @@ def test_host_release_version(self): class SchemaMetadataTests(BasicSegregatedKeyspaceUnitTestCase): + def test_schema_metadata_disable(self): + """ + Checks to ensure that schema metadata_enabled, and token_metadata_enabled + flags work correctly. + + @since 3.3 + @jira_ticket PYTHON-327 + @expected_result schema metadata will not be populated when schema_metadata_enabled is fause + token_metadata will be missing when token_metadata is set to false + + @test_category metadata + """ + # Validate metadata is missing where appropriate + no_schema = Cluster(schema_metadata_enabled=False) + no_schema_session = no_schema.connect() + self.assertEqual(len(no_schema.metadata.keyspaces), 0) + self.assertEqual(no_schema.metadata.export_schema_as_string(), '') + no_token = Cluster(token_metadata_enabled=False) + no_token_session = no_token.connect() + self.assertEqual(len(no_token.metadata.token_map.token_to_host_owner), 0) + + # Do a simple query to ensure queries are working + query = "SELECT * FROM system.local" + no_schema_rs = no_schema_session.execute(query) + no_token_rs = no_token_session.execute(query) + self.assertIsNotNone(no_schema_rs[0]) + self.assertIsNotNone(no_token_rs[0]) + no_schema.shutdown() + no_token.shutdown() + def make_create_statement(self, partition_cols, clustering_cols=None, other_cols=None, compact=False): clustering_cols = clustering_cols or [] other_cols = other_cols or [] From da977afe92eb74ace187cf42836644512c219085 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 21 Apr 2016 15:16:03 -0500 Subject: [PATCH 0557/2005] weakref cluster arg in exit reg prevents cluster ref leak when cycling Clusters PYTHON-135 --- cassandra/cluster.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index d7bc95dbf2..ebc78b1f0d 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -167,8 +167,11 @@ def new_f(self, *args, **kwargs): def _shutdown_cluster(cluster): - if cluster and not cluster.is_shutdown: - cluster.shutdown() + try: + if not cluster.is_shutdown: + cluster.shutdown() + except ReferenceError: + pass # murmur3 implementation required for TokenAware is only available for CPython @@ -887,7 +890,7 @@ def connect(self, keyspace=None): log.debug("Connecting to cluster, contact points: %s; protocol version: %s", self.contact_points, self.protocol_version) self.connection_class.initialize_reactor() - atexit.register(partial(_shutdown_cluster, self)) + atexit.register(partial(_shutdown_cluster, weakref.proxy(self))) for address in self.contact_points_resolved: host, new = self.add_host(address, signal=False) if new: From d0ff842862943c345cc917652b8fa5dddfa4da61 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 21 Apr 2016 15:41:41 -0500 Subject: [PATCH 0558/2005] don't register cluster scheduler for separate exit function PYTHON-135 --- cassandra/cluster.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index ebc78b1f0d..99c6f1df6a 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2633,7 +2633,7 @@ def _stop_scheduler(scheduler, thread): thread.join() -class _Scheduler(object): +class _Scheduler(Thread): _queue = None _scheduled_tasks = None @@ -2646,13 +2646,9 @@ def __init__(self, executor): self._count = count() self._executor = executor - t = Thread(target=self.run, name="Task Scheduler") - t.daemon = True - t.start() - - # although this runs on a daemonized thread, we prefer to stop - # it gracefully to avoid random errors during interpreter shutdown - atexit.register(partial(_stop_scheduler, weakref.proxy(self), t)) + Thread.__init__(self, name="Task Scheduler") + self.daemon = True + self.start() def shutdown(self): try: @@ -2662,6 +2658,7 @@ def shutdown(self): pass self.is_shutdown = True self._queue.put_nowait((0, 0, None)) + self.join() def schedule(self, delay, fn, *args, **kwargs): self._insert_task(delay, (fn, args, tuple(kwargs.items()))) @@ -2690,7 +2687,8 @@ def run(self): while True: run_at, i, task = self._queue.get(block=True, timeout=None) if self.is_shutdown: - log.debug("Not executing scheduled task due to Scheduler shutdown") + if task: + log.debug("Not executing scheduled task due to Scheduler shutdown") return if run_at <= time.time(): self._scheduled_tasks.discard(task) From b8dc51b4a667b8cab0fd5757708ea1d6c68b214c Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 21 Apr 2016 15:45:32 -0500 Subject: [PATCH 0559/2005] PYTHON-467 test for duplicate names --- .../standard/test_row_factories.py | 48 ++++++++++++++++++- 1 file changed, 47 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_row_factories.py b/tests/integration/standard/test_row_factories.py index a18646489b..df9765ee78 100644 --- a/tests/integration/standard/test_row_factories.py +++ b/tests/integration/standard/test_row_factories.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCaseWFunctionTable +from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCaseWFunctionTable, BasicSharedKeyspaceUnitTestCase, execute_until_pass try: import unittest2 as unittest @@ -28,6 +28,52 @@ def setup_module(): use_singledc() +class NameTupleFactory(BasicSharedKeyspaceUnitTestCase): + + def setUp(self): + super(NameTupleFactory, self).setUp() + self.session.row_factory = named_tuple_factory + ddl = ''' + CREATE TABLE {0}.{1} ( + k int PRIMARY KEY, + v1 text, + v2 text, + v3 text)'''.format(self.ks_name, self.function_table_name) + self.session.execute(ddl) + execute_until_pass(self.session, ddl) + + def test_sanitizing(self): + """ + Test to ensure that same named results are surfaced in the NamedTupleFactory + + Creates a table with a few different text fields. Inserts a few values in that table. + It then fetches the values and confirms that despite all be being selected as the same name + they are propagated in the result set differently. + + @since 3.3 + @jira_ticket PYTHON-467 + @expected_result duplicate named results have unique row names. + + @test_category queries + """ + + for x in range(5): + insert1 = ''' + INSERT INTO {0}.{1} + ( k , v1, v2, v3 ) + VALUES + ( 1 , 'v1{2}', 'v2{2}','v3{2}' ) + '''.format(self.keyspace_name, self.function_table_name, str(x)) + self.session.execute(insert1) + + query = "SELECT v1 AS duplicate, v2 AS duplicate, v3 AS duplicate from {0}.{1}".format(self.ks_name, self.function_table_name) + rs = self.session.execute(query) + row = rs[0] + self.assertTrue(hasattr(row, 'duplicate')) + self.assertTrue(hasattr(row, 'duplicate_')) + self.assertTrue(hasattr(row, 'duplicate__')) + + class RowFactoryTests(BasicSharedKeyspaceUnitTestCaseWFunctionTable): """ Test different row_factories and access code From 6910bf7d93cfb947b5d18493217b728a25c681f3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 21 Apr 2016 16:51:13 -0500 Subject: [PATCH 0560/2005] single atexit shutdown function prevents growing the atexit function list indefinitely while cycling clusters PYTHON-135 --- cassandra/cluster.py | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 99c6f1df6a..6c8e69e94d 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -27,7 +27,6 @@ import sys import time from threading import Lock, RLock, Thread, Event -import warnings import six from six.moves import range @@ -166,12 +165,23 @@ def new_f(self, *args, **kwargs): return new_f -def _shutdown_cluster(cluster): - try: - if not cluster.is_shutdown: - cluster.shutdown() - except ReferenceError: - pass +_clusters_for_shutdown = set() + + +def _register_cluster_shutdown(cluster): + _clusters_for_shutdown.add(cluster) + + +def _discard_cluster_shutdown(cluster): + _clusters_for_shutdown.discard(cluster) + + +def _shutdown_clusters(): + clusters = _clusters_for_shutdown.copy() # copy because shutdown modifies the global set "discard" + for cluster in clusters: + cluster.shutdown() + +atexit.register(_shutdown_clusters) # murmur3 implementation required for TokenAware is only available for CPython @@ -890,7 +900,7 @@ def connect(self, keyspace=None): log.debug("Connecting to cluster, contact points: %s; protocol version: %s", self.contact_points, self.protocol_version) self.connection_class.initialize_reactor() - atexit.register(partial(_shutdown_cluster, weakref.proxy(self))) + _register_cluster_shutdown(self) for address in self.contact_points_resolved: host, new = self.add_host(address, signal=False) if new: @@ -954,6 +964,8 @@ def shutdown(self): self.executor.shutdown() + _discard_cluster_shutdown(self) + def _new_session(self): session = Session(self, self.metadata.all_hosts()) self._session_register_user_types(session) From da994dcd3ad76f3eacae367b87f345f2ccc7a1c4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 22 Apr 2016 09:25:36 -0500 Subject: [PATCH 0561/2005] Explicitly state errors originating from server PYTHON-412 --- cassandra/protocol.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index ede6cc5869..a2432a6e07 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -134,12 +134,12 @@ def recv_body(cls, f, protocol_version, user_type_map): return subcls(code=code, message=msg, info=extra_info) def summary_msg(self): - msg = 'code=%04x [%s] message="%s"' \ + msg = 'Error from server: code=%04x [%s] message="%s"' \ % (self.code, self.summary, self.message) return msg def __str__(self): - return '' % self.summary_msg() + return '<%s>' % self.summary_msg() __repr__ = __str__ @staticmethod @@ -544,6 +544,7 @@ def send_body(self, f, protocol_version): if self.timestamp is not None: write_long(f, self.timestamp) + CUSTOM_TYPE = object() RESULT_KIND_VOID = 0x0001 @@ -552,6 +553,7 @@ def send_body(self, f, protocol_version): RESULT_KIND_PREPARED = 0x0004 RESULT_KIND_SCHEMA_CHANGE = 0x0005 + class ResultMessage(_MessageType): opcode = 0x08 name = 'RESULT' From f72b207c3c1c0e8388e8ef1addbcb4319cd72fec Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 22 Apr 2016 09:56:37 -0500 Subject: [PATCH 0562/2005] Improve warning message when protocol downgrade is used also don't log error when we're defuncting for unsupported protocol PYTHON-157 --- cassandra/cluster.py | 4 +++- cassandra/connection.py | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index d7bc95dbf2..a821a74ae7 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -868,7 +868,9 @@ def protocol_downgrade(self, host_addr, previous_version): new_version = previous_version - 1 if new_version < self.protocol_version: if new_version >= MIN_SUPPORTED_VERSION: - log.warning("Downgrading core protocol version from %d to %d for %s", self.protocol_version, new_version, host_addr) + log.warning("Downgrading core protocol version from %d to %d for %s. " + "To avoid this, it is best practice to explicitly set Cluster(protocol_version) to the version supported by your cluster. " + "http://datastax.github.io/python-driver/api/cassandra/cluster.html#cassandra.cluster.Cluster.protocol_version", self.protocol_version, new_version, host_addr) self.protocol_version = new_version else: raise DriverException("Cannot downgrade protocol version (%d) below minimum supported version: %d" % (new_version, MIN_SUPPORTED_VERSION)) diff --git a/cassandra/connection.py b/cassandra/connection.py index 2b13eebff4..8d9ba0e539 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -601,8 +601,8 @@ def process_msg(self, header, body): if isinstance(response, ProtocolException): if 'unsupported protocol version' in response.message: self.is_unsupported_proto_version = True - - log.error("Closing connection %s due to protocol error: %s", self, response.summary_msg()) + else: + log.error("Closing connection %s due to protocol error: %s", self, response.summary_msg()) self.defunct(response) if callback is not None: callback(response) From aad27c813bb0df76bf6d56b2ecf6f6178123aefc Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 22 Apr 2016 12:37:36 -0500 Subject: [PATCH 0563/2005] PYTHON-514 adding test for request id blocking --- tests/integration/standard/test_connection.py | 42 ++++++++++++++++++- 1 file changed, 40 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 51dd11a74b..be2fe1187c 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -24,12 +24,15 @@ import time from cassandra import ConsistencyLevel, OperationTimedOut -from cassandra.cluster import NoHostAvailable +from cassandra.cluster import NoHostAvailable, Cluster from cassandra.io.asyncorereactor import AsyncoreConnection from cassandra.protocol import QueryMessage +from cassandra.connection import Connection +from cassandra.policies import WhiteListRoundRobinPolicy +from cassandra.query import SimpleStatement from tests import is_monkey_patched -from tests.integration import use_singledc, PROTOCOL_VERSION +from tests.integration import use_singledc, PROTOCOL_VERSION, execute_with_long_wait_retry try: from cassandra.io.libevreactor import LibevConnection @@ -41,6 +44,41 @@ def setup_module(): use_singledc() +class ConnectionTimeoutTest(unittest.TestCase): + + def setUp(self): + self.defaultInFlight = Connection.max_in_flight + Connection.max_in_flight = 2 + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1'])) + self.session = self.cluster.connect() + + def tearDown(self): + Connection.max_in_flight = self.defaultInFlight + self.cluster.shutdown() + + def testInFlightTimeout(self): + """ + Test to ensure that connection id fetching will block when max_id is reached/ + + In previous versions of the driver this test will cause a + NoHostAvailable exception to be thrown, when the max_id is restricted + + @since 3.3 + @jira_ticket PYTHON-514 + @expected_result When many requests are run on a single node connection acquisition should block + until connection is available or the request times out. + + @test_category connection timeout + """ + futures = [] + query = '''SELECT * FROM system.local''' + for i in range(100): + futures.append(self.session.execute_async(query)) + + for future in futures: + future.result() + + class ConnectionTests(object): klass = None From ee47706c0f23720de6ad71579c18ac91fa0211ea Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 19 Apr 2016 13:14:16 -0500 Subject: [PATCH 0564/2005] PYTHON-269 Add tests for hostname validation --- tests/integration/long/ssl/.keystore | Bin 0 -> 2287 bytes tests/integration/long/ssl/.truststore | Bin 0 -> 997 bytes tests/integration/long/ssl/cassandra.crt | Bin 626 -> 0 bytes tests/integration/long/ssl/cassandra.pem | 19 +++ tests/integration/long/ssl/driver.crt | Bin 0 -> 931 bytes tests/integration/long/ssl/driver.key | 27 ++++ tests/integration/long/ssl/driver.pem | 19 +++ tests/integration/long/ssl/driver_ca_cert.pem | 16 -- tests/integration/long/ssl/python_driver.crt | Bin 947 -> 0 bytes tests/integration/long/ssl/python_driver.jks | Bin 2306 -> 0 bytes tests/integration/long/ssl/python_driver.key | 34 ---- tests/integration/long/ssl/python_driver.pem | 19 --- .../long/ssl/python_driver_bad.pem | 19 --- .../long/ssl/python_driver_no_pass.key | 27 ---- tests/integration/long/ssl/server_cert.pem | 13 -- .../integration/long/ssl/server_keystore.jks | Bin 1398 -> 0 bytes tests/integration/long/ssl/server_trust.jks | Bin 1677 -> 0 bytes tests/integration/long/test_ssl.py | 149 +++++++++++------- tests/integration/standard/test_connection.py | 5 +- 19 files changed, 156 insertions(+), 191 deletions(-) create mode 100644 tests/integration/long/ssl/.keystore create mode 100644 tests/integration/long/ssl/.truststore delete mode 100644 tests/integration/long/ssl/cassandra.crt create mode 100644 tests/integration/long/ssl/cassandra.pem create mode 100644 tests/integration/long/ssl/driver.crt create mode 100644 tests/integration/long/ssl/driver.key create mode 100644 tests/integration/long/ssl/driver.pem delete mode 100644 tests/integration/long/ssl/driver_ca_cert.pem delete mode 100644 tests/integration/long/ssl/python_driver.crt delete mode 100644 tests/integration/long/ssl/python_driver.jks delete mode 100644 tests/integration/long/ssl/python_driver.key delete mode 100644 tests/integration/long/ssl/python_driver.pem delete mode 100644 tests/integration/long/ssl/python_driver_bad.pem delete mode 100644 tests/integration/long/ssl/python_driver_no_pass.key delete mode 100644 tests/integration/long/ssl/server_cert.pem delete mode 100644 tests/integration/long/ssl/server_keystore.jks delete mode 100644 tests/integration/long/ssl/server_trust.jks diff --git a/tests/integration/long/ssl/.keystore b/tests/integration/long/ssl/.keystore new file mode 100644 index 0000000000000000000000000000000000000000..58ab9696b565276d128f5c4070953bc4591affc1 GIT binary patch literal 2287 zcmc(g`8(8$7sqGD%rJIlY*|Jqgz=fN6QjYsQuZ|?nT+haJGSfEh3xyDLMS2Blr~Fb zkfq2Lg=F6cH)>qn=ljF`55CWN-ankz`Q<$CbDnb;>x^{}2n0D1_+KHMNr8bRA94Wc zgjgwxy&469U=Sb%$^bawta5NL6nqZC0|vuD&=}}Sc(1)<544v3b!1jau5RL{^rhg~rjBt=qlhn6&Qnq7 zP3w3|CIvi98S)BeX9gn->6%KveJZ`?m4kSAyYDRglxc|LV=m$}o?bA(+f6N8H5H78 zcjkQUoM>N|HC~919=zAbZdA2XD>0u`Iw|a^MOf^)KlM%?VJjneTAPod6SAX_7I020 z+DoZWJoHC!pIeGtvy>N|QZnRcJDTuudp0_Nsz1k=P;^ruwm4oSZr)W_>3it*mNiqJ zp!>AhSZq@JZ&#<<7eKGVWpkmUzt?Lq$?GeGv>;YG6x%z~;Rfm!V*mh*8U-$E; z)D!}3EWBE>G2l^^xt|xznW52fOKb&kR&F;jJ=T`g1Z&qHR+?+Q<3E1>lGn?-IJfgd zv>0ErL+-=G-}v=W#W`>8k%hIzUr&Cs8q8+|jedR{Il$sfU|85b$~6&_Z7uaPC7Nw= z?lM0^3ktkaGit~>YH#R;el)4m5JJ*%U6lO+OMLXHO6#uO%pe&_jddR7RD5ex_{cHm zs-#-@la~Ag%pA-2A+<&$6CcE^>0jt!fxCiLN(PCy>Wq3)jq01P z(mVzWcP%&Q+uMuHy9RuDy^DbhUz@8A9v8OgY`c(4tJ}IPNT%DbXWS}9yR#D)JB}OL z><4+2DO#nxQKr#n)-+^3XSPbn{_L>(9=0f`khT@lpO;dbLt;;`-gLG%R}6yP5>;v! zaF5TPYs%;nY~thZrq}UN-^EWE&Wj&t&{a2Wp$$*EBHAKCe`LrUXy}N zb9uT0)^$>UqIhZJ0cyvRiZ7VzIzRnwSY$&e5FspeUxzXJ$FTbB`s&+T(H-g_sz=cbpazwJ@Ir8vN$@=&hd&-ruHdvp~xrpQio9Br>IO$G)+W&b39DwB0fJ08858zB&ZAJl_AtUsLtwIIhyc zK*d+6e_B%p)*{dI7GHhsO_{E)f3@SDauYh$EH2q)9jyQMyfS2sRc(@&i(o!BEbt-H zWJ-6r@S$qM~#-|21InZhlWPxLYlfcbA`rz>YC4t@i@tEiLKOHs@v*Vof14-FpK zmmIi6l*!p0zFog6vdY@o)R%)Z>-nPsC!(nB$$tr-cLl-(27xP1186|>WW1uBP<|*X za)?YxA^YHa*9rt`sg7UTgL%UKYvRf7otOv)g-1+|Uq=Qv4%$0)@Er-&i1&B}`;W4t z1NJhjWak~TLvU-W)VL1)4D5=Y8Cj!yqT+IpOF~);=&BxDM*jQT5Zi0E2bi(ch-H4} z@5U--@W=oF literal 0 HcmV?d00001 diff --git a/tests/integration/long/ssl/.truststore b/tests/integration/long/ssl/.truststore new file mode 100644 index 0000000000000000000000000000000000000000..80bbc1fd78aafafbe847717dfec764570e6eda68 GIT binary patch literal 997 zcmezO_TO6u1_mY|W(3om$%)0qiFqkSi9nGMeYxXi46G4)rUsS_49trSnwaMsG%>X= zU}j=uVq#J8I<>)omyJ`a&72>lsQK zh=atrg#`jU0u)?|GRsnn6hcyqONtGJ3 zY+@QE!Ea;;6fib2g9;416Pp;7kb{kpm4Ug5k)Oe!iIIz`iII_Ek!v)_$33i#Yx@a(BJ`ws+H2kCV>@4`@F- z-~0M^a>#4X`)Zjc!s5mgr-nq$<_g$y^swsJO<8qyYd6Su?-Q83l5ywRovBGH6Beav zWU!yU_V@EXxA%*7-Q6W#eZslG&~(PcbAsZt|J9Y>dAe+?Df?~vu=C%%Yy6J9KgYb4 ziJ6gsaj~L-yn!q*dS&@o#8^aXGA14N;uJAF_2%+{=wl)d*O@$&kpmH!_JD!N$grR! zp;@g*b9>|K+Ba!D$3NJ-pT~3H-zVmur49REI!RuVFmrqNHK;~$nNIel9HZY4mv3d& zkDM{Pc0%Tf7we_wn;HlC^&3A7p1vmO#-vBySI(|dvsAtNA|?KGv1U!P64#EU0qeCK zBi)^Jf3n)VIe9!_Z@o#jPT(g$e0_~Wio}iMO7nXD zPmz0dQhlrRU6Z3T754nOk@RWyvv`~|28tWJab_I%hgZ@^_)FhxZX3) zzrKrMn_HUO!QNN@)|j&Ilv-%D?#+_9dQUemic4^J_6=dU@?7o;+aAVQoJs!QjxCk> hXTvx($<8l6f4|}pb^%T`3pTa6Ud?YVhTBEd0syZsZd(8V literal 0 HcmV?d00001 diff --git a/tests/integration/long/ssl/cassandra.crt b/tests/integration/long/ssl/cassandra.crt deleted file mode 100644 index 432e58540babe586367427cfa5f5d7fdf8c2f170..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 626 zcmXqLV#+gUV!Xb9nTe5!iN*eN$qNHsHcqWJkGAi;jEvl@3le43th=CADhFzFDI5DpzQNcMUu_)0{(10H#$|cO~9<7#bKtxpZ|)<9y_h zU}R-rZtP_+XzXNaY-Cs{VWZtPMeySN-bE{x8gVmDm20?RUb^5+N8mfNjeff3Yacwh zXQ6mbaH++n%nWrV=kNEkRPHwDh3+ZZ&ZvK5z5801MT;Dic4RQaWUJJq`6GYj0}v6 z6%FJKWPu?m%f}+dBJwpUGf0BXN5K8Dm?LN0jjv5YT=S590E|m!pwq>d@2Lz>UN|HE z#Qeq4GaD~Wy>;u?&-|^f&VBg{Zr+jFP@Zw^^B>m9Z#HxuK5v&irBl(u!2G|)-Dy_I z#YSDrCnRLqMhjeu+~-g_wKd!K<}azMyxbeE?JfOmq*$uQm0&QzI4M4|_nSY{7K4lX aUkLAczwrJ9tC$`&Pr$|cO}l30=$ zT#{I!;F*`KXDDeP4ie`U76|YNP;e>AEK4m?2uUq2DK-=`5CBOr3v(J8nd=zj*dGY?tPX+&vz3yB7zTGip!n_q~QYs%0-t%~Hd0Ie= z=uEN0509Ktd~ba@V@F4w(O#hk?^*uOZ5O&7sS#eVpV#c2W$9-N)@Sz)%A0BVX)_#r zz__&8&hpdZ){{R>d8QT{=bSp^Uzc)Gc5XR$i}>e`mzUHT>#v4b`(>sr;`rCd-Szg{ z-c46MPCgerp#AK8@9W>mA+J5}t7V!9iyKdz8WJ^|D`3mf!>V65W!2TK-5}q+Phj#& z#+_$(rY5aSSd^-f!G8MM-_QHp-Y?p9cb9ne3FiVs(-{-b35w7DS66=L>9Vb+?6>X1 z&VTc+@jLSV9P?HtW=00a#fk><2C~5DmE~g*V-cyzm~_~SQ^fGpo685HkBK;3XYx=+ z4n$zu0|p`^!-A58X0;y8?TxQ%-=y&z|6uce9?yY)pO}A^Htc`tBzZ}~%nf137IEete2W^Y8>R(Z~QQL`kJI0lOB0rIlD^DQuXeOl=#!d znl;TzTsxKqtk-gkba&GI$!hcF9%0F2UW|H-zEJbGa*Q Xdl+YNCi#Cmwp8Yy4dc`#JHL1UE7@EN literal 0 HcmV?d00001 diff --git a/tests/integration/long/ssl/driver.key b/tests/integration/long/ssl/driver.key new file mode 100644 index 0000000000..bcb033d338 --- /dev/null +++ b/tests/integration/long/ssl/driver.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAohKfXUHM5AQvwgTPIretqenYVJzo78z5IE/xjUe0/e24lJCe +qCYaJPHB3kjg05ZQhBWZFsPhxMwh7zvTaLiIfjK9EuDvBP+dhxLbWShXcL8NNu45 +dfM4BebewR82Kk4rAMHgAaWDPjnyo4XJ+DUMlXMzbMrCT35k0R2ddwuEF/OI6dIn +AX/VVDtOaWaiCP6BC4rt9r2y1UjJ5xHAK+bPjev7Y1TrSd8maTQTFzORlVRamwpQ +tMXDJfWyan5+rbAfi74Qk6kBuc25ZWKpYKJlKGgHy9b9875G76K63boXe8hDcDE1 +mJHOEReb/n533OWmtTUH2z9Wz/ZLfE7E784DtQIDAQABAoIBAEjCDXMDMCUI0BvM +puSSCs9loG1Tx6qnnQtpeCoRj+oTBzMCvYcFG+WL9pmApBW4vEqfm4tBwp/jeDR6 +6gxwEy58Pq8z9XmL+z8BMyGSX7bIvfjz9y3BU1wq6cNO7LUPRHThlFuI3yNXRlss +m5/myNBq2CI14Adp2X6bSe6AZ/uL6+Ez8ZLpNiMUtGzTDILyH8bkgRHsyDxoDpF+ +h1r42dZG3ue4JC3oqUE2epvNE5Rzh02OTBSYYbmOmFWkhV2lm6DH46lycFHjkzcm +2meU7q9I7nZrf0+ZkkQXhWGQWcGzHd3si+XMhaHT0BnH8gGFZDzpC/qnvZdIn//y +zDu2B/0CgYEA43kkgZ1YGqi5Vi4ngek/uk4sJ9dOvYIaAag+u5ybRinLhCFDGhr9 +MzksqFY7naq9oeZZrLbftw2FsKsCMYAOrrxZIbhvGm538jwWmBefEmD0Ww+k/WKG +AAv0z4sSnSOq2/sZBQJlOrk8K6wQ+FcyEddiy16zyj9UCzEPSDQoNj8CgYEAtmXY +xYALbGz0peDGfzPq/WjeqCJtjlXIKOB7+vdyi88YE4Tea2vfBmAAWmbTf/el/o8R +txDfP6+RKxXs2jCFCY640W83ar3RjXcwtu+oH4Aqa8S21o6Dx2sx3HOgKoJ3DGGB +HHmYczuDZmN/zHseBrYUf1UNcIDQIWZCLKImkQsCgYEAuY735bfJyC18yr1pwwLX +1o2wXWu4ssSvnEx3qCmvTIQJnaZYi7IafC+Wq1d02EAQ40H6QvcG9ddVCHHnnyOc +VpQUjXpbP49/zx2PPNCAFcj7bFENwRkewGkAclE7hO40kbB6j2iN1WKHoASD72GJ +2Z3+3HFLbp9MWrjRg4/wgZcCgYAk2IgkxYwJOC1nwPN4SM2kqWWpQ2MsSKnpkAeb +mBccpRYxAztHKgBgsk3/9RuukyGGPfKPL6pZnCbQNFqnbPvDBYDSVgw01OmbEUPX +AKzOyD5JjPB+gUWfqEjnRrhJPhe8eYnybaHdTV1q9piffxN+uZOEcXMIkgz5YkXl +7E+sJwKBgFLA2CS19lbcoi5R8AJbUydZJr/LNAbFknWnliiq3GxISfWydA9cG/dI +CxV3297awLhHrCWqziC0zITjEcAhsNkfG/VQlYGJOS3sfvMSrLuW/9bAL8o4VCpC +cOs9e+svbJukJB6UQu4vpROMmv+0quXM325VlCZNel7DPAovYwjW +-----END RSA PRIVATE KEY----- diff --git a/tests/integration/long/ssl/driver.pem b/tests/integration/long/ssl/driver.pem new file mode 100644 index 0000000000..f5aaa25b3c --- /dev/null +++ b/tests/integration/long/ssl/driver.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDnzCCAoegAwIBAgIEJErKsDANBgkqhkiG9w0BAQsFADB/MQswCQYDVQQGEwJVUzETMBEGA1UE +CBMKQ2FsaWZvcm5pYTEUMBIGA1UEBxMLU2FudGEgQ2xhcmExFjAUBgNVBAoTDURhdGFTdGF4IElu +Yy4xGTAXBgNVBAsTEFBIUCBEcml2ZXIgVGVzdHMxEjAQBgNVBAMTCTEyNy4wLjAuMTAgFw0xNjA0 +MTkxNTIzNDVaGA8yMTE2MDMyNjE1MjM0NVowfzELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm +b3JuaWExFDASBgNVBAcTC1NhbnRhIENsYXJhMRYwFAYDVQQKEw1EYXRhU3RheCBJbmMuMRkwFwYD +VQQLExBQSFAgRHJpdmVyIFRlc3RzMRIwEAYDVQQDEwkxMjcuMC4wLjEwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCiEp9dQczkBC/CBM8it62p6dhUnOjvzPkgT/GNR7T97biUkJ6oJhok +8cHeSODTllCEFZkWw+HEzCHvO9NouIh+Mr0S4O8E/52HEttZKFdwvw027jl18zgF5t7BHzYqTisA +weABpYM+OfKjhcn4NQyVczNsysJPfmTRHZ13C4QX84jp0icBf9VUO05pZqII/oELiu32vbLVSMnn +EcAr5s+N6/tjVOtJ3yZpNBMXM5GVVFqbClC0xcMl9bJqfn6tsB+LvhCTqQG5zbllYqlgomUoaAfL +1v3zvkbvorrduhd7yENwMTWYkc4RF5v+fnfc5aa1NQfbP1bP9kt8TsTvzgO1AgMBAAGjITAfMB0G +A1UdDgQWBBR8aJLDSgkUMcrs08BbxhRA1wJIIzANBgkqhkiG9w0BAQsFAAOCAQEAoHRggyaMKbeB +633sZgzH8DzvngzA/vID+XWAv+lCGdIYNkbu9VJ8IaYsa9JsMvvhp7UFL1mYm32QacjorxqfNTNS +To8z4VOXrGLYkuJL1M2qJjkl3ehkX8tzKXyDIgq4pVCvKkFZR0It+QU87MnHUL1/HIOy+zdNW6ZU +Q7sRCUMtqstiQ4scbispsVevfEBkGNjHIp6M/5Qe6skntRvdNMWZILz82GLym+NppTgcNcwDf7lq +g/syNznM0KAE1VUAJ2y8tArvAZ/XugC2RmZGwY3q/qw1B7kaoTqu7KSdLuWzol5gR0NNVADU5x7U +BrwBmgliT/bGpRz+PAGVYj5OXw== +-----END CERTIFICATE----- diff --git a/tests/integration/long/ssl/driver_ca_cert.pem b/tests/integration/long/ssl/driver_ca_cert.pem deleted file mode 100644 index 7e55555767..0000000000 --- a/tests/integration/long/ssl/driver_ca_cert.pem +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICbjCCAdegAwIBAgIEP/N06DANBgkqhkiG9w0BAQsFADBqMQswCQYDVQQGEwJU -RTELMAkGA1UECBMCQ0ExFDASBgNVBAcTC1NhbnRhIENsYXJhMREwDwYDVQQKEwhE -YXRhU3RheDELMAkGA1UECxMCVEUxGDAWBgNVBAMTD1BoaWxpcCBUaG9tcHNvbjAe -Fw0xNDEwMDMxNTQ2NDdaFw0xNTAxMDExNTQ2NDdaMGoxCzAJBgNVBAYTAlRFMQsw -CQYDVQQIEwJDQTEUMBIGA1UEBxMLU2FudGEgQ2xhcmExETAPBgNVBAoTCERhdGFT -dGF4MQswCQYDVQQLEwJURTEYMBYGA1UEAxMPUGhpbGlwIFRob21wc29uMIGfMA0G -CSqGSIb3DQEBAQUAA4GNADCBiQKBgQChGDwrhpQR0d+NoqilMgsBlR6A2Dd1oMyI -Ue42sU4tN63g5N4adasfasdfsWgnAkP332ok3YAuVbxytwEv2K9HrUSiokAiuinl -hhHA8CXTHt/1ItzzWj9uJ3Hneb+5lOkXVTZX7Y+q3aSdpx/HnZqn4i27DtLZF0z3 -LccWPWRinQIDAQABoyEwHzAdBgNVHQ4EFgQU9WJpUhgGTBBH4xZBCV7Y9YISCp4w -DQYJKoZIhvcNAQELBQADgYEAF6e8eVAjoZhfyJ+jW5mB0pXa2vr5b7VFQ45voNnc -GrB3aNbz/AWT7LCJw88+Y5SJITgwN/8o3ZY6Y3MyiqeQYGo9WxDSWb5AdZWFa03Z -+hrVDQuw1r118zIhdS4KYDCQM2JfWY32TwK0MNG/6BO876HfkDpcjCYzq8Gh0gEg -uOA= ------END CERTIFICATE----- diff --git a/tests/integration/long/ssl/python_driver.crt b/tests/integration/long/ssl/python_driver.crt deleted file mode 100644 index 0a419f4eb13cea8866961205155cfc16f91c2f7e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 947 zcmXqLVqS01#58>YGZP~d6N|{_iR=cvY@Awc9&O)w85y}*84McR47m+B*_cCF*o2uv zgAIiZ1VJ1QVJ_#yoXoWRqP)yRLlFZZkRZD-cW`1}Nuq*tPGV7_p_qXPNR&&M*Cnwe zF}NhLLcudHSRS(1^Tr{GeQS(aL)5RzJ4QVi3@EX<1{YoH*`YiMd< zW@Kt;ZeU<&5hcNIWC#*4FoX&W9;Y@jDj^3WBP#=Q6C*!^K@%evQxhX2L$AlKqdyl_ z?ArKU;kTS5_^v9{bQsa zIg0U195b~P()hh8G4{ed-E209C*_74en1zuW;g#B~x*&@>IE-e1#k#t7+0tZ&_8Ga4mscYPA}}oi1CfzI zI^nL;(I9teMJ-9|{wpPAX6GA%Rj0mv_I%~HMWRmc&MrDtym9%16Nx1WES#&|Pq#=q z^2Sst3+eMtl|QrGBjgyTFGqFCyeDPH6AOhDH7D)R^D^&vqY|kr6?g1{{fn5mJ#1lW z9O3IHuFH(mds9#?8n6CFoMXDoqu;kHkLFmN-8buO!S~}LabJV3-uGHkW&UQ<)|t}k z2|t$Hf6!Rj`YQWivB7lHvVi?1ceFXnIMYLTgyzh$Z#i-I=aS~bo%c8+CvyFoZyOwN zhGz!%yV8Asm3B_hknVfsU&}kulY61|BDwkX2ltClllXbHW!1qFt;LD@w>O;H)pkea N`V%ji<^;BhNdSf*Rxtno diff --git a/tests/integration/long/ssl/python_driver.jks b/tests/integration/long/ssl/python_driver.jks deleted file mode 100644 index 9a0fd59f738b77c276f06fc0048a0b52fe64b345..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2306 zcmd6o`8U*$8pmhMUl2=ee@dB9Mx2A2pJ%ngFFpqupzncJdI1r=?# z&f#Ba%M?93!9aIFimfzz#$~&b5}AdIf2O7QGp%hC^k#y1A4Qbw*{J?g7gOx)l71$* zGhtkyj3TVmmwt&|+sapPQ18)Fm*Z6)T^K)Fp);#BN?i_WL_B)z@vY|FgPNDT8iPG_ zYgdx=r?7$Wj^Ia^rh=bxrK(9dpYQ zOjPrdN}sAztJp^0m|Dr0q{xEQaG1c@F|&s}oQNjn{eLkSS1eP;l*eTtM(*fFc$mD2 zTd_N?vb1jIC=WkpWW^5#d#J2UAg>RA%;njPiEB}V8uvb2^ze!ec<>_TD^)b8*OC@+ zitTSRyx88_a3C4y2;TS>d(PV=kBPNWH5w7On&~!j&YZWJd|Z&*WGX1DQe`w+Mrd|0 zTMmJt1;`ksdv5#DYiLidK#LEUi;uIe(ftZ`HU5OKqYC1y1GG072czM^`TXl!8-69z zm|F&ZhFw}i34H{i$#55kI*H!XMRrWL@Vv=rPy5Sie{5-um(w`Lp^7~%G3FTNE=aKF z4ejLlv^(t*_!94cCl-Wva-5n=WqNgPH`@0aDM4MCs@(fW+~y+sJw!%rKNn9~&6Sbt%-y~&_KQfWu8+ww; zJ1pZg*HiwmlU<4NlpQicP@YR-$G%%=<(e%v!_)%=&&LvLQeLHdJpb56aQ>K#r6kU) zB#+{Ib&!N))e3`{x$LPU+s_&gI*1L?{KA@$eu4g^smcK+X&{Fh6hnjCvQnm=I2jF< zySC=T2p>5GtO(#>I|ldqXGR+0mA zHTeT(osl5jkhZq1=+|P~W%`!_%$GQZ&t6fQu1$;U(QieVz-&AJaV+1i!A*rMVJ6^u zP>Y@D=zCiivC{)qBU!a)8jY(9rm>@B@MXgy6rjv?Ij^T0z$S3k?tU8hy{KlH$C z&^nv2rY~yauJ1#$U+BAbyLV)h%l0f4Mv%4#wx1`o2_GL(IG*e=i=ChFNx)mjKz%(I z)kIQSjC$+Xk@ZET+UC06+7%&*LUQ@+)jeMa>Ch3xt?-+x+Ny&y9M8$(^Z1gm@xYjY zt0hCq0`aR~)LX%hr85OjY|deR+mTmk#%Vfx?@$GIw~%(?XYJ zMO`#HpOV^L$;_L*Z5%hMlbkHH=aG+^%Q(p*M7qXDcax&7izeU2AEdfA&q&e(vtI0a z+_$`osoRLcTz)Baa_x?T%1>gptc%@pFex?92mP6_Qeo!&ass`PyjHDu;Wl9wcKvrg za=U5a0RjjFX#!Y~2LOxfMj3<)%*6#o?dI|V2p+gbidD*21Q^WE4FVEVaQpzA2jUFn z5#@4r!ifSRzZ#e*pD8sY&_66PG?0oz0mxsFSCrq08cL_in}$#$sW@=}^^5X}BFw0C zsuP_WBY!&doEA0sD@$kt&SYRO#zykAevA|$ZmKA&OTSW}J@vHp4EE>Eu z%!zAmpB2#$tctYO>76O{H{2aH*hP;|YNqO=@{I-N z;Y|X{<-jtt8I@rvoS*%ajro+d%byfyKX2z_7-%&wCY;O;cijZT8FnYQ6XE1>m8Lg; zH34T`sFr;jFI&{9`|s*^EKZI>VLjbK(nA*?Zx-Ba+Z?b_wHB%`z_GHxtLoa0C~?DF zzw)^TpEEhRy|VjJ3}tBi)LSy8lgC9B=K3JFK9HjIKAb6bR_(n6?1uEp{ybwa ssH7g7Zx~^xEGW;eSxaB`=E?PGO<$;UD<*~oO=dDeTg@A{mK0L{4ZtwgI{*Lx diff --git a/tests/integration/long/ssl/python_driver.key b/tests/integration/long/ssl/python_driver.key deleted file mode 100644 index afd73b298c..0000000000 --- a/tests/integration/long/ssl/python_driver.key +++ /dev/null @@ -1,34 +0,0 @@ -Bag Attributes - friendlyName: python_driver - localKeyID: 54 69 6D 65 20 31 34 33 35 33 33 33 34 30 34 33 33 32 -Key Attributes: ------BEGIN RSA PRIVATE KEY----- -Proc-Type: 4,ENCRYPTED -DEK-Info: DES-EDE3-CBC,8A0BC9CFBBB36D47 - -J3Rh82LhsNdIdCV4KCp758VIJJnmedwtq/I9oxH5kY4XoUQjfNcvLGlEnbAUD6+N -mYnQ5XPDvD7iC19XvlA9gfaoWERq+zroGEP+e4dX1X5RlT6YQBJpJR8IW4DWngDM -Nv6CuaGFJWMH8QUvKlJyFOPOHBqbhsCRaxg3pOG3RyUFXpGPDV0ySUyp6poHE9KE -pEVif/SdS3AhV2sb4tyBS9sRZdH1eeCN4gY6k9PQWyNViAgUYAG5xWsE4fITa3qY -gisyzbOYU8ue2QvmjPJgieiKPQf+st/ZRV5eQUCdUgAfLEnULGJXRZ5kw7kMXL0X -gLaKFbGxv4pKQCDCZQq4GXIA/nmTy6cme+VPKwq3usm+GdxfdWQJjgG65+AFaut/ -XjGm1fvSQzWuzpesfLy57HMK+bBh1/UKjuQa3wAHtgPtJLtUSW+/qBnQRdBbl20C -dJtJXyyTlX6H8bQBIfBLc4ntUwS8fVd2jsYJRpCBY6HdtpfsZZ5gQnm1Nux4ksKn -rYYx3I/JpChr7AV7Yj/lwc3Zca/VJl16CjyWeRTQEvkl6GK958aIzj73HfXleZc6 -HGVfOgo2BLmOzY0ZCq/Wa4fnURRgrC3SusrT9mjVbID91oNYw4BjMEU53u0uxPC+ -rr6SwG2EUVawGTVK4XZw2DINCPP/wsKqf0xqA+sxArcTN/MEdLUBdf8oDntkj2jG -Oy0kwpjqhSvWo1DqYKZjV/wKT2SS18OMAW+0qplbHw1/FDGWK+OseD8MXwBo06a5 -LWRQXhf0kEXUQ+oNj3eahe/npHiNChR6mEiIbCuE3NAXPPXJNkhMuj2f5EqrOPfZ -jqbNiLfKKx7L5t6B8LXkdKGPqztcFlnB8rRF9Eqa8F4wiEg8MBLrPyxgd/uT+NIz -LdDgvUE+IkCwQoYoCU70ApiEOyQNacuSxwUiVWVyn9CJYXPM4Vlje7GDIDRR5Xp6 -zNf0ktNP46PsRqDlYG9hZWndj4PRaAqtatlEEm37rmyouVBe3rxcbL1b1zsH/p1I -eaGGTyZ8+iEiuEk4gCOmfmYmpE7H/DXlQvtDRblid/bEY64Uietx0HQ5yZwXZYi8 -hb4itke6xkgRQEIXVyQOdU88PEuA5yofEGoXkfdLgtdu3erPrVDc+nQTYrMWNacR -JQljfhAFJdjOw81Yd5PnFHAtxcxzqEkWv0TGQLL1VjJdinhI7q/fIPLJ76FtuGmt -zlxo/Jy1aaUgM/e485+7aoNSGi2/t6zGqGuotdUCO5epgrUHX+9fOJnnrYTG9ixp -FSHTT69y72khnw3eMP8NnOS3Lu+xLEzQHNbUDfB8uyVEX4pyA3FPVVqwIaeJDiPS -2x7Sl5KKwLbqPPKRFRC1qLsN4KcqeXBG+piTLPExdzsLbrU9JZMcaNmSmUabdg20 -SCwIuU2kHEpO7O7yNGeV9m0CGFUaoCAHVG70oXHxpVjAJbtgyoBkiwSxghCxXkfW -Mg+1B2k4Gk1WrLjIyasH6p0MLUJ7qLYN+c+wF7ms00F/w04rM6zUpkgnqsazpw6F -weUhpA8qY2vOJN6rsB4byaOUnd33xhAwcY/pIAcjW7UBjNmFMB1DQg== ------END RSA PRIVATE KEY----- diff --git a/tests/integration/long/ssl/python_driver.pem b/tests/integration/long/ssl/python_driver.pem deleted file mode 100644 index 83556fd9ce..0000000000 --- a/tests/integration/long/ssl/python_driver.pem +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDrzCCApegAwIBAgIEFPORBzANBgkqhkiG9w0BAQsFADCBhjELMAkGA1UEBhMCVVMxEzARBgNV -BAgTCkNhbGlmb3JuaWExFDASBgNVBAcTC1NhbnRhIENsYXJhMRYwFAYDVQQKEw1EYXRhU3RheCBJ -bmMuMRwwGgYDVQQLExNQeXRob24gRHJpdmVyIFRlc3RzMRYwFAYDVQQDEw1QeXRob24gRHJpdmVy -MCAXDTE1MDYyNTE3MDAxOFoYDzIxMTUwNjAxMTcwMDE4WjCBhjELMAkGA1UEBhMCVVMxEzARBgNV -BAgTCkNhbGlmb3JuaWExFDASBgNVBAcTC1NhbnRhIENsYXJhMRYwFAYDVQQKEw1EYXRhU3RheCBJ -bmMuMRwwGgYDVQQLExNQeXRob24gRHJpdmVyIFRlc3RzMRYwFAYDVQQDEw1QeXRob24gRHJpdmVy -MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAjUi6xfmieLqx9yD7HhkB6sjHfbS51xE7 -aaRySjTA1p9mPfPMPPMZ0NIsiDsUlT7Fa+LWZU9cGuJBFg8YxjU5Eij7smFd0J4tawk51KudDdUZ -crALGFC3WY7sEboMl8UHqV+kESPlNm5/JSNSYkNm1TMi9mHcB/Bg3RDpORRW/keMtBSLRxCVjsu6 -GvKN8wuEfU/bTmI9aUjbFRCFunBX6QEJeU44BYEJXNAls+X8szBfVmFHwefatSlh++uu7kY6zAQI -v74PHMZ8w+mWmbjpxEsmSg+uljGCjQHjKTNSFBY9kWWh2LBiTcZuEsQ9DK0J/+1tUa0s5vq6CjUK -XRxwpQIDAQABoyEwHzAdBgNVHQ4EFgQUJwTYG8dcZDt7faalYwCHmG3jp3swDQYJKoZIhvcNAQEL -BQADggEBABtg3SLFUkcbISoZO4/UdHY2z4BTJZXt5uep9qIVQu7NospzsafgyGF0YAQJq0fLhBlB -DVx6IxIvDZUfzKdIVMYJTQh7ZJ7kdsdhcRIhKZK4Lko3iOwkWS0aXsbQP+hcXrwGViYIV6+Rrmle -LuxwexVfJ+wXCJcc4vvbecVsOs2+ms1w98cUXvVS1d9KpHo37LK1mRsnYPik3+CBeYXqa8FzMJc1 -dlC/dNwrCXYJZ1QMEpyaP4TI3fmkg8OJ3glZkQr6nz1TUMwMmAvudb79IrmQKBuO6k99DZFJC6Er -oh6ff8G/F5YY+dWEqsF0KqNhL9uwyrqG3CTX5Eocg2AGkWI= ------END CERTIFICATE----- diff --git a/tests/integration/long/ssl/python_driver_bad.pem b/tests/integration/long/ssl/python_driver_bad.pem deleted file mode 100644 index 978d6c53f3..0000000000 --- a/tests/integration/long/ssl/python_driver_bad.pem +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDrzCCApegAwIBAgIEFPORBzANBgkqhkiG9w0BAQsFADCBhjELMAkGA1UEBhMCVVMxEzARBgNV -BAgTCkNhbGlmb3JuaWExFDASBgNVBAcTC1NhbnRhIENsYXJhMRYwFAYDVQQKEw1EYXRhU3RheCBJ -bmMuMRwwGgYDVQQLExNQeXRob24gRHJpdmVyIFRlc3RzMRYwFAYDVQQDEw1QeXRob24gRHJpdmVy -MCAXDTE1MDYyNTE3MDAxOFoYDzIxMTUwNjAxMTcwMDE4WjCBhjELMAkGA1UEBhMCVVMxEzARBgNV -BAgTCkNhbGlmb3JuaWExFDASBgNVBAcTC1NhbnRhIENsYXJhMRYwFAYDVQQKEw1EYXRhU3RheCBJ -bmMuMRwwGgYDVQQLExNQeXRob24gRHJpdmVyIFRlc3RzMRYwFAYDVQQDEw1QeXRob24gRHJpdmVy -MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAjUi6xfmieLqx9yD7HhkB6sjHfbS51xE7 -aaRySjTA1p9mPfPMPPMZ0NIsiDsUlT7Fa+LWZU9cGuJBFg8YxjU5Eij7smFd0J4tawk51KudDdUZ -crALGFC3WY7sEboMl8UHqV+kESPlNm5/JSNSYkNm1TMi9mHcB/Bg3RDpORRW/keMtBSLRxCVjsu6 -GvKN8wuEfU/bTmI9aUjbFRCFunBX6QEJeU44BYEJXNAls+X8szBfVmFHwefatSlh++uu7kY6zAQI -v74PHMZ8w+mWmbjpxEsmSg+uljGCjQHjKTNSFBY9kWWh2LBiTcZuEsQ9DK0J/+1tUa0s5vq6CjUK -XRxwpQIDAQABoyE666666gNVHQ4EFgQUJwTYG8dcZDt7faalYwCHmG3jp3swDQYJKoZIhvcNAQEL -BQADggEBABtg3SLFUkcbISoZO4/UdHY2z4BTJZXt5uep9qIVQu7NospzsafgyGF0YAQJq0fLhBlB -DVx6IxIvDZUfzKdIVMYJTQh7ZJ7kdsdhcRIhKZK4Lko3iOwkWS0aXsbQP+hcXrwGViYIV6+Rrmle -LuxwexVfJ+wXCJcc4vvbecVsOs2+ms1w98cUXvVS1d9KpHo37LK1mRsnYPik3+CBeYXqa8FzMJc1 -dlC/dNwrCXYJZ1QMEpyaP4TI3fmkg8OJ3glZkQr6nz1TUMwMmAvudb79IrmQKBuO6k99DZFJC6Er -oh6ff8G/F5YY+dWEqsF0KqNhL9uwyrqG3CTX5Eocg2AGkWI= ------END CERTIFICATE----- diff --git a/tests/integration/long/ssl/python_driver_no_pass.key b/tests/integration/long/ssl/python_driver_no_pass.key deleted file mode 100644 index 8dd14f84f0..0000000000 --- a/tests/integration/long/ssl/python_driver_no_pass.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAjUi6xfmieLqx9yD7HhkB6sjHfbS51xE7aaRySjTA1p9mPfPM -PPMZ0NIsiDsUlT7Fa+LWZU9cGuJBFg8YxjU5Eij7smFd0J4tawk51KudDdUZcrAL -GFC3WY7sEboMl8UHqV+kESPlNm5/JSNSYkNm1TMi9mHcB/Bg3RDpORRW/keMtBSL -RxCVjsu6GvKN8wuEfU/bTmI9aUjbFRCFunBX6QEJeU44BYEJXNAls+X8szBfVmFH -wefatSlh++uu7kY6zAQIv74PHMZ8w+mWmbjpxEsmSg+uljGCjQHjKTNSFBY9kWWh -2LBiTcZuEsQ9DK0J/+1tUa0s5vq6CjUKXRxwpQIDAQABAoIBAC3bpYQM+wdk0c79 -DYU/aLfkY5wRxSBhn38yuUYMyWrgYjdJoslFvuNg1MODKbMnpLzX6+8GS0cOmUGn -tMrhC50xYEEOCX1lWiib3gGBkoCi4pevPGqwCFMxaL54PQ4mDc6UFJTbqdJ5Gxva -0yrB5ebdqkN+kASjqU0X6Bt21qXB6BvwAgpIXSX8r+NoH2Z9dumSYD+bOwhXo+/b -FQ1wyLL78tDdlJ8KibwnTv9RtLQbALUinMEHyP+4Gp/t/JnxlcAfvEwggYBxFR1K -5sN8dMFbMZVNqNREXZyWCMQqPbKLhIHPHlNo5pJP7cUh9iVH4QwYNIbOqUza/aUx -z7DIISECgYEAvpAAdDiBExMOELz4+ku5Uk6wmVOMnAK6El4ijOXjJsOB4FB6M0A6 -THXlzLws0YLcoZ3Pm91z20rqmkv1VG+En27uKC1Dgqqd4DOQzMuPoPxzq/q2ozFH -V5U1a0tTmyynr3CFzQUJKLJs1pKKIp6HMiB48JWQc5q6ZaaomEnOiYsCgYEAvczB -Bwwf7oaZGhson1HdcYs5kUm9VkL/25dELUt6uq5AB5jjvfOYd7HatngNRCabUCgE -gcaNfJSwpbOEZ00AxKVSxGmyIP1YAlkVcSdfAPwGO6C1+V4EPHqYUW0AVHOYo7oB -0MCyLT6nSUNiHWyI7qSEwCP03SqyAKA1pDRUVI8CgYBt+bEpYYqsNW0Cn+yYlqcH -Jz6n3h3h03kLLKSH6AwlzOLhT9CWT1TV15ydgWPkLb+ize6Ip087mYq3LWsSJaHG -WUC8kxLJECo4v8mrRzdG0yr2b6SDnebsVsITf89qWGUVzLyLS4Kzp/VECCIMRK0F -ctQZFFffP8ae74WRDddSbQKBgQC7vZ9qEyo6zNUAp8Ck51t+BtNozWIFw7xGP/hm -PXUm11nqqecMa7pzG3BWcaXdtbqHrS3YGMi3ZHTfUxUzAU4zNb0LH+ndC/xURj4Z -cXJeDO01aiDWi5LxJ+snEAT1hGqF+WX2UcVtT741j/urU0KXnBDb5jU92A++4rps -tH5+LQKBgGHtOWD+ffKNw7IrVLhP16GmYoZZ05zh10d1eUa0ifgczjdAsuEH5/Aq -zK7MsDyPcQBH/pOwAcifWGEdXmn9hL6w5dn96ABfa8Qh9nXWrCE2OFD81PDU9Osd -wnwbTKlYWPBwdF7UCseKC7gXkUD6Ls0ADWJvrCI7AfQJv6jj6nnE ------END RSA PRIVATE KEY----- diff --git a/tests/integration/long/ssl/server_cert.pem b/tests/integration/long/ssl/server_cert.pem deleted file mode 100644 index 7c96b96ade..0000000000 --- a/tests/integration/long/ssl/server_cert.pem +++ /dev/null @@ -1,13 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICbjCCAdegAwIBAgIEP/N06DANBgkqhkiG9w0BAQsFADBqMQswCQYDVQQGEwJURTELMAkGA1UE -CBMCQ0ExFDASBgNVBAcTC1NhbnRhIENsYXJhMREwDwYDVQQKEwhEYXRhU3RheDELMAkGA1UECxMC -VEUxGDAWBgNVBAMTD1BoaWxpcCBUaG9tcHNvbjAeFw0xNDEwMDMxNTQ2NDdaFw0xNTAxMDExNTQ2 -NDdaMGoxCzAJBgNVBAYTAlRFMQswCQYDVQQIEwJDQTEUMBIGA1UEBxMLU2FudGEgQ2xhcmExETAP -BgNVBAoTCERhdGFTdGF4MQswCQYDVQQLEwJURTEYMBYGA1UEAxMPUGhpbGlwIFRob21wc29uMIGf -MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQChGDwrhpQR0d+NoqilMgsBlR6A2Dd1oMyIUe42sU4t -N63g5N44Ic4RpTiyaWgnAkP332ok3YAuVbxytwEv2K9HrUSiokAiuinlhhHA8CXTHt/1ItzzWj9u -J3Hneb+5lOkXVTZX7Y+q3aSdpx/HnZqn4i27DtLZF0z3LccWPWRinQIDAQABoyEwHzAdBgNVHQ4E -FgQU9WJpUhgGTBBH4xZBCV7Y9YISCp4wDQYJKoZIhvcNAQELBQADgYEAF6e8eVAjoZhfyJ+jW5mB -0pXa2vr5b7VFQ45voNncGrB3aNbz/AWT7LCJw88+Y5SJITgwN/8o3ZY6Y3MyiqeQYGo9WxDSWb5A -dZWFa03Z+hrVDQuw1r118zIhdS4KYDCQM2JfWY32TwK0MNG/6BO876HfkDpcjCYzq8Gh0gEguOA= ------END CERTIFICATE----- diff --git a/tests/integration/long/ssl/server_keystore.jks b/tests/integration/long/ssl/server_keystore.jks deleted file mode 100644 index 8125935516b7ca7d39498640f321040a16746e6f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1398 zcmezO_TO6u1_mY|W&~r-f zqOZ(8Bi5~0mwmOh=efU@f6#-6#$o>ooBF>6J@?b#=HJVHa*I}I-h+CD=*T&S6SP$`!+ghorIcZ*(RTvLX zEAbYzbACS2RB3VHuaXCI)e;K7+<#vs86)s)5wD4|&c!puJEk)ow2b+&wfCO&%DI|p z=VwmSUfEdn^=1Zx@Bb+aUf7;XJJ)tVDDrdJmiCL>6V7FvHr~en^?mz+eKxN=l~pTk z3+~PgTebGmI>|;~=APU`>nkh(ieV*-k+hBJ?Vb* zpSN35+Ff2x5VybGHA$_wygfB$_BTQQP`!ncZ)Ix^)jG00ka>Qc{l)!%{f%owui89o zYm#B!x$)ZaRZRvrO?}0~=AG;L*mF&BwN2^nNMGf4_q);37iT@LJNSHhwr2)^MfZ|w zg|$jk#l3n;%Vkb~+vRQ5@TTEqfqBQm_-hdtFZ2a*2p?AnXVSm1=;5TsrwfeSU(AS; zE#9uM+FIOw@yuWKQ+LQ;wPRygpSQC8oz0OiC$2V{+-435sF#k)JRl2B)T|MDrUsV4 zgj@tn$aw}$jMo=1GchtTvDkkud11iI#;Mij(e|B}k&&B~!63_!+klgeIh2J>m?^{+ z#^DfVa&|NnF%SaDunThsC+3wTDmdpP79|=A8t{Wexr8}f5=#<;OA;$!y12ncNEnEL zd5d|tVK=aiS?p=RN4`&ZpvGIzQB@wu~>KhoXJcj>0M&v)J9VzwzsbD5YK85kEU z8ps>S0z*=kk420{!u<{|q47?;dIr;9J&QyHMVa7O%z z`HQ1xHeQ-~>(;NI`CDC``|=muyd$-tJmcEuKdh7AZ0J0E-Y$7cr=o>{`G1YO)2xz< zjk=akNXW8{7Pu6-&!Kc`YqsyrUs6|jxi?(fTl(2Zu~d&M!C-=MQha3ZH-Dxr1{e3g u5Z?2C;r$6#F+FO=s}C-`#Hg_2!GX3nhWw`v&yrZjliX3p^FL_4p(Oxq{5b#s diff --git a/tests/integration/long/ssl/server_trust.jks b/tests/integration/long/ssl/server_trust.jks deleted file mode 100644 index feb0784a06189e4abb5d763d0bf011590a311c1f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1677 zcmezO_TO6u1_mZL=1fj3E>6r#DN1BuVD!`BoBN-EHA2tSz!Ipa$e@WS&!CC%`T}Mq zMkXc}`_CmW40zc%wc0$|zVk9Nayplu(=bXf%L_X)h&(lkwb!!m4Ugjm%*U1lc}+h zVWEVLcH0!ei}!mMtypTr%{W!A;f8tXf-@a~@60y(>6)*7@Z_F_;yJ;k7Mn6N)R~;W z-_KIH+n^V^r)WE){*CqSYh4yCa!}f(`Ls>&zz5aKa`(R~-T544pQm2P(0Ta0UGkJpMGFJ-{~C9vStS=6buFKekYyV!a4B-1 zL+RAkY~P!|q^|OEZ@9L%^s|v-sUBB?!35)^_{iRG{!Cj8F7AIJyyyMG`xC5Uden?p zA6$5eQDMgeaJu9zs4U6I&x=nf$}CGQ0;fxU^FDaGWZrDh#JnDuHm5_=rO4-r>}Y;5 zXl#R}DqzA44K@@u5Co+v4q-0m#GK5u{GzviJ%s>Q^KzU&aRKYVZS=`RIzL0cZJ__l8moT9IxH7^SYpQ=8_^W zlLOc0r`di!WAj<^!X=#!YmupTN3$PYOZAVDdgLg^FLBJ&Qb^2X_BpmkS4_)A94No^pnS!O*@|8Nv4e8@CDc04s%a$fHw9m+W zyu2DY5JCA06n4@Hca@F?xl1c*Nm}<`DJe5M-w>=i_3g9gE59ugb$WMp(W&B%%O9Lb zEJ&=L*$0aarkj=p>@T^a%~{5o9>OCuXO?}-iMv0SG#~D~ z#~C@1>(_kS;D9qcGq~TC?)$5>bApC+-z)!G-ie;v3$+)?&96VWUwoRx&#Nt~4wh&w tPSn4>;nc3SJ1W 5: raise RuntimeError("Failed to connect to SSL cluster after 5 attempts") try: - cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options={'ca_certs': abs_path_ca_cert_path, - 'ssl_version': ssl.PROTOCOL_TLSv1}) + cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options=ssl_options) session = cluster.connect() break except Exception: @@ -134,6 +110,58 @@ def test_can_connect_with_ssl_ca(self): cluster.shutdown() +class SSLConnectionTests(unittest.TestCase): + + @classmethod + def setUpClass(cls): + setup_cluster_ssl() + + def test_can_connect_with_ssl_ca(self): + """ + Test to validate that we are able to connect to a cluster using ssl. + + test_can_connect_with_ssl_ca performs a simple sanity check to ensure that we can connect to a cluster with ssl + authentication via simple server-side shared certificate authority. The client is able to validate the identity + of the server, however by using this method the server can't trust the client unless additional authentication + has been provided. + + @since 2.6.0 + @jira_ticket PYTHON-332 + @expected_result The client can connect via SSL and preform some basic operations + + @test_category connection:ssl + """ + + # find absolute path to client CA_CERTS + abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS) + ssl_options = {'ca_certs': abs_path_ca_cert_path, + 'ssl_version': ssl.PROTOCOL_TLSv1} + validate_ssl_options(ssl_options=ssl_options) + + def test_can_connect_with_ssl_ca_host_match(self): + """ + Test to validate that we are able to connect to a cluster using ssl, and host matching + + test_can_connect_with_ssl_ca_host_match performs a simple sanity check to ensure that we can connect to a cluster with ssl + authentication via simple server-side shared certificate authority. It also validates that the host ip matches what is expected + + @since 3.3 + @jira_ticket PYTHON-296 + @expected_result The client can connect via SSL and preform some basic operations, with match_hostname specified + + @test_category connection:ssl + """ + + # find absolute path to client CA_CERTS + abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS) + ssl_options = {'ca_certs': abs_path_ca_cert_path, + 'ssl_version': ssl.PROTOCOL_TLSv1, + 'cert_reqs': ssl.CERT_REQUIRED, + 'match_hostname': True} + + validate_ssl_options(ssl_options=ssl_options) + + class SSLConnectionAuthTests(unittest.TestCase): @classmethod @@ -158,40 +186,39 @@ def test_can_connect_with_ssl_client_auth(self): abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS) abs_driver_keyfile = os.path.abspath(DRIVER_KEYFILE) abs_driver_certfile = os.path.abspath(DRIVER_CERTFILE) + ssl_options = {'ca_certs': abs_path_ca_cert_path, + 'ssl_version': ssl.PROTOCOL_TLSv1, + 'keyfile': abs_driver_keyfile, + 'certfile': abs_driver_certfile} + validate_ssl_options(ssl_options) - tries = 0 - while True: - if tries > 5: - raise RuntimeError("Failed to connect to SSL cluster after 5 attempts") - try: - cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options={'ca_certs': abs_path_ca_cert_path, - 'ssl_version': ssl.PROTOCOL_TLSv1, - 'keyfile': abs_driver_keyfile, - 'certfile': abs_driver_certfile}) + def test_can_connect_with_ssl_client_auth_host_name(self): + """ + Test to validate that we can connect to a C* cluster that has client_auth enabled, and hostmatching - session = cluster.connect() - break - except Exception: - ex_type, ex, tb = sys.exc_info() - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) - del tb - tries += 1 + This test will setup and use a c* cluster that has client authentication enabled. It will then attempt + to connect using valid client keys, and certs (that are in the server's truststore), and attempt to preform some + basic operations, with match_hostname specified + @jira_ticket PYTHON-296 + @since 3.3 - # attempt a few simple commands. + @expected_result The client can connect via SSL and preform some basic operations - insert_keyspace = """CREATE KEYSPACE ssltest - WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '3'} - """ - statement = SimpleStatement(insert_keyspace) - statement.consistency_level = 3 - session.execute(statement) + @test_category connection:ssl + """ - drop_keyspace = "DROP KEYSPACE ssltest" - statement = SimpleStatement(drop_keyspace) - statement.consistency_level = ConsistencyLevel.ANY - session.execute(statement) + # Need to get absolute paths for certs/key + abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS) + abs_driver_keyfile = os.path.abspath(DRIVER_KEYFILE) + abs_driver_certfile = os.path.abspath(DRIVER_CERTFILE) - cluster.shutdown() + ssl_options = {'ca_certs': abs_path_ca_cert_path, + 'ssl_version': ssl.PROTOCOL_TLSv1, + 'keyfile': abs_driver_keyfile, + 'certfile': abs_driver_certfile, + 'cert_reqs': ssl.CERT_REQUIRED, + 'match_hostname': True} + validate_ssl_options(ssl_options) def test_cannot_connect_without_client_auth(self): """ @@ -213,6 +240,7 @@ def test_cannot_connect_without_client_auth(self): with self.assertRaises(NoHostAvailable) as context: cluster.connect() + cluster.shutdown() def test_cannot_connect_with_bad_client_auth(self): """ @@ -239,3 +267,4 @@ def test_cannot_connect_with_bad_client_auth(self): 'certfile': abs_driver_certfile}) with self.assertRaises(NoHostAvailable) as context: cluster.connect() + cluster.shutdown() diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index be2fe1187c..1b29c528d7 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -29,10 +29,9 @@ from cassandra.protocol import QueryMessage from cassandra.connection import Connection from cassandra.policies import WhiteListRoundRobinPolicy -from cassandra.query import SimpleStatement from tests import is_monkey_patched -from tests.integration import use_singledc, PROTOCOL_VERSION, execute_with_long_wait_retry +from tests.integration import use_singledc, PROTOCOL_VERSION try: from cassandra.io.libevreactor import LibevConnection @@ -56,7 +55,7 @@ def tearDown(self): Connection.max_in_flight = self.defaultInFlight self.cluster.shutdown() - def testInFlightTimeout(self): + def test_in_flight_timeout(self): """ Test to ensure that connection id fetching will block when max_id is reached/ From f966cb969569b82f58412fad82bdb4bca69ffa8b Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 19 Apr 2016 18:25:55 -0500 Subject: [PATCH 0565/2005] PYTHON-286 Adding test for heartbeat timeout --- tests/integration/standard/test_connection.py | 89 ++++++++++++++++++- 1 file changed, 87 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 1b29c528d7..f7acae6721 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -28,10 +28,10 @@ from cassandra.io.asyncorereactor import AsyncoreConnection from cassandra.protocol import QueryMessage from cassandra.connection import Connection -from cassandra.policies import WhiteListRoundRobinPolicy +from cassandra.policies import WhiteListRoundRobinPolicy, HostStateListener from tests import is_monkey_patched -from tests.integration import use_singledc, PROTOCOL_VERSION +from tests.integration import use_singledc, PROTOCOL_VERSION, get_node try: from cassandra.io.libevreactor import LibevConnection @@ -78,6 +78,91 @@ def test_in_flight_timeout(self): future.result() +class TestHostListener(HostStateListener): + host_down = None + + def on_down(self, host): + host_down = host + + +class HeartbeatTest(unittest.TestCase): + """ + Test to validate failing a heartbeat check doesn't mark a host as down + + @since 3.3 + @jira_ticket PYTHON-286 + @expected_result host should not be marked down when heartbeat fails + + @test_category connection heartbeat + """ + + def setUp(self): + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=1) + self.session = self.cluster.connect() + + def tearDown(self): + self.cluster.shutdown() + + def test_heart_beat_timeout(self): + # Setup a host listener to ensure the nodes don't go down + test_listener = TestHostListener() + host = "127.0.0.1" + node = get_node(1) + initial_connections = self.fetch_connections(host, self.cluster) + self.assertNotEqual(len(initial_connections), 0) + self.cluster.register_listener(test_listener) + # Pause the node + node.pause() + # Wait for connections associated with this host go away + self.wait_for_no_connections(host, self.cluster) + # Resume paused node + node.resume() + # Run a query to ensure connections are re-established + current_host = "" + count = 0 + while current_host != host and count < 100: + rs = self.session.execute_async("SELECT * FROM system.local", trace=False) + rs.result() + current_host = str(rs._current_host) + count += 1 + self.assertLess(count, 100, "Never connected to the first node") + new_connections = self.wait_for_connections(host, self.cluster) + self.assertIsNone(test_listener.host_down) + # Make sure underlying new connections don't match previous ones + for connection in initial_connections: + self.assertFalse(connection in new_connections) + + def fetch_connections(self, host, cluster): + # Given a cluster object and host grab all connection associated with that host + connections = [] + holders = cluster.get_connection_holders() + for conn in holders: + if host == str(getattr(conn, 'host', '')): + if conn._connection is not None: + connections.append(conn._connection) + return connections + + def wait_for_connections(self, host, cluster): + retry = 0 + while(retry < 300): + retry += 1 + connections = self.fetch_connections(host, cluster) + if len(connections) is not 0: + return connections + time.sleep(.1) + self.fail("No new connections found") + + def wait_for_no_connections(self, host, cluster): + retry = 0 + while(retry < 100): + retry += 1 + connections = self.fetch_connections(host, cluster) + if len(connections) is 0: + return + time.sleep(.1) + self.fail("Connections never cleared") + + class ConnectionTests(object): klass = None From 6763fc64774c1108c2b691d421b472cf2b30ac0b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 25 Apr 2016 13:21:16 -0500 Subject: [PATCH 0566/2005] make benchmarks work with C* 3 Now using proto ver negotiation instead of 1. --- benchmarks/base.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/benchmarks/base.py b/benchmarks/base.py index 812db42aaa..bd30bb32ab 100644 --- a/benchmarks/base.py +++ b/benchmarks/base.py @@ -63,8 +63,7 @@ def setup(hosts): log.info("Using 'cassandra' package from %s", cassandra.__path__) - cluster = Cluster(hosts, protocol_version=1) - cluster.set_core_connections_per_host(HostDistance.LOCAL, 1) + cluster = Cluster(hosts, schema_metadata_enabled=False, token_metadata_enabled=False) try: session = cluster.connect() @@ -91,8 +90,7 @@ def setup(hosts): def teardown(hosts): - cluster = Cluster(hosts, protocol_version=1) - cluster.set_core_connections_per_host(HostDistance.LOCAL, 1) + cluster = Cluster(hosts, schema_metadata_enabled=False, token_metadata_enabled=False) session = cluster.connect() session.execute("DROP KEYSPACE " + KEYSPACE) cluster.shutdown() From 5254f73a1eb7871f97442c6ac092ff4813a7ba67 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 25 Apr 2016 13:38:25 -0500 Subject: [PATCH 0567/2005] regenerate column names in named tuple exception --- cassandra/query.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cassandra/query.py b/cassandra/query.py index 7bf3ad5ea2..1fb4d4cc91 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -117,6 +117,7 @@ def named_tuple_factory(colnames, rows): try: Row = namedtuple('Row', clean_column_names) except Exception: + clean_column_names = list(map(_clean_column_name, colnames)) # create list because py3 map object will be consumed by first attempt log.warning("Failed creating named tuple for results with column names %s (cleaned: %s) " "(see Python 'namedtuple' documentation for details on name rules). " "Results will be returned with positional names. " From d12f6c0a3b71f577f43911cd7475731bfcb7a5ff Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 26 Apr 2016 09:11:37 -0400 Subject: [PATCH 0568/2005] do not use protocol version and set_core_connections_per_host in setup/teardown --- benchmarks/base.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/benchmarks/base.py b/benchmarks/base.py index 812db42aaa..ec8242b878 100644 --- a/benchmarks/base.py +++ b/benchmarks/base.py @@ -63,8 +63,7 @@ def setup(hosts): log.info("Using 'cassandra' package from %s", cassandra.__path__) - cluster = Cluster(hosts, protocol_version=1) - cluster.set_core_connections_per_host(HostDistance.LOCAL, 1) + cluster = Cluster(hosts) try: session = cluster.connect() @@ -91,8 +90,7 @@ def setup(hosts): def teardown(hosts): - cluster = Cluster(hosts, protocol_version=1) - cluster.set_core_connections_per_host(HostDistance.LOCAL, 1) + cluster = Cluster(hosts) session = cluster.connect() session.execute("DROP KEYSPACE " + KEYSPACE) cluster.shutdown() From a01d8d8b70eee509b46826ac694e6bb6a07a960a Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 26 Apr 2016 10:04:34 -0400 Subject: [PATCH 0569/2005] Add an cli option to specify the number of columns for the schema --- benchmarks/base.py | 49 ++++++++++++++++++++++++++++------------------ 1 file changed, 30 insertions(+), 19 deletions(-) diff --git a/benchmarks/base.py b/benchmarks/base.py index ec8242b878..39912be480 100644 --- a/benchmarks/base.py +++ b/benchmarks/base.py @@ -60,10 +60,10 @@ TABLE = "testtable" -def setup(hosts): +def setup(options): log.info("Using 'cassandra' package from %s", cassandra.__path__) - cluster = Cluster(hosts) + cluster = Cluster(options.hosts, protocol_version=options.protocol_version) try: session = cluster.connect() @@ -77,20 +77,21 @@ def setup(hosts): session.set_keyspace(KEYSPACE) log.debug("Creating table...") - session.execute(""" - CREATE TABLE %s ( + create_table_query = """ + CREATE TABLE {} ( thekey text, - col1 text, - col2 text, - PRIMARY KEY (thekey, col1) - ) - """ % TABLE) + """ + for i in range(options.num_columns): + create_table_query += "col{} text,\n".format(i) + create_table_query += "PRIMARY KEY (thekey))" + + session.execute(create_table_query.format(TABLE)) finally: cluster.shutdown() -def teardown(hosts): - cluster = Cluster(hosts) +def teardown(options): + cluster = Cluster(options.hosts, protocol_version=options.protocol_version) session = cluster.connect() session.execute("DROP KEYSPACE " + KEYSPACE) cluster.shutdown() @@ -99,7 +100,7 @@ def teardown(hosts): def benchmark(thread_class): options, args = parse_options() for conn_class in options.supported_reactors: - setup(options.hosts) + setup(options) log.info("==== %s ====" % (conn_class.__name__,)) kwargs = {'metrics_enabled': options.enable_metrics, @@ -112,10 +113,18 @@ def benchmark(thread_class): log.debug("Sleeping for two seconds...") time.sleep(2.0) - query = session.prepare(""" - INSERT INTO {table} (thekey, col1, col2) VALUES (?, ?, ?) - """.format(table=TABLE)) - values = ('key', 'a', 'b') + # Generate the INSERT query + insert_query = "INSERT INTO {} (thekey".format(TABLE) + for i in range(options.num_columns): + insert_query += ", col{}".format(i) + + insert_query += ") VALUES ('{}'".format('key') + + for i in range(options.num_columns): + insert_query += ", '{}'".format(i) + insert_query += ")" + + values = None per_thread = options.num_ops // options.threads threads = [] @@ -125,7 +134,7 @@ def benchmark(thread_class): try: for i in range(options.threads): thread = thread_class( - i, session, query, values, per_thread, + i, session, insert_query, values, per_thread, cluster.protocol_version, options.profile) thread.daemon = True threads.append(thread) @@ -140,7 +149,7 @@ def benchmark(thread_class): end = time.time() finally: cluster.shutdown() - teardown(options.hosts) + teardown(options) total = end - start log.info("Total time: %0.2fs" % total) @@ -188,8 +197,10 @@ def parse_options(): help='logging level: debug, info, warning, or error') parser.add_option('-p', '--profile', action='store_true', dest='profile', help='Profile the run') - parser.add_option('--protocol-version', type='int', dest='protocol_version', + parser.add_option('--protocol-version', type='int', dest='protocol_version', default=4, help='Native protocol version to use') + parser.add_option('-c', '--num-columns', type='int', dest='num_columns', default=2, + help='Specify the number of columns for the schema') options, args = parser.parse_args() From aaf78afb3a9bf2442f944cf315d7d861dadb1a0f Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 26 Apr 2016 10:16:57 -0400 Subject: [PATCH 0570/2005] Add an cli option to specify the keyspace name and the ability to keep the data after the benchmark --- benchmarks/base.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/benchmarks/base.py b/benchmarks/base.py index 39912be480..b3b0fb045c 100644 --- a/benchmarks/base.py +++ b/benchmarks/base.py @@ -71,10 +71,10 @@ def setup(options): session.execute(""" CREATE KEYSPACE %s WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '2' } - """ % KEYSPACE) + """ % options.keyspace) log.debug("Setting keyspace...") - session.set_keyspace(KEYSPACE) + session.set_keyspace(options.keyspace) log.debug("Creating table...") create_table_query = """ @@ -93,7 +93,8 @@ def setup(options): def teardown(options): cluster = Cluster(options.hosts, protocol_version=options.protocol_version) session = cluster.connect() - session.execute("DROP KEYSPACE " + KEYSPACE) + if not options.keep_data: + session.execute("DROP KEYSPACE " + options.keyspace) cluster.shutdown() @@ -108,7 +109,7 @@ def benchmark(thread_class): if options.protocol_version: kwargs['protocol_version'] = options.protocol_version cluster = Cluster(options.hosts, **kwargs) - session = cluster.connect(KEYSPACE) + session = cluster.connect(options.keyspace) log.debug("Sleeping for two seconds...") time.sleep(2.0) @@ -201,6 +202,10 @@ def parse_options(): help='Native protocol version to use') parser.add_option('-c', '--num-columns', type='int', dest='num_columns', default=2, help='Specify the number of columns for the schema') + parser.add_option('-k', '--keyspace', type='str', dest='keyspace', default=KEYSPACE, + help='Specify the keyspace name for the schema') + parser.add_option('--keep-data', action='store_true', dest='keep_data', default=False, + help='Keep the data after the benchmark') options, args = parser.parse_args() From 1d52f7afdd85d00e550f383c8aacf7f851e443d1 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 26 Apr 2016 10:43:07 -0400 Subject: [PATCH 0571/2005] Add an cli option to specify the column type for the schema --- benchmarks/base.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/benchmarks/base.py b/benchmarks/base.py index b3b0fb045c..6178d80f94 100644 --- a/benchmarks/base.py +++ b/benchmarks/base.py @@ -19,6 +19,7 @@ from threading import Thread import time from optparse import OptionParser +import uuid from greplin import scales @@ -59,6 +60,14 @@ KEYSPACE = "testkeyspace" + str(int(time.time())) TABLE = "testtable" +COLUMN_VALUES = { + 'int': 42, + 'text': "'42'", + 'float': 42.0, + 'uuid': uuid.uuid4(), + 'timestamp': "'2016-02-03 04:05+0000'" +} + def setup(options): log.info("Using 'cassandra' package from %s", cassandra.__path__) @@ -82,7 +91,7 @@ def setup(options): thekey text, """ for i in range(options.num_columns): - create_table_query += "col{} text,\n".format(i) + create_table_query += "col{} {},\n".format(i, options.column_type) create_table_query += "PRIMARY KEY (thekey))" session.execute(create_table_query.format(TABLE)) @@ -122,7 +131,7 @@ def benchmark(thread_class): insert_query += ") VALUES ('{}'".format('key') for i in range(options.num_columns): - insert_query += ", '{}'".format(i) + insert_query += ", {}".format(COLUMN_VALUES[options.column_type]) insert_query += ")" values = None @@ -206,6 +215,9 @@ def parse_options(): help='Specify the keyspace name for the schema') parser.add_option('--keep-data', action='store_true', dest='keep_data', default=False, help='Keep the data after the benchmark') + parser.add_option('--column-type', type='str', dest='column_type', default='text', + help='Specify the column type for the schema (supported: int, text, float, uuid, timestamp)') + options, args = parser.parse_args() From 857bcbfcff55b18f2fed6bbff1e0b9d050261205 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 26 Apr 2016 10:49:09 -0400 Subject: [PATCH 0572/2005] Reuse keyspace and table if they exists --- benchmarks/base.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/benchmarks/base.py b/benchmarks/base.py index 6178d80f94..4e173f89db 100644 --- a/benchmarks/base.py +++ b/benchmarks/base.py @@ -77,12 +77,16 @@ def setup(options): session = cluster.connect() log.debug("Creating keyspace...") - session.execute(""" - CREATE KEYSPACE %s - WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '2' } - """ % options.keyspace) + try: + session.execute(""" + CREATE KEYSPACE %s + WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '2' } + """ % options.keyspace) + + log.debug("Setting keyspace...") + except cassandra.AlreadyExists: + log.debug("Keyspace already exists") - log.debug("Setting keyspace...") session.set_keyspace(options.keyspace) log.debug("Creating table...") @@ -94,7 +98,11 @@ def setup(options): create_table_query += "col{} {},\n".format(i, options.column_type) create_table_query += "PRIMARY KEY (thekey))" - session.execute(create_table_query.format(TABLE)) + try: + session.execute(create_table_query.format(TABLE)) + except cassandra.AlreadyExists: + log.debug("Table already exists.") + finally: cluster.shutdown() From 3673fe0da82f66ad41631575930d6c8eb817428a Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 26 Apr 2016 11:34:13 -0400 Subject: [PATCH 0573/2005] all requests now use a different partition key --- benchmarks/base.py | 2 +- benchmarks/callback_full_pipeline.py | 6 ++++-- benchmarks/future_batches.py | 3 ++- benchmarks/future_full_pipeline.py | 3 ++- benchmarks/future_full_throttle.py | 5 +++-- 5 files changed, 12 insertions(+), 7 deletions(-) diff --git a/benchmarks/base.py b/benchmarks/base.py index 4e173f89db..074c61d1f8 100644 --- a/benchmarks/base.py +++ b/benchmarks/base.py @@ -136,7 +136,7 @@ def benchmark(thread_class): for i in range(options.num_columns): insert_query += ", col{}".format(i) - insert_query += ") VALUES ('{}'".format('key') + insert_query += ") VALUES ('{key}'" for i in range(options.num_columns): insert_query += ", {}".format(COLUMN_VALUES[options.column_type]) diff --git a/benchmarks/callback_full_pipeline.py b/benchmarks/callback_full_pipeline.py index 3736991b4e..da043d8261 100644 --- a/benchmarks/callback_full_pipeline.py +++ b/benchmarks/callback_full_pipeline.py @@ -41,8 +41,10 @@ def insert_next(self, previous_result=sentinel): if next(self.num_finished) >= self.num_queries: self.event.set() - if next(self.num_started) <= self.num_queries: - future = self.session.execute_async(self.query, self.values, timeout=None) + i = next(self.num_started) + if i <= self.num_queries: + key = "{}-{}".format(self.thread_num, i) + future = self.session.execute_async(self.query.format(key=key), timeout=None) future.add_callbacks(self.insert_next, self.insert_next) def run(self): diff --git a/benchmarks/future_batches.py b/benchmarks/future_batches.py index 91c250bcf9..c134d520de 100644 --- a/benchmarks/future_batches.py +++ b/benchmarks/future_batches.py @@ -35,7 +35,8 @@ def run(self): except queue.Empty: break - future = self.session.execute_async(self.query, self.values) + key = "{}-{}".format(self.thread_num, i) + future = self.session.execute_async(self.query.format(key=key)) futures.put_nowait(future) while True: diff --git a/benchmarks/future_full_pipeline.py b/benchmarks/future_full_pipeline.py index 40682e0418..b2b2c7d150 100644 --- a/benchmarks/future_full_pipeline.py +++ b/benchmarks/future_full_pipeline.py @@ -31,7 +31,8 @@ def run(self): old_future = futures.get_nowait() old_future.result() - future = self.session.execute_async(self.query, self.values) + key = "{}-{}".format(self.thread_num, i) + future = self.session.execute_async(self.query.format(key=key)) futures.put_nowait(future) while True: diff --git a/benchmarks/future_full_throttle.py b/benchmarks/future_full_throttle.py index 27d87442bb..a98bfcecc9 100644 --- a/benchmarks/future_full_throttle.py +++ b/benchmarks/future_full_throttle.py @@ -25,8 +25,9 @@ def run(self): self.start_profile() - for _ in range(self.num_queries): - future = self.session.execute_async(self.query, self.values) + for i in range(self.num_queries): + key = "{}-{}".format(self.thread_num, i) + future = self.session.execute_async(self.query.format(key=key)) futures.append(future) for future in futures: From 21e43412c72d3ebe85493edff083606d3036ae3e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 26 Apr 2016 11:32:50 -0500 Subject: [PATCH 0574/2005] asyncore: add method to wake loop and avoid idle spinning PYTHON-239 --- cassandra/io/asyncorereactor.py | 43 ++++++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 2a83996d17..f5cae96325 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -18,7 +18,7 @@ import os import socket import sys -from threading import Event, Lock, Thread +from threading import Lock, Thread import time import weakref @@ -52,8 +52,40 @@ def _cleanup(loop_weakref): loop._cleanup() -class AsyncoreLoop(object): +class _PipeWrapper(object): + + def __init__(self, fd): + self.fd = fd + + def fileno(self): + return self.fd + + +class _AsyncorePipeDispatcher(asyncore.dispatcher): + + def __init__(self): + self.read_fd, self.write_fd = os.pipe() + asyncore.dispatcher.__init__(self) + self.set_socket(_PipeWrapper(self.read_fd)) + self._notified = False + self._wrote = 0 + self._read = 0 + def writable(self): + return False + + def handle_read(self): + while len(os.read(self.read_fd, 4096)) == 4096: + pass + self._notified = False + + def notify_loop(self): + if not self._notified: + self._notified = True + os.write(self.write_fd, 'x') + + +class AsyncoreLoop(object): def __init__(self): self._pid = os.getpid() @@ -65,6 +97,7 @@ def __init__(self): self._timers = TimerManager() + self._pipe_dispatcher = _AsyncorePipeDispatcher() atexit.register(partial(_cleanup, weakref.ref(self))) def maybe_start(self): @@ -84,12 +117,15 @@ def maybe_start(self): self._thread.daemon = True self._thread.start() + def wake_loop(self): + self._pipe_dispatcher.notify_loop() + def _run_loop(self): log.debug("Starting asyncore event loop") with self._loop_lock: while not self._shutdown: try: - asyncore.loop(timeout=0.001, use_poll=True, count=100) + asyncore.loop(timeout=0.1, use_poll=True, count=1) self._timers.service_timeouts() if not asyncore.socket_map: time.sleep(0.005) @@ -254,6 +290,7 @@ def push(self, data): with self.deque_lock: self.deque.extend(chunks) self._writable = True + self._loop.wake_loop() def writable(self): return self._writable From 86a793f0510dddb0cd92ebcd8f0f248eeb7ea2d8 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 26 Apr 2016 12:42:24 -0400 Subject: [PATCH 0575/2005] Add read benchmark support --- benchmarks/base.py | 31 +++++++++++++++++----------- benchmarks/callback_full_pipeline.py | 2 +- benchmarks/future_batches.py | 2 +- benchmarks/future_full_pipeline.py | 2 +- benchmarks/future_full_throttle.py | 2 +- 5 files changed, 23 insertions(+), 16 deletions(-) diff --git a/benchmarks/base.py b/benchmarks/base.py index 074c61d1f8..e91846971b 100644 --- a/benchmarks/base.py +++ b/benchmarks/base.py @@ -131,28 +131,30 @@ def benchmark(thread_class): log.debug("Sleeping for two seconds...") time.sleep(2.0) - # Generate the INSERT query - insert_query = "INSERT INTO {} (thekey".format(TABLE) - for i in range(options.num_columns): - insert_query += ", col{}".format(i) - - insert_query += ") VALUES ('{key}'" - for i in range(options.num_columns): - insert_query += ", {}".format(COLUMN_VALUES[options.column_type]) - insert_query += ")" + # Generate the query + if options.read: + query = "SELECT * FROM {} WHERE thekey = '{{key}}'".format(TABLE) + else: + query = "INSERT INTO {} (thekey".format(TABLE) + for i in range(options.num_columns): + query += ", col{}".format(i) - values = None + query += ") VALUES ('{key}'" + for i in range(options.num_columns): + query += ", {}".format(COLUMN_VALUES[options.column_type]) + query += ")" + values = None # we don't use that anymore. Keeping it in case we go back to prepared statements. per_thread = options.num_ops // options.threads threads = [] - log.debug("Beginning inserts...") + log.debug("Beginning {}...".format('reads' if options.read else 'inserts')) start = time.time() try: for i in range(options.threads): thread = thread_class( - i, session, insert_query, values, per_thread, + i, session, query, values, per_thread, cluster.protocol_version, options.profile) thread.daemon = True threads.append(thread) @@ -225,6 +227,8 @@ def parse_options(): help='Keep the data after the benchmark') parser.add_option('--column-type', type='str', dest='column_type', default='text', help='Specify the column type for the schema (supported: int, text, float, uuid, timestamp)') + parser.add_option('--read', action='store_true', dest='read', default=False, + help='Read mode') options, args = parser.parse_args() @@ -269,6 +273,9 @@ def start_profile(self): if self.profiler: self.profiler.enable() + def run_query(self, key, **kwargs): + return self.session.execute_async(self.query.format(key=key), **kwargs) + def finish_profile(self): if self.profiler: self.profiler.disable() diff --git a/benchmarks/callback_full_pipeline.py b/benchmarks/callback_full_pipeline.py index da043d8261..a7990d80fa 100644 --- a/benchmarks/callback_full_pipeline.py +++ b/benchmarks/callback_full_pipeline.py @@ -44,7 +44,7 @@ def insert_next(self, previous_result=sentinel): i = next(self.num_started) if i <= self.num_queries: key = "{}-{}".format(self.thread_num, i) - future = self.session.execute_async(self.query.format(key=key), timeout=None) + future = self.run_query(key, timeout=None) future.add_callbacks(self.insert_next, self.insert_next) def run(self): diff --git a/benchmarks/future_batches.py b/benchmarks/future_batches.py index c134d520de..c8305369e9 100644 --- a/benchmarks/future_batches.py +++ b/benchmarks/future_batches.py @@ -36,7 +36,7 @@ def run(self): break key = "{}-{}".format(self.thread_num, i) - future = self.session.execute_async(self.query.format(key=key)) + future = self.run_query(key) futures.put_nowait(future) while True: diff --git a/benchmarks/future_full_pipeline.py b/benchmarks/future_full_pipeline.py index b2b2c7d150..ecc2ce6fc6 100644 --- a/benchmarks/future_full_pipeline.py +++ b/benchmarks/future_full_pipeline.py @@ -32,7 +32,7 @@ def run(self): old_future.result() key = "{}-{}".format(self.thread_num, i) - future = self.session.execute_async(self.query.format(key=key)) + future = self.run_query(key) futures.put_nowait(future) while True: diff --git a/benchmarks/future_full_throttle.py b/benchmarks/future_full_throttle.py index a98bfcecc9..2e47b19f9b 100644 --- a/benchmarks/future_full_throttle.py +++ b/benchmarks/future_full_throttle.py @@ -27,7 +27,7 @@ def run(self): for i in range(self.num_queries): key = "{}-{}".format(self.thread_num, i) - future = self.session.execute_async(self.query.format(key=key)) + future = self.run_query(key) futures.append(future) for future in futures: From 04259fd56f9d118a35bd68084f9816d4051f4d45 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 26 Apr 2016 14:36:46 -0500 Subject: [PATCH 0576/2005] prototype udp dispatcher for asyncore --- cassandra/io/asyncorereactor.py | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index f5cae96325..d26fd9fadb 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -30,6 +30,7 @@ from cassandra.util import WeakSet # noqa import asyncore +from errno import EWOULDBLOCK try: import ssl @@ -85,6 +86,33 @@ def notify_loop(self): os.write(self.write_fd, 'x') +class _AsyncoreUDPDispatcher(asyncore.dispatcher): + + def __init__(self): + self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + self._socket.bind(('localhost', 10000)) + self._socket.setblocking(False) + asyncore.dispatcher.__init__(self) + self.set_socket(self._socket) + self._notified = False + + def writable(self): + return False + + def handle_read(self): + try: + while self._sock.recvfrom(0): + pass + except socket.error as e: + pass + self._notified = False + + def notify_loop(self): + if not self._notified: + self._notified = True + self._socket.sendto('', ('localhost', 10000)) + + class AsyncoreLoop(object): def __init__(self): @@ -97,7 +125,7 @@ def __init__(self): self._timers = TimerManager() - self._pipe_dispatcher = _AsyncorePipeDispatcher() + self._pipe_dispatcher = _AsyncoreUDPDispatcher() atexit.register(partial(_cleanup, weakref.ref(self))) def maybe_start(self): From 4a82dfe064786df9e17624077486541f6eb2bf90 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 26 Apr 2016 16:19:21 -0500 Subject: [PATCH 0577/2005] refactor asyncore loop dispatch to use pipe, fallback to busy wait PYTHON-239 --- cassandra/io/asyncorereactor.py | 98 +++++++++++++++++++++++++-------- 1 file changed, 75 insertions(+), 23 deletions(-) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index d26fd9fadb..42eab0fc83 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -62,15 +62,33 @@ def fileno(self): return self.fd -class _AsyncorePipeDispatcher(asyncore.dispatcher): +class _AsyncoreDispatcher(asyncore.dispatcher): - def __init__(self): - self.read_fd, self.write_fd = os.pipe() + def __init__(self, socket): asyncore.dispatcher.__init__(self) - self.set_socket(_PipeWrapper(self.read_fd)) + # inject after to avoid base class validation + self.set_socket(socket) self._notified = False - self._wrote = 0 - self._read = 0 + + def writable(self): + return False + + def validate(self): + assert not self._notified + self.notify_loop() + assert self._notified + self.loop(0.1) + assert not self._notified + + def loop(self, timeout): + asyncore.loop(timeout=timeout, use_poll=True, count=1) + + +class _AsyncorePipeDispatcher(_AsyncoreDispatcher): + + def __init__(self): + self.read_fd, self.write_fd = os.pipe() + _AsyncoreDispatcher.__init__(self, _PipeWrapper(self.read_fd)) def writable(self): return False @@ -86,23 +104,26 @@ def notify_loop(self): os.write(self.write_fd, 'x') -class _AsyncoreUDPDispatcher(asyncore.dispatcher): +class _AsyncoreUDPDispatcher(_AsyncoreDispatcher): + """ + Experimental alternate dispatcher for avoiding busy wait in the asyncore loop. It is not used by default because + it relies on local port binding. + Port scanning is not implemented, so multiple clients on one host will collide. This address would need to be set per + instance, or this could be specialized to scan until an address is found. + """ + bind_address = ('localhost', 10000) def __init__(self): self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - self._socket.bind(('localhost', 10000)) - self._socket.setblocking(False) - asyncore.dispatcher.__init__(self) - self.set_socket(self._socket) - self._notified = False - - def writable(self): - return False + self._socket.bind(self.bind_address) + self._socket.setblocking(0) + _AsyncoreDispatcher.__init__(self, self._socket) def handle_read(self): try: - while self._sock.recvfrom(0): - pass + d = self._socket.recvfrom(1) + while d and d[1]: + d = self._socket.recvfrom(1) except socket.error as e: pass self._notified = False @@ -110,11 +131,36 @@ def handle_read(self): def notify_loop(self): if not self._notified: self._notified = True - self._socket.sendto('', ('localhost', 10000)) + self._socket.sendto(b'', self.bind_address) + + def loop(self, timeout): + asyncore.loop(timeout=timeout, use_poll=False, count=1) + + +class _BusyWaitDispatcher(object): + + max_write_latency = 0.001 + """ + Timeout pushed down to asyncore select/poll. Dictates the amount of time it will sleep before coming back to check + if anything is writable. + """ + + def notify_loop(self): + pass + + def loop(self, timeout): + if not asyncore.socket_map: + time.sleep(0.005) + count = timeout // self.max_write_latency + asyncore.loop(timeout=self.max_write_latency, use_poll=True, count=count) class AsyncoreLoop(object): + timer_resolution = 0.1 # used as the max interval to be in the io loop before returning to service timeouts + + _loop_dispatch_class = _AsyncorePipeDispatcher + def __init__(self): self._pid = os.getpid() self._loop_lock = Lock() @@ -125,7 +171,15 @@ def __init__(self): self._timers = TimerManager() - self._pipe_dispatcher = _AsyncoreUDPDispatcher() + try: + dispatcher = self._loop_dispatch_class() + dispatcher.validate() + log.debug("Validated loop dispatch with %s", self._loop_dispatch_class) + except Exception: + log.exception("Failed validating loop dispatch with %s. Using busy wait execution instead.", self._loop_dispatch_class) + dispatcher = _BusyWaitDispatcher() + self._loop_dispatcher = dispatcher + atexit.register(partial(_cleanup, weakref.ref(self))) def maybe_start(self): @@ -146,17 +200,15 @@ def maybe_start(self): self._thread.start() def wake_loop(self): - self._pipe_dispatcher.notify_loop() + self._loop_dispatcher.notify_loop() def _run_loop(self): log.debug("Starting asyncore event loop") with self._loop_lock: while not self._shutdown: try: - asyncore.loop(timeout=0.1, use_poll=True, count=1) + self._loop_dispatcher.loop(self.timer_resolution) self._timers.service_timeouts() - if not asyncore.socket_map: - time.sleep(0.005) except Exception: log.debug("Asyncore event loop stopped unexepectedly", exc_info=True) break From fe762ae8641ab00eb7ae43b1aa882fa5a9fbaec2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 26 Apr 2016 16:26:39 -0500 Subject: [PATCH 0578/2005] validate for ayncore _BusyWaitDispatcher --- cassandra/io/asyncorereactor.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 42eab0fc83..242c0b3d36 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -154,6 +154,9 @@ def loop(self, timeout): count = timeout // self.max_write_latency asyncore.loop(timeout=self.max_write_latency, use_poll=True, count=count) + def validate(self): + pass + class AsyncoreLoop(object): From cc3307c9b27516018dad3ad9e22207670cee9f55 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 26 Apr 2016 16:41:35 -0500 Subject: [PATCH 0579/2005] isolate our dispatcher map from asyncore package map PYTHON-239 --- cassandra/io/asyncorereactor.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 242c0b3d36..c02513d7a6 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -30,19 +30,17 @@ from cassandra.util import WeakSet # noqa import asyncore -from errno import EWOULDBLOCK try: import ssl except ImportError: ssl = None # NOQA -from cassandra.connection import (Connection, ConnectionShutdown, - ConnectionException, NONBLOCKING, - Timer, TimerManager) +from cassandra.connection import Connection, ConnectionShutdown, NONBLOCKING, Timer, TimerManager log = logging.getLogger(__name__) +_dispatcher_map = {} def _cleanup(loop_weakref): try: @@ -65,7 +63,7 @@ def fileno(self): class _AsyncoreDispatcher(asyncore.dispatcher): def __init__(self, socket): - asyncore.dispatcher.__init__(self) + asyncore.dispatcher.__init__(self, map=_dispatcher_map) # inject after to avoid base class validation self.set_socket(socket) self._notified = False @@ -81,7 +79,7 @@ def validate(self): assert not self._notified def loop(self, timeout): - asyncore.loop(timeout=timeout, use_poll=True, count=1) + asyncore.loop(timeout=timeout, use_poll=True, map=_dispatcher_map, count=1) class _AsyncorePipeDispatcher(_AsyncoreDispatcher): @@ -134,7 +132,7 @@ def notify_loop(self): self._socket.sendto(b'', self.bind_address) def loop(self, timeout): - asyncore.loop(timeout=timeout, use_poll=False, count=1) + asyncore.loop(timeout=timeout, use_poll=False, map=_dispatcher_map, count=1) class _BusyWaitDispatcher(object): @@ -149,10 +147,10 @@ def notify_loop(self): pass def loop(self, timeout): - if not asyncore.socket_map: + if not _dispatcher_map: time.sleep(0.005) count = timeout // self.max_write_latency - asyncore.loop(timeout=self.max_write_latency, use_poll=True, count=count) + asyncore.loop(timeout=self.max_write_latency, use_poll=True, map=_dispatcher_map, count=count) def validate(self): pass @@ -273,13 +271,12 @@ def create_timer(cls, timeout, callback): def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) - asyncore.dispatcher.__init__(self) self.deque = deque() self.deque_lock = Lock() self._connect_socket() - asyncore.dispatcher.__init__(self, self._socket) + asyncore.dispatcher.__init__(self, self._socket, _dispatcher_map) self._writable = True self._readable = True From 9146a1b51c42da264f76e1b53affe470b9464346 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 26 Apr 2016 16:56:39 -0500 Subject: [PATCH 0580/2005] close and cleanup when asyncore dispatch validation fails --- cassandra/io/asyncorereactor.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index c02513d7a6..fd899614b8 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -59,6 +59,9 @@ def __init__(self, fd): def fileno(self): return self.fd + def close(self): + os.close(self.fd) + class _AsyncoreDispatcher(asyncore.dispatcher): @@ -155,6 +158,9 @@ def loop(self, timeout): def validate(self): pass + def close(self): + pass + class AsyncoreLoop(object): @@ -178,6 +184,7 @@ def __init__(self): log.debug("Validated loop dispatch with %s", self._loop_dispatch_class) except Exception: log.exception("Failed validating loop dispatch with %s. Using busy wait execution instead.", self._loop_dispatch_class) + dispatcher.close() dispatcher = _BusyWaitDispatcher() self._loop_dispatcher = dispatcher From 489afe5b9e2543d010079db94026c438458a6bde Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 26 Apr 2016 17:02:27 -0500 Subject: [PATCH 0581/2005] make asyncore default busywaitdispatcher for windows PYTHON-239 --- cassandra/io/asyncorereactor.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index fd899614b8..c6cf8a1924 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -111,6 +111,12 @@ class _AsyncoreUDPDispatcher(_AsyncoreDispatcher): it relies on local port binding. Port scanning is not implemented, so multiple clients on one host will collide. This address would need to be set per instance, or this could be specialized to scan until an address is found. + + To use:: + + from cassandra.io.asyncorereactor import _AsyncoreUDPDispatcher, AsyncoreLoop + AsyncoreLoop._loop_dispatch_class = _AsyncoreUDPDispatcher + """ bind_address = ('localhost', 10000) @@ -166,7 +172,7 @@ class AsyncoreLoop(object): timer_resolution = 0.1 # used as the max interval to be in the io loop before returning to service timeouts - _loop_dispatch_class = _AsyncorePipeDispatcher + _loop_dispatch_class = _AsyncorePipeDispatcher if os.name != 'nt' else _BusyWaitDispatcher def __init__(self): self._pid = os.getpid() From 6d1af98a6488ace02bd46ce6dd560741556fbc80 Mon Sep 17 00:00:00 2001 From: Mark Florisson Date: Wed, 27 Apr 2016 12:06:45 +0100 Subject: [PATCH 0582/2005] Support empty string values in NumpyProtocolHandler --- cassandra/deserializers.pyx | 8 ++++++++ cassandra/numpy_parser.pyx | 8 ++++++-- tests/integration/standard/utils.py | 6 +++++- 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/cassandra/deserializers.pyx b/cassandra/deserializers.pyx index 9ffcdd5daa..6384682b69 100644 --- a/cassandra/deserializers.pyx +++ b/cassandra/deserializers.pyx @@ -44,6 +44,8 @@ cdef class Deserializer: cdef class DesBytesType(Deserializer): cdef deserialize(self, Buffer *buf, int protocol_version): + if buf.size == 0: + return "" return to_bytes(buf) # this is to facilitate cqlsh integration, which requires bytearrays for BytesType @@ -51,6 +53,8 @@ cdef class DesBytesType(Deserializer): # deserializers.DesBytesType = deserializers.DesBytesTypeByteArray cdef class DesBytesTypeByteArray(Deserializer): cdef deserialize(self, Buffer *buf, int protocol_version): + if buf.size == 0: + return bytearray() return bytearray(buf.ptr[:buf.size]) # TODO: Use libmpdec: http://www.bytereef.org/mpdecimal/index.html @@ -84,6 +88,8 @@ cdef class DesByteType(Deserializer): cdef class DesAsciiType(Deserializer): cdef deserialize(self, Buffer *buf, int protocol_version): + if buf.size == 0: + return "" if PY2: return to_bytes(buf) return to_bytes(buf).decode('ascii') @@ -169,6 +175,8 @@ cdef class DesTimeType(Deserializer): cdef class DesUTF8Type(Deserializer): cdef deserialize(self, Buffer *buf, int protocol_version): + if buf.size == 0: + return "" cdef val = to_bytes(buf) return val.decode('utf8') diff --git a/cassandra/numpy_parser.pyx b/cassandra/numpy_parser.pyx index 920a3efdd1..cc64d604c9 100644 --- a/cassandra/numpy_parser.pyx +++ b/cassandra/numpy_parser.pyx @@ -52,12 +52,14 @@ ctypedef struct ArrDesc: Py_uintptr_t buf_ptr int stride # should be large enough as we allocate contiguous arrays int is_object + int empty_binary_ok arrDescDtype = np.dtype( [ ('buf_ptr', np.uintp) , ('stride', np.dtype('i')) , ('is_object', np.dtype('i')) - ]) + , ('empty_binary_ok', np.dtype('i')) + ], align=True) _cqltype_to_numpy = { cqltypes.LongType: np.dtype('>i8'), @@ -117,6 +119,7 @@ def make_arrays(ParseDesc desc, array_size): array_descs[i]['buf_ptr'] = arr.ctypes.data array_descs[i]['stride'] = arr.strides[0] array_descs[i]['is_object'] = coltype not in _cqltype_to_numpy + array_descs[i]['empty_binary_ok'] = coltype.empty_binary_ok arrays.append(arr) return array_descs, arrays @@ -145,8 +148,9 @@ cdef inline int unpack_row( get_buf(reader, &buf) arr = arrays[i] - if buf.size == 0: + if buf.size < 0 or (buf.size == 0 and not arr.empty_binary_ok): raise ValueError("Cannot handle NULL value") + if arr.is_object: deserializer = desc.deserializers[i] val = from_binary(deserializer, &buf, desc.protocol_version) diff --git a/tests/integration/standard/utils.py b/tests/integration/standard/utils.py index 8749c5e3a2..4011047fc8 100644 --- a/tests/integration/standard/utils.py +++ b/tests/integration/standard/utils.py @@ -45,7 +45,11 @@ def get_all_primitive_params(key): """ params = [key] for datatype in PRIMITIVE_DATATYPES: - params.append(get_sample(datatype)) + # Also test for empty strings + if key == 1 and datatype == 'ascii': + params.append('') + else: + params.append(get_sample(datatype)) return params From af5a78f524cd21dbf318a7878a32bac8a964c38e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 27 Apr 2016 11:51:18 -0500 Subject: [PATCH 0583/2005] cython: make empty bytes type consistent for python 3 PYTHON-550 --- cassandra/deserializers.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/deserializers.pyx b/cassandra/deserializers.pyx index 6384682b69..6c29ebdea1 100644 --- a/cassandra/deserializers.pyx +++ b/cassandra/deserializers.pyx @@ -45,7 +45,7 @@ cdef class Deserializer: cdef class DesBytesType(Deserializer): cdef deserialize(self, Buffer *buf, int protocol_version): if buf.size == 0: - return "" + return b"" return to_bytes(buf) # this is to facilitate cqlsh integration, which requires bytearrays for BytesType From 4e6468574f21038d84d935bfaba4126452e843f4 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 27 Apr 2016 12:26:45 -0500 Subject: [PATCH 0584/2005] Allowing heartbeat test to run against C* 2.0. Fixing issue with legacy table test. --- tests/integration/standard/test_connection.py | 12 +++++++++--- tests/integration/standard/test_metadata.py | 10 +++++----- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index f7acae6721..2d07b92038 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -29,6 +29,7 @@ from cassandra.protocol import QueryMessage from cassandra.connection import Connection from cassandra.policies import WhiteListRoundRobinPolicy, HostStateListener +from cassandra.pool import HostConnectionPool from tests import is_monkey_patched from tests.integration import use_singledc, PROTOCOL_VERSION, get_node @@ -125,6 +126,7 @@ def test_heart_beat_timeout(self): rs.result() current_host = str(rs._current_host) count += 1 + time.sleep(.1) self.assertLess(count, 100, "Never connected to the first node") new_connections = self.wait_for_connections(host, self.cluster) self.assertIsNone(test_listener.host_down) @@ -138,8 +140,12 @@ def fetch_connections(self, host, cluster): holders = cluster.get_connection_holders() for conn in holders: if host == str(getattr(conn, 'host', '')): - if conn._connection is not None: - connections.append(conn._connection) + if isinstance(conn, HostConnectionPool): + if conn._connections is not None: + connections.append(conn._connections) + else: + if conn._connection is not None: + connections.append(conn._connection) return connections def wait_for_connections(self, host, cluster): @@ -159,7 +165,7 @@ def wait_for_no_connections(self, host, cluster): connections = self.fetch_connections(host, cluster) if len(connections) is 0: return - time.sleep(.1) + time.sleep(.5) self.fail("Connections never cleared") diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index fdcb9b862b..4d86723fb9 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1134,14 +1134,14 @@ def test_legacy_tables(self): CREATE TABLE legacy.composite_comp_with_col ( key blob, - t timeuuid, b blob, s text, + t timeuuid, "b@6869746d65776974686d75736963" blob, "b@6d616d6d616a616d6d61" blob, - PRIMARY KEY (key, t, b, s) + PRIMARY KEY (key, b, s, t) ) WITH COMPACT STORAGE - AND CLUSTERING ORDER BY (t ASC, b ASC, s ASC) + AND CLUSTERING ORDER BY (b ASC, s ASC, t ASC) AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' AND comment = 'Stores file meta data' AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'} @@ -1262,8 +1262,8 @@ def test_legacy_tables(self): CREATE TABLE legacy.composite_comp_no_col ( key blob, - column1 'org.apache.cassandra.db.marshal.DynamicCompositeType(org.apache.cassandra.db.marshal.TimeUUIDType, org.apache.cassandra.db.marshal.BytesType, org.apache.cassandra.db.marshal.UTF8Type)', - column2 text, + column1 'org.apache.cassandra.db.marshal.DynamicCompositeType(org.apache.cassandra.db.marshal.BytesType, org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.db.marshal.TimeUUIDType)', + column2 timeuuid, value blob, PRIMARY KEY (key, column1, column1, column2) ) WITH COMPACT STORAGE From fbe53a53a302ec38c76fddb9df9f6ce08fa22fe3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 27 Apr 2016 14:12:54 -0500 Subject: [PATCH 0585/2005] restore null object deser while still protecting against segfault in numpy parser PYTHON-550 --- cassandra/numpy_parser.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/numpy_parser.pyx b/cassandra/numpy_parser.pyx index cc64d604c9..dea8d9a641 100644 --- a/cassandra/numpy_parser.pyx +++ b/cassandra/numpy_parser.pyx @@ -148,7 +148,7 @@ cdef inline int unpack_row( get_buf(reader, &buf) arr = arrays[i] - if buf.size < 0 or (buf.size == 0 and not arr.empty_binary_ok): + if (buf.size == 0 and not arr.empty_binary_ok) or (buf.size < 0 and not arr.is_object): raise ValueError("Cannot handle NULL value") if arr.is_object: From d163950f30f21b534530bd7bdce7030afb194e98 Mon Sep 17 00:00:00 2001 From: Mark Florisson Date: Wed, 27 Apr 2016 20:10:13 +0100 Subject: [PATCH 0586/2005] Allow NULL values in numpy object arrays Conflicts: cassandra/numpy_parser.pyx --- cassandra/numpy_parser.pyx | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/cassandra/numpy_parser.pyx b/cassandra/numpy_parser.pyx index dea8d9a641..1334e747c4 100644 --- a/cassandra/numpy_parser.pyx +++ b/cassandra/numpy_parser.pyx @@ -52,13 +52,11 @@ ctypedef struct ArrDesc: Py_uintptr_t buf_ptr int stride # should be large enough as we allocate contiguous arrays int is_object - int empty_binary_ok arrDescDtype = np.dtype( [ ('buf_ptr', np.uintp) , ('stride', np.dtype('i')) , ('is_object', np.dtype('i')) - , ('empty_binary_ok', np.dtype('i')) ], align=True) _cqltype_to_numpy = { @@ -119,7 +117,6 @@ def make_arrays(ParseDesc desc, array_size): array_descs[i]['buf_ptr'] = arr.ctypes.data array_descs[i]['stride'] = arr.strides[0] array_descs[i]['is_object'] = coltype not in _cqltype_to_numpy - array_descs[i]['empty_binary_ok'] = coltype.empty_binary_ok arrays.append(arr) return array_descs, arrays @@ -148,14 +145,13 @@ cdef inline int unpack_row( get_buf(reader, &buf) arr = arrays[i] - if (buf.size == 0 and not arr.empty_binary_ok) or (buf.size < 0 and not arr.is_object): - raise ValueError("Cannot handle NULL value") - if arr.is_object: deserializer = desc.deserializers[i] val = from_binary(deserializer, &buf, desc.protocol_version) Py_INCREF(val) ( arr.buf_ptr)[0] = val + elif buf.size < 0: + raise ValueError("Cannot handle NULL value") else: memcpy( arr.buf_ptr, buf.ptr, buf.size) From 01a38259e4a1864db79e7c1e5e2e469a3372bf31 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 28 Apr 2016 16:20:41 -0500 Subject: [PATCH 0587/2005] Don't mark ignored hosts up on discovery We don't know their state unless an event comes in PYTHON-531 --- cassandra/cluster.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index dc5c7221b5..505c2e24eb 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1161,7 +1161,7 @@ def on_add(self, host, refresh_nodes=True): if distance == HostDistance.IGNORED: log.debug("Not adding connection pool for new host %r because the " "load balancing policy has marked it as IGNORED", host) - self._finalize_add(host) + self._finalize_add(host, set_up=False) return futures_lock = Lock() @@ -1203,9 +1203,10 @@ def future_completed(future): if not have_future: self._finalize_add(host) - def _finalize_add(self, host): - # mark the host as up and notify all listeners - host.set_up() + def _finalize_add(self, host, set_up=True): + if set_up: + host.set_up() + for listener in self.listeners: listener.on_add(host) From 1ad419bda41f934ad5b1fa0ce2d76de01f2d1ee7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 29 Apr 2016 09:54:40 -0500 Subject: [PATCH 0588/2005] don't check schema agreement with ignored hosts PYTHON-531 --- cassandra/cluster.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 505c2e24eb..d1d6590a63 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2569,13 +2569,14 @@ def _get_schema_mismatches(self, peers_result, local_result, local_address): if local_row.get("schema_version"): versions[local_row.get("schema_version")].add(local_address) + lbp = self._cluster.load_balancing_policy for row in peers_result: schema_ver = row.get('schema_version') if not schema_ver: continue addr = self._rpc_from_peer_row(row) peer = self._cluster.metadata.get_host(addr) - if peer and peer.is_up: + if peer and peer.is_up and lbp.distance(peer) != HostDistance.IGNORED: versions[schema_ver].add(addr) if len(versions) == 1: From ca779bd760e0f7220848eea5112a4bf04d8756d3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 29 Apr 2016 15:08:22 -0500 Subject: [PATCH 0589/2005] 3.3 changelog update --- CHANGELOG.rst | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index a3618ca1db..8a5f6f3da7 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,30 @@ +3.3.0 +===== + +Features +-------- +* Add an AddressTranslator interface (PYTHON-69) +* New Retry Policy Decision - try next host (PYTHON-285) +* Don't mark host down on timeout (PYTHON-286) +* SSL hostname verification (PYTHON-296) +* Add C* version to metadata or cluster objects (PYTHON-301) +* Expose listen_address of node we get ring information from (PYTHON-332) +* Use A-record with multiple IPs for contact points (PYTHON-415) +* Custom consistency level for populating query traces (PYTHON-435) +* Normalize Server Exception Types (PYTHON-443) +* Propagate exception message when DDL schema agreement fails (PYTHON-444) +* Specialized exceptions for metadata refresh methods failure (PYTHON-527) + +Bug Fixes +--------- +* Resolve contact point hostnames to avoid duplicate hosts (PYTHON-103) +* Options to Disable Schema, Token Metadata Processing (PYTHON-327) +* GeventConnection stalls requests when read is a multiple of the input buffer size (PYTHON-429) +* named_tuple_factory breaks with duplicate "cleaned" col names (PYTHON-467) +* Connection leak if Cluster.shutdown() happens during reconnection (PYTHON-482) +* HostConnection.borrow_connection does not block when all request ids are used (PYTHON-514) +* Empty field not being handled by the NumpyProtocolHandler (PYTHON-550) + 3.2.2 ===== April 19, 2016 From ef5606e1d1917d27f3ca3d416f9f103f5ce69059 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 29 Apr 2016 16:22:52 -0500 Subject: [PATCH 0590/2005] PYTHON-550 adding test for numpy types with null --- tests/integration/__init__.py | 16 ++++ .../standard/test_cython_protocol_handlers.py | 73 ++++++++++++++++++- 2 files changed, 86 insertions(+), 3 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 178c1a5a82..cd55757582 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -359,6 +359,22 @@ def execute_with_long_wait_retry(session, query, timeout=30): raise RuntimeError("Failed to execute query after 100 attempts: {0}".format(query)) +def execute_with_retry_tolerant(session, query, retry_exceptions, escape_exception): + # TODO refactor above methods into this one for code reuse + tries = 0 + while tries < 100: + try: + tries += 1 + rs = session.execute(query) + return rs + except escape_exception: + return + except retry_exceptions: + time.sleep(.1) + + raise RuntimeError("Failed to execute query after 100 attempts: {0}".format(query)) + + def drop_keyspace_shutdown_cluster(keyspace_name, session, cluster): try: execute_with_long_wait_retry(session, "DROP KEYSPACE {0}".format(keyspace_name)) diff --git a/tests/integration/standard/test_cython_protocol_handlers.py b/tests/integration/standard/test_cython_protocol_handlers.py index 53457c21da..783df2617f 100644 --- a/tests/integration/standard/test_cython_protocol_handlers.py +++ b/tests/integration/standard/test_cython_protocol_handlers.py @@ -7,11 +7,12 @@ except ImportError: import unittest +from cassandra import DriverException, Timeout, AlreadyExists from cassandra.query import tuple_factory -from cassandra.cluster import Cluster -from cassandra.protocol import ProtocolHandler, LazyProtocolHandler, NumpyProtocolHandler +from cassandra.cluster import Cluster, NoHostAvailable +from cassandra.protocol import ProtocolHandler, LazyProtocolHandler, NumpyProtocolHandler, ConfigurationException from cassandra.cython_deps import HAVE_CYTHON, HAVE_NUMPY -from tests.integration import use_singledc, PROTOCOL_VERSION, notprotocolv1, drop_keyspace_shutdown_cluster, VERIFY_CYTHON +from tests.integration import use_singledc, PROTOCOL_VERSION, notprotocolv1, drop_keyspace_shutdown_cluster, VERIFY_CYTHON, BasicSharedKeyspaceUnitTestCase, execute_with_retry_tolerant from tests.integration.datatype_utils import update_datatypes from tests.integration.standard.utils import ( create_table_with_all_types, get_all_primitive_params, get_primitive_datatypes) @@ -202,3 +203,69 @@ def verify_iterator_data(assertEqual, results): for expected, actual in zip(params, result): assertEqual(actual, expected) return count + + +class NumpyNullTest(BasicSharedKeyspaceUnitTestCase): + + # A dictionary containing table key to type. + # Boolean dictates whether or not the type can be deserialized with null value + NUMPY_TYPES = {"v1": ('bigint', False), + "v2": ('double', False), + "v3": ('float', False), + "v4": ('int', False), + "v5": ('smallint', False), + "v6": ("ascii", True), + "v7": ("blob", True), + "v8": ("boolean", True), + "v9": ("decimal", True), + "v10": ("inet", True), + "v11": ("text", True), + "v12": ("timestamp", True), + "v13": ("timeuuid", True), + "v14": ("uuid", True), + "v15": ("varchar", True), + "v16": ("varint", True), + } + + def setUp(self): + self.session.client_protocol_handler = NumpyProtocolHandler + self.session.row_factory = tuple_factory + + @numpytest + def test_null_types3(self): + """ + Test to validate that the numpy protocol handler can deal with null values. + @since 3.3.0 + @jira_ticket PYTHON-550 + @expected_result Numpy can handle non mapped types' null values. + + @test_category data_types:serialization + """ + + self.create_table_of_types() + self.session.execute("INSERT INTO {0}.{1} (k) VALUES (1)".format(self.keyspace_name, self.function_table_name)) + self.validate_types() + + def create_table_of_types(self): + """ + Builds a table containing all the numpy types + """ + base_ddl = '''CREATE TABLE {0}.{1} (k int PRIMARY KEY'''.format(self.keyspace_name, self.function_table_name, type) + for key, value in NumpyNullTest.NUMPY_TYPES.items(): + base_ddl = base_ddl+", {0} {1}".format(key, value[0]) + base_ddl = base_ddl+")" + execute_with_retry_tolerant(self.session, base_ddl, (DriverException, NoHostAvailable, Timeout), (ConfigurationException, AlreadyExists)) + + def validate_types(self): + """ + Selects each type from the table and expects either an exception or None depending on type + """ + for key, value in NumpyNullTest.NUMPY_TYPES.items(): + select = "SELECT {0} from {1}.{2}".format(key,self.keyspace_name, self.function_table_name) + if value[1]: + rs = execute_with_retry_tolerant(self.session, select, (NoHostAvailable), ()) + self.assertEqual(rs[0].get('v1'), None) + else: + with self.assertRaises(ValueError): + execute_with_retry_tolerant(self.session, select, (NoHostAvailable), ()) + From b0da64181ef33599864f1851876a03ff241a7317 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 21 Apr 2016 11:16:59 -0500 Subject: [PATCH 0591/2005] match_hostname --> check_hostname make the flag match the name in the std lib SSLContext API Conflicts: tests/integration/long/test_ssl.py --- cassandra/cluster.py | 2 +- cassandra/connection.py | 10 +++++----- tests/integration/long/test_ssl.py | 8 ++++---- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index d7bc95dbf2..588300070e 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -389,7 +389,7 @@ def auth_provider(self, value): .. versionchanged:: 3.3.0 - In addition to ``wrap_socket`` kwargs, clients may also specify ``'match_hostname': True`` to verify the cert hostname + In addition to ``wrap_socket`` kwargs, clients may also specify ``'check_hostname': True`` to verify the cert hostname as outlined in RFC 2818 and RFC 6125. Note that this requires the certificate to be transferred, so should almost always require the option ``'cert_reqs': ssl.CERT_REQUIRED``. Note also that this functionality was not built into Python standard library until (2.7.9, 3.2). To enable this mechanism in earlier versions, patch ``ssl.match_hostname`` diff --git a/cassandra/connection.py b/cassandra/connection.py index 2b13eebff4..ae353cbcf6 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -246,7 +246,7 @@ class Connection(object): _socket_impl = socket _ssl_impl = ssl - _match_hostname = False + _check_hostname = False def __init__(self, host='127.0.0.1', port=9042, authenticator=None, ssl_options=None, sockopts=None, compression=True, @@ -268,10 +268,10 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self._iobuf = io.BytesIO() if ssl_options: - self._match_hostname = bool(self.ssl_options.pop('match_hostname', False)) - if self._match_hostname: + self._check_hostname = bool(self.ssl_options.pop('check_hostname', False)) + if self._check_hostname: if not getattr(ssl, 'match_hostname', None): - raise RuntimeError("ssl_options specify 'match_hostname', but ssl.match_hostname is not provided. " + raise RuntimeError("ssl_options specify 'check_hostname', but ssl.match_hostname is not provided. " "Patch or upgrade Python to use this option.") if protocol_version >= 3: @@ -345,7 +345,7 @@ def _connect_socket(self): self._socket = self._ssl_impl.wrap_socket(self._socket, **self.ssl_options) self._socket.settimeout(self.connect_timeout) self._socket.connect(sockaddr) - if self._match_hostname: + if self._check_hostname: ssl.match_hostname(self._socket.getpeercert(), self.host) sockerr = None break diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index e24651d7b1..9ae84f38d6 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -147,7 +147,7 @@ def test_can_connect_with_ssl_ca_host_match(self): @since 3.3 @jira_ticket PYTHON-296 - @expected_result The client can connect via SSL and preform some basic operations, with match_hostname specified + @expected_result The client can connect via SSL and preform some basic operations, with check_hostname specified @test_category connection:ssl """ @@ -157,7 +157,7 @@ def test_can_connect_with_ssl_ca_host_match(self): ssl_options = {'ca_certs': abs_path_ca_cert_path, 'ssl_version': ssl.PROTOCOL_TLSv1, 'cert_reqs': ssl.CERT_REQUIRED, - 'match_hostname': True} + 'check_hostname': True} validate_ssl_options(ssl_options=ssl_options) @@ -198,7 +198,7 @@ def test_can_connect_with_ssl_client_auth_host_name(self): This test will setup and use a c* cluster that has client authentication enabled. It will then attempt to connect using valid client keys, and certs (that are in the server's truststore), and attempt to preform some - basic operations, with match_hostname specified + basic operations, with check_hostname specified @jira_ticket PYTHON-296 @since 3.3 @@ -217,7 +217,7 @@ def test_can_connect_with_ssl_client_auth_host_name(self): 'keyfile': abs_driver_keyfile, 'certfile': abs_driver_certfile, 'cert_reqs': ssl.CERT_REQUIRED, - 'match_hostname': True} + 'check_hostname': True} validate_ssl_options(ssl_options) def test_cannot_connect_without_client_auth(self): From cc48fd48297bb2bda939244e52b071c984d89b08 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 2 May 2016 11:39:17 -0500 Subject: [PATCH 0592/2005] docs for new exception types --- cassandra/__init__.py | 12 ++++++++++++ docs/api/cassandra.rst | 15 +++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 1cf9f821dd..316114617e 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -195,10 +195,16 @@ class UserAggregateDescriptor(SignatureDescriptor): class DriverException(Exception): + """ + Base for all exceptions explicitly raised by the driver. + """ pass class RequestExecutionException(DriverException): + """ + Base for request execution exceptions returned from the server. + """ pass @@ -395,10 +401,16 @@ def __init__(self, summary_message, keyspace, function, arg_types): class RequestValidationException(DriverException): + """ + Server request validation failed + """ pass class ConfigurationException(RequestValidationException): + """ + Server indicated request errro due to current configuration + """ pass diff --git a/docs/api/cassandra.rst b/docs/api/cassandra.rst index 5628099a92..fd4273f470 100644 --- a/docs/api/cassandra.rst +++ b/docs/api/cassandra.rst @@ -22,6 +22,12 @@ :members: :inherited-members: +.. autoexception:: DriverException() + :members: + +.. autoexception:: RequestExecutionException() + :members: + .. autoexception:: Unavailable() :members: @@ -34,6 +40,9 @@ .. autoexception:: WriteTimeout() :members: +.. autoexception:: CoordinationFailure() + :members: + .. autoexception:: ReadFailure() :members: @@ -43,6 +52,12 @@ .. autoexception:: FunctionFailure() :members: +.. autoexception:: RequestValidationException() + :members: + +.. autoexception:: ConfigurationException() + :members: + .. autoexception:: AlreadyExists() :members: From fa76e1f5e058a6f6b7b483124375a9f94c9e2a38 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 2 May 2016 14:43:04 -0500 Subject: [PATCH 0593/2005] while repreparing, don't fail on first error also fixes iteration through responses for debug logging --- cassandra/cluster.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index dc5c7221b5..6e4c57a13d 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1439,10 +1439,9 @@ def _prepare_all_queries(self, host): for ks_chunk in chunks: messages = [PrepareMessage(query=s.query_string) for s in ks_chunk] # TODO: make this timeout configurable somehow? - responses = connection.wait_for_responses(*messages, timeout=5.0) - for response in responses: - if (not isinstance(response, ResultMessage) or - response.kind != RESULT_KIND_PREPARED): + responses = connection.wait_for_responses(*messages, timeout=5.0, fail_on_error=False) + for success, response in responses: + if not success: log.debug("Got unexpected response when preparing " "statement on host %s: %r", host, response) From 3f67318f4681075ba9a5041f46f00727ea03c71c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 2 May 2016 15:52:07 -0500 Subject: [PATCH 0594/2005] 3.3 version and date --- CHANGELOG.rst | 1 + cassandra/__init__.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 8a5f6f3da7..0e4fb15ea8 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,5 +1,6 @@ 3.3.0 ===== +May 2, 2016 Features -------- diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 316114617e..b6a16347e9 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 2, 2, 'post0') +__version_info__ = (3, 3, 0) __version__ = '.'.join(map(str, __version_info__)) From 77727ea15574a4f2e2281d849020f1a028c6eb02 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 2 May 2016 15:57:20 -0500 Subject: [PATCH 0595/2005] post-release version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index b6a16347e9..da8c56755d 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 3, 0) +__version_info__ = (3, 3, 0, 'post0') __version__ = '.'.join(map(str, __version_info__)) From 348cdffcdb0c6a2dc3114f5c8a406dc86cd0dd5b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 2 May 2016 16:19:13 -0500 Subject: [PATCH 0596/2005] 327 is viewed as an improvement --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 0e4fb15ea8..7ffc4ec388 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -9,6 +9,7 @@ Features * Don't mark host down on timeout (PYTHON-286) * SSL hostname verification (PYTHON-296) * Add C* version to metadata or cluster objects (PYTHON-301) +* Options to Disable Schema, Token Metadata Processing (PYTHON-327) * Expose listen_address of node we get ring information from (PYTHON-332) * Use A-record with multiple IPs for contact points (PYTHON-415) * Custom consistency level for populating query traces (PYTHON-435) @@ -19,7 +20,6 @@ Features Bug Fixes --------- * Resolve contact point hostnames to avoid duplicate hosts (PYTHON-103) -* Options to Disable Schema, Token Metadata Processing (PYTHON-327) * GeventConnection stalls requests when read is a multiple of the input buffer size (PYTHON-429) * named_tuple_factory breaks with duplicate "cleaned" col names (PYTHON-467) * Connection leak if Cluster.shutdown() happens during reconnection (PYTHON-482) From f729e4bdc7ba357988b7368233d1dac7aff2921a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 3 May 2016 10:50:10 -0500 Subject: [PATCH 0597/2005] configurable response window for status events PYTHON-556 --- cassandra/cluster.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 6e4c57a13d..90e5d3e1cf 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -500,6 +500,18 @@ def auth_provider(self, value): See :attr:`.schema_event_refresh_window` for discussion of rationale """ + status_event_refresh_window = 2 + """ + Window, in seconds, within which the driver will start the reconnect after + receiving a status_change event. + + Setting this to zero will connect immediately. + + This is primarily used to avoid 'thundering herd' in deployments with large fanout from cluster to clients. + When nodes come up, clients attempt to reprepare prepared statements (depending on :attr:`.reprepare_on_up`), and + establish connection pools. This can cause a rush of connections and queries if not mitigated with this factor. + """ + connect_timeout = 5 """ Timeout, in seconds, for creating new connections. @@ -581,7 +593,8 @@ def __init__(self, connect_timeout=5, schema_metadata_enabled=True, token_metadata_enabled=True, - address_translator=None): + address_translator=None, + status_event_refresh_window=2): """ Any of the mutable Cluster attributes may be set as keyword arguments to the constructor. @@ -642,6 +655,7 @@ def __init__(self, self.idle_heartbeat_interval = idle_heartbeat_interval self.schema_event_refresh_window = schema_event_refresh_window self.topology_event_refresh_window = topology_event_refresh_window + self.status_event_refresh_window = status_event_refresh_window self.connect_timeout = connect_timeout self._listeners = set() @@ -689,6 +703,7 @@ def __init__(self, self.control_connection = ControlConnection( self, self.control_connection_timeout, self.schema_event_refresh_window, self.topology_event_refresh_window, + self.status_event_refresh_window, schema_metadata_enabled, token_metadata_enabled) @@ -2105,6 +2120,7 @@ class ControlConnection(object): _schema_event_refresh_window = None _topology_event_refresh_window = None + _status_event_refresh_window = None _schema_meta_enabled = True _token_meta_enabled = True @@ -2115,6 +2131,7 @@ class ControlConnection(object): def __init__(self, cluster, timeout, schema_event_refresh_window, topology_event_refresh_window, + status_event_refresh_window, schema_meta_enabled=True, token_meta_enabled=True): # use a weak reference to allow the Cluster instance to be GC'ed (and @@ -2125,6 +2142,7 @@ def __init__(self, cluster, timeout, self._schema_event_refresh_window = schema_event_refresh_window self._topology_event_refresh_window = topology_event_refresh_window + self._status_event_refresh_window = status_event_refresh_window self._schema_meta_enabled = schema_meta_enabled self._token_meta_enabled = token_meta_enabled @@ -2472,7 +2490,7 @@ def _handle_status_change(self, event): addr = self._translate_address(event["address"][0]) host = self._cluster.metadata.get_host(addr) if change_type == "UP": - delay = 1 + self._delay_for_event_type('status_change', 0.5) # randomness to avoid thundering herd problem on events + delay = self._delay_for_event_type('status_change', self._status_event_refresh_window) if host is None: # this is the first time we've seen the node self._cluster.scheduler.schedule_unique(delay, self.refresh_node_list_and_token_map) From 38ae69aba8aa745a51774c3032ff896775129630 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 3 May 2016 14:11:33 -0500 Subject: [PATCH 0598/2005] initial prepare on all hosts of current session not all hosts on all sessions --- cassandra/cluster.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 90e5d3e1cf..8f35d62ad6 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1471,11 +1471,9 @@ def _prepare_all_queries(self, host): if connection: connection.close() - def prepare_on_all_sessions(self, query_id, prepared_statement, excluded_host): + def add_prepared(self, query_id, prepared_statement): with self._prepared_statement_lock: self._prepared_statements[query_id] = prepared_statement - for session in self.sessions: - session.prepare_on_all_hosts(prepared_statement.query_string, excluded_host) class Session(object): @@ -1829,9 +1827,11 @@ def prepare(self, query, custom_payload=None): self._protocol_version) prepared_statement.custom_payload = future.custom_payload + self.cluster.add_prepared(query_id, prepared_statement) + host = future._current_host try: - self.cluster.prepare_on_all_sessions(query_id, prepared_statement, host) + self.prepare_on_all_hosts(prepared_statement.query_string, host) except Exception: log.exception("Error preparing query on all hosts:") From 4bc66d31b0ddebc6150f742e0acbc3534a546d4f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 3 May 2016 14:29:37 -0500 Subject: [PATCH 0599/2005] options controlling prepared statement distribution PYTHON-556 --- cassandra/cluster.py | 40 +++++++++++++++++++++++++++++++--------- 1 file changed, 31 insertions(+), 9 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 8f35d62ad6..c52d7acec7 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -494,8 +494,7 @@ def auth_provider(self, value): Setting this to zero will execute refreshes immediately. - Setting this negative will disable node refreshes in response to push events - (refreshes will still occur in response to new nodes observed on "UP" events). + Setting this negative will disable node refreshes in response to push events. See :attr:`.schema_event_refresh_window` for discussion of rationale """ @@ -512,6 +511,24 @@ def auth_provider(self, value): establish connection pools. This can cause a rush of connections and queries if not mitigated with this factor. """ + prepare_on_all_hosts = True + """ + Specifies whether statements should be prepared on all hosts, or just one. + + This can reasonably be disabled on long-running applications with numerous clients preparing statements on startup, + where a randomized initial condition of the load balancing policy can be expected to distribute prepares from + different clients across the cluster. + """ + + reprepare_on_up = True + """ + Specifies whether all known prepared statements should be prepared on a node when it comes up. + + May be used to avoid overwhelming a node on return, or if it is supposed that the node was only marked down due to + network. If statements are not reprepared, they are prepared on the first execution, causing + an extra roundtrip for one or more client requests. + """ + connect_timeout = 5 """ Timeout, in seconds, for creating new connections. @@ -594,7 +611,9 @@ def __init__(self, schema_metadata_enabled=True, token_metadata_enabled=True, address_translator=None, - status_event_refresh_window=2): + status_event_refresh_window=2, + prepare_on_all_hosts=True, + reprepare_on_up=True): """ Any of the mutable Cluster attributes may be set as keyword arguments to the constructor. @@ -657,6 +676,8 @@ def __init__(self, self.topology_event_refresh_window = topology_event_refresh_window self.status_event_refresh_window = status_event_refresh_window self.connect_timeout = connect_timeout + self.prepare_on_all_hosts = prepare_on_all_hosts + self.reprepare_on_up = reprepare_on_up self._listeners = set() self._listener_lock = Lock() @@ -1428,7 +1449,7 @@ def set_meta_refresh_enabled(self, enabled): self.token_metadata_enabled = enabled def _prepare_all_queries(self, host): - if not self._prepared_statements: + if not self._prepared_statements or not self.reprepare_on_up: return log.debug("Preparing all known prepared statements against host %s", host) @@ -1829,11 +1850,12 @@ def prepare(self, query, custom_payload=None): self.cluster.add_prepared(query_id, prepared_statement) - host = future._current_host - try: - self.prepare_on_all_hosts(prepared_statement.query_string, host) - except Exception: - log.exception("Error preparing query on all hosts:") + if self.cluster.prepare_on_all_hosts: + host = future._current_host + try: + self.prepare_on_all_hosts(prepared_statement.query_string, host) + except Exception: + log.exception("Error preparing query on all hosts:") return prepared_statement From dfc2cf96a6c1bea8ab63d045712c5c2599565633 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 3 May 2016 14:35:45 -0500 Subject: [PATCH 0600/2005] don't wait for schema agreement when repreparing on up PYTHON-556 --- cassandra/cluster.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index c52d7acec7..99fff564cd 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1456,11 +1456,6 @@ def _prepare_all_queries(self, host): connection = None try: connection = self.connection_factory(host.address) - try: - self.control_connection.wait_for_schema_agreement(connection) - except Exception: - log.debug("Error waiting for schema agreement before preparing statements against host %s", host, exc_info=True) - statements = self._prepared_statements.values() for keyspace, ks_statements in groupby(statements, lambda s: s.keyspace): if keyspace is not None: From 67683c01640e15f6967720f82352c9ac6e30d845 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 3 May 2016 16:00:51 -0500 Subject: [PATCH 0601/2005] add checks making sure in-flight thresholds are within range PYTHON-220 --- cassandra/cluster.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index af4ef44ffe..0c763b5f2f 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -765,6 +765,10 @@ def set_min_requests_per_connection(self, host_distance, min_requests): raise UnsupportedOperation( "Cluster.set_min_requests_per_connection() only has an effect " "when using protocol_version 1 or 2.") + if min_requests < 0 or min_requests > 126 or \ + min_requests >= self._max_requests_per_connection[host_distance]: + raise ValueError("min_requests must be in the range [0, 127) and less than the max_requests for this host_distance (%d)" % + (self._min_requests_per_connection[host_distance],)) self._min_requests_per_connection[host_distance] = min_requests def get_max_requests_per_connection(self, host_distance): @@ -782,6 +786,10 @@ def set_max_requests_per_connection(self, host_distance, max_requests): raise UnsupportedOperation( "Cluster.set_max_requests_per_connection() only has an effect " "when using protocol_version 1 or 2.") + if max_requests < 1 or max_requests > 127 or \ + max_requests <= self._min_requests_per_connection[host_distance]: + raise ValueError("max_requests must be in the range (0, 127] and greater than the min_requests for this host_distance (%d)" % + (self._min_requests_per_connection[host_distance],)) self._max_requests_per_connection[host_distance] = max_requests def get_core_connections_per_host(self, host_distance): From f08b77203466394a1d1fb333c0392beeaa3e2dd5 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 3 May 2016 16:01:13 -0500 Subject: [PATCH 0602/2005] restore unit test deleted in an unrelated branch --- tests/unit/test_cluster.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 3c4f477f72..d5c387b658 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -99,6 +99,9 @@ def test_event_delay_timing(self, *args): PYTHON-473 """ + sched = _Scheduler(None) + sched.schedule(0, lambda: None) + sched.schedule(0, lambda: None) # pre-473: "TypeError: unorderable types: function() < function()"t class SessionTest(unittest.TestCase): From a6ffe62e17fdca2d370c4302ed3fa3ec6e36ec13 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 4 May 2016 09:29:25 -0500 Subject: [PATCH 0603/2005] update unit tests for new CC init --- tests/unit/test_control_connection.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index e109a76fd0..d15801a36c 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -131,7 +131,7 @@ def setUp(self): self.connection = MockConnection() self.time = FakeTime() - self.control_connection = ControlConnection(self.cluster, 1, 0, 0) + self.control_connection = ControlConnection(self.cluster, 1, 0, 0, 0) self.control_connection._connection = self.connection self.control_connection._time = self.time @@ -441,7 +441,7 @@ def test_refresh_disabled(self): 'address': ('1.2.3.4', 9000) } - cc_no_schema_refresh = ControlConnection(cluster, 1, -1, 0) + cc_no_schema_refresh = ControlConnection(cluster, 1, -1, 0, 0) cluster.scheduler.reset_mock() # no call on schema refresh @@ -455,7 +455,7 @@ def test_refresh_disabled(self): cluster.scheduler.schedule_unique.assert_has_calls([call(ANY, cc_no_schema_refresh.refresh_node_list_and_token_map), call(ANY, cc_no_schema_refresh.refresh_node_list_and_token_map)]) - cc_no_topo_refresh = ControlConnection(cluster, 1, 0, -1) + cc_no_topo_refresh = ControlConnection(cluster, 1, 0, -1, 0) cluster.scheduler.reset_mock() # no call on topo refresh @@ -482,7 +482,7 @@ def setUp(self): self.time = FakeTime() # Use 2 for the schema_event_refresh_window which is what we would normally default to. - self.control_connection = ControlConnection(self.cluster, 1, 2, 0) + self.control_connection = ControlConnection(self.cluster, 1, 2, 0, 0) self.control_connection._connection = self.connection self.control_connection._time = self.time From 84c1fa70348b28d1ca6d6cb03b69991d65726709 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 4 May 2016 11:38:56 -0500 Subject: [PATCH 0604/2005] test for min/max requests validation PYTHON-220 --- tests/unit/test_cluster.py | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index d5c387b658..a43f52d55d 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -19,9 +19,11 @@ from mock import patch, Mock + from cassandra import ConsistencyLevel, DriverException, Timeout, Unavailable, RequestExecutionException, ReadTimeout, WriteTimeout, CoordinationFailure, ReadFailure, WriteFailure, FunctionFailure, AlreadyExists,\ InvalidRequest, Unauthorized, AuthenticationFailed, OperationTimedOut, UnsupportedOperation, RequestValidationException, ConfigurationException from cassandra.cluster import _Scheduler, Session, Cluster +from cassandra.policies import HostDistance from cassandra.query import SimpleStatement @@ -79,21 +81,35 @@ def test_exception_types(self): self.assertTrue(issubclass(UnsupportedOperation, DriverException)) -class ContactListTest(unittest.TestCase): +class ClusterTest(unittest.TestCase): - def test_invalid_types(self, *args): + def test_invalid_contact_point_types(self): with self.assertRaises(ValueError): Cluster(contact_points=[None], protocol_version=4, connect_timeout=1) with self.assertRaises(TypeError): Cluster(contact_points="not a sequence", protocol_version=4, connect_timeout=1) + def test_requests_in_flight_threshold(self): + d = HostDistance.LOCAL + mn = 3 + mx = 5 + c = Cluster(protocol_version=2) + c.set_min_requests_per_connection(d, mn) + c.set_max_requests_per_connection(d, mx) + # min underflow, max, overflow + for n in (-1, mx, 127): + self.assertRaises(ValueError, c.set_min_requests_per_connection, d, n) + # max underflow, under min, overflow + for n in (0, mn, 128): + self.assertRaises(ValueError, c.set_max_requests_per_connection, d, n) + class SchedulerTest(unittest.TestCase): # TODO: this suite could be expanded; for now just adding a test covering a ticket @patch('time.time', return_value=3) # always queue at same time @patch('cassandra.cluster._Scheduler.run') # don't actually run the thread - def test_event_delay_timing(self, *args): + def test_event_delay_timing(self, *_): """ Schedule something with a time collision to make sure the heap comparison works @@ -108,7 +124,7 @@ class SessionTest(unittest.TestCase): # TODO: this suite could be expanded; for now just adding a test covering a PR @patch('cassandra.cluster.ResponseFuture._make_query_plan') - def test_default_serial_consistency_level(self, *args): + def test_default_serial_consistency_level(self, *_): """ Make sure default_serial_consistency_level passes through to a query message. Also make sure Statement.serial_consistency_level overrides the default. From 8b75733cb948ce27dda2aff38ea81156e9e308a7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 4 May 2016 13:13:25 -0500 Subject: [PATCH 0605/2005] Discard discovered hosts with duplicate rpc address PYTHON-365 --- cassandra/cluster.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index c1e07a00fb..4298604d23 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2402,7 +2402,9 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, partitioner = None token_map = {} + found_hosts = set() if local_result.results: + found_hosts.add(connection.host) local_rows = dict_factory(*(local_result.results)) local_row = local_rows[0] cluster_name = local_row["cluster_name"] @@ -2427,7 +2429,6 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, # every node in the cluster was in the contact points, we won't discover # any new nodes, so we need this additional check. (See PYTHON-90) should_rebuild_token_map = force_token_rebuild or self._cluster.metadata.partitioner is None - found_hosts = set() for row in peers_result: addr = self._rpc_from_peer_row(row) @@ -2435,6 +2436,9 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, if 'tokens' in row and not tokens: # it was selected, but empty log.warning("Excluding host (%s) with no tokens in system.peers table of %s." % (addr, connection.host)) continue + if addr in found_hosts: + log.warning("Found multiple hosts with the same rpc_address (%s). Excluding peer %s", addr, row.get("peer")) + continue found_hosts.add(addr) From 4fd7c2917d6997f059ca3d0ac35b4a3a92f521fe Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 4 May 2016 13:34:20 -0500 Subject: [PATCH 0606/2005] fix botched merge conflict --- benchmarks/base.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/benchmarks/base.py b/benchmarks/base.py index 256a344d88..644a92f5b4 100644 --- a/benchmarks/base.py +++ b/benchmarks/base.py @@ -72,7 +72,7 @@ def setup(options): log.info("Using 'cassandra' package from %s", cassandra.__path__) - cluster = Cluster(hosts, schema_metadata_enabled=False, token_metadata_enabled=False) + cluster = Cluster(options.hosts, schema_metadata_enabled=False, token_metadata_enabled=False) try: session = cluster.connect() @@ -107,8 +107,8 @@ def setup(options): cluster.shutdown() -def teardown(hosts): - cluster = Cluster(hosts, schema_metadata_enabled=False, token_metadata_enabled=False) +def teardown(options): + cluster = Cluster(options.hosts, schema_metadata_enabled=False, token_metadata_enabled=False) session = cluster.connect() if not options.keep_data: session.execute("DROP KEYSPACE " + options.keyspace) From 48cfcd0034693d9983e0ff50e6d93518923b98e5 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 4 May 2016 13:55:26 -0500 Subject: [PATCH 0607/2005] make benchmarks work in py 2.6 PYTHON-434 --- benchmarks/base.py | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/benchmarks/base.py b/benchmarks/base.py index 644a92f5b4..10cce0c15c 100644 --- a/benchmarks/base.py +++ b/benchmarks/base.py @@ -30,7 +30,6 @@ import cassandra from cassandra.cluster import Cluster from cassandra.io.asyncorereactor import AsyncoreConnection -from cassandra.policies import HostDistance log = logging.getLogger() handler = logging.StreamHandler() @@ -39,6 +38,16 @@ logging.getLogger('cassandra').setLevel(logging.WARN) +_log_levels = { + 'CRITICAL': logging.CRITICAL, + 'ERROR': logging.ERROR, + 'WARN': logging.WARNING, + 'WARNING': logging.WARNING, + 'INFO': logging.INFO, + 'DEBUG': logging.DEBUG, + 'NOTSET': logging.NOTSET, +} + have_libev = False supported_reactors = [AsyncoreConnection] try: @@ -91,11 +100,11 @@ def setup(options): log.debug("Creating table...") create_table_query = """ - CREATE TABLE {} ( + CREATE TABLE {0} ( thekey text, """ for i in range(options.num_columns): - create_table_query += "col{} {},\n".format(i, options.column_type) + create_table_query += "col{0} {1},\n".format(i, options.column_type) create_table_query += "PRIMARY KEY (thekey))" try: @@ -134,22 +143,22 @@ def benchmark(thread_class): # Generate the query if options.read: - query = "SELECT * FROM {} WHERE thekey = '{{key}}'".format(TABLE) + query = "SELECT * FROM {0} WHERE thekey = '{{key}}'".format(TABLE) else: - query = "INSERT INTO {} (thekey".format(TABLE) + query = "INSERT INTO {0} (thekey".format(TABLE) for i in range(options.num_columns): - query += ", col{}".format(i) + query += ", col{0}".format(i) query += ") VALUES ('{key}'" for i in range(options.num_columns): - query += ", {}".format(COLUMN_VALUES[options.column_type]) + query += ", {0}".format(COLUMN_VALUES[options.column_type]) query += ")" values = None # we don't use that anymore. Keeping it in case we go back to prepared statements. per_thread = options.num_ops // options.threads threads = [] - log.debug("Beginning {}...".format('reads' if options.read else 'inserts')) + log.debug("Beginning {0}...".format('reads' if options.read else 'inserts')) start = time.time() try: for i in range(options.threads): @@ -235,7 +244,11 @@ def parse_options(): options.hosts = options.hosts.split(',') - log.setLevel(options.log_level.upper()) + level = options.log_level.upper() + try: + log.setLevel(_log_levels[level]) + except KeyError: + log.warn("Unknown log level specified: %s; specify one of %s", options.log_level, _log_levels.keys()) if options.asyncore_only: options.supported_reactors = [AsyncoreConnection] From d30da191caa2a3eed8f892327c4af93eb521dcbb Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 4 May 2016 13:55:47 -0500 Subject: [PATCH 0608/2005] Fix asyncore pipe dispatch for py3 --- cassandra/io/asyncorereactor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index c6cf8a1924..54f985e897 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -102,7 +102,7 @@ def handle_read(self): def notify_loop(self): if not self._notified: self._notified = True - os.write(self.write_fd, 'x') + os.write(self.write_fd, b'x') class _AsyncoreUDPDispatcher(_AsyncoreDispatcher): From ceddacd30e849f2c2bc2e026eac5192c2a9500ab Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 5 May 2016 12:34:56 -0500 Subject: [PATCH 0609/2005] test unicode chars in cql generation PYTHON-447 --- tests/unit/test_metadata.py | 67 ++++++++++++++++++++++++++++++++++++- 1 file changed, 66 insertions(+), 1 deletion(-) diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 0b86c48789..1992a71475 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -28,7 +28,7 @@ LocalStrategy, protect_name, protect_names, protect_value, is_valid_name, UserType, KeyspaceMetadata, get_schema_parser, - _UnknownStrategy) + _UnknownStrategy, ColumnMetadata, TableMetadata, IndexMetadata, Function, Aggregate) from cassandra.policies import SimpleConvictionPolicy from cassandra.pool import Host @@ -392,3 +392,68 @@ def test_build_index_as_cql(self): index_meta = parser._build_index_metadata(column_meta, row) self.assertEqual(index_meta.as_cql_query(), "CREATE CUSTOM INDEX index_name_here ON keyspace_name_here.table_name_here (column_name_here) USING 'class_name_here'") + + +class UnicodeIdentifiersTests(unittest.TestCase): + """ + Exercise cql generation with unicode characters. Keyspace, Table, and Index names + cannot have special chars because C* names files by those identifiers, but they are + tested anyway. + + Looking for encoding errors like PYTHON-447 + """ + + name = six.text_type(b'\'_-()"\xc2\xac'.decode('utf-8')) + + def test_keyspace_name(self): + km = KeyspaceMetadata(self.name, False, 'SimpleStrategy', {'replication_factor': 1}) + print(km.export_as_string()) + + def test_table_name(self): + tm = TableMetadata(self.name, self.name) + print(tm.export_as_string()) + + def test_column_name_single_partition(self): + tm = TableMetadata('ks', 'table') + cm = ColumnMetadata(tm, self.name, u'int') + tm.columns[cm.name] = cm + tm.partition_key.append(cm) + print(tm.export_as_string()) + + def test_column_name_single_partition_single_clustering(self): + tm = TableMetadata('ks', 'table') + cm = ColumnMetadata(tm, self.name, u'int') + tm.columns[cm.name] = cm + tm.partition_key.append(cm) + cm = ColumnMetadata(tm, self.name + 'x', u'int') + tm.columns[cm.name] = cm + tm.clustering_key.append(cm) + print(tm.export_as_string()) + + def test_column_name_multiple_partition(self): + tm = TableMetadata('ks', 'table') + cm = ColumnMetadata(tm, self.name, u'int') + tm.columns[cm.name] = cm + tm.partition_key.append(cm) + cm = ColumnMetadata(tm, self.name + 'x', u'int') + tm.columns[cm.name] = cm + tm.partition_key.append(cm) + print(tm.export_as_string()) + + def test_index(self): + im = IndexMetadata(self.name, self.name, self.name, kind='', index_options={'target': self.name}) + print(im.export_as_string()) + im = IndexMetadata(self.name, self.name, self.name, kind='CUSTOM', index_options={'target': self.name, 'class_name': 'Class'}) + print(im.export_as_string()) + + def test_function(self): + fm = Function(self.name, self.name, (u'int', u'int'), (u'x', u'y'), u'int', u'language', self.name, False) + print(fm.export_as_string()) + + def test_aggregate(self): + am = Aggregate(self.name, self.name, (u'text',), self.name, u'text', self.name, self.name, u'text') + print(am.export_as_string()) + + def test_user_type(self): + um = UserType(self.name, self.name, [self.name, self.name], [u'int', u'text']) + print(um.export_as_string()) From 6aa4ec5ff8253454405010af399ef999e03c5437 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 5 May 2016 12:35:47 -0500 Subject: [PATCH 0610/2005] encode unicode for server error messages fixes "'ascii' codec can't encode character u'...' in position ...: ordinal not in range(128)" when printing exception in python2 (and in cqlsh) --- cassandra/protocol.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index a2432a6e07..4c63d557d5 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -136,6 +136,8 @@ def recv_body(cls, f, protocol_version, user_type_map): def summary_msg(self): msg = 'Error from server: code=%04x [%s] message="%s"' \ % (self.code, self.summary, self.message) + if six.PY2 and isinstance(msg, six.text_type): + msg = msg.encode('utf-8') return msg def __str__(self): From 0959b5dc1662d6957f2951738d0a4f053ac78f66 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 5 May 2016 13:18:08 -0500 Subject: [PATCH 0611/2005] remove encoding that was causing problems in cql generation PYTHON-447 --- cassandra/metadata.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index f7337d8706..18ac50c651 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1218,14 +1218,8 @@ def _make_option_strings(cls, options_map): return list(sorted(ret)) -if six.PY3: - def protect_name(name): - return maybe_escape_name(name) -else: - def protect_name(name): # NOQA - if isinstance(name, six.text_type): - name = name.encode('utf8') - return maybe_escape_name(name) +def protect_name(name): + return maybe_escape_name(name) def protect_names(names): @@ -2466,6 +2460,7 @@ def get_schema_parser(connection, server_version, timeout): # multi-version parser we have as of C* 2.2.0rc1. return SchemaParserV22(connection, timeout) + def _cql_from_cass_type(cass_type): """ A string representation of the type for this column, such as "varchar" From c4c8896b169203783d83b4bfd74b7f983594981f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 6 May 2016 14:03:16 -0500 Subject: [PATCH 0612/2005] remove costly test that was only there to demo another test deficiency --- tests/integration/standard/test_query.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index dff9715a4f..6d9041b5d5 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -471,11 +471,6 @@ def test_no_parameters(self): self.session.execute(batch) self.confirm_results() - def test_no_parameters_many_times(self): - for i in range(1000): - self.test_no_parameters() - self.session.execute("TRUNCATE test3rf.test") - def test_unicode(self): ddl = ''' CREATE TABLE test3rf.testtext ( From 76cc8c0f2e200a63f995c58f43e8fc9445e9a644 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 6 May 2016 14:24:37 -0500 Subject: [PATCH 0613/2005] raise if batch becomes too large PYTHON-151 --- cassandra/query.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/cassandra/query.py b/cassandra/query.py index 1fb4d4cc91..7b0a1352e7 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -711,21 +711,19 @@ def add(self, statement, parameters=None): if parameters: encoder = Encoder() if self._session is None else self._session.encoder statement = bind_params(statement, parameters, encoder) - self._statements_and_parameters.append((False, statement, ())) + self._add_statement_and_params(False, statement, ()) elif isinstance(statement, PreparedStatement): query_id = statement.query_id bound_statement = statement.bind(() if parameters is None else parameters) self._update_state(bound_statement) - self._statements_and_parameters.append( - (True, query_id, bound_statement.values)) + self._add_statement_and_params(True, query_id, bound_statement.values) elif isinstance(statement, BoundStatement): if parameters: raise ValueError( "Parameters cannot be passed with a BoundStatement " "to BatchStatement.add()") self._update_state(statement) - self._statements_and_parameters.append( - (True, statement.prepared_statement.query_id, statement.values)) + self._add_statement_and_params(True, statement.prepared_statement.query_id, statement.values) else: # it must be a SimpleStatement query_string = statement.query_string @@ -733,7 +731,7 @@ def add(self, statement, parameters=None): encoder = Encoder() if self._session is None else self._session.encoder query_string = bind_params(query_string, parameters, encoder) self._update_state(statement) - self._statements_and_parameters.append((False, query_string, ())) + self._add_statement_and_params(False, query_string, ()) return self def add_all(self, statements, parameters): @@ -745,6 +743,11 @@ def add_all(self, statements, parameters): for statement, value in zip(statements, parameters): self.add(statement, parameters) + def _add_statement_and_params(self, is_prepared, statement, parameters): + if len(self._statements_and_parameters) >= 0xFFFF: + raise ValueError("Batch statement cannot contain more than %d statements." % 0xFFFF) + self._statements_and_parameters.append((is_prepared, statement, parameters)) + def _maybe_set_routing_attributes(self, statement): if self.routing_key is None: if statement.keyspace and statement.routing_key: From e046492799ddb136b3f04ee74de7e2ac4da86b3d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 6 May 2016 14:25:17 -0500 Subject: [PATCH 0614/2005] fix BatchStatement.add_all to pass individual params instead of the whole list --- cassandra/query.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/query.py b/cassandra/query.py index 7b0a1352e7..67d7d99c2b 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -737,11 +737,11 @@ def add(self, statement, parameters=None): def add_all(self, statements, parameters): """ Adds a sequence of :class:`.Statement` objects and a matching sequence - of parameters to the batch. :const:`None` can be used in place of - parameters when no parameters are needed. + of parameters to the batch. Statement and parameter sequences must be of equal length or + one will be truncated. :const:`None` can be used in the parameters position where are needed. """ for statement, value in zip(statements, parameters): - self.add(statement, parameters) + self.add(statement, value) def _add_statement_and_params(self, is_prepared, statement, parameters): if len(self._statements_and_parameters) >= 0xFFFF: From e97e6f3906b8bcbdfe70be040b6aeb7c5e3347fa Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 6 May 2016 14:25:52 -0500 Subject: [PATCH 0615/2005] test batch statement with too many queries PYTHON-151 --- tests/integration/standard/test_query.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 6d9041b5d5..881eed54dd 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -486,6 +486,22 @@ def test_unicode(self): finally: self.session.execute("DROP TABLE test3rf.testtext") + def test_too_many_statements(self): + max_statements = 0xFFFF + ss = SimpleStatement("INSERT INTO test3rf.test (k, v) VALUES (0, 0)") + b = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=ConsistencyLevel.ONE) + + # max works + b.add_all([ss] * max_statements, [None] * max_statements) + self.session.execute(b) + + # max + 1 raises + self.assertRaises(ValueError, b.add, ss) + + # also would have bombed trying to encode + b._statements_and_parameters.append((False, ss.query_string, ())) + self.assertRaises(NoHostAvailable, self.session.execute, b) + class SerialConsistencyTests(unittest.TestCase): def setUp(self): From 4cd4e987f40e3c2b264a0e94ce7f7d5e6aed9a58 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 6 May 2016 15:37:09 -0500 Subject: [PATCH 0616/2005] BatchStatement.clear + unit test --- cassandra/query.py | 13 +++++++++ tests/unit/test_query.py | 59 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+) create mode 100644 tests/unit/test_query.py diff --git a/cassandra/query.py b/cassandra/query.py index 67d7d99c2b..cefdeb53b8 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -699,6 +699,19 @@ def __init__(self, batch_type=BatchType.LOGGED, retry_policy=None, Statement.__init__(self, retry_policy=retry_policy, consistency_level=consistency_level, serial_consistency_level=serial_consistency_level, custom_payload=custom_payload) + def clear(self): + """ + This is a convenience method to clear a batch statement for reuse. + + *Note:* it should not be used concurrently with uncompleted execution futures executing the same + ``BatchStatement``. + """ + del self._statements_and_parameters[:] + self.keyspace = None + self.routing_key = None + if self.custom_payload: + self.custom_payload.clear() + def add(self, statement, parameters=None): """ Adds a :class:`.Statement` and optional sequence of parameters diff --git a/tests/unit/test_query.py b/tests/unit/test_query.py new file mode 100644 index 0000000000..e843330638 --- /dev/null +++ b/tests/unit/test_query.py @@ -0,0 +1,59 @@ +# Copyright 2013-2016 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +import six + +from cassandra.query import BatchStatement, SimpleStatement + + +class BatchStatementTest(unittest.TestCase): + # TODO: this suite could be expanded; for now just adding a test covering a PR + + def test_clear(self): + keyspace = 'keyspace' + routing_key = 'routing_key' + custom_payload = {'key': six.b('value')} + + ss = SimpleStatement('whatever', keyspace=keyspace, routing_key=routing_key, custom_payload=custom_payload) + + batch = BatchStatement() + batch.add(ss) + + self.assertTrue(batch._statements_and_parameters) + self.assertEqual(batch.keyspace, keyspace) + self.assertEqual(batch.routing_key, routing_key) + self.assertEqual(batch.custom_payload, custom_payload) + + batch.clear() + self.assertFalse(batch._statements_and_parameters) + self.assertIsNone(batch.keyspace) + self.assertIsNone(batch.routing_key) + self.assertFalse(batch.custom_payload) + + batch.add(ss) + + def test_clear_empty(self): + batch = BatchStatement() + batch.clear() + self.assertFalse(batch._statements_and_parameters) + self.assertIsNone(batch.keyspace) + self.assertIsNone(batch.routing_key) + self.assertFalse(batch.custom_payload) + + batch.add('something') From 67cc5ae1f0bdad9fc0d5e609c96cb218212d3158 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 6 May 2016 15:41:42 -0500 Subject: [PATCH 0617/2005] unit test BatchStatement.add_all --- tests/unit/test_query.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/unit/test_query.py b/tests/unit/test_query.py index e843330638..687bca7c50 100644 --- a/tests/unit/test_query.py +++ b/tests/unit/test_query.py @@ -57,3 +57,12 @@ def test_clear_empty(self): self.assertFalse(batch.custom_payload) batch.add('something') + + def test_add_all(self): + batch = BatchStatement() + statements = ['%s'] * 10 + parameters = [(i,) for i in range(10)] + batch.add_all(statements, parameters) + bound_statements = [t[1] for t in batch._statements_and_parameters] + str_parameters = [str(i) for i in range(10)] + self.assertEqual(bound_statements, str_parameters) From cce8590598644116b62e57dd5c383ac762d2a54d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 6 May 2016 16:27:05 -0500 Subject: [PATCH 0618/2005] context managers for Cluster and Session PYTHON-521 --- cassandra/cluster.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 19ab86b3a1..cfc23d45c1 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1012,6 +1012,12 @@ def shutdown(self): _discard_cluster_shutdown(self) + def __enter__(self): + return self + + def __exit__(self, *args): + self.shutdown() + def _new_session(self): session = Session(self, self.metadata.all_hosts()) self._session_register_user_types(session) @@ -1910,6 +1916,12 @@ def shutdown(self): for pool in self._pools.values(): pool.shutdown() + def __enter__(self): + return self + + def __exit__(self, *args): + self.shutdown() + def add_or_renew_pool(self, host, is_host_addition): """ For internal use only. From 66969c2826b486c4e4622568994eb951320dbcd3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 6 May 2016 16:27:20 -0500 Subject: [PATCH 0619/2005] Integration tests for Cluster and Session ctx managers PYTHON-521 --- tests/integration/standard/test_cluster.py | 47 ++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index d6650a4796..381df413b9 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -669,3 +669,50 @@ def test_address_translator_with_mixed_nodes(self): c.connect() for host in c.metadata.all_hosts(): self.assertEqual(adder_map.get(str(host)), host.broadcast_address) + +class ContextManagementTest(unittest.TestCase): + + load_balancing_policy = WhiteListRoundRobinPolicy(['127.0.0.1']) + cluster_kwargs = {'load_balancing_policy': load_balancing_policy, + 'schema_metadata_enabled': False, + 'token_metadata_enabled': False} + + def test_no_connect(self): + with Cluster() as cluster: + self.assertFalse(cluster.is_shutdown) + self.assertTrue(cluster.is_shutdown) + + def test_simple_nested(self): + with Cluster(**self.cluster_kwargs) as cluster: + with cluster.connect() as session: + self.assertFalse(cluster.is_shutdown) + self.assertFalse(session.is_shutdown) + self.assertTrue(session.execute('select release_version from system.local')[0]) + self.assertTrue(session.is_shutdown) + self.assertTrue(cluster.is_shutdown) + + def test_cluster_no_session(self): + with Cluster(**self.cluster_kwargs) as cluster: + session = cluster.connect() + self.assertFalse(cluster.is_shutdown) + self.assertFalse(session.is_shutdown) + self.assertTrue(session.execute('select release_version from system.local')[0]) + self.assertTrue(session.is_shutdown) + self.assertTrue(cluster.is_shutdown) + + def test_session_no_cluster(self): + cluster = Cluster(**self.cluster_kwargs) + unmanaged_session = cluster.connect() + with cluster.connect() as session: + self.assertFalse(cluster.is_shutdown) + self.assertFalse(session.is_shutdown) + self.assertFalse(unmanaged_session.is_shutdown) + self.assertTrue(session.execute('select release_version from system.local')[0]) + self.assertTrue(session.is_shutdown) + self.assertFalse(cluster.is_shutdown) + self.assertFalse(unmanaged_session.is_shutdown) + unmanaged_session.shutdown() + self.assertTrue(unmanaged_session.is_shutdown) + self.assertFalse(cluster.is_shutdown) + cluster.shutdown() + self.assertTrue(cluster.is_shutdown) From 270108ee7dd17be383ef30e55f7c4979a324e926 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 9 May 2016 12:13:08 -0500 Subject: [PATCH 0620/2005] return namedtuple from execute_concurrent PYTHON-362 --- cassandra/concurrent.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index 75c2604db8..8098b436d6 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -13,6 +13,7 @@ # limitations under the License. +from collections import namedtuple from heapq import heappush, heappop from itertools import cycle import six @@ -25,6 +26,9 @@ import logging log = logging.getLogger(__name__) + +ExecutionResult = namedtuple('ExecutionResult', ['success', 'result_or_exc']) + def execute_concurrent(session, statements_and_parameters, concurrency=100, raise_on_first_error=True, results_generator=False): """ Executes a sequence of (statement, parameters) tuples concurrently. Each @@ -153,7 +157,7 @@ class ConcurrentExecutorGenResults(_ConcurrentExecutor): def _put_result(self, result, idx, success): with self._condition: - heappush(self._results_queue, (idx, (success, result))) + heappush(self._results_queue, (idx, ExecutionResult(success, result))) self._execute_next() self._condition.notify() @@ -183,7 +187,7 @@ def execute(self, concurrency, fail_fast): return super(ConcurrentExecutorListResults, self).execute(concurrency, fail_fast) def _put_result(self, result, idx, success): - self._results_queue.append((idx, (success, result))) + self._results_queue.append((idx, ExecutionResult(success, result))) with self._condition: self._current += 1 if not success and self._fail_fast: From 8bdc0148134bb7aa9f88fdeb91d22e5615c26d4e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 9 May 2016 13:01:18 -0500 Subject: [PATCH 0621/2005] less confusing range in error message --- cassandra/cluster.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index cfc23d45c1..c5890451d9 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -727,7 +727,6 @@ def __init__(self, self.status_event_refresh_window, schema_metadata_enabled, token_metadata_enabled) - def register_user_type(self, keyspace, user_type, klass): """ Registers a class to use to represent a particular user-defined type. @@ -803,7 +802,7 @@ def set_min_requests_per_connection(self, host_distance, min_requests): "when using protocol_version 1 or 2.") if min_requests < 0 or min_requests > 126 or \ min_requests >= self._max_requests_per_connection[host_distance]: - raise ValueError("min_requests must be in the range [0, 127) and less than the max_requests for this host_distance (%d)" % + raise ValueError("min_requests must be 0-126 and less than the max_requests for this host_distance (%d)" % (self._min_requests_per_connection[host_distance],)) self._min_requests_per_connection[host_distance] = min_requests @@ -824,7 +823,7 @@ def set_max_requests_per_connection(self, host_distance, max_requests): "when using protocol_version 1 or 2.") if max_requests < 1 or max_requests > 127 or \ max_requests <= self._min_requests_per_connection[host_distance]: - raise ValueError("max_requests must be in the range (0, 127] and greater than the min_requests for this host_distance (%d)" % + raise ValueError("max_requests must be 1-127 and greater than the min_requests for this host_distance (%d)" % (self._min_requests_per_connection[host_distance],)) self._max_requests_per_connection[host_distance] = max_requests @@ -3162,7 +3161,7 @@ def _set_result(self, response): retry_type, consistency = retry if retry_type in (RetryPolicy.RETRY, RetryPolicy.RETRY_NEXT_HOST): self._query_retries += 1 - reuse = retry_type == RetryPolicy.RETRY + reuse = retry_type == RetryPolicy.RETRY self._retry(reuse_connection=reuse, consistency_level=consistency) elif retry_type is RetryPolicy.RETHROW: self._set_final_exception(response.to_exception()) @@ -3622,4 +3621,3 @@ def was_applied(self): return row[0] else: return row['[applied]'] - From fb444ee35a1b43cbaf35821f6593f21aa1c77a9d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 9 May 2016 14:03:17 -0500 Subject: [PATCH 0622/2005] explicit init kwargs for concrete statement classes PYTHON-219 --- cassandra/query.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/cassandra/query.py b/cassandra/query.py index cefdeb53b8..a284434ada 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -325,15 +325,18 @@ class SimpleStatement(Statement): A simple, un-prepared query. """ - def __init__(self, query_string, *args, **kwargs): + def __init__(self, query_string, retry_policy=None, consistency_level=None, routing_key=None, + serial_consistency_level=None, fetch_size=FETCH_SIZE_UNSET, keyspace=None, + custom_payload=None): """ `query_string` should be a literal CQL statement with the exception of parameter placeholders that will be filled through the `parameters` argument of :meth:`.Session.execute()`. - All arguments to :class:`Statement` apply to this class as well + See :class:`Statement` attributes for a description of the other parameters. """ - Statement.__init__(self, *args, **kwargs) + Statement.__init__(self, retry_policy, consistency_level, routing_key, + serial_consistency_level, fetch_size, keyspace, custom_payload) self._query_string = query_string @property @@ -449,11 +452,13 @@ class BoundStatement(Statement): The sequence of values that were bound to the prepared statement. """ - def __init__(self, prepared_statement, *args, **kwargs): + def __init__(self, prepared_statement, retry_policy=None, consistency_level=None, routing_key=None, + serial_consistency_level=None, fetch_size=FETCH_SIZE_UNSET, keyspace=None, + custom_payload=None): """ `prepared_statement` should be an instance of :class:`PreparedStatement`. - All arguments to :class:`Statement` apply to this class as well + See :class:`Statement` attributes for a description of the other parameters. """ self.prepared_statement = prepared_statement @@ -467,7 +472,8 @@ def __init__(self, prepared_statement, *args, **kwargs): if meta: self.keyspace = meta[0].keyspace_name - Statement.__init__(self, *args, **kwargs) + Statement.__init__(self, retry_policy, consistency_level, routing_key, + serial_consistency_level, fetch_size, keyspace, custom_payload) def bind(self, values): """ From ccb2e6b1533bbed4901475e85f1bb72f766185a9 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 9 May 2016 14:04:00 -0500 Subject: [PATCH 0623/2005] reorg cassandra.query doc Hoping to avoid confusion on statement initializers by removing the signature and dropping the position of the abstract class. PYTHON-219 --- docs/api/cassandra/query.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/api/cassandra/query.rst b/docs/api/cassandra/query.rst index 55c56cf168..fcd79739b9 100644 --- a/docs/api/cassandra/query.rst +++ b/docs/api/cassandra/query.rst @@ -11,9 +11,6 @@ .. autofunction:: ordered_dict_factory -.. autoclass:: Statement - :members: - .. autoclass:: SimpleStatement :members: @@ -23,6 +20,9 @@ .. autoclass:: BoundStatement :members: +.. autoclass:: Statement () + :members: + .. autodata:: UNSET_VALUE :annotation: From 88b013daac765cec04626cc3d5fdad804fd0b19b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 9 May 2016 14:31:26 -0500 Subject: [PATCH 0624/2005] basic check to detect improper statement positional parameters PYTHON-219 --- cassandra/query.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cassandra/query.py b/cassandra/query.py index a284434ada..8662f0bda4 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -221,6 +221,8 @@ class Statement(object): def __init__(self, retry_policy=None, consistency_level=None, routing_key=None, serial_consistency_level=None, fetch_size=FETCH_SIZE_UNSET, keyspace=None, custom_payload=None): + if retry_policy and not hasattr(retry_policy, 'on_read_timeout'): # just checking one method to detect positional parameter errors + raise ValueError('retry_policy should implement cassandra.policies.RetryPolicy') self.retry_policy = retry_policy if consistency_level is not None: self.consistency_level = consistency_level From 3a26efc2514bedc4e2f8925b413699081fc28375 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 11 May 2016 12:23:45 -0500 Subject: [PATCH 0625/2005] add Time.time conversion function --- cassandra/util.py | 7 +++++++ tests/unit/test_util_types.py | 5 +++++ 2 files changed, 12 insertions(+) diff --git a/cassandra/util.py b/cassandra/util.py index ec590e4b5d..f4bc1b1c94 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -924,6 +924,13 @@ def nanosecond(self): """ return self.nanosecond_time % Time.SECOND + def time(self): + """ + Return a built-in datetime.time (nanosecond precision truncated to micros). + """ + return datetime.time(hour=self.hour, minute=self.minute, second=self.second, + microsecond=self.nanosecond // Time.MICRO) + def _from_timestamp(self, t): if t >= Time.DAY: raise ValueError("value must be less than number of nanoseconds in a day (%d)" % Time.DAY) diff --git a/tests/unit/test_util_types.py b/tests/unit/test_util_types.py index 53ab9d0752..5bcaa6e724 100644 --- a/tests/unit/test_util_types.py +++ b/tests/unit/test_util_types.py @@ -131,6 +131,11 @@ def test_from_time(self): tt = Time(expected_time) self.assertEqual(tt, expected_time) + def test_as_time(self): + expected_time = datetime.time(12, 1, 2, 3) + tt = Time(expected_time) + self.assertEqual(tt.time(), expected_time) + def test_equals(self): # util.Time self equality self.assertEqual(Time(1234), Time(1234)) From f67d18a5ef310c64510e0cc7dca14c1be7cf6f33 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 11 May 2016 12:26:19 -0500 Subject: [PATCH 0626/2005] date and time doc page PYTHON-394 --- docs/dates_and_times.rst | 87 ++++++++++++++++++++++++++++++++++++++++ docs/index.rst | 4 ++ 2 files changed, 91 insertions(+) create mode 100644 docs/dates_and_times.rst diff --git a/docs/dates_and_times.rst b/docs/dates_and_times.rst new file mode 100644 index 0000000000..5b13cb7f55 --- /dev/null +++ b/docs/dates_and_times.rst @@ -0,0 +1,87 @@ +Working with Dates and Times +============================ + +This document is meant to provide on overview of the assumptions and limitations of the driver time handling, the +reasoning behind it, and describe approaches to working with these types. + +timestamps (Cassandra DateType) +------------------------------- + +Timestamps in Cassandra are timezone-naive timestamps encoded as microseconds since UNIX epoch. Clients working with +timestamps in this database usually find it easiest to reason about them if they are always assumed to be UTC. To quote the +pytz documentation, "The preferred way of dealing with times is to always work in UTC, converting to localtime only when +generating output to be read by humans." The driver adheres to this tenant, and assumes UTC is always in the database. The +driver attempts to make this correct on the way in, and assumes no timezone on the way out. + +Write Path +~~~~~~~~~~ +When inserting timestamps, the driver handles serialization for the write path as follows: + +If the input is a ``datetime.datetime``, the serialization is normalized by starting with the ``utctimetuple()`` of the +value. + +- If the ``datetime`` object is timezone-aware, the timestamp is shifted, and represents the UTC timestamp equivalent. +- If the ``datetime`` object is timezone-naive, this results in no shift -- any ``datetime`` with no timezone information is assumed to be UTC + +Note the second point above applies even to "local" times created using ``now()``:: + + >>> d = datetime.now() + + >>> print(d.tzinfo) + None + + +These do not contain timezone information intrinsically, so they will be assumed to be UTC and not shifted. When generating +timestamps in the application, it is clearer to use ``datetime.utcnow()`` to be explicit about it. + +If the input for a timestamp is numeric, it is assumed to be a epoch-relative microsecond timestamp, as specified in the +CQL spec -- no scaling or conversion is done. + +Read Path +~~~~~~~~~ +The driver always assumes persisted timestamps are UTC and makes no attempt to localize them. Returned values are +timezone-naive ``datetime.datetime``. We follow this approach because the datetime API has deficiencies around daylight +saving time, and the defacto package for handling this is a third-party package (we try to minimize external dependencies +and not make decisions for the integrator). + +The decision for how to handle timezones is left to the application. For the most part it is straightforward to apply +localization to the ``datetime``\s returned by queries. One prevalent method is to use pytz for localization:: + + import pytz + user_tz = pytz.timezone('US/Central') + timestamp_naive = row.ts + timestamp_utc = pytz.utc.localize(timestamp_naive) + timestamp_presented = timestamp_utc.astimezone(user_tz) + +This is the most robust approach (likely refactored into a function). If it is deemed too cumbersome to apply for all call +sites in the application, it is possible to patch the driver with custom deserialization for this type. However, doing +this depends depends some on internal APIs and what extensions are present, so we will only mention the possibility, and +not spell it out here. + +date, time (Cassandra DateType) +------------------------------- +Date and time in Cassandra are idealized markers, much like ``datetime.date`` and ``datetime.time`` in the Python standard +library. Unlike these Python implementations, the Cassandra encoding supports much wider ranges. To accommodate these +ranges without overflow, this driver returns these data in custom types: :class:`.util.Date` and :class:`.util.Time`. + +Write Path +~~~~~~~~~~ +For simple (not prepared) statements, the input values for each of these can be either a string literal or an encoded +integer. See `Working with dates `_ +or `Working with time `_ for details +on the encoding or string formats. + +For prepared statements, the driver accepts anything that can be used to construct the :class:`.util.Date` or +:class:`.util.Time` classes. See the linked API docs for details. + +Read Path +~~~~~~~~~ +The driver always returns custom types for ``date`` and ``time``. + +The driver returns :class:`.util.Date` for ``date`` in order to accommodate the wider range of values without overflow. +For applications working within the supported range of [``datetime.MINYEAR``, ``datetime.MAXYEAR``], these are easily +converted to standard ``datetime.date`` insances using :meth:`.Date.date`. + +The driver returns :class:`.util.Time` for ``time`` in order to retain nanosecond precision stored in the database. +For applications not concerned with this level of precision, these are easily converted to standard ``datetime.time`` +insances using :meth:`.Time.time`. diff --git a/docs/index.rst b/docs/index.rst index 903a750666..c68e8653cb 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -42,6 +42,9 @@ Contents :doc:`security` An overview of the security features of the driver. +:doc:`dates_and_times` + Some discussion on the driver's approach to working with timestamp, date, time types + :doc:`faq` A collection of Frequently Asked Questions @@ -58,6 +61,7 @@ Contents security user_defined_types object_mapper + dates_and_times faq Getting Help From fb25b15a5bc4c8cb6a021bbe732126a8b6e1a98d Mon Sep 17 00:00:00 2001 From: Will Liu Date: Wed, 11 May 2016 14:40:07 -0400 Subject: [PATCH 0627/2005] Updates to queryset docs * Cannot filter by primary_key only (in example, created 'id' and set as a 'partition_key') * Updated import from 'cqlengine' to 'columns' and 'functions' --- docs/cqlengine/queryset.rst | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/docs/cqlengine/queryset.rst b/docs/cqlengine/queryset.rst index 18287f924d..ff328b0ce4 100644 --- a/docs/cqlengine/queryset.rst +++ b/docs/cqlengine/queryset.rst @@ -209,13 +209,14 @@ TimeUUID Functions .. code-block:: python class DataStream(Model): - time = cqlengine.TimeUUID(primary_key=True) - data = cqlengine.Bytes() + id = columns.UUID(partition_key=True) + time = columns.TimeUUID(primary_key=True) + data = columns.Bytes() min_time = datetime(1982, 1, 1) max_time = datetime(1982, 3, 9) - DataStream.filter(time__gt=cqlengine.MinTimeUUID(min_time), time__lt=cqlengine.MaxTimeUUID(max_time)) + DataStream.filter(time__gt=functions.MinTimeUUID(min_time), time__lt=functions.MaxTimeUUID(max_time)) Token Function ============== @@ -230,8 +231,8 @@ Token Function .. code-block:: python class Items(Model): - id = cqlengine.Text(primary_key=True) - data = cqlengine.Bytes() + id = columns.Text(primary_key=True) + data = columns.Bytes() query = Items.objects.all().limit(10) From b5845ec4e9ff6302c4797a8241016d96c3d61d2a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 11 May 2016 14:37:19 -0500 Subject: [PATCH 0628/2005] populate dse version and workload on host, when available PYTHON-555 --- cassandra/cluster.py | 4 ++++ cassandra/pool.py | 12 ++++++++++++ 2 files changed, 16 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index c5890451d9..5deed968e2 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2440,6 +2440,8 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, host.listen_address = local_row.get("listen_address") host.broadcast_address = local_row.get("broadcast_address") host.release_version = local_row.get("release_version") + host.dse_version = local_row.get("dse_version") + host.dse_workload = local_row.get("workload") if partitioner and tokens: token_map[host] = tokens @@ -2473,6 +2475,8 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, host.broadcast_address = row.get("peer") host.release_version = row.get("release_version") + host.dse_version = row.get("dse_version") + host.dse_workload = row.get("workload") if partitioner and tokens: token_map[host] = tokens diff --git a/cassandra/pool.py b/cassandra/pool.py index 134eb25486..c47c275d5b 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -83,6 +83,18 @@ class Host(object): release_version as queried from the control connection system tables """ + dse_version = None + """ + dse_version as queried from the control connection system tables. Only populated when connecting to + DSE with this property available. Not queried if :attr:`~.Cluster.token_metadata_enabled` is ``False``. + """ + + dse_workload = None + """ + DSE workload queried from the control connection system tables. Only populated when connecting to + DSE with this property available. Not queried if :attr:`~.Cluster.token_metadata_enabled` is ``False``. + """ + _datacenter = None _rack = None _reconnection_handler = None From 982f1469ad573612574c7a5d893f7ad94ac519e7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 11 May 2016 15:32:50 -0500 Subject: [PATCH 0629/2005] Avoid node refresh for known, up hosts. PYTHON-557 --- cassandra/cluster.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index c5890451d9..9e087a7332 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2515,13 +2515,22 @@ def _delay_for_event_type(self, event_type, delay_window): self._event_schedule_times[event_type] = this_time return delay + def _refresh_nodes_if_not_up(self, addr): + """ + Used to mitigate refreshes for nodes that are already known. + Some versions of the server send superfluous NEW_NODE messages in addition to UP events. + """ + host = self._cluster.metadata.get_host(addr) + if not host or not host.is_up: + self.refresh_node_list_and_token_map() + def _handle_topology_change(self, event): change_type = event["change_type"] addr = self._translate_address(event["address"][0]) if change_type == "NEW_NODE" or change_type == "MOVED_NODE": if self._topology_event_refresh_window >= 0: delay = self._delay_for_event_type('topology_change', self._topology_event_refresh_window) - self._cluster.scheduler.schedule_unique(delay, self.refresh_node_list_and_token_map) + self._cluster.scheduler.schedule_unique(delay, self._refresh_nodes_if_not_up, addr) elif change_type == "REMOVED_NODE": host = self._cluster.metadata.get_host(addr) self._cluster.scheduler.schedule_unique(0, self._cluster.remove_host, host) From af3c346b335989530bbd7382f8064a31d24b32fc Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 11 May 2016 15:46:17 -0500 Subject: [PATCH 0630/2005] update unit tests for new conditional refresh PYTHON-557 --- tests/unit/test_control_connection.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index d15801a36c..e5a6dcc105 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -347,7 +347,7 @@ def test_handle_topology_change(self): } self.cluster.scheduler.reset_mock() self.control_connection._handle_topology_change(event) - self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection.refresh_node_list_and_token_map) + self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection._refresh_nodes_if_not_up, '1.2.3.4') event = { 'change_type': 'REMOVED_NODE', @@ -363,7 +363,7 @@ def test_handle_topology_change(self): } self.cluster.scheduler.reset_mock() self.control_connection._handle_topology_change(event) - self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection.refresh_node_list_and_token_map) + self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection._refresh_nodes_if_not_up, '1.2.3.4') def test_handle_status_change(self): event = { @@ -453,7 +453,7 @@ def test_refresh_disabled(self): cc_no_schema_refresh._handle_status_change(status_event) cc_no_schema_refresh._handle_topology_change(topo_event) cluster.scheduler.schedule_unique.assert_has_calls([call(ANY, cc_no_schema_refresh.refresh_node_list_and_token_map), - call(ANY, cc_no_schema_refresh.refresh_node_list_and_token_map)]) + call(ANY, cc_no_schema_refresh._refresh_nodes_if_not_up, '1.2.3.4')]) cc_no_topo_refresh = ControlConnection(cluster, 1, 0, -1, 0) cluster.scheduler.reset_mock() From 869e1ab616c9771ba6af9d171a908675309aebd1 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 13 May 2016 13:27:02 -0500 Subject: [PATCH 0631/2005] PYTHON-362 tweaking existing test to validate that generated results are of the type ExecutionResult --- tests/integration/standard/test_concurrent.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_concurrent.py b/tests/integration/standard/test_concurrent.py index 60243bb654..c1e12133ae 100644 --- a/tests/integration/standard/test_concurrent.py +++ b/tests/integration/standard/test_concurrent.py @@ -19,7 +19,7 @@ from cassandra import InvalidRequest, ConsistencyLevel, ReadTimeout, WriteTimeout, OperationTimedOut, \ ReadFailure, WriteFailure from cassandra.cluster import Cluster -from cassandra.concurrent import execute_concurrent, execute_concurrent_with_args +from cassandra.concurrent import execute_concurrent, execute_concurrent_with_args, ExecutionResult from cassandra.policies import HostDistance from cassandra.query import tuple_factory, SimpleStatement @@ -151,6 +151,12 @@ def test_execute_concurrent_with_args_generator(self): self.assertTrue(success) self.assertFalse(result) + results = self.execute_concurrent_args_helper(self.session, statement, parameters, results_generator=True) + for result in results: + self.assertTrue(isinstance(result, ExecutionResult)) + self.assertTrue(result.success) + self.assertFalse(result.result_or_exc) + # read statement = SimpleStatement( "SELECT v FROM test3rf.test WHERE k=%s", @@ -158,6 +164,7 @@ def test_execute_concurrent_with_args_generator(self): parameters = [(i, ) for i in range(num_statements)] results = self.execute_concurrent_args_helper(self.session, statement, parameters, results_generator=True) + for i in range(num_statements): result = next(results) self.assertEqual((True, [(i,)]), result) From 86172dd5798b74149a0fbda00a98b32587f29e17 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 13 May 2016 14:57:32 -0500 Subject: [PATCH 0632/2005] fix lbp tests following host state management change 01a3825 also fix logging in util and remove unused parameter --- tests/integration/long/test_consistency.py | 4 +- .../long/test_loadbalancingpolicies.py | 169 ++++++++---------- tests/integration/long/utils.py | 13 +- 3 files changed, 80 insertions(+), 106 deletions(-) diff --git a/tests/integration/long/test_consistency.py b/tests/integration/long/test_consistency.py index 43734c7dc8..538d924a65 100644 --- a/tests/integration/long/test_consistency.py +++ b/tests/integration/long/test_consistency.py @@ -131,7 +131,7 @@ def _test_tokenaware_one_node_down(self, keyspace, rf, accepted): load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()), protocol_version=PROTOCOL_VERSION) session = cluster.connect() - wait_for_up(cluster, 1, wait=False) + wait_for_up(cluster, 1) wait_for_up(cluster, 2) create_schema(cluster, session, keyspace, replication_factor=rf) @@ -183,7 +183,7 @@ def test_rfthree_tokenaware_none_down(self): load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()), protocol_version=PROTOCOL_VERSION) session = cluster.connect() - wait_for_up(cluster, 1, wait=False) + wait_for_up(cluster, 1) wait_for_up(cluster, 2) create_schema(cluster, session, keyspace, replication_factor=3) diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index f05253175b..54ed0d0a0e 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -43,11 +43,40 @@ def setUp(self): remove_cluster() # clear ahead of test so it doesn't use one left in unknown state self.coordinator_stats = CoordinatorStats() self.prepared = None + self.probe_cluster = None + + def tearDown(self): + if self.probe_cluster: + self.probe_cluster.shutdown() @classmethod def teardown_class(cls): remove_cluster() + def _connect_probe_cluster(self): + if not self.probe_cluster: + # distinct cluster so we can see the status of nodes ignored by the LBP being tested + self.probe_cluster = Cluster(load_balancing_policy=RoundRobinPolicy(), + schema_metadata_enabled=False, token_metadata_enabled=False) + self.probe_session = self.probe_cluster.connect() + + def _wait_for_nodes_up(self, nodes): + self._connect_probe_cluster() + for n in nodes: + wait_for_up(self.probe_cluster, n) + + def _wait_for_nodes_down(self, nodes): + self._connect_probe_cluster() + for n in nodes: + wait_for_down(self.probe_cluster, n) + + def _cluster_session_with_lbp(self, lbp): + # create a cluster with no delay on events + cluster = Cluster(load_balancing_policy=lbp, protocol_version=PROTOCOL_VERSION, + topology_event_refresh_window=0, status_event_refresh_window=0) + session = cluster.connect() + return cluster, session + def _insert(self, session, keyspace, count=12, consistency_level=ConsistencyLevel.ONE): session.execute('USE %s' % keyspace) @@ -132,13 +161,8 @@ def test_token_aware_is_used_by_default(self): def test_roundrobin(self): use_singledc() keyspace = 'test_roundrobin' - cluster = Cluster( - load_balancing_policy=RoundRobinPolicy(), - protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - wait_for_up(cluster, 1, wait=False) - wait_for_up(cluster, 2, wait=False) - wait_for_up(cluster, 3) + cluster, session = self._cluster_session_with_lbp(RoundRobinPolicy()) + self._wait_for_nodes_up(range(1, 4)) create_schema(cluster, session, keyspace, replication_factor=3) self._insert(session, keyspace) @@ -149,7 +173,7 @@ def test_roundrobin(self): self.coordinator_stats.assert_query_count_equals(self, 3, 4) force_stop(3) - wait_for_down(cluster, 3) + self._wait_for_nodes_down([3]) self.coordinator_stats.reset_counts() self._query(session, keyspace) @@ -160,8 +184,8 @@ def test_roundrobin(self): decommission(1) start(3) - wait_for_down(cluster, 1) - wait_for_up(cluster, 3) + self._wait_for_nodes_down([1]) + self._wait_for_nodes_up([3]) self.coordinator_stats.reset_counts() self._query(session, keyspace) @@ -169,18 +193,13 @@ def test_roundrobin(self): self.coordinator_stats.assert_query_count_equals(self, 1, 0) self.coordinator_stats.assert_query_count_equals(self, 2, 6) self.coordinator_stats.assert_query_count_equals(self, 3, 6) + cluster.shutdown() def test_roundrobin_two_dcs(self): use_multidc([2, 2]) keyspace = 'test_roundrobin_two_dcs' - cluster = Cluster( - load_balancing_policy=RoundRobinPolicy(), - protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - wait_for_up(cluster, 1, wait=False) - wait_for_up(cluster, 2, wait=False) - wait_for_up(cluster, 3, wait=False) - wait_for_up(cluster, 4) + cluster, session = self._cluster_session_with_lbp(RoundRobinPolicy()) + self._wait_for_nodes_up(range(1, 5)) create_schema(cluster, session, keyspace, replication_strategy=[2, 2]) self._insert(session, keyspace) @@ -197,7 +216,7 @@ def test_roundrobin_two_dcs(self): # reset control connection self._insert(session, keyspace, count=1000) - wait_for_up(cluster, 5) + self._wait_for_nodes_up([5]) self.coordinator_stats.reset_counts() self._query(session, keyspace) @@ -213,14 +232,8 @@ def test_roundrobin_two_dcs(self): def test_roundrobin_two_dcs_2(self): use_multidc([2, 2]) keyspace = 'test_roundrobin_two_dcs_2' - cluster = Cluster( - load_balancing_policy=RoundRobinPolicy(), - protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - wait_for_up(cluster, 1, wait=False) - wait_for_up(cluster, 2, wait=False) - wait_for_up(cluster, 3, wait=False) - wait_for_up(cluster, 4) + cluster, session = self._cluster_session_with_lbp(RoundRobinPolicy()) + self._wait_for_nodes_up(range(1, 5)) create_schema(cluster, session, keyspace, replication_strategy=[2, 2]) self._insert(session, keyspace) @@ -237,7 +250,7 @@ def test_roundrobin_two_dcs_2(self): # reset control connection self._insert(session, keyspace, count=1000) - wait_for_up(cluster, 5) + self._wait_for_nodes_up([5]) self.coordinator_stats.reset_counts() self._query(session, keyspace) @@ -253,15 +266,8 @@ def test_roundrobin_two_dcs_2(self): def test_dc_aware_roundrobin_two_dcs(self): use_multidc([3, 2]) keyspace = 'test_dc_aware_roundrobin_two_dcs' - cluster = Cluster( - load_balancing_policy=DCAwareRoundRobinPolicy('dc1'), - protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - wait_for_up(cluster, 1, wait=False) - wait_for_up(cluster, 2, wait=False) - wait_for_up(cluster, 3, wait=False) - wait_for_up(cluster, 4, wait=False) - wait_for_up(cluster, 5) + cluster, session = self._cluster_session_with_lbp(DCAwareRoundRobinPolicy('dc1')) + self._wait_for_nodes_up(range(1, 6)) create_schema(cluster, session, keyspace, replication_strategy=[2, 2]) self._insert(session, keyspace) @@ -278,15 +284,8 @@ def test_dc_aware_roundrobin_two_dcs(self): def test_dc_aware_roundrobin_two_dcs_2(self): use_multidc([3, 2]) keyspace = 'test_dc_aware_roundrobin_two_dcs_2' - cluster = Cluster( - load_balancing_policy=DCAwareRoundRobinPolicy('dc2'), - protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - wait_for_up(cluster, 1, wait=False) - wait_for_up(cluster, 2, wait=False) - wait_for_up(cluster, 3, wait=False) - wait_for_up(cluster, 4, wait=False) - wait_for_up(cluster, 5) + cluster, session = self._cluster_session_with_lbp(DCAwareRoundRobinPolicy('dc2')) + self._wait_for_nodes_up(range(1, 6)) create_schema(cluster, session, keyspace, replication_strategy=[2, 2]) self._insert(session, keyspace) @@ -303,14 +302,8 @@ def test_dc_aware_roundrobin_two_dcs_2(self): def test_dc_aware_roundrobin_one_remote_host(self): use_multidc([2, 2]) keyspace = 'test_dc_aware_roundrobin_one_remote_host' - cluster = Cluster( - load_balancing_policy=DCAwareRoundRobinPolicy('dc2', used_hosts_per_remote_dc=1), - protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - wait_for_up(cluster, 1, wait=False) - wait_for_up(cluster, 2, wait=False) - wait_for_up(cluster, 3, wait=False) - wait_for_up(cluster, 4) + cluster, session = self._cluster_session_with_lbp(DCAwareRoundRobinPolicy('dc2', used_hosts_per_remote_dc=1)) + self._wait_for_nodes_up(range(1, 5)) create_schema(cluster, session, keyspace, replication_strategy=[2, 2]) self._insert(session, keyspace) @@ -323,7 +316,7 @@ def test_dc_aware_roundrobin_one_remote_host(self): self.coordinator_stats.reset_counts() bootstrap(5, 'dc1') - wait_for_up(cluster, 5) + self._wait_for_nodes_up([5]) self._query(session, keyspace) @@ -336,8 +329,7 @@ def test_dc_aware_roundrobin_one_remote_host(self): self.coordinator_stats.reset_counts() decommission(3) decommission(4) - wait_for_down(cluster, 3, wait=True) - wait_for_down(cluster, 4, wait=True) + self._wait_for_nodes_down([3, 4]) self._query(session, keyspace) @@ -350,7 +342,7 @@ def test_dc_aware_roundrobin_one_remote_host(self): self.coordinator_stats.reset_counts() decommission(5) - wait_for_down(cluster, 5, wait=True) + self._wait_for_nodes_down([5]) self._query(session, keyspace) @@ -364,7 +356,7 @@ def test_dc_aware_roundrobin_one_remote_host(self): self.coordinator_stats.reset_counts() decommission(1) - wait_for_down(cluster, 1, wait=True) + self._wait_for_nodes_down([1]) self._query(session, keyspace) @@ -395,13 +387,8 @@ def test_token_aware_prepared(self): def token_aware(self, keyspace, use_prepared=False): use_singledc() - cluster = Cluster( - load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()), - protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - wait_for_up(cluster, 1, wait=False) - wait_for_up(cluster, 2, wait=False) - wait_for_up(cluster, 3) + cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy())) + self._wait_for_nodes_up([1, 2, 3]) create_schema(cluster, session, keyspace, replication_factor=1) self._insert(session, keyspace) @@ -420,7 +407,7 @@ def token_aware(self, keyspace, use_prepared=False): self.coordinator_stats.reset_counts() force_stop(2) - wait_for_down(cluster, 2, wait=True) + self._wait_for_nodes_down([2]) try: self._query(session, keyspace, use_prepared=use_prepared) @@ -432,7 +419,7 @@ def token_aware(self, keyspace, use_prepared=False): self.coordinator_stats.reset_counts() start(2) - wait_for_up(cluster, 2, wait=True) + self._wait_for_nodes_up([2]) self._query(session, keyspace, use_prepared=use_prepared) @@ -442,7 +429,7 @@ def token_aware(self, keyspace, use_prepared=False): self.coordinator_stats.reset_counts() stop(2) - wait_for_down(cluster, 2, wait=True) + self._wait_for_nodes_down([2]) try: self._query(session, keyspace, use_prepared=use_prepared) @@ -452,9 +439,9 @@ def token_aware(self, keyspace, use_prepared=False): self.coordinator_stats.reset_counts() start(2) - wait_for_up(cluster, 2, wait=True) + self._wait_for_nodes_up([2]) decommission(2) - wait_for_down(cluster, 2, wait=True) + self._wait_for_nodes_down([2]) self._query(session, keyspace, use_prepared=use_prepared) @@ -471,13 +458,8 @@ def test_token_aware_composite_key(self): use_singledc() keyspace = 'test_token_aware_composite_key' table = 'composite' - cluster = Cluster( - load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()), - protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - wait_for_up(cluster, 1, wait=False) - wait_for_up(cluster, 2, wait=False) - wait_for_up(cluster, 3) + cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy())) + self._wait_for_nodes_up([1, 2, 3]) create_schema(cluster, session, keyspace, replication_factor=2) session.execute('CREATE TABLE %s (' @@ -500,13 +482,8 @@ def test_token_aware_composite_key(self): def test_token_aware_with_rf_2(self, use_prepared=False): use_singledc() keyspace = 'test_token_aware_with_rf_2' - cluster = Cluster( - load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()), - protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - wait_for_up(cluster, 1, wait=False) - wait_for_up(cluster, 2, wait=False) - wait_for_up(cluster, 3) + cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy())) + self._wait_for_nodes_up([1, 2, 3]) create_schema(cluster, session, keyspace, replication_factor=2) self._insert(session, keyspace) @@ -518,7 +495,7 @@ def test_token_aware_with_rf_2(self, use_prepared=False): self.coordinator_stats.reset_counts() stop(2) - wait_for_down(cluster, 2, wait=True) + self._wait_for_nodes_down([2]) self._query(session, keyspace) @@ -530,10 +507,8 @@ def test_token_aware_with_rf_2(self, use_prepared=False): def test_token_aware_with_local_table(self): use_singledc() - cluster = Cluster( - load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()), - protocol_version=PROTOCOL_VERSION) - session = cluster.connect() + cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy())) + self._wait_for_nodes_up([1, 2, 3]) p = session.prepare("SELECT * FROM system.local WHERE key=?") # this would blow up prior to 61b4fad @@ -546,13 +521,11 @@ def test_white_list(self): use_singledc() keyspace = 'test_white_list' - cluster = Cluster(('127.0.0.2',), - load_balancing_policy=WhiteListRoundRobinPolicy((IP_FORMAT % 2,)), - protocol_version=PROTOCOL_VERSION) + cluster = Cluster(('127.0.0.2',), load_balancing_policy=WhiteListRoundRobinPolicy((IP_FORMAT % 2,)), + protocol_version=PROTOCOL_VERSION, topology_event_refresh_window=0, + status_event_refresh_window=0) session = cluster.connect() - wait_for_up(cluster, 1, wait=False) - wait_for_up(cluster, 2, wait=False) - wait_for_up(cluster, 3) + self._wait_for_nodes_up([1, 2, 3]) create_schema(cluster, session, keyspace) self._insert(session, keyspace) @@ -564,12 +537,12 @@ def test_white_list(self): # white list policy should not allow reconnecting to ignored hosts force_stop(3) - wait_for_down(cluster, 3) + self._wait_for_nodes_down([3]) self.assertFalse(cluster.metadata._hosts[IP_FORMAT % 3].is_currently_reconnecting()) self.coordinator_stats.reset_counts() force_stop(2) - time.sleep(10) + self._wait_for_nodes_down([2]) try: self._query(session, keyspace) diff --git a/tests/integration/long/utils.py b/tests/integration/long/utils.py index f850d6a1a8..bd48e96f4e 100644 --- a/tests/integration/long/utils.py +++ b/tests/integration/long/utils.py @@ -123,14 +123,14 @@ def bootstrap(node, data_center=None, token=None): def ring(node): - print('From node%s:' % node) get_node(node).nodetool('ring') -def wait_for_up(cluster, node, wait=True): +def wait_for_up(cluster, node): tries = 0 + addr = IP_FORMAT % node while tries < 100: - host = cluster.metadata.get_host(IP_FORMAT % node) + host = cluster.metadata.get_host(addr) if host and host.is_up: log.debug("Done waiting for node %s to be up", node) return @@ -139,10 +139,11 @@ def wait_for_up(cluster, node, wait=True): tries += 1 time.sleep(1) - raise RuntimeError("Host {0} is not up after 100 attempts".format(IP_FORMAT.format(node))) + # todo: don't mix string interpolation methods in the same package + raise RuntimeError("Host {0} is not up after {1} attempts".format(addr, tries)) -def wait_for_down(cluster, node, wait=True): +def wait_for_down(cluster, node): log.debug("Waiting for node %s to be down", node) tries = 0 while tries < 100: @@ -155,4 +156,4 @@ def wait_for_down(cluster, node, wait=True): tries += 1 time.sleep(1) - raise RuntimeError("Host {0} is not down after 100 attempts".format(IP_FORMAT.format(node))) + raise RuntimeError("Host {0} is not down after {1} attempts".format(addr, tries)) From d39665f5cec4c8ca86fa8546eb6f96d68c838f07 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 12 May 2016 15:48:25 -0500 Subject: [PATCH 0633/2005] normalize BytesToken values to binary type fixes UnicodeDecodeError when one value has non-ascii values PYTHON-559 --- cassandra/metadata.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 18ac50c651..e5a32bca79 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1519,10 +1519,8 @@ class BytesToken(Token): def __init__(self, token_string): """ `token_string` should be string representing the token. """ - if not isinstance(token_string, six.string_types): - raise TypeError( - "Tokens for ByteOrderedPartitioner should be strings (got %s)" - % (type(token_string),)) + if isinstance(token_string, six.text_type): + token_string = token_string.encode('utf-8') self.value = token_string From db6346e9bd7ece980257a0f37e895b2ea0faaef3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 12 May 2016 16:41:29 -0500 Subject: [PATCH 0634/2005] pass unit test on new BytesToken init; still need to expand tests --- tests/unit/test_metadata.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 1992a71475..c7b6c2a5e7 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -315,12 +315,6 @@ def test_bytes_tokens(self): self.assertEqual(bytes_token.hash_fn(str(cassandra.metadata.MAX_LONG)), str(cassandra.metadata.MAX_LONG)) self.assertEqual(str(bytes_token), "") - try: - bytes_token = BytesToken(cassandra.metadata.MIN_LONG - 1) - self.fail('Tokens for ByteOrderedPartitioner should be only strings') - except TypeError: - pass - class KeyspaceMetadataTest(unittest.TestCase): From 33ae06d0eeb799f9d0b070740fae4c2878e1a159 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 13 May 2016 09:54:33 -0500 Subject: [PATCH 0635/2005] Correct BytesToken metadata parsing. --- cassandra/metadata.py | 42 ++++++++++++++------- tests/integration/standard/test_metadata.py | 6 +-- 2 files changed, 32 insertions(+), 16 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index e5a32bca79..3eb0e4561e 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from binascii import unhexlify from bisect import bisect_right from collections import defaultdict, Mapping from hashlib import md5 @@ -271,7 +272,7 @@ def rebuild_token_map(self, partitioner, token_map): ring = [] for host, token_strings in six.iteritems(token_map): for token_string in token_strings: - token = token_class(token_string) + token = token_class.from_string(token_string) ring.append(token) token_to_host_owner[token] = host @@ -1441,6 +1442,9 @@ class Token(object): Abstract class representing a token. """ + def __init__(self, token): + self.value = token + @classmethod def hash_fn(cls, key): return key @@ -1449,6 +1453,13 @@ def hash_fn(cls, key): def from_key(cls, key): return cls(cls.hash_fn(key)) + @classmethod + def from_string(cls, token_string): + raise NotImplementedError() + """ `token_string` should be the string representation from the server. """ + # The hash partitioners just store the deciman value + return cls(int(token_string)) + def __cmp__(self, other): if self.value < other.value: return -1 @@ -1478,7 +1489,16 @@ class NoMurmur3(Exception): pass -class Murmur3Token(Token): +class HashToken(Token): + + @classmethod + def from_string(cls, token_string): + """ `token_string` should be the string representation from the server. """ + # The hash partitioners just store the deciman value + return cls(int(token_string)) + + +class Murmur3Token(HashToken): """ A token for ``Murmur3Partitioner``. """ @@ -1492,11 +1512,11 @@ def hash_fn(cls, key): raise NoMurmur3() def __init__(self, token): - """ `token` should be an int or string representing the token. """ + """ `token` is an int or string representing the token. """ self.value = int(token) -class MD5Token(Token): +class MD5Token(HashToken): """ A token for ``RandomPartitioner``. """ @@ -1507,21 +1527,17 @@ def hash_fn(cls, key): key = key.encode('UTF-8') return abs(varint_unpack(md5(key).digest())) - def __init__(self, token): - """ `token` should be an int or string representing the token. """ - self.value = int(token) - class BytesToken(Token): """ A token for ``ByteOrderedPartitioner``. """ - def __init__(self, token_string): - """ `token_string` should be string representing the token. """ - if isinstance(token_string, six.text_type): - token_string = token_string.encode('utf-8') - self.value = token_string + @classmethod + def from_string(cls, token_string): + """ `token_string` should be the string representation from the server. """ + # The BOP stores a hex string + return cls(unhexlify(token_string)) class TriggerMetadata(object): diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 4d86723fb9..ce6532da6d 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1312,7 +1312,7 @@ def test_token(self): cluster.shutdown() def test_getting_replicas(self): - tokens = [MD5Token(str(i)) for i in range(0, (2 ** 127 - 1), 2 ** 125)] + tokens = [MD5Token(i) for i in range(0, (2 ** 127 - 1), 2 ** 125)] hosts = [Host("ip%d" % i, SimpleConvictionPolicy) for i in range(len(tokens))] token_to_primary_replica = dict(zip(tokens, hosts)) keyspace = KeyspaceMetadata("ks", True, "SimpleStrategy", {"replication_factor": "1"}) @@ -1327,12 +1327,12 @@ def test_getting_replicas(self): # shift the tokens back by one for token, expected_host in zip(tokens, hosts): - replicas = token_map.get_replicas("ks", MD5Token(str(token.value - 1))) + replicas = token_map.get_replicas("ks", MD5Token(token.value - 1)) self.assertEqual(set(replicas), set([expected_host])) # shift the tokens forward by one for i, token in enumerate(tokens): - replicas = token_map.get_replicas("ks", MD5Token(str(token.value + 1))) + replicas = token_map.get_replicas("ks", MD5Token(token.value + 1)) expected_host = hosts[(i + 1) % len(hosts)] self.assertEqual(set(replicas), set([expected_host])) From 10c1247847595abb30588b055ea54c61248d0417 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 13 May 2016 15:50:14 -0500 Subject: [PATCH 0636/2005] more complete unit tests for BytesToken PYTHON-559 --- tests/unit/test_metadata.py | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index c7b6c2a5e7..98a1bb1e27 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -17,11 +17,13 @@ except ImportError: import unittest # noqa +from binascii import unhexlify from mock import Mock import os import six import cassandra +from cassandra.marshal import uint16_unpack, uint16_pack from cassandra.metadata import (Murmur3Token, MD5Token, BytesToken, ReplicationStrategy, NetworkTopologyStrategy, SimpleStrategy, @@ -309,11 +311,32 @@ def test_md5_tokens(self): class BytesTokensTest(unittest.TestCase): def test_bytes_tokens(self): - bytes_token = BytesToken(str(cassandra.metadata.MIN_LONG - 1)) + bytes_token = BytesToken(unhexlify('01')) + self.assertEqual(bytes_token.value, six.b('\x01')) + self.assertEqual(str(bytes_token), "" % bytes_token.value) self.assertEqual(bytes_token.hash_fn('123'), '123') self.assertEqual(bytes_token.hash_fn(123), 123) self.assertEqual(bytes_token.hash_fn(str(cassandra.metadata.MAX_LONG)), str(cassandra.metadata.MAX_LONG)) - self.assertEqual(str(bytes_token), "") + + def test_from_string(self): + from_unicode = BytesToken.from_string(six.text_type('0123456789abcdef')) + from_bin = BytesToken.from_string(six.b('0123456789abcdef')) + self.assertEqual(from_unicode, from_bin) + self.assertIsInstance(from_unicode.value, six.binary_type) + self.assertIsInstance(from_bin.value, six.binary_type) + + def test_comparison(self): + tok = BytesToken.from_string(six.text_type('0123456789abcdef')) + token_high_order = uint16_unpack(tok.value[0:2]) + self.assertLess(BytesToken(uint16_pack(token_high_order - 1)), tok) + self.assertGreater(BytesToken(uint16_pack(token_high_order + 1)), tok) + + def test_comparison_unicode(self): + value = six.b('\'_-()"\xc2\xac') + t0 = BytesToken(value) + t1 = BytesToken.from_string('00') + self.assertGreater(t0, t1) + self.assertFalse(t0 < t1) class KeyspaceMetadataTest(unittest.TestCase): From 4a262d5fc777fb361592144fa421a2eb544d5ac1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 16 May 2016 10:51:14 -0500 Subject: [PATCH 0637/2005] allow connecting to previously ignored hosts required after removing setting eager up for ignored hosts in PYTHON-531 --- cassandra/cluster.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index bb083a4ebc..60c98f2022 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1981,9 +1981,11 @@ def update_created_pools(self): for host in self.cluster.metadata.all_hosts(): distance = self._load_balancer.distance(host) pool = self._pools.get(host) - if not pool or pool.is_shutdown: - if distance != HostDistance.IGNORED and host.is_up: + # we don't eagerly set is_up on previously ignored hosts. None is included here + # to allow us to attempt connections to hosts that have gone from ignored to something + # else. + if distance != HostDistance.IGNORED and host.is_up in (True, None): self.add_or_renew_pool(host, False) elif distance != pool.host_distance: # the distance has changed From e4e9461e622046ae9a1cd3fad65845e238535f37 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 16 May 2016 11:24:49 -0500 Subject: [PATCH 0638/2005] remove leftover debug print statements --- tests/unit/test_metadata.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 98a1bb1e27..7a28f19a24 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -424,18 +424,18 @@ class UnicodeIdentifiersTests(unittest.TestCase): def test_keyspace_name(self): km = KeyspaceMetadata(self.name, False, 'SimpleStrategy', {'replication_factor': 1}) - print(km.export_as_string()) + km.export_as_string() def test_table_name(self): tm = TableMetadata(self.name, self.name) - print(tm.export_as_string()) + tm.export_as_string() def test_column_name_single_partition(self): tm = TableMetadata('ks', 'table') cm = ColumnMetadata(tm, self.name, u'int') tm.columns[cm.name] = cm tm.partition_key.append(cm) - print(tm.export_as_string()) + tm.export_as_string() def test_column_name_single_partition_single_clustering(self): tm = TableMetadata('ks', 'table') @@ -445,7 +445,7 @@ def test_column_name_single_partition_single_clustering(self): cm = ColumnMetadata(tm, self.name + 'x', u'int') tm.columns[cm.name] = cm tm.clustering_key.append(cm) - print(tm.export_as_string()) + tm.export_as_string() def test_column_name_multiple_partition(self): tm = TableMetadata('ks', 'table') @@ -455,22 +455,22 @@ def test_column_name_multiple_partition(self): cm = ColumnMetadata(tm, self.name + 'x', u'int') tm.columns[cm.name] = cm tm.partition_key.append(cm) - print(tm.export_as_string()) + tm.export_as_string() def test_index(self): im = IndexMetadata(self.name, self.name, self.name, kind='', index_options={'target': self.name}) - print(im.export_as_string()) + im.export_as_string() im = IndexMetadata(self.name, self.name, self.name, kind='CUSTOM', index_options={'target': self.name, 'class_name': 'Class'}) - print(im.export_as_string()) + im.export_as_string() def test_function(self): fm = Function(self.name, self.name, (u'int', u'int'), (u'x', u'y'), u'int', u'language', self.name, False) - print(fm.export_as_string()) + fm.export_as_string() def test_aggregate(self): am = Aggregate(self.name, self.name, (u'text',), self.name, u'text', self.name, self.name, u'text') - print(am.export_as_string()) + am.export_as_string() def test_user_type(self): um = UserType(self.name, self.name, [self.name, self.name], [u'int', u'text']) - print(um.export_as_string()) + um.export_as_string() From 7e809e14e33998863ff057cc86c9dc65a0206043 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 16 May 2016 11:27:28 -0500 Subject: [PATCH 0639/2005] make BytesToken parsing work with pypy3 PYTHON-559 --- cassandra/metadata.py | 3 +++ tests/unit/test_metadata.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 3eb0e4561e..7f3433c3a7 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1536,6 +1536,9 @@ class BytesToken(Token): @classmethod def from_string(cls, token_string): """ `token_string` should be the string representation from the server. """ + # unhexlify works fine with unicode input in everythin but pypy3, where it Raises "TypeError: 'str' does not support the buffer interface" + if isinstance(token_string, six.text_type): + token_string = token_string.encode('ascii') # The BOP stores a hex string return cls(unhexlify(token_string)) diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 7a28f19a24..f3c6c05787 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -311,7 +311,7 @@ def test_md5_tokens(self): class BytesTokensTest(unittest.TestCase): def test_bytes_tokens(self): - bytes_token = BytesToken(unhexlify('01')) + bytes_token = BytesToken(unhexlify(six.b('01'))) self.assertEqual(bytes_token.value, six.b('\x01')) self.assertEqual(str(bytes_token), "" % bytes_token.value) self.assertEqual(bytes_token.hash_fn('123'), '123') From 7dd12f0756ebb97b23283aa3eb8d3a4100668f36 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 16 May 2016 12:09:20 -0500 Subject: [PATCH 0640/2005] python 2.6 format strings in benchmark scripts --- benchmarks/callback_full_pipeline.py | 2 +- benchmarks/future_batches.py | 2 +- benchmarks/future_full_throttle.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/benchmarks/callback_full_pipeline.py b/benchmarks/callback_full_pipeline.py index a7990d80fa..5f5cc35819 100644 --- a/benchmarks/callback_full_pipeline.py +++ b/benchmarks/callback_full_pipeline.py @@ -43,7 +43,7 @@ def insert_next(self, previous_result=sentinel): i = next(self.num_started) if i <= self.num_queries: - key = "{}-{}".format(self.thread_num, i) + key = "{0}-{1}".format(self.thread_num, i) future = self.run_query(key, timeout=None) future.add_callbacks(self.insert_next, self.insert_next) diff --git a/benchmarks/future_batches.py b/benchmarks/future_batches.py index c8305369e9..5600a3232d 100644 --- a/benchmarks/future_batches.py +++ b/benchmarks/future_batches.py @@ -35,7 +35,7 @@ def run(self): except queue.Empty: break - key = "{}-{}".format(self.thread_num, i) + key = "{0}-{1}".format(self.thread_num, i) future = self.run_query(key) futures.put_nowait(future) diff --git a/benchmarks/future_full_throttle.py b/benchmarks/future_full_throttle.py index 2e47b19f9b..8fbd06b500 100644 --- a/benchmarks/future_full_throttle.py +++ b/benchmarks/future_full_throttle.py @@ -26,7 +26,7 @@ def run(self): self.start_profile() for i in range(self.num_queries): - key = "{}-{}".format(self.thread_num, i) + key = "{0}-{1}".format(self.thread_num, i) future = self.run_query(key) futures.append(future) From 6e2e220728e40e5026df4878ef02f89ce1f86fb4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 16 May 2016 12:09:58 -0500 Subject: [PATCH 0641/2005] getsockopt for asyncore pipewrapper --- cassandra/io/asyncorereactor.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 54f985e897..8481ab7e56 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -62,6 +62,12 @@ def fileno(self): def close(self): os.close(self.fd) + def getsockopt(self, level, optname, buflen=None): + # act like an unerrored socket for the asyncore error handling + if level == socket.SOL_SOCKET and optname == socket.SO_ERROR and not buflen: + return 0 + raise NotImplementedError() + class _AsyncoreDispatcher(asyncore.dispatcher): From 417ba3be1e292e0abe8193826ede09f88d4014b1 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 16 May 2016 13:04:15 -0500 Subject: [PATCH 0642/2005] PYTHON-365 adding test case --- tests/integration/standard/test_cluster.py | 39 +++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 381df413b9..0f741c11a8 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -21,6 +21,7 @@ from mock import patch import time from uuid import uuid4 +import logging import cassandra from cassandra.cluster import Cluster, NoHostAvailable @@ -31,7 +32,7 @@ from cassandra.protocol import MAX_SUPPORTED_VERSION from cassandra.query import SimpleStatement, TraceUnavailable -from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, get_node, CASSANDRA_VERSION, execute_until_pass, execute_with_long_wait_retry, BasicExistingKeyspaceUnitTestCase, get_node +from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, get_node, CASSANDRA_VERSION, execute_until_pass, execute_with_long_wait_retry, get_node, MockLoggingHandler from tests.integration.util import assert_quiescent_pool_state @@ -716,3 +717,39 @@ def test_session_no_cluster(self): self.assertFalse(cluster.is_shutdown) cluster.shutdown() self.assertTrue(cluster.is_shutdown) + + +class DuplicateRpcTest(unittest.TestCase): + + load_balancing_policy = WhiteListRoundRobinPolicy(['127.0.0.1']) + + def setUp(self): + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, load_balancing_policy=self.load_balancing_policy) + self.session = self.cluster.connect() + self.session.execute("UPDATE system.peers SET rpc_address = '127.0.0.1' WHERE peer='127.0.0.2'") + + def tearDown(self): + self.session.execute("UPDATE system.peers SET rpc_address = '127.0.0.2' WHERE peer='127.0.0.2'") + self.cluster.shutdown() + + def test_duplicate(self): + """ + Test duplicate RPC addresses. + + Modifies the system.peers table to make hosts have the same rpc address. Ensures such hosts are filtered out and a message is logged + + @since 3.4 + @jira_ticket PYTHON-366 + @expected_result only one hosts' metadata will be populated + + @test_category metadata + """ + mock_handler = MockLoggingHandler() + logger = logging.getLogger(cassandra.cluster.__name__) + logger.addHandler(mock_handler) + test_cluster = self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, load_balancing_policy=self.load_balancing_policy) + test_cluster.connect() + warnings = mock_handler.messages.get("warning") + self.assertEqual(len(warnings), 1) + self.assertTrue('multiple' in warnings[0]) + logger.removeHandler(mock_handler) From 3d72ca0e6ee1d32b7b35867d808cd07ac3bc488b Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 16 May 2016 15:00:04 -0500 Subject: [PATCH 0643/2005] PYTHON-521 adding tests documentation --- tests/integration/standard/test_cluster.py | 37 ++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 0f741c11a8..e618e6a6f2 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -671,6 +671,7 @@ def test_address_translator_with_mixed_nodes(self): for host in c.metadata.all_hosts(): self.assertEqual(adder_map.get(str(host)), host.broadcast_address) + class ContextManagementTest(unittest.TestCase): load_balancing_policy = WhiteListRoundRobinPolicy(['127.0.0.1']) @@ -679,11 +680,29 @@ class ContextManagementTest(unittest.TestCase): 'token_metadata_enabled': False} def test_no_connect(self): + """ + Test cluster context without connecting. + + @since 3.4 + @jira_ticket PYTHON-521 + @expected_result context should still be valid + + @test_category configuration + """ with Cluster() as cluster: self.assertFalse(cluster.is_shutdown) self.assertTrue(cluster.is_shutdown) def test_simple_nested(self): + """ + Test cluster and session contexts nested in one another. + + @since 3.4 + @jira_ticket PYTHON-521 + @expected_result cluster/session should be crated and shutdown appropriately. + + @test_category configuration + """ with Cluster(**self.cluster_kwargs) as cluster: with cluster.connect() as session: self.assertFalse(cluster.is_shutdown) @@ -693,6 +712,15 @@ def test_simple_nested(self): self.assertTrue(cluster.is_shutdown) def test_cluster_no_session(self): + """ + Test cluster context without session context. + + @since 3.4 + @jira_ticket PYTHON-521 + @expected_result Session should be created correctly. Cluster should shutdown outside of context + + @test_category configuration + """ with Cluster(**self.cluster_kwargs) as cluster: session = cluster.connect() self.assertFalse(cluster.is_shutdown) @@ -702,6 +730,15 @@ def test_cluster_no_session(self): self.assertTrue(cluster.is_shutdown) def test_session_no_cluster(self): + """ + Test session context without cluster context. + + @since 3.4 + @jira_ticket PYTHON-521 + @expected_result session should be created correctly. Session should shutdown correctly outside of context + + @test_category configuration + """ cluster = Cluster(**self.cluster_kwargs) unmanaged_session = cluster.connect() with cluster.connect() as session: From 7f7f798a03e3a8b8f67487133fef0040807b0dad Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 17 May 2016 16:39:31 -0500 Subject: [PATCH 0644/2005] remove extra lines following refactor --- cassandra/metadata.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 7f3433c3a7..4ff0c4f4d3 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1456,9 +1456,6 @@ def from_key(cls, key): @classmethod def from_string(cls, token_string): raise NotImplementedError() - """ `token_string` should be the string representation from the server. """ - # The hash partitioners just store the deciman value - return cls(int(token_string)) def __cmp__(self, other): if self.value < other.value: From e21b89b63fdd0163485686c202c60ce3c192e116 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 18 May 2016 11:24:57 -0500 Subject: [PATCH 0645/2005] PYTHON-555 adding test for dse specific metadata --- tests/integration/__init__.py | 1 + tests/integration/standard/test_metadata.py | 23 ++++++++++++++++++--- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index cd55757582..1ea27228f0 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -156,6 +156,7 @@ def _get_cass_version_from_dse(dse_version): greaterthancass21 = unittest.skipUnless(CASSANDRA_VERSION >= '2.2', 'Cassandra version 2.2 or greater required') greaterthanorequalcass30 = unittest.skipUnless(CASSANDRA_VERSION >= '3.0', 'Cassandra version 3.0 or greater required') lessthancass30 = unittest.skipUnless(CASSANDRA_VERSION < '3.0', 'Cassandra version less then 3.0 required') +dseonly = unittest.skipUnless(DSE_VERSION, "Test is only applicalbe to DSE clusters") def wait_for_node_socket(node, timeout): diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index ce6532da6d..c317e50c3e 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -34,9 +34,8 @@ from cassandra.pool import Host from tests.integration import get_cluster, use_singledc, PROTOCOL_VERSION, get_server_versions, execute_until_pass, \ - BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, BasicExistingKeyspaceUnitTestCase, drop_keyspace_shutdown_cluster, CASSANDRA_VERSION - -from tests.unit.cython.utils import notcython + BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, BasicExistingKeyspaceUnitTestCase, drop_keyspace_shutdown_cluster, CASSANDRA_VERSION, \ + BasicExistingSegregatedKeyspaceUnitTestCase, dseonly, DSE_VERSION def setup_module(): @@ -2419,3 +2418,21 @@ def test_metadata_with_quoted_identifiers(self): value_column = mv_columns[2] self.assertIsNotNone(value_column) self.assertEquals(value_column.name, 'the Value') + + +@dseonly +class DSEMetadataTest(BasicExistingSegregatedKeyspaceUnitTestCase): + + def test_dse_specific_meta(self): + """ + Test to ensure DSE metadata is populated appropriately. + @since 3.4 + @jira_ticket PYTHON-555 + @expected_result metadata for dse_version, and dse_workload should be populated on dse clusters + + @test_category metadata + """ + for host in self.cluster.metadata.all_hosts(): + self.assertIsNotNone(host.dse_version, "Dse version not populated as expected") + self.assertEqual(host.dse_version, DSE_VERSION) + self.assertTrue("Cassandra" in host.dse_workload) From 1a9ca7c5f7eed7bda565e2d5d98b576b81b2d78d Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 12 May 2016 17:05:37 -0500 Subject: [PATCH 0646/2005] PYTHON-556 Adding basic test for prepare_on_all_hosts --- tests/integration/__init__.py | 6 +++ tests/integration/standard/test_cluster.py | 2 +- tests/integration/standard/test_query.py | 52 ++++++++++++++++++++-- 3 files changed, 55 insertions(+), 5 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 1ea27228f0..64efcaa4c2 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -529,6 +529,12 @@ def reset(self): 'critical': [], } + def get_message_count(self, level, sub_string): + count = 0 + for msg in self.messages.get(level): + if sub_string in msg: + count+=1 + return count class BasicExistingKeyspaceUnitTestCase(BasicKeyspaceUnitTestCase): """ diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index e618e6a6f2..0bca7bddc6 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -641,7 +641,7 @@ def test_address_translator_basic(self): @since 3.3 @jira_ticket PYTHON-69 - @expected_result only one hosts' metadata will be populeated + @expected_result only one hosts' metadata will be populated @test_category metadata """ diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 881eed54dd..4bd742bc42 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -19,14 +19,14 @@ import unittest2 as unittest except ImportError: import unittest # noqa - -from cassandra import ConsistencyLevel, Unavailable, InvalidRequest +import logging +from cassandra import ConsistencyLevel, Unavailable, InvalidRequest, cluster from cassandra.query import (PreparedStatement, BoundStatement, SimpleStatement, BatchStatement, BatchType, dict_factory, TraceUnavailable) from cassandra.cluster import Cluster, NoHostAvailable -from cassandra.policies import HostDistance +from cassandra.policies import HostDistance, RoundRobinPolicy -from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, get_server_versions, greaterthanprotocolv3, get_node +from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, get_server_versions, greaterthanprotocolv3, MockLoggingHandler import time import re @@ -340,6 +340,50 @@ def test_bound_keyspace(self): self.assertEqual(bound.keyspace, 'test3rf') +class ForcedHostSwitchPolicy(RoundRobinPolicy): + + def make_query_plan(self, working_keyspace=None, query=None): + if query is not None and "system.local" in str(query): + if hasattr(self, 'counter'): + self.counter += 1 + else: + self.counter = 0 + index = self.counter % 3 + a = list(self._live_hosts) + value = [a[index]] + return value + else: + return list(self._live_hosts) + + +class PreparedStatementArgTest(unittest.TestCase): + + def test_prepare_on_all_hosts(self): + """ + Test to validate prepare_on_all_hosts flag is honored. + + Use a special ForcedHostSwitchPolicy to ensure prepared queries are cycled over nodes that should not + have them prepared. Check the logs to insure they are being re-prepared on those nodes + + @since 3.4.0 + @jira_ticket PYTHON-556 + @expected_result queries will have to re-prepared on hosts that aren't the control connection + """ + white_list = ForcedHostSwitchPolicy() + clus = Cluster( + load_balancing_policy=white_list, + protocol_version=PROTOCOL_VERSION, prepare_on_all_hosts=False, reprepare_on_up=False) + session = clus.connect() + mock_handler = MockLoggingHandler() + logger = logging.getLogger(cluster.__name__) + logger.addHandler(mock_handler) + select_statement = session.prepare("SELECT * FROM system.local") + session.execute(select_statement) + session.execute(select_statement) + session.execute(select_statement) + self.assertEqual(2, mock_handler.get_message_count('debug', "Re-preparing")) + + class PrintStatementTests(unittest.TestCase): """ Test that shows the format used when printing Statements From b77e2e78cc8bae94d1e6bf211e76b7afec164957 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 19 May 2016 14:37:41 -0400 Subject: [PATCH 0647/2005] Resolve hosts in WhiteListRoundRobinPolicy --- cassandra/policies.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/cassandra/policies.py b/cassandra/policies.py index 4725f068c5..68cd6eb8ba 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -16,6 +16,7 @@ import logging from random import randint from threading import Lock +import socket from cassandra import ConsistencyLevel, OperationTimedOut @@ -402,11 +403,15 @@ def __init__(self, hosts): The `hosts` parameter should be a sequence of hosts to permit connections to. """ + self._allowed_hosts = hosts + self._allowed_hosts_resolved = [endpoint[4][0] for a in self._allowed_hosts + for endpoint in socket.getaddrinfo(a, None, socket.AF_UNSPEC, socket.SOCK_STREAM)] + RoundRobinPolicy.__init__(self) def populate(self, cluster, hosts): - self._live_hosts = frozenset(h for h in hosts if h.address in self._allowed_hosts) + self._live_hosts = frozenset(h for h in hosts if h.address in self._allowed_hosts_resolved) if len(hosts) <= 1: self._position = 0 @@ -414,17 +419,17 @@ def populate(self, cluster, hosts): self._position = randint(0, len(hosts) - 1) def distance(self, host): - if host.address in self._allowed_hosts: + if host.address in self._allowed_hosts_resolved: return HostDistance.LOCAL else: return HostDistance.IGNORED def on_up(self, host): - if host.address in self._allowed_hosts: + if host.address in self._allowed_hosts_resolved: RoundRobinPolicy.on_up(self, host) def on_add(self, host): - if host.address in self._allowed_hosts: + if host.address in self._allowed_hosts_resolved: RoundRobinPolicy.on_add(self, host) From 546812e28c10c33e2db445b9f85e1cdce5f29534 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 19 May 2016 14:57:39 -0500 Subject: [PATCH 0648/2005] not all types support null in C* 2.0 --- tests/integration/standard/test_cython_protocol_handlers.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_cython_protocol_handlers.py b/tests/integration/standard/test_cython_protocol_handlers.py index 783df2617f..87f2bc8424 100644 --- a/tests/integration/standard/test_cython_protocol_handlers.py +++ b/tests/integration/standard/test_cython_protocol_handlers.py @@ -12,7 +12,7 @@ from cassandra.cluster import Cluster, NoHostAvailable from cassandra.protocol import ProtocolHandler, LazyProtocolHandler, NumpyProtocolHandler, ConfigurationException from cassandra.cython_deps import HAVE_CYTHON, HAVE_NUMPY -from tests.integration import use_singledc, PROTOCOL_VERSION, notprotocolv1, drop_keyspace_shutdown_cluster, VERIFY_CYTHON, BasicSharedKeyspaceUnitTestCase, execute_with_retry_tolerant +from tests.integration import use_singledc, PROTOCOL_VERSION, notprotocolv1, drop_keyspace_shutdown_cluster, VERIFY_CYTHON, BasicSharedKeyspaceUnitTestCase, execute_with_retry_tolerant, greaterthancass20 from tests.integration.datatype_utils import update_datatypes from tests.integration.standard.utils import ( create_table_with_all_types, get_all_primitive_params, get_primitive_datatypes) @@ -232,7 +232,8 @@ def setUp(self): self.session.row_factory = tuple_factory @numpytest - def test_null_types3(self): + @greaterthancass20 + def test_null_types(self): """ Test to validate that the numpy protocol handler can deal with null values. @since 3.3.0 From 92dad99f33bf6be5f186e30490347505ea0b1f66 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 19 May 2016 16:00:37 -0400 Subject: [PATCH 0649/2005] add WhiteListRoundRobinPolicyTest.test_hosts_with_hostname --- tests/unit/test_policies.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 6640ccf1fa..33a712d5a6 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -28,7 +28,7 @@ from cassandra import ConsistencyLevel from cassandra.cluster import Cluster from cassandra.metadata import Metadata -from cassandra.policies import (RoundRobinPolicy, DCAwareRoundRobinPolicy, +from cassandra.policies import (RoundRobinPolicy, WhiteListRoundRobinPolicy, DCAwareRoundRobinPolicy, TokenAwarePolicy, SimpleConvictionPolicy, HostDistance, ExponentialReconnectionPolicy, RetryPolicy, WriteType, @@ -1113,3 +1113,17 @@ def test_unavailable(self): query=None, consistency=ONE, required_replicas=3, alive_replicas=1, retry_num=0) self.assertEqual(retry, RetryPolicy.RETRY) self.assertEqual(consistency, ConsistencyLevel.ONE) + + +class WhiteListRoundRobinPolicyTest(unittest.TestCase): + + def test_hosts_with_hostname(self): + hosts = ['localhost'] + policy = WhiteListRoundRobinPolicy(hosts) + host = Host("127.0.0.1", SimpleConvictionPolicy) + policy.populate(None, [host]) + + qplan = list(policy.make_query_plan()) + self.assertEqual(sorted(qplan), [host]) + + self.assertEqual(policy.distance(host), HostDistance.LOCAL) From 8fcf93dc8ff79fa5b053fa4f68a9ccc6a115af62 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 19 May 2016 19:55:43 -0500 Subject: [PATCH 0650/2005] Stabilizing LBP tests --- .../long/test_loadbalancingpolicies.py | 53 ++++++++++--------- 1 file changed, 28 insertions(+), 25 deletions(-) diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index 54ed0d0a0e..9687b0ce69 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -60,15 +60,19 @@ def _connect_probe_cluster(self): schema_metadata_enabled=False, token_metadata_enabled=False) self.probe_session = self.probe_cluster.connect() - def _wait_for_nodes_up(self, nodes): - self._connect_probe_cluster() + def _wait_for_nodes_up(self, nodes, cluster=None): + if not cluster: + self._connect_probe_cluster() + cluster = self.probe_cluster for n in nodes: - wait_for_up(self.probe_cluster, n) + wait_for_up(cluster, n) - def _wait_for_nodes_down(self, nodes): - self._connect_probe_cluster() + def _wait_for_nodes_down(self, nodes, cluster=None): + if not cluster: + self._connect_probe_cluster() + cluster = self.probe_cluster for n in nodes: - wait_for_down(self.probe_cluster, n) + wait_for_down(cluster, n) def _cluster_session_with_lbp(self, lbp): # create a cluster with no delay on events @@ -162,8 +166,7 @@ def test_roundrobin(self): use_singledc() keyspace = 'test_roundrobin' cluster, session = self._cluster_session_with_lbp(RoundRobinPolicy()) - self._wait_for_nodes_up(range(1, 4)) - + self._wait_for_nodes_up(range(1, 4), cluster) create_schema(cluster, session, keyspace, replication_factor=3) self._insert(session, keyspace) self._query(session, keyspace) @@ -173,7 +176,7 @@ def test_roundrobin(self): self.coordinator_stats.assert_query_count_equals(self, 3, 4) force_stop(3) - self._wait_for_nodes_down([3]) + self._wait_for_nodes_down([3], cluster) self.coordinator_stats.reset_counts() self._query(session, keyspace) @@ -184,8 +187,8 @@ def test_roundrobin(self): decommission(1) start(3) - self._wait_for_nodes_down([1]) - self._wait_for_nodes_up([3]) + self._wait_for_nodes_down([1], cluster) + self._wait_for_nodes_up([3], cluster) self.coordinator_stats.reset_counts() self._query(session, keyspace) @@ -199,7 +202,7 @@ def test_roundrobin_two_dcs(self): use_multidc([2, 2]) keyspace = 'test_roundrobin_two_dcs' cluster, session = self._cluster_session_with_lbp(RoundRobinPolicy()) - self._wait_for_nodes_up(range(1, 5)) + self._wait_for_nodes_up(range(1, 5), cluster) create_schema(cluster, session, keyspace, replication_strategy=[2, 2]) self._insert(session, keyspace) @@ -216,7 +219,7 @@ def test_roundrobin_two_dcs(self): # reset control connection self._insert(session, keyspace, count=1000) - self._wait_for_nodes_up([5]) + self._wait_for_nodes_up([5], cluster) self.coordinator_stats.reset_counts() self._query(session, keyspace) @@ -233,7 +236,7 @@ def test_roundrobin_two_dcs_2(self): use_multidc([2, 2]) keyspace = 'test_roundrobin_two_dcs_2' cluster, session = self._cluster_session_with_lbp(RoundRobinPolicy()) - self._wait_for_nodes_up(range(1, 5)) + self._wait_for_nodes_up(range(1, 5), cluster) create_schema(cluster, session, keyspace, replication_strategy=[2, 2]) self._insert(session, keyspace) @@ -250,7 +253,7 @@ def test_roundrobin_two_dcs_2(self): # reset control connection self._insert(session, keyspace, count=1000) - self._wait_for_nodes_up([5]) + self._wait_for_nodes_up([5], cluster) self.coordinator_stats.reset_counts() self._query(session, keyspace) @@ -388,7 +391,7 @@ def test_token_aware_prepared(self): def token_aware(self, keyspace, use_prepared=False): use_singledc() cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy())) - self._wait_for_nodes_up([1, 2, 3]) + self._wait_for_nodes_up(range(1, 4), cluster) create_schema(cluster, session, keyspace, replication_factor=1) self._insert(session, keyspace) @@ -407,7 +410,7 @@ def token_aware(self, keyspace, use_prepared=False): self.coordinator_stats.reset_counts() force_stop(2) - self._wait_for_nodes_down([2]) + self._wait_for_nodes_down([2], cluster) try: self._query(session, keyspace, use_prepared=use_prepared) @@ -419,7 +422,7 @@ def token_aware(self, keyspace, use_prepared=False): self.coordinator_stats.reset_counts() start(2) - self._wait_for_nodes_up([2]) + self._wait_for_nodes_up([2], cluster) self._query(session, keyspace, use_prepared=use_prepared) @@ -429,7 +432,7 @@ def token_aware(self, keyspace, use_prepared=False): self.coordinator_stats.reset_counts() stop(2) - self._wait_for_nodes_down([2]) + self._wait_for_nodes_down([2], cluster) try: self._query(session, keyspace, use_prepared=use_prepared) @@ -439,9 +442,9 @@ def token_aware(self, keyspace, use_prepared=False): self.coordinator_stats.reset_counts() start(2) - self._wait_for_nodes_up([2]) + self._wait_for_nodes_up([2], cluster) decommission(2) - self._wait_for_nodes_down([2]) + self._wait_for_nodes_down([2], cluster) self._query(session, keyspace, use_prepared=use_prepared) @@ -459,7 +462,7 @@ def test_token_aware_composite_key(self): keyspace = 'test_token_aware_composite_key' table = 'composite' cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy())) - self._wait_for_nodes_up([1, 2, 3]) + self._wait_for_nodes_up(range(1, 4), cluster) create_schema(cluster, session, keyspace, replication_factor=2) session.execute('CREATE TABLE %s (' @@ -483,7 +486,7 @@ def test_token_aware_with_rf_2(self, use_prepared=False): use_singledc() keyspace = 'test_token_aware_with_rf_2' cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy())) - self._wait_for_nodes_up([1, 2, 3]) + self._wait_for_nodes_up(range(1, 4), cluster) create_schema(cluster, session, keyspace, replication_factor=2) self._insert(session, keyspace) @@ -495,7 +498,7 @@ def test_token_aware_with_rf_2(self, use_prepared=False): self.coordinator_stats.reset_counts() stop(2) - self._wait_for_nodes_down([2]) + self._wait_for_nodes_down([2],cluster) self._query(session, keyspace) @@ -508,7 +511,7 @@ def test_token_aware_with_rf_2(self, use_prepared=False): def test_token_aware_with_local_table(self): use_singledc() cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy())) - self._wait_for_nodes_up([1, 2, 3]) + self._wait_for_nodes_up(range(1, 4), cluster) p = session.prepare("SELECT * FROM system.local WHERE key=?") # this would blow up prior to 61b4fad From b3dfbce58b16fb0a6cfa8ab2e9e8d3b65298683e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 20 May 2016 08:24:05 -0500 Subject: [PATCH 0651/2005] 3.4 changelog update --- CHANGELOG.rst | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 7ffc4ec388..86bb9089a2 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,32 @@ +3.4.0 +===== + +Features +-------- +* Include DSE version and workload in Host data (PYTHON-555) +* Add a context manager to Cluster and Session (PYTHON-521) +* Better Error Message for Unsupported Protocol Version (PYTHON-157) +* Make the error message explicitly state when an error comes from the server (PYTHON-412) +* Short Circuit meta refresh on topo change if NEW_NODE already exists (PYTHON-557) +* Show warning when the wrong config is passed to SimpleStatement (PYTHON-219) +* Return namedtuple result pairs from execute_concurrent (PYTHON-362) +* BatchStatement should enforce batch size limit in a better way (PYTHON-151) +* Validate min/max request thresholds for connection pool scaling (PYTHON-220) +* Handle or warn about multiple hosts with the same rpc_address (PYTHON-365) +* Write docs around working with datetime and timezones (PYTHON-394) + +Bug Fixes +-------- +* High CPU utilization when using asyncore event loop (PYTHON-239) +* Fix CQL Export for non-ASCII Identifiers (PYTHON-447) +* Make stress scripts Python 2.6 compatible (PYTHON-434) +* UnicodeDecodeError when unicode characters in key in BOP (PYTHON-559) +* WhiteListRoundRobinPolicy should resolve hosts (PYTHON-565) +* Cluster and Session do not GC after leaving scope (PYTHON-135) +* Don't wait for schema agreement on ignored nodes (PYTHON-531) +* Reprepare on_up with many clients causes node overload (PYTHON-556) +* None inserted into host map when control connection node is decommissioned (PYTHON-548) + 3.3.0 ===== May 2, 2016 From 1afa1f4b2f29fafa99b7f5e551b717278df5515f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 23 May 2016 09:16:07 -0500 Subject: [PATCH 0652/2005] weakref.ref does not support keyword args in python 2.7.12+ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With thanks to Jan Urbański in #585 for the heads up. --- cassandra/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 60c98f2022..99d58eb6cc 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2262,7 +2262,7 @@ def _try_connect(self, host): # _clear_watcher will be called when this ControlConnection is about to be finalized # _watch_callback will get the actual callback from the Connection and relay it to # this object (after a dereferencing a weakref) - self_weakref = weakref.ref(self, callback=partial(_clear_watcher, weakref.proxy(connection))) + self_weakref = weakref.ref(self, partial(_clear_watcher, weakref.proxy(connection))) try: connection.register_watchers({ "TOPOLOGY_CHANGE": partial(_watch_callback, self_weakref, '_handle_topology_change'), From feab7e70590f897c634587ff472bab09456f31b2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 23 May 2016 16:14:46 -0500 Subject: [PATCH 0653/2005] 3.4 release ver and final changelog --- CHANGELOG.rst | 2 ++ cassandra/__init__.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 86bb9089a2..d9867fde25 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,5 +1,6 @@ 3.4.0 ===== +May 24, 2016 Features -------- @@ -26,6 +27,7 @@ Bug Fixes * Don't wait for schema agreement on ignored nodes (PYTHON-531) * Reprepare on_up with many clients causes node overload (PYTHON-556) * None inserted into host map when control connection node is decommissioned (PYTHON-548) +* weakref.ref does not accept keyword arguments (github #585) 3.3.0 ===== diff --git a/cassandra/__init__.py b/cassandra/__init__.py index da8c56755d..f7d46f6f9f 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 3, 0, 'post0') +__version_info__ = (3, 4, 0) __version__ = '.'.join(map(str, __version_info__)) From efa9a5410214b078725fd4b87d14be5882654215 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 23 May 2016 16:21:24 -0500 Subject: [PATCH 0654/2005] post-release version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index f7d46f6f9f..5650966260 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 4, 0) +__version_info__ = (3, 4, 0, 'post0') __version__ = '.'.join(map(str, __version_info__)) From acae4c7cbd83b431d2b79c96585b17c938704ae6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 24 May 2016 13:11:49 -0500 Subject: [PATCH 0655/2005] additional doc tweaks for new 3.4 features --- cassandra/cluster.py | 2 ++ cassandra/concurrent.py | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 99d58eb6cc..df3c213ac9 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -211,6 +211,8 @@ class Cluster(object): >>> ... >>> cluster.shutdown() + ``Cluster`` and ``Session`` also provide context management functions + which implicitly handle shutdown when leaving scope. """ contact_points = ['127.0.0.1'] diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index 8098b436d6..48cbab3e24 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -57,8 +57,8 @@ def execute_concurrent(session, statements_and_parameters, concurrency=100, rais footprint is marginal CPU overhead (more thread coordination and sorting out-of-order results on-the-fly). - A sequence of ``(success, result_or_exc)`` tuples is returned in the same - order that the statements were passed in. If ``success`` is :const:`False`, + A sequence of ``ExecutionResult(success, result_or_exc)`` namedtuples is returned + in the same order that the statements were passed in. If ``success`` is :const:`False`, there was an error executing the statement, and ``result_or_exc`` will be an :class:`Exception`. If ``success`` is :const:`True`, ``result_or_exc`` will be the query result. From fe8a842e2a82f0768f2e8cfae85e929a393a7821 Mon Sep 17 00:00:00 2001 From: Jonathan Ballet Date: Thu, 26 May 2016 11:40:57 +0200 Subject: [PATCH 0656/2005] Don't modify the Metadatas' hosts structure while iterating over it On Python 3.x, dict.values() returns a view object and not a list of the values. Updating the content of the original dict while iterating over the values may raise a RuntimeError exception, which is the case in the Cluster._refresh_node_list_and_token_map() method to remove discovered hosts which are not part of the specified cluster contact points. The bug doesn't happen using Python 2.x as dict.values() returns a new list containing the values of the dictionary, without any other reference to original dictionary object. This fix forces the original Python 2.x behavior to be the same in Python 3.x as well. PYTHON-572 --- cassandra/metadata.py | 2 +- tests/unit/test_metadata.py | 18 +++++++++++++++++- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 4ff0c4f4d3..2c3d137086 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -324,7 +324,7 @@ def all_hosts(self): Returns a list of all known :class:`.Host` instances in the cluster. """ with self._hosts_lock: - return self._hosts.values() + return list(self._hosts.values()) REPLICATION_STRATEGY_CLASS_PREFIX = "org.apache.cassandra.locator." diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index f3c6c05787..6d3ae5b9ef 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -30,7 +30,9 @@ LocalStrategy, protect_name, protect_names, protect_value, is_valid_name, UserType, KeyspaceMetadata, get_schema_parser, - _UnknownStrategy, ColumnMetadata, TableMetadata, IndexMetadata, Function, Aggregate) + _UnknownStrategy, ColumnMetadata, TableMetadata, + IndexMetadata, Function, Aggregate, + Metadata) from cassandra.policies import SimpleConvictionPolicy from cassandra.pool import Host @@ -474,3 +476,17 @@ def test_aggregate(self): def test_user_type(self): um = UserType(self.name, self.name, [self.name, self.name], [u'int', u'text']) um.export_as_string() + + +class HostsTests(unittest.TestCase): + def test_iterate_all_hosts_and_modify(self): + metadata = Metadata() + metadata.add_or_return_host(Host('dc1.1', SimpleConvictionPolicy)) + metadata.add_or_return_host(Host('dc1.2', SimpleConvictionPolicy)) + + assert len(metadata.all_hosts()) == 2 + + for host in metadata.all_hosts(): + metadata.remove_host(host) + + assert len(metadata.all_hosts()) == 0 From 04a9020faf0f16ef892cfccac9f52972284c2633 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 26 May 2016 09:37:02 -0500 Subject: [PATCH 0657/2005] match Cluster doc order to code, add missing attributes --- docs/api/cassandra/cluster.rst | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index fdea3e6789..8891a1ad30 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -11,8 +11,6 @@ .. autoattribute:: protocol_version - .. autoattribute:: port - .. autoattribute:: compression .. autoattribute:: auth_provider @@ -27,20 +25,20 @@ .. autoattribute:: address_translator - .. autoattribute:: connection_class - .. autoattribute:: metrics_enabled .. autoattribute:: metrics - .. autoattribute:: metadata - .. autoattribute:: ssl_options .. autoattribute:: sockopts .. autoattribute:: max_schema_agreement_wait + .. autoattribute:: metadata + + .. autoattribute:: connection_class + .. autoattribute:: control_connection_timeout .. autoattribute:: idle_heartbeat_interval @@ -49,6 +47,12 @@ .. autoattribute:: topology_event_refresh_window + .. autoattribute:: status_event_refresh_window + + .. autoattribute:: prepare_on_all_hosts + + .. autoattribute:: reprepare_on_up + .. autoattribute:: connect_timeout .. autoattribute:: schema_metadata_enabled From 1a6e7aadcd513dde58449f9403d1d8d05d40a119 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 26 May 2016 09:44:06 -0500 Subject: [PATCH 0658/2005] Remove duplicate docstring and add missing Cluster args --- cassandra/cluster.py | 6 ++++-- docs/api/cassandra/cluster.rst | 4 +++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index df3c213ac9..d311988dc8 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -617,8 +617,10 @@ def __init__(self, prepare_on_all_hosts=True, reprepare_on_up=True): """ - Any of the mutable Cluster attributes may be set as keyword arguments - to the constructor. + ``executor_threads`` defines the number of threads in a pool for handling asynchronous tasks such as + extablishing connection pools or refreshing metadata. + + Any of the mutable Cluster attributes may be set as keyword arguments to the constructor. """ if contact_points is not None: if isinstance(contact_points, six.string_types): diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 8891a1ad30..df0d58ab58 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -5,7 +5,9 @@ .. autoclass:: Cluster ([contact_points=('127.0.0.1',)][, port=9042][, executor_threads=2], **attr_kwargs) - Any of the mutable Cluster attributes may be set as keyword arguments to the constructor. + .. autoattribute:: contact_points + + .. autoattribute:: port .. autoattribute:: cql_version From 897474471fe5222f3e698b4ffad9c1f5836e5688 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 26 May 2016 12:15:52 -0500 Subject: [PATCH 0659/2005] gevent: don't confound socket timeout with zero read PYTHON-573 --- cassandra/io/geventreactor.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index c825a8c1a2..65572a664c 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -135,6 +135,7 @@ def handle_read(self): log.debug("Exception in read for %s: %s", self, err) self.defunct(err) return # leave the read loop + continue if self._iobuf.tell(): self.process_io_buffer() From 40e7e63a980b1ec3c2014ba0dbe55a283e015d47 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 26 May 2016 12:28:28 -0500 Subject: [PATCH 0660/2005] test note and normalize asserts --- tests/unit/test_metadata.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 6d3ae5b9ef..fef360dba6 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -480,13 +480,16 @@ def test_user_type(self): class HostsTests(unittest.TestCase): def test_iterate_all_hosts_and_modify(self): + """ + PYTHON-572 + """ metadata = Metadata() metadata.add_or_return_host(Host('dc1.1', SimpleConvictionPolicy)) metadata.add_or_return_host(Host('dc1.2', SimpleConvictionPolicy)) - assert len(metadata.all_hosts()) == 2 + self.assertEqual(len(metadata.all_hosts()), 2) - for host in metadata.all_hosts(): + for host in metadata.all_hosts(): # this would previously raise in Py3 metadata.remove_host(host) - assert len(metadata.all_hosts()) == 0 + self.assertEqual(len(metadata.all_hosts()), 0) From 01a611a7534fc6ac5f971ca83bbed01db115737a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 26 May 2016 13:53:13 -0500 Subject: [PATCH 0661/2005] 3.4.1 changelog and version --- CHANGELOG.rst | 9 +++++++++ cassandra/__init__.py | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index d9867fde25..f88e84c962 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,12 @@ +3.4.1 +===== +May 26, 2016 + +Bug Fixes +-------- +* Gevent connection closes on IO timeout (PYTHON-573) +* "dictionary changed size during iteration" with Python 3 (PYTHON-572) + 3.4.0 ===== May 24, 2016 diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 5650966260..81172c80af 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 4, 0, 'post0') +__version_info__ = (3, 4, 1) __version__ = '.'.join(map(str, __version_info__)) From 6700d3ad507ba95747aae78e778dbf47a0c877e5 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 26 May 2016 14:37:12 -0500 Subject: [PATCH 0662/2005] post release version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 81172c80af..67f5b147ff 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 4, 1) +__version_info__ = (3, 4, 1, 'post0') __version__ = '.'.join(map(str, __version_info__)) From 124d197a77c0b62749775f3e477c93a79ec23bec Mon Sep 17 00:00:00 2001 From: Eli Green Date: Thu, 2 Jun 2016 16:47:11 +0200 Subject: [PATCH 0663/2005] Avoid LWTExceptions when updating columns that are part of the condition --- cassandra/cqlengine/query.py | 14 +++++++++++++- .../integration/cqlengine/test_lwt_conditional.py | 15 +++++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 05962b4e94..25a6073722 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -1151,6 +1151,7 @@ class Row(Model): return nulled_columns = set() + updated_columns = set() us = UpdateStatement(self.column_family_name, where=self._where, ttl=self._ttl, timestamp=self._timestamp, conditionals=self._conditional, if_exists=self._if_exists) for name, val in values.items(): @@ -1171,13 +1172,17 @@ class Row(Model): continue us.add_update(col, val, operation=col_op) + updated_columns.add(col_name) if us.assignments: self._execute(us) + null_conditional = [condition for condition in self._conditional + if condition.field not in updated_columns] + if nulled_columns: ds = DeleteStatement(self.column_family_name, fields=nulled_columns, - where=self._where, conditionals=self._conditional, if_exists=self._if_exists) + where=self._where, conditionals=null_conditional, if_exists=self._if_exists) self._execute(ds) @@ -1262,6 +1267,8 @@ def update(self): conditionals=self._conditional, if_exists=self._if_exists) for name, col in self.instance._clustering_keys.items(): null_clustering_key = null_clustering_key and col._val_is_null(getattr(self.instance, name, None)) + + updated_columns = set() # get defined fields and their column names for name, col in self.model._columns.items(): # if clustering key is null, don't include non static columns @@ -1279,6 +1286,7 @@ def update(self): static_changed_only = static_changed_only and col.static statement.add_update(col, val, previous=val_mgr.previous_value) + updated_columns.add(col.db_field_name) if statement.assignments: for name, col in self.model._primary_keys.items(): @@ -1288,6 +1296,10 @@ def update(self): statement.add_where(col, EqualsOperator(), getattr(self.instance, name)) self._execute(statement) + # remove conditions on fields that have been updated + self._conditional = [condition for condition in self._conditional + if condition.field not in updated_columns] + if not null_clustering_key: self._delete_null_columns() diff --git a/tests/integration/cqlengine/test_lwt_conditional.py b/tests/integration/cqlengine/test_lwt_conditional.py index d273df9cc0..8395154c34 100644 --- a/tests/integration/cqlengine/test_lwt_conditional.py +++ b/tests/integration/cqlengine/test_lwt_conditional.py @@ -234,3 +234,18 @@ def test_update_to_none(self): self.assertIsNotNone(TestConditionalModel.objects(id=t.id).first().text) TestConditionalModel.objects(id=t.id).iff(count=5).update(text=None) self.assertIsNone(TestConditionalModel.objects(id=t.id).first().text) + + def test_column_delete_after_update(self): + # DML path + t = TestConditionalModel.create(text='something', count=5) + t.iff(count=5).update(text=None, count=6) + + self.assertIsNone(t.text) + self.assertEqual(t.count, 6) + + # QuerySet path + t = TestConditionalModel.create(text='something', count=5) + TestConditionalModel.objects(id=t.id).iff(count=5).update(text=None, count=6) + + self.assertIsNone(TestConditionalModel.objects(id=t.id).first().text) + self.assertEqual(TestConditionalModel.objects(id=t.id).first().count, 6) From 86d4f180abc80dc5fc13650cb7a3c2f6dfb073ab Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 2 Jun 2016 17:35:02 -0400 Subject: [PATCH 0664/2005] Fix cqlengine only() function when there are defered fields --- cassandra/cqlengine/query.py | 4 +- .../cqlengine/query/test_queryset.py | 39 +++++++++++++++++-- 2 files changed, 37 insertions(+), 6 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 05962b4e94..d838957cbe 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -971,8 +971,8 @@ def _select_fields(self): fields = self.model._columns.keys() if self._defer_fields: fields = [f for f in fields if f not in self._defer_fields] - elif self._only_fields: - fields = self._only_fields + if self._only_fields: + fields = [f for f in fields if f in self._only_fields] return [self.model._columns[f].db_field_name for f in fields] return super(ModelQuerySet, self)._select_fields() diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 9d01cd5628..53426583ff 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -218,16 +218,47 @@ def test_queryset_with_distinct(self): query3 = TestModel.objects.distinct(['test_id', 'attempt_id']) self.assertEqual(len(query3._distinct_fields), 2) - def test_defining_only_and_defer_fails(self): + def test_defining_only_fields(self): """ - Tests that trying to add fields to either only or defer, or doing so more than once fails + Tests defining only fields """ - def test_defining_only_or_defer_on_nonexistant_fields_fails(self): + q = TestModel.objects.only(['attempt_id', 'description']) + self.assertEqual(q._select_fields(), ['attempt_id', 'description']) + + with self.assertRaises(query.QueryException): + TestModel.objects.only(['nonexistant_field']) + + with self.assertRaises(query.QueryException): + TestModel.objects.only(['description']).only(['attempt_id']) + + q = TestModel.objects.only(['attempt_id', 'description']) + q = q.defer(['description']) + self.assertEqual(q._select_fields(), ['attempt_id']) + + # PYTHON-560 + q = TestModel.objects.filter(test_id=0).only(['test_id', 'attempt_id', 'description']) + self.assertEqual(q._select_fields(), ['attempt_id', 'description']) + + def test_defining_defer_fields(self): """ - Tests that setting only or defer fields that don't exist raises an exception + Tests defining defer fields """ + q = TestModel.objects.defer(['attempt_id', 'description']) + self.assertEqual(q._select_fields(), ['test_id', 'expected_result', 'test_result']) + + with self.assertRaises(query.QueryException): + TestModel.objects.defer(['nonexistant_field']) + + q = TestModel.objects.defer(['attempt_id', 'description']) + q = q.defer(['expected_result']) + self.assertEqual(q._select_fields(), ['test_id', 'test_result']) + + q = TestModel.objects.defer(['description', 'attempt_id']) + q = q.only(['description', 'test_id']) + self.assertEqual(q._select_fields(), ['test_id']) + class BaseQuerySetUsage(BaseCassEngTestCase): From 49aa66f17064a7a9e8403a31418350c2355a03ba Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 3 Jun 2016 08:44:10 -0400 Subject: [PATCH 0665/2005] Raise QueryException when there is no fields to select --- cassandra/cqlengine/query.py | 3 +++ .../cqlengine/query/test_queryset.py | 20 +++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index d838957cbe..10d27ab580 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -973,6 +973,9 @@ def _select_fields(self): fields = [f for f in fields if f not in self._defer_fields] if self._only_fields: fields = [f for f in fields if f in self._only_fields] + if not fields: + raise QueryException('No fields in select query. Only fields: "{0}", defer fields: "{1}"'.format( + ','.join(self._only_fields), ','.join(self._defer_fields))) return [self.model._columns[f].db_field_name for f in fields] return super(ModelQuerySet, self)._select_fields() diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 53426583ff..4469ab852c 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -223,15 +223,18 @@ def test_defining_only_fields(self): Tests defining only fields """ + # simple only definition q = TestModel.objects.only(['attempt_id', 'description']) self.assertEqual(q._select_fields(), ['attempt_id', 'description']) with self.assertRaises(query.QueryException): TestModel.objects.only(['nonexistant_field']) + # Cannot define more than once only fields with self.assertRaises(query.QueryException): TestModel.objects.only(['description']).only(['attempt_id']) + # only with defer fields q = TestModel.objects.only(['attempt_id', 'description']) q = q.defer(['description']) self.assertEqual(q._select_fields(), ['attempt_id']) @@ -240,25 +243,42 @@ def test_defining_only_fields(self): q = TestModel.objects.filter(test_id=0).only(['test_id', 'attempt_id', 'description']) self.assertEqual(q._select_fields(), ['attempt_id', 'description']) + # no fields to select + with self.assertRaises(query.QueryException): + q = TestModel.objects.only(['test_id']).defer(['test_id']) + q._select_fields() + + with self.assertRaises(query.QueryException): + q = TestModel.objects.filter(test_id=0).only(['test_id']) + q._select_fields() + + def test_defining_defer_fields(self): """ Tests defining defer fields """ + # simple defer definition q = TestModel.objects.defer(['attempt_id', 'description']) self.assertEqual(q._select_fields(), ['test_id', 'expected_result', 'test_result']) with self.assertRaises(query.QueryException): TestModel.objects.defer(['nonexistant_field']) + # defer more than one q = TestModel.objects.defer(['attempt_id', 'description']) q = q.defer(['expected_result']) self.assertEqual(q._select_fields(), ['test_id', 'test_result']) + # defer with only q = TestModel.objects.defer(['description', 'attempt_id']) q = q.only(['description', 'test_id']) self.assertEqual(q._select_fields(), ['test_id']) + # implicit defer + q = TestModel.objects.filter(test_id=0) + self.assertEqual(q._select_fields(), ['attempt_id', 'description', 'expected_result', 'test_result']) + class BaseQuerySetUsage(BaseCassEngTestCase): From 89cd1b18162f4bcc153329d0bbc05b5fded46687 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 3 Jun 2016 08:55:16 -0400 Subject: [PATCH 0666/2005] Add only and defer methods in cqlengine docs --- docs/api/cassandra/cqlengine/query.rst | 4 ++++ tests/integration/cqlengine/query/test_queryset.py | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/docs/api/cassandra/cqlengine/query.rst b/docs/api/cassandra/cqlengine/query.rst index ad5489f207..461ec9b969 100644 --- a/docs/api/cassandra/cqlengine/query.rst +++ b/docs/api/cassandra/cqlengine/query.rst @@ -42,6 +42,10 @@ The methods here are used to filter, order, and constrain results. .. automethod:: allow_filtering + .. automethod:: only + + .. automethod:: defer + .. automethod:: timestamp .. automethod:: ttl diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 4469ab852c..a64765206e 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -228,7 +228,7 @@ def test_defining_only_fields(self): self.assertEqual(q._select_fields(), ['attempt_id', 'description']) with self.assertRaises(query.QueryException): - TestModel.objects.only(['nonexistant_field']) + TestModel.objects.only(['nonexistent_field']) # Cannot define more than once only fields with self.assertRaises(query.QueryException): @@ -263,7 +263,7 @@ def test_defining_defer_fields(self): self.assertEqual(q._select_fields(), ['test_id', 'expected_result', 'test_result']) with self.assertRaises(query.QueryException): - TestModel.objects.defer(['nonexistant_field']) + TestModel.objects.defer(['nonexistent_field']) # defer more than one q = TestModel.objects.defer(['attempt_id', 'description']) From 087e2cca6372f151537a85ce0d87b8f779e99035 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 16 May 2016 15:35:49 -0500 Subject: [PATCH 0667/2005] basic execution config profile definition --- cassandra/cluster.py | 63 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index d311988dc8..f914adf8b0 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -196,6 +196,55 @@ def default_lbp_factory(): return DCAwareRoundRobinPolicy() +class ExecutionProfile(object): + load_balancing_policy = None + retry_policy = None + consistency_level = None + serial_consistency_level = None + request_timeout = None + row_factory = None + + def __init__(self, load_balancing_policy, retry_policy, consistency_level, + serial_consistency_level, request_timeout, row_factory): + self.load_balancing_policy = load_balancing_policy + self.retry_policy = retry_policy + self.consistency_level = consistency_level + self.serial_consistency_level = serial_consistency_level + self.request_timeout = request_timeout + self.row_factory = row_factory + + +class DefaultExecutionProfile(ExecutionProfile): + def __init__(self): + super(DefaultExecutionProfile).__init__( + default_lbp_factory(), + RetryPolicy(), + ConsistencyLevel.LOCAL_ONE, + None, + 10, + named_tuple_factory) + + +class ProfileManager(object): + + def __init__(self): + self.profiles = dict() + + def distance(self, host): + distances = set(p.load_balancing_policy.distance(host) for p in self._profiles.values()) + return HostDistance.LOCAL if HostDistance.LOCAL in distances else \ + HostDistance.REMOTE if HostDistance.REMOTE in distances else \ + HostDistance.IGNORED + + def populate(self, cluster, hosts): + for p in self._profiles.values(): + p.load_balancing_policy.populate(cluster, hosts) + + def check_supported(self): + for p in self._profiles.values(): + p.load_balancing_policy.check_supported() + + class Cluster(object): """ The main class to use when interacting with a Cassandra cluster. @@ -570,6 +619,8 @@ def token_metadata_enabled(self): def token_metadata_enabled(self, enabled): self.control_connection._token_meta_enabled = bool(enabled) + profile_manager = None + sessions = None control_connection = None scheduler = None @@ -669,6 +720,11 @@ def __init__(self, if connection_class is not None: self.connection_class = connection_class + self.profile_manager = ProfileManager() + self.profile_manager.profiles[None] = ExecutionProfile(self.load_balancing_policy, self.default_retry_policy, + Session.default_consistency_level, Session.default_timeout, + Session.row_factory) + self.metrics_enabled = metrics_enabled self.ssl_options = ssl_options self.sockopts = sockopts @@ -789,6 +845,13 @@ def __init__(self, street, zipcode): session.user_type_registered(keyspace, user_type, klass) UserType.evict_udt_class(keyspace, user_type) + def add_execution_profile(self, name, profile): + if not isinstance(profile, ExecutionProfile): + raise TypeError("profile must be an instance of ExecutionProfile") + if name is None: + raise ValueError("'None' as profile name is reserved for the default profile") + self.profile_manager.profiles[name] = profile + def get_min_requests_per_connection(self, host_distance): return self._min_requests_per_connection[host_distance] From 192dd3d44e5d57869d87b32fee0d5a66ab0e4aca Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 17 May 2016 16:09:02 -0500 Subject: [PATCH 0668/2005] make request execution use config profiles --- cassandra/cluster.py | 114 +++++++++++++++++++++++++++++-------------- 1 file changed, 78 insertions(+), 36 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index f914adf8b0..14600fd4d1 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -216,7 +216,7 @@ def __init__(self, load_balancing_policy, retry_policy, consistency_level, class DefaultExecutionProfile(ExecutionProfile): def __init__(self): - super(DefaultExecutionProfile).__init__( + super(DefaultExecutionProfile, self).__init__( default_lbp_factory(), RetryPolicy(), ConsistencyLevel.LOCAL_ONE, @@ -231,19 +231,35 @@ def __init__(self): self.profiles = dict() def distance(self, host): - distances = set(p.load_balancing_policy.distance(host) for p in self._profiles.values()) + distances = set(p.load_balancing_policy.distance(host) for p in self.profiles.values()) return HostDistance.LOCAL if HostDistance.LOCAL in distances else \ HostDistance.REMOTE if HostDistance.REMOTE in distances else \ HostDistance.IGNORED def populate(self, cluster, hosts): - for p in self._profiles.values(): + for p in self.profiles.values(): p.load_balancing_policy.populate(cluster, hosts) def check_supported(self): - for p in self._profiles.values(): + for p in self.profiles.values(): p.load_balancing_policy.check_supported() + def on_up(self, host): + for p in self.profiles.values(): + p.load_balancing_policy.on_up(host) + + def on_down(self, host): + for p in self.profiles.values(): + p.load_balancing_policy.on_up(host) + + def on_add(self, host): + for p in self.profiles.values(): + p.load_balancing_policy.on_add(host) + + def on_remove(self, host): + for p in self.profiles.values(): + p.load_balancing_policy.on_remove(host) + class Cluster(object): """ @@ -666,7 +682,8 @@ def __init__(self, address_translator=None, status_event_refresh_window=2, prepare_on_all_hosts=True, - reprepare_on_up=True): + reprepare_on_up=True, + execution_profiles=None): """ ``executor_threads`` defines the number of threads in a pool for handling asynchronous tasks such as extablishing connection pools or refreshing metadata. @@ -721,9 +738,16 @@ def __init__(self, self.connection_class = connection_class self.profile_manager = ProfileManager() - self.profile_manager.profiles[None] = ExecutionProfile(self.load_balancing_policy, self.default_retry_policy, - Session.default_consistency_level, Session.default_timeout, + self.profile_manager.profiles[None] = ExecutionProfile(self.load_balancing_policy, + self.default_retry_policy, + Session.default_consistency_level, + Session.default_serial_consistency_level, + Session.default_timeout, Session.row_factory) + # TODO: validate value types + if execution_profiles: + self.profile_manager.profiles.update(execution_profiles) + self.metrics_enabled = metrics_enabled self.ssl_options = ssl_options @@ -851,6 +875,10 @@ def add_execution_profile(self, name, profile): if name is None: raise ValueError("'None' as profile name is reserved for the default profile") self.profile_manager.profiles[name] = profile + profile.load_balancing_policy.populate(self, self.metadata.all_hosts()) + # todo: refacto + for session in self.sessions: + session.update_created_pools() def get_min_requests_per_connection(self, host_distance): return self._min_requests_per_connection[host_distance] @@ -1020,7 +1048,7 @@ def connect(self, keyspace=None): for listener in self.listeners: listener.on_add(host) - self.load_balancing_policy.populate( + self.profile_manager.populate( weakref.proxy(self), self.metadata.all_hosts()) try: @@ -1032,7 +1060,7 @@ def connect(self, keyspace=None): self.shutdown() raise - self.load_balancing_policy.check_supported() + self.profile_manager.check_supported() #todo: rename this method if self.idle_heartbeat_interval: self._idle_heartbeat = ConnectionHeartbeat(self.idle_heartbeat_interval, self.get_connection_holders) @@ -1096,7 +1124,7 @@ def _session_register_user_types(self, session): session.user_type_registered(keyspace, udt_name, klass) def _cleanup_failed_on_up_handling(self, host): - self.load_balancing_policy.on_down(host) + self.profile_manager.on_down(host) self.control_connection.on_down(host) for session in self.sessions: session.remove_pool(host) @@ -1177,7 +1205,7 @@ def on_up(self, host): session.remove_pool(host) log.debug("Signalling to load balancing policy that host %s is up", host) - self.load_balancing_policy.on_up(host) + self.profile_manager.on_up(host) log.debug("Signalling to control connection that host %s is up", host) self.control_connection.on_up(host) @@ -1211,7 +1239,7 @@ def on_up(self, host): return futures def _start_reconnector(self, host, is_host_addition): - if self.load_balancing_policy.distance(host) == HostDistance.IGNORED: + if self.profile_manager.distance(host) == HostDistance.IGNORED: return schedule = self.reconnection_policy.new_schedule() @@ -1250,7 +1278,7 @@ def on_down(self, host, is_host_addition, expect_host_to_be_down=False): log.warning("Host %s has been marked down", host) - self.load_balancing_policy.on_down(host) + self.profile_manager.on_down(host) self.control_connection.on_down(host) for session in self.sessions: session.on_down(host) @@ -1266,12 +1294,12 @@ def on_add(self, host, refresh_nodes=True): log.debug("Handling new host %r and notifying listeners", host) - distance = self.load_balancing_policy.distance(host) + distance = self.profile_manager.distance(host) if distance != HostDistance.IGNORED: self._prepare_all_queries(host) log.debug("Done preparing queries for new host %r", host) - self.load_balancing_policy.on_add(host) + self.profile_manager.on_add(host) self.control_connection.on_add(host, refresh_nodes) if distance == HostDistance.IGNORED: @@ -1336,7 +1364,7 @@ def on_remove(self, host): log.debug("Removing host %s", host) host.set_down() - self.load_balancing_policy.on_remove(host) + self.profile_manager.on_remove(host) for session in self.sessions: session.on_remove(host) for listener in self.listeners: @@ -1742,7 +1770,7 @@ def __init__(self, cluster, hosts): for future in futures: future.result() - def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_payload=None): + def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_payload=None, execution_profile=None): """ Execute the given query and synchronously wait for the response. @@ -1770,9 +1798,9 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_ If `query` is a Statement with its own custom_payload. The message payload will be a union of the two, with the values specified here taking precedence. """ - return self.execute_async(query, parameters, trace, custom_payload, timeout).result() + return self.execute_async(query, parameters, trace, custom_payload, timeout, execution_profile).result() - def execute_async(self, query, parameters=None, trace=False, custom_payload=None, timeout=_NOT_SET): + def execute_async(self, query, parameters=None, trace=False, custom_payload=None, timeout=_NOT_SET, execution_profile=None): """ Execute the given query and return a :class:`~.ResponseFuture` object which callbacks may be attached to for asynchronous response @@ -1817,26 +1845,38 @@ def execute_async(self, query, parameters=None, trace=False, custom_payload=None ... log.exception("Operation failed:") """ - if timeout is _NOT_SET: - timeout = self.default_timeout - - future = self._create_response_future(query, parameters, trace, custom_payload, timeout) + future = self._create_response_future(query, parameters, trace, custom_payload, timeout, execution_profile) future._protocol_handler = self.client_protocol_handler future.send_request() return future - def _create_response_future(self, query, parameters, trace, custom_payload, timeout): + def _create_response_future(self, query, parameters, trace, custom_payload, timeout, execution_profile): """ Returns the ResponseFuture before calling send_request() on it """ prepared_statement = None + profiles = self.cluster.profile_manager.profiles + try: + exec_profile = execution_profile if isinstance(execution_profile, ExecutionProfile) else profiles[execution_profile] + except KeyError: + raise ValueError("Invalid execution_profile: '%s'; valid profiles are %s" % (execution_profile, profiles.keys())) + + # TODO: right now there is no guard making legacy config mutually exclusive + if timeout is _NOT_SET: + timeout = exec_profile.request_timeout + if isinstance(query, six.string_types): query = SimpleStatement(query) elif isinstance(query, PreparedStatement): query = query.bind(parameters) - cl = query.consistency_level if query.consistency_level is not None else self.default_consistency_level - serial_cl = query.serial_consistency_level if query.serial_consistency_level is not None else self.default_serial_consistency_level + # TODO: why do some things come on the statement, and others through the execution? + cl = query.consistency_level if query.consistency_level is not None else exec_profile.consistency_level + serial_cl = query.serial_consistency_level if query.serial_consistency_level is not None else exec_profile.serial_consistency_level + + retry_policy = query.retry_policy or exec_profile.retry_policy + row_factory = exec_profile.row_factory + load_balancing_policy = exec_profile.load_balancing_policy fetch_size = query.fetch_size if fetch_size is FETCH_SIZE_UNSET and self._protocol_version >= 2: @@ -1879,7 +1919,7 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time return ResponseFuture( self, message, query, timeout, metrics=self._metrics, - prepared_statement=prepared_statement) + prepared_statement=prepared_statement, retry_policy=retry_policy, row_factory=row_factory, load_balancer=load_balancing_policy) def prepare(self, query, custom_payload=None): """ @@ -2916,6 +2956,9 @@ class ResponseFuture(object): message = None default_timeout = None + _retry_policy = None + _load_balancer = None + _req_id = None _final_result = _NOT_SET _col_names = None @@ -2937,12 +2980,16 @@ class ResponseFuture(object): _warned_timeout = False - def __init__(self, session, message, query, timeout, metrics=None, prepared_statement=None): + def __init__(self, session, message, query, timeout, metrics=None, prepared_statement=None, + retry_policy=RetryPolicy(), row_factory=None, load_balancer=None): self.session = session - self.row_factory = session.row_factory + # TODO: normalize handling of retry policy and row factory + self.row_factory = row_factory or session.row_factory + self._load_balancer = load_balancer or session._load_balancer self.message = message self.query = query self.timeout = timeout + self._retry_policy = retry_policy self._metrics = metrics self.prepared_statement = prepared_statement self._callback_lock = Lock() @@ -2978,8 +3025,7 @@ def _make_query_plan(self): # convert the list/generator/etc to an iterator so that subsequent # calls to send_request (which retries may do) will resume where # they last left off - self.query_plan = iter(self.session._load_balancer.make_query_plan( - self.session.keyspace, self.query)) + self.query_plan = iter(self._load_balancer.make_query_plan(self.session.keyspace, self.query)) def send_request(self): """ Internal """ @@ -3160,11 +3206,7 @@ def _set_result(self, response): results = self.row_factory(*results) self._set_final_result(results) elif isinstance(response, ErrorMessage): - retry_policy = None - if self.query: - retry_policy = self.query.retry_policy - if not retry_policy: - retry_policy = self.session.cluster.default_retry_policy + retry_policy = self._retry_policy if isinstance(response, ReadTimeoutErrorMessage): if self._metrics is not None: From 0fed8e2eb7d44f82ccae8658b01505356d2a115f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 24 May 2016 15:05:21 -0500 Subject: [PATCH 0669/2005] quick change to define the default profile key --- cassandra/cluster.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 14600fd4d1..5751834588 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -260,6 +260,7 @@ def on_remove(self, host): for p in self.profiles.values(): p.load_balancing_policy.on_remove(host) +PROFILE_DEFAULT = None class Cluster(object): """ @@ -738,7 +739,8 @@ def __init__(self, self.connection_class = connection_class self.profile_manager = ProfileManager() - self.profile_manager.profiles[None] = ExecutionProfile(self.load_balancing_policy, + # TODO: propagate profile_default to exec method + self.profile_manager.profiles[PROFILE_DEFAULT] = ExecutionProfile(self.load_balancing_policy, self.default_retry_policy, Session.default_consistency_level, Session.default_serial_consistency_level, From 3462850230d2135802996421602e5f024883fccd Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 25 May 2016 11:38:43 -0500 Subject: [PATCH 0670/2005] EXEC_PROFILE_DEFAULT as an object --- cassandra/cluster.py | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 5751834588..1d5fd66956 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -260,7 +260,9 @@ def on_remove(self, host): for p in self.profiles.values(): p.load_balancing_policy.on_remove(host) -PROFILE_DEFAULT = None + +EXEC_PROFILE_DEFAULT = object() + class Cluster(object): """ @@ -739,13 +741,12 @@ def __init__(self, self.connection_class = connection_class self.profile_manager = ProfileManager() - # TODO: propagate profile_default to exec method - self.profile_manager.profiles[PROFILE_DEFAULT] = ExecutionProfile(self.load_balancing_policy, - self.default_retry_policy, - Session.default_consistency_level, - Session.default_serial_consistency_level, - Session.default_timeout, - Session.row_factory) + self.profile_manager.profiles[EXEC_PROFILE_DEFAULT] = ExecutionProfile(self.load_balancing_policy, + self.default_retry_policy, + Session.default_consistency_level, + Session.default_serial_consistency_level, + Session.default_timeout, + Session.row_factory) # TODO: validate value types if execution_profiles: self.profile_manager.profiles.update(execution_profiles) @@ -1772,7 +1773,7 @@ def __init__(self, cluster, hosts): for future in futures: future.result() - def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_payload=None, execution_profile=None): + def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_payload=None, execution_profile=EXEC_PROFILE_DEFAULT): """ Execute the given query and synchronously wait for the response. @@ -1802,7 +1803,7 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_ """ return self.execute_async(query, parameters, trace, custom_payload, timeout, execution_profile).result() - def execute_async(self, query, parameters=None, trace=False, custom_payload=None, timeout=_NOT_SET, execution_profile=None): + def execute_async(self, query, parameters=None, trace=False, custom_payload=None, timeout=_NOT_SET, execution_profile=EXEC_PROFILE_DEFAULT): """ Execute the given query and return a :class:`~.ResponseFuture` object which callbacks may be attached to for asynchronous response @@ -1852,7 +1853,7 @@ def execute_async(self, query, parameters=None, trace=False, custom_payload=None future.send_request() return future - def _create_response_future(self, query, parameters, trace, custom_payload, timeout, execution_profile): + def _create_response_future(self, query, parameters, trace, custom_payload, timeout, execution_profile=EXEC_PROFILE_DEFAULT): """ Returns the ResponseFuture before calling send_request() on it """ prepared_statement = None From 842a4cb30112b92344c150855fee03ff95219de5 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 25 May 2016 12:42:12 -0500 Subject: [PATCH 0671/2005] Fix ResponseFuture test to work with conf profiles --- tests/unit/test_response_future.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index 8e047d91ae..ba94e1c30b 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -163,11 +163,11 @@ def test_unavailable_error_message(self): def test_retry_policy_says_ignore(self): session = self.make_session() query = SimpleStatement("INSERT INFO foo (a, b) VALUES (1, 2)") - query.retry_policy = Mock() - query.retry_policy.on_unavailable.return_value = (RetryPolicy.IGNORE, None) message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) - rf = ResponseFuture(session, message, query, 1) + retry_policy = Mock() + retry_policy.on_unavailable.return_value = (RetryPolicy.IGNORE, None) + rf = ResponseFuture(session, message, query, 1, retry_policy=retry_policy) rf.send_request() result = Mock(spec=UnavailableErrorMessage, info={}) @@ -179,14 +179,15 @@ def test_retry_policy_says_retry(self): pool = session._pools.get.return_value query = SimpleStatement("INSERT INFO foo (a, b) VALUES (1, 2)") - query.retry_policy = Mock() - query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETRY, ConsistencyLevel.ONE) message = QueryMessage(query=query, consistency_level=ConsistencyLevel.QUORUM) connection = Mock(spec=Connection) pool.borrow_connection.return_value = (connection, 1) - rf = ResponseFuture(session, message, query, 1) + retry_policy = Mock() + retry_policy.on_unavailable.return_value = (RetryPolicy.RETRY, ConsistencyLevel.ONE) + + rf = ResponseFuture(session, message, query, 1, retry_policy=retry_policy) rf.send_request() rf.session._pools.get.assert_called_once_with('ip1') @@ -399,11 +400,11 @@ def test_multiple_errbacks(self): pool.borrow_connection.return_value = (connection, 1) query = SimpleStatement("INSERT INFO foo (a, b) VALUES (1, 2)") - query.retry_policy = Mock() - query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETHROW, None) message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) - rf = ResponseFuture(session, message, query, 1) + retry_policy = Mock() + retry_policy.on_unavailable.return_value = (RetryPolicy.RETHROW, None) + rf = ResponseFuture(session, message, query, 1, retry_policy=retry_policy) rf.send_request() callback = Mock() From af59c56bce566d3f4d556b2b749fdc9195ef21d9 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 31 May 2016 16:01:59 -0500 Subject: [PATCH 0672/2005] disallow concurrent use of legacy config and profiles PYTHON-569 --- cassandra/cluster.py | 200 ++++++++++++++++++++++----------- docs/api/cassandra/cluster.rst | 4 + tests/unit/test_cluster.py | 7 +- 3 files changed, 142 insertions(+), 69 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 1d5fd66956..3314b7d82d 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -264,6 +264,12 @@ def on_remove(self, host): EXEC_PROFILE_DEFAULT = object() +class _ConfigMode(object): + UNCOMMITTED = 0 + LEGACY = 1 + PROFILES = 2 + + class Cluster(object): """ The main class to use when interacting with a Cassandra cluster. @@ -416,12 +422,23 @@ def auth_provider(self, value): a max delay of ten minutes. """ - default_retry_policy = RetryPolicy() - """ - A default :class:`.policies.RetryPolicy` instance to use for all - :class:`.Statement` objects which do not have a :attr:`~.Statement.retry_policy` - explicitly set. - """ + + _default_retry_policy = RetryPolicy() + @property + def default_retry_policy(self): + """ + A default :class:`.policies.RetryPolicy` instance to use for all + :class:`.Statement` objects which do not have a :attr:`~.Statement.retry_policy` + explicitly set. + """ + return self._default_retry_policy + + @default_retry_policy.setter + def default_retry_policy(self, policy): + if self._config_mode == _ConfigMode.PROFILES: + raise ValueError("Cannot set Cluster.default_retry_policy while using Configuration Profiles. Set this in a profile instead.") + self._default_retry_policy = policy + self._config_mode = _ConfigMode.LEGACY conviction_policy_factory = SimpleConvictionPolicy """ @@ -639,6 +656,7 @@ def token_metadata_enabled(self, enabled): self.control_connection._token_meta_enabled = bool(enabled) profile_manager = None + _config_mode = _ConfigMode.UNCOMMITTED sessions = None control_connection = None @@ -743,14 +761,20 @@ def __init__(self, self.profile_manager = ProfileManager() self.profile_manager.profiles[EXEC_PROFILE_DEFAULT] = ExecutionProfile(self.load_balancing_policy, self.default_retry_policy, - Session.default_consistency_level, - Session.default_serial_consistency_level, - Session.default_timeout, - Session.row_factory) - # TODO: validate value types - if execution_profiles: - self.profile_manager.profiles.update(execution_profiles) - + Session._default_consistency_level, + Session._default_serial_consistency_level, + Session._default_timeout, + Session._row_factory) + # legacy mode if either of these is not default + if load_balancing_policy or default_retry_policy: + if execution_profiles: + raise ValueError("Clusters constructed with execution_profiles should not specify legacy parameters " + "load_balancing_policy or default_retry_policy. Configure this in a profile instead.") + self._config_mode = _ConfigMode.LEGACY + else: + if execution_profiles: + self.profile_manager.profiles.update(execution_profiles) + self._config_mode = _ConfigMode.PROFILES self.metrics_enabled = metrics_enabled self.ssl_options = ssl_options @@ -1627,54 +1651,83 @@ class Session(object): keyspace = None is_shutdown = False - row_factory = staticmethod(named_tuple_factory) - """ - The format to return row results in. By default, each - returned row will be a named tuple. You can alternatively - use any of the following: + _row_factory = staticmethod(named_tuple_factory) + @property + def row_factory(self): + """ + The format to return row results in. By default, each + returned row will be a named tuple. You can alternatively + use any of the following: - - :func:`cassandra.query.tuple_factory` - return a result row as a tuple - - :func:`cassandra.query.named_tuple_factory` - return a result row as a named tuple - - :func:`cassandra.query.dict_factory` - return a result row as a dict - - :func:`cassandra.query.ordered_dict_factory` - return a result row as an OrderedDict + - :func:`cassandra.query.tuple_factory` - return a result row as a tuple + - :func:`cassandra.query.named_tuple_factory` - return a result row as a named tuple + - :func:`cassandra.query.dict_factory` - return a result row as a dict + - :func:`cassandra.query.ordered_dict_factory` - return a result row as an OrderedDict - """ + """ + return self._row_factory - default_timeout = 10.0 - """ - A default timeout, measured in seconds, for queries executed through - :meth:`.execute()` or :meth:`.execute_async()`. This default may be - overridden with the `timeout` parameter for either of those methods. + @row_factory.setter + def row_factory(self, rf): + self._validate_set_legacy_config('row_factory', rf) - Setting this to :const:`None` will cause no timeouts to be set by default. + _default_timeout = 10.0 + @property + def default_timeout(self): + """ + A default timeout, measured in seconds, for queries executed through + :meth:`.execute()` or :meth:`.execute_async()`. This default may be + overridden with the `timeout` parameter for either of those methods. - Please see :meth:`.ResponseFuture.result` for details on the scope and - effect of this timeout. + Setting this to :const:`None` will cause no timeouts to be set by default. - .. versionadded:: 2.0.0 - """ + Please see :meth:`.ResponseFuture.result` for details on the scope and + effect of this timeout. - default_consistency_level = ConsistencyLevel.LOCAL_ONE - """ - The default :class:`~ConsistencyLevel` for operations executed through - this session. This default may be overridden by setting the - :attr:`~.Statement.consistency_level` on individual statements. + .. versionadded:: 2.0.0 + """ + return self._default_timeout - .. versionadded:: 1.2.0 + @default_timeout.setter + def default_timeout(self, timeout): + self._validate_set_legacy_config('default_timeout', timeout) - .. versionchanged:: 3.0.0 + _default_consistency_level = ConsistencyLevel.LOCAL_ONE + @property + def default_consistency_level(self): + """ + The default :class:`~ConsistencyLevel` for operations executed through + this session. This default may be overridden by setting the + :attr:`~.Statement.consistency_level` on individual statements. - default changed from ONE to LOCAL_ONE - """ + .. versionadded:: 1.2.0 - default_serial_consistency_level = None - """ - The default :class:`~ConsistencyLevel` for serial phase of conditional updates executed through - this session. This default may be overridden by setting the - :attr:`~.Statement.serial_consistency_level` on individual statements. + .. versionchanged:: 3.0.0 - Only valid for ``protocol_version >= 2``. - """ + default changed from ONE to LOCAL_ONE + """ + return self._default_consistency_level + + @default_consistency_level.setter + def default_consistency_level(self, cl): + self._validate_set_legacy_config('default_consistency_level', cl) + + + _default_serial_consistency_level = None + @property + def default_serial_consistency_level(self): + """ + The default :class:`~ConsistencyLevel` for serial phase of conditional updates executed through + this session. This default may be overridden by setting the + :attr:`~.Statement.serial_consistency_level` on individual statements. + + Only valid for ``protocol_version >= 2``. + """ + return self._default_serial_consistency_level + + @default_serial_consistency_level.setter + def default_serial_consistency_level(self, cl): + self._validate_set_legacy_config('default_serial_consistency_level', cl) max_trace_wait = 2.0 """ @@ -1858,28 +1911,37 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time prepared_statement = None - profiles = self.cluster.profile_manager.profiles - try: - exec_profile = execution_profile if isinstance(execution_profile, ExecutionProfile) else profiles[execution_profile] - except KeyError: - raise ValueError("Invalid execution_profile: '%s'; valid profiles are %s" % (execution_profile, profiles.keys())) - - # TODO: right now there is no guard making legacy config mutually exclusive - if timeout is _NOT_SET: - timeout = exec_profile.request_timeout - if isinstance(query, six.string_types): query = SimpleStatement(query) elif isinstance(query, PreparedStatement): query = query.bind(parameters) - # TODO: why do some things come on the statement, and others through the execution? - cl = query.consistency_level if query.consistency_level is not None else exec_profile.consistency_level - serial_cl = query.serial_consistency_level if query.serial_consistency_level is not None else exec_profile.serial_consistency_level + if self.cluster._config_mode == _ConfigMode.LEGACY: + if timeout is _NOT_SET: + timeout = self.default_timeout + + cl = query.consistency_level if query.consistency_level is not None else self.default_consistency_level + serial_cl = query.serial_consistency_level if query.serial_consistency_level is not None else self.default_serial_consistency_level - retry_policy = query.retry_policy or exec_profile.retry_policy - row_factory = exec_profile.row_factory - load_balancing_policy = exec_profile.load_balancing_policy + retry_policy = query.retry_policy or self.cluster.default_retry_policy + row_factory = self.row_factory + load_balancing_policy = self.cluster.load_balancing_policy + else: + profiles = self.cluster.profile_manager.profiles + try: + exec_profile = execution_profile if isinstance(execution_profile, ExecutionProfile) else profiles[execution_profile] + except KeyError: + raise ValueError("Invalid execution_profile: '%s'; valid profiles are %s" % (execution_profile, profiles.keys())) + + if timeout is _NOT_SET: + timeout = exec_profile.request_timeout + + cl = query.consistency_level if query.consistency_level is not None else exec_profile.consistency_level + serial_cl = query.serial_consistency_level if query.serial_consistency_level is not None else exec_profile.serial_consistency_level + + retry_policy = query.retry_policy or exec_profile.retry_policy + row_factory = exec_profile.row_factory + load_balancing_policy = exec_profile.load_balancing_policy fetch_size = query.fetch_size if fetch_size is FETCH_SIZE_UNSET and self._protocol_version >= 2: @@ -2194,6 +2256,12 @@ def get_pool_state(self): def get_pools(self): return self._pools.values() + def _validate_set_legacy_config(self, attr_name, value): + if self.cluster._config_mode == _ConfigMode.PROFILES: + raise ValueError("Cannot set Session.%s while using Configuration Profiles. Set this in a profile instead." %(attr_name,)) + setattr(self, '_' + attr_name, value) + self.cluster._config_mode = _ConfigMode.LEGACY + class UserTypeDoesNotExist(Exception): """ diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index df0d58ab58..5e0ca04f03 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -22,6 +22,7 @@ .. autoattribute:: reconnection_policy .. autoattribute:: default_retry_policy + :annotation: = .. autoattribute:: conviction_policy_factory @@ -109,13 +110,16 @@ .. autoclass:: Session () .. autoattribute:: default_timeout + :annotation: = 10.0 .. autoattribute:: default_consistency_level :annotation: = LOCAL_ONE .. autoattribute:: default_serial_consistency_level + :annotation: = None .. autoattribute:: row_factory + :annotation: = .. autoattribute:: default_fetch_size diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index a43f52d55d..144d96c370 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -22,7 +22,7 @@ from cassandra import ConsistencyLevel, DriverException, Timeout, Unavailable, RequestExecutionException, ReadTimeout, WriteTimeout, CoordinationFailure, ReadFailure, WriteFailure, FunctionFailure, AlreadyExists,\ InvalidRequest, Unauthorized, AuthenticationFailed, OperationTimedOut, UnsupportedOperation, RequestValidationException, ConfigurationException -from cassandra.cluster import _Scheduler, Session, Cluster +from cassandra.cluster import _Scheduler, Session, Cluster, DefaultExecutionProfile from cassandra.policies import HostDistance from cassandra.query import SimpleStatement @@ -136,16 +136,17 @@ def test_default_serial_consistency_level(self, *_): # default is None self.assertIsNone(s.default_serial_consistency_level) + default_exec_profile = DefaultExecutionProfile() sentinel = 1001 for cl in (None, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL, sentinel): s.default_serial_consistency_level = cl # default is passed through - f = s._create_response_future(query='', parameters=[], trace=False, custom_payload={}, timeout=100) + f = s._create_response_future(query='', parameters=[], trace=False, custom_payload={}, timeout=100, execution_profile=default_exec_profile) self.assertEqual(f.message.serial_consistency_level, cl) # any non-None statement setting takes precedence for cl_override in (ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL): - f = s._create_response_future(SimpleStatement(query_string='', serial_consistency_level=cl_override), parameters=[], trace=False, custom_payload={}, timeout=100) + f = s._create_response_future(SimpleStatement(query_string='', serial_consistency_level=cl_override), parameters=[], trace=False, custom_payload={}, timeout=100, execution_profile=default_exec_profile) self.assertEqual(s.default_serial_consistency_level, cl) self.assertEqual(f.message.serial_consistency_level, cl_override) From eaadf0db4467364fcc7abf2f09b0d8126c00bbe1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 1 Jun 2016 11:54:14 -0500 Subject: [PATCH 0673/2005] simplify RR policy, avoid errors with unpopulated instance --- cassandra/policies.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/cassandra/policies.py b/cassandra/policies.py index 68cd6eb8ba..d926873bc0 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -150,12 +150,11 @@ class RoundRobinPolicy(LoadBalancingPolicy): This load balancing policy is used by default. """ _live_hosts = frozenset(()) + _position = 0 def populate(self, cluster, hosts): self._live_hosts = frozenset(hosts) - if len(hosts) <= 1: - self._position = 0 - else: + if len(hosts) > 1: self._position = randint(0, len(hosts) - 1) def distance(self, host): From a9d2228a420af810a9c6c13fe3376c470aa2e0e9 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 1 Jun 2016 14:52:26 -0500 Subject: [PATCH 0674/2005] Make the base ExecutionProfile ingest default values PYTHON-569 --- cassandra/cluster.py | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 3314b7d82d..5cdc27e2d1 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -204,27 +204,17 @@ class ExecutionProfile(object): request_timeout = None row_factory = None - def __init__(self, load_balancing_policy, retry_policy, consistency_level, - serial_consistency_level, request_timeout, row_factory): - self.load_balancing_policy = load_balancing_policy - self.retry_policy = retry_policy + def __init__(self, load_balancing_policy=None, retry_policy=None, + consistency_level=ConsistencyLevel.LOCAL_ONE, serial_consistency_level=None, + request_timeout=10.0, row_factory=named_tuple_factory): + self.load_balancing_policy = load_balancing_policy or default_lbp_factory() + self.retry_policy = retry_policy or RetryPolicy() self.consistency_level = consistency_level self.serial_consistency_level = serial_consistency_level self.request_timeout = request_timeout self.row_factory = row_factory -class DefaultExecutionProfile(ExecutionProfile): - def __init__(self): - super(DefaultExecutionProfile, self).__init__( - default_lbp_factory(), - RetryPolicy(), - ConsistencyLevel.LOCAL_ONE, - None, - 10, - named_tuple_factory) - - class ProfileManager(object): def __init__(self): From f647ee7b05f942cd120fca9b95d561c7a22dd235 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 1 Jun 2016 14:53:03 -0500 Subject: [PATCH 0675/2005] make add_execution_profile validate config mode PYTHON-569 --- cassandra/cluster.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 5cdc27e2d1..67926f789b 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -889,11 +889,10 @@ def __init__(self, street, zipcode): def add_execution_profile(self, name, profile): if not isinstance(profile, ExecutionProfile): raise TypeError("profile must be an instance of ExecutionProfile") - if name is None: - raise ValueError("'None' as profile name is reserved for the default profile") + if self._config_mode == _ConfigMode.LEGACY: + raise ValueError("Cannot add execution profiles when legacy parameters are set explicitly. TODO: link to doc") self.profile_manager.profiles[name] = profile profile.load_balancing_policy.populate(self, self.metadata.all_hosts()) - # todo: refacto for session in self.sessions: session.update_created_pools() From faedc692c90b445dba1022046caf33e7dde228b8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 1 Jun 2016 15:17:40 -0500 Subject: [PATCH 0676/2005] reject execution_profile if we're in legacy mode PYTHON-569 --- cassandra/cluster.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 67926f789b..fe846ef5a5 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1906,6 +1906,9 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time query = query.bind(parameters) if self.cluster._config_mode == _ConfigMode.LEGACY: + if execution_profile is not EXEC_PROFILE_DEFAULT: + raise ValueError("Cannot specify execution_profile while using legacy parameters.") + if timeout is _NOT_SET: timeout = self.default_timeout From 5d68f8f4db07bf9199e66211045a6fa8c553cf12 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 1 Jun 2016 15:47:55 -0500 Subject: [PATCH 0677/2005] unit tests for config profile and legacy interaction PYTHON-569 --- tests/unit/test_cluster.py | 165 +++++++++++++++++++++++++++++++++++-- 1 file changed, 157 insertions(+), 8 deletions(-) diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 144d96c370..6179c61c01 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -11,20 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - try: import unittest2 as unittest except ImportError: import unittest # noqa +from copy import copy from mock import patch, Mock - from cassandra import ConsistencyLevel, DriverException, Timeout, Unavailable, RequestExecutionException, ReadTimeout, WriteTimeout, CoordinationFailure, ReadFailure, WriteFailure, FunctionFailure, AlreadyExists,\ InvalidRequest, Unauthorized, AuthenticationFailed, OperationTimedOut, UnsupportedOperation, RequestValidationException, ConfigurationException -from cassandra.cluster import _Scheduler, Session, Cluster, DefaultExecutionProfile -from cassandra.policies import HostDistance -from cassandra.query import SimpleStatement +from cassandra.cluster import _Scheduler, Session, Cluster, _NOT_SET, default_lbp_factory, \ + ExecutionProfile, _ConfigMode, EXEC_PROFILE_DEFAULT +from cassandra.policies import HostDistance, RetryPolicy, RoundRobinPolicy, DowngradingConsistencyRetryPolicy +from cassandra.query import SimpleStatement, named_tuple_factory, tuple_factory class ExceptionTypeTest(unittest.TestCase): @@ -136,17 +136,166 @@ def test_default_serial_consistency_level(self, *_): # default is None self.assertIsNone(s.default_serial_consistency_level) - default_exec_profile = DefaultExecutionProfile() sentinel = 1001 for cl in (None, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL, sentinel): s.default_serial_consistency_level = cl # default is passed through - f = s._create_response_future(query='', parameters=[], trace=False, custom_payload={}, timeout=100, execution_profile=default_exec_profile) + f = s._create_response_future(query='', parameters=[], trace=False, custom_payload={}, timeout=100) self.assertEqual(f.message.serial_consistency_level, cl) # any non-None statement setting takes precedence for cl_override in (ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL): - f = s._create_response_future(SimpleStatement(query_string='', serial_consistency_level=cl_override), parameters=[], trace=False, custom_payload={}, timeout=100, execution_profile=default_exec_profile) + f = s._create_response_future(SimpleStatement(query_string='', serial_consistency_level=cl_override), parameters=[], trace=False, custom_payload={}, timeout=100) self.assertEqual(s.default_serial_consistency_level, cl) self.assertEqual(f.message.serial_consistency_level, cl_override) + + +class ExecutionProfileTest(unittest.TestCase): + + def _verify_response_future_profile(self, rf, prof): + self.assertEqual(rf._load_balancer, prof.load_balancing_policy) + self.assertEqual(rf._retry_policy, prof.retry_policy) + self.assertEqual(rf.message.consistency_level, prof.consistency_level) + self.assertEqual(rf.message.serial_consistency_level, prof.serial_consistency_level) + self.assertEqual(rf.timeout, prof.request_timeout) + self.assertEqual(rf.row_factory, prof.row_factory) + + def test_default_exec_parameters(self): + cluster = Cluster() + self.assertEqual(cluster._config_mode, _ConfigMode.UNCOMMITTED) + self.assertEqual(cluster.load_balancing_policy.__class__, default_lbp_factory().__class__) + self.assertEqual(cluster.default_retry_policy.__class__, RetryPolicy) + session = Session(cluster, hosts=[]) + self.assertEqual(session.default_timeout, 10.0) + self.assertEqual(session.default_consistency_level, ConsistencyLevel.LOCAL_ONE) + self.assertEqual(session.default_serial_consistency_level, None) + self.assertEqual(session.row_factory, named_tuple_factory) + + def test_default_legacy(self): + cluster = Cluster(load_balancing_policy=RoundRobinPolicy(), default_retry_policy=DowngradingConsistencyRetryPolicy()) + self.assertEqual(cluster._config_mode, _ConfigMode.LEGACY) + session = Session(cluster, hosts=[]) + session.default_timeout = 3.7 + session.default_consistency_level = ConsistencyLevel.ALL + session.default_serial_consistency_level = ConsistencyLevel.SERIAL + # _create_response_future(self, query, parameters, trace, custom_payload, timeout, execution_profile=EXEC_PROFILE_DEFAULT): + rf = session._create_response_future("query", [], False, {}, _NOT_SET) + expected_profile = ExecutionProfile(cluster.load_balancing_policy, cluster.default_retry_policy, + session.default_consistency_level, session.default_serial_consistency_level, + session.default_timeout, session.row_factory) + self._verify_response_future_profile(rf, expected_profile) + + def test_default_profile(self): + non_default_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(5)]) + cluster = Cluster(execution_profiles={'non-default': non_default_profile}) + session = Session(cluster, hosts=[]) + + self.assertEqual(cluster._config_mode, _ConfigMode.PROFILES) + + default_profile = cluster.profile_manager.profiles[EXEC_PROFILE_DEFAULT] + rf = session._create_response_future("query", [], False, {}, _NOT_SET) + self._verify_response_future_profile(rf, default_profile) + + rf = session._create_response_future("query", [], False, {}, _NOT_SET, 'non-default') + self._verify_response_future_profile(rf, non_default_profile) + + def test_statement_params_override_legacy(self): + cluster = Cluster(load_balancing_policy=RoundRobinPolicy(), default_retry_policy=DowngradingConsistencyRetryPolicy()) + self.assertEqual(cluster._config_mode, _ConfigMode.LEGACY) + session = Session(cluster, hosts=[]) + + ss = SimpleStatement("query", retry_policy=DowngradingConsistencyRetryPolicy(), + consistency_level=ConsistencyLevel.ALL, serial_consistency_level=ConsistencyLevel.SERIAL) + my_timeout = 1.1234 + + self.assertNotEqual(ss.retry_policy.__class__, cluster.default_retry_policy) + self.assertNotEqual(ss.consistency_level, session.default_consistency_level) + self.assertNotEqual(ss._serial_consistency_level, session.default_serial_consistency_level) + self.assertNotEqual(my_timeout, session.default_timeout) + + rf = session._create_response_future(ss, [], False, {}, my_timeout) + expected_profile = ExecutionProfile(load_balancing_policy=cluster.load_balancing_policy, retry_policy=ss.retry_policy, + request_timeout=my_timeout, consistency_level=ss.consistency_level, + serial_consistency_level=ss._serial_consistency_level) + self._verify_response_future_profile(rf, expected_profile) + + def test_statement_params_override_profile(self): + non_default_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(5)]) + cluster = Cluster(execution_profiles={'non-default': non_default_profile}) + session = Session(cluster, hosts=[]) + + self.assertEqual(cluster._config_mode, _ConfigMode.PROFILES) + + rf = session._create_response_future("query", [], False, {}, _NOT_SET, 'non-default') + + ss = SimpleStatement("query", retry_policy=DowngradingConsistencyRetryPolicy(), + consistency_level=ConsistencyLevel.ALL, serial_consistency_level=ConsistencyLevel.SERIAL) + my_timeout = 1.1234 + + self.assertNotEqual(ss.retry_policy.__class__, rf._load_balancer.__class__) + self.assertNotEqual(ss.consistency_level, rf.message.consistency_level) + self.assertNotEqual(ss._serial_consistency_level, rf.message.serial_consistency_level) + self.assertNotEqual(my_timeout, rf.timeout) + + rf = session._create_response_future(ss, [], False, {}, my_timeout, 'non-default') + expected_profile = ExecutionProfile(non_default_profile.load_balancing_policy, ss.retry_policy, + ss.consistency_level, ss._serial_consistency_level, my_timeout, non_default_profile.row_factory) + self._verify_response_future_profile(rf, expected_profile) + + def test_no_profile_with_legacy(self): + # don't construct with both + self.assertRaises(ValueError, Cluster, load_balancing_policy=RoundRobinPolicy(), execution_profiles={'a': ExecutionProfile()}) + self.assertRaises(ValueError, Cluster, default_retry_policy=DowngradingConsistencyRetryPolicy(), execution_profiles={'a': ExecutionProfile()}) + self.assertRaises(ValueError, Cluster, load_balancing_policy=RoundRobinPolicy(), + default_retry_policy=DowngradingConsistencyRetryPolicy(), execution_profiles={'a': ExecutionProfile()}) + + # can't add after + cluster = Cluster(load_balancing_policy=RoundRobinPolicy()) + self.assertRaises(ValueError, cluster.add_execution_profile, 'name', ExecutionProfile()) + + # session settings lock out profiles + cluster = Cluster() + session = Session(cluster, hosts=[]) + for attr, value in (('default_timeout', 1), + ('default_consistency_level', ConsistencyLevel.ANY), + ('default_serial_consistency_level', ConsistencyLevel.SERIAL), + ('row_factory', tuple_factory)): + cluster._config_mode = _ConfigMode.UNCOMMITTED + setattr(session, attr, value) + self.assertRaises(ValueError, cluster.add_execution_profile, 'name', ExecutionProfile()) + + # don't accept profile + self.assertRaises(ValueError, session._create_response_future, "query", [], False, {}, _NOT_SET, execution_profile='some name here') + + def test_no_legacy_with_profile(self): + cluster_init = Cluster(execution_profiles={'name': ExecutionProfile()}) + cluster_add = Cluster() + cluster_add.add_execution_profile('name', ExecutionProfile()) + # for clusters with profiles added either way... + for c in (cluster_init, cluster_init): + # don't allow legacy parameters set + with self.assertRaises(ValueError): + c.default_retry_policy = RetryPolicy() + # lbp is not guarded because it would never have worked + # TODO: guard? + session = Session(c, hosts=[]) + for attr, value in (('default_timeout', 1), + ('default_consistency_level', ConsistencyLevel.ANY), + ('default_serial_consistency_level', ConsistencyLevel.SERIAL), + ('row_factory', tuple_factory)): + self.assertRaises(ValueError, setattr, session, attr, value) + + def test_profile_name_value(self): + + internalized_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(5)]) + cluster = Cluster(execution_profiles={'by-name': internalized_profile}) + session = Session(cluster, hosts=[]) + self.assertEqual(cluster._config_mode, _ConfigMode.PROFILES) + + rf = session._create_response_future("query", [], False, {}, _NOT_SET, 'by-name') + self._verify_response_future_profile(rf, internalized_profile) + + by_value = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(5)]) + rf = session._create_response_future("query", [], False, {}, _NOT_SET, by_value) + self._verify_response_future_profile(rf, by_value) From 21309fa00c335fc083929a4319649b42a6d054b4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 1 Jun 2016 15:57:32 -0500 Subject: [PATCH 0678/2005] guard Cluster.lbp in the same way as other legacy params It would not have worked naturally to set this after init pools would not be updated implicitly, but there would be ways of actually doing it. PYTHON-569 --- cassandra/cluster.py | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index fe846ef5a5..52927e2030 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -390,20 +390,30 @@ def auth_provider(self, value): self._auth_provider = value - load_balancing_policy = None - """ - An instance of :class:`.policies.LoadBalancingPolicy` or - one of its subclasses. + _load_balancing_policy = None + @property + def load_balancing_policy(self): + """ + An instance of :class:`.policies.LoadBalancingPolicy` or + one of its subclasses. - .. versionchanged:: 2.6.0 + .. versionchanged:: 2.6.0 - Defaults to :class:`~.TokenAwarePolicy` (:class:`~.DCAwareRoundRobinPolicy`). - when using CPython (where the murmur3 extension is available). :class:`~.DCAwareRoundRobinPolicy` - otherwise. Default local DC will be chosen from contact points. + Defaults to :class:`~.TokenAwarePolicy` (:class:`~.DCAwareRoundRobinPolicy`). + when using CPython (where the murmur3 extension is available). :class:`~.DCAwareRoundRobinPolicy` + otherwise. Default local DC will be chosen from contact points. - **Please see** :class:`~.DCAwareRoundRobinPolicy` **for a discussion on default behavior with respect to - DC locality and remote nodes.** - """ + **Please see** :class:`~.DCAwareRoundRobinPolicy` **for a discussion on default behavior with respect to + DC locality and remote nodes.** + """ + return self._load_balancing_policy + + @load_balancing_policy.setter + def load_balancing_policy(self, lbp): + if self._config_mode == _ConfigMode.PROFILES: + raise ValueError("Cannot set Cluster.load_balancing_policy while using Configuration Profiles. Set this in a profile instead.") + self._load_balancing_policy = lbp + self._config_mode = _ConfigMode.LEGACY reconnection_policy = ExponentialReconnectionPolicy(1.0, 600.0) """ @@ -723,7 +733,7 @@ def __init__(self, raise TypeError("load_balancing_policy should not be a class, it should be an instance of that class") self.load_balancing_policy = load_balancing_policy else: - self.load_balancing_policy = default_lbp_factory() + self._load_balancing_policy = default_lbp_factory() # set internal attribute to avoid committing to legacy config mode if reconnection_policy is not None: if isinstance(reconnection_policy, type): From 599cacc5322e2b4681d90022c57096601200a24e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 1 Jun 2016 16:13:31 -0500 Subject: [PATCH 0679/2005] simplify cluster unit tests and cover more execution path PYTHON-569 --- tests/unit/test_cluster.py | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 6179c61c01..6f4dd64d96 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -123,7 +123,6 @@ def test_event_delay_timing(self, *_): class SessionTest(unittest.TestCase): # TODO: this suite could be expanded; for now just adding a test covering a PR - @patch('cassandra.cluster.ResponseFuture._make_query_plan') def test_default_serial_consistency_level(self, *_): """ Make sure default_serial_consistency_level passes through to a query message. @@ -131,7 +130,7 @@ def test_default_serial_consistency_level(self, *_): PR #510 """ - s = Session(Mock(protocol_version=4), []) + s = Session(Cluster(protocol_version=4), []) # default is None self.assertIsNone(s.default_serial_consistency_level) @@ -141,12 +140,12 @@ def test_default_serial_consistency_level(self, *_): s.default_serial_consistency_level = cl # default is passed through - f = s._create_response_future(query='', parameters=[], trace=False, custom_payload={}, timeout=100) + f = s.execute_async(query='') self.assertEqual(f.message.serial_consistency_level, cl) # any non-None statement setting takes precedence for cl_override in (ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL): - f = s._create_response_future(SimpleStatement(query_string='', serial_consistency_level=cl_override), parameters=[], trace=False, custom_payload={}, timeout=100) + f = s.execute_async(SimpleStatement(query_string='', serial_consistency_level=cl_override)) self.assertEqual(s.default_serial_consistency_level, cl) self.assertEqual(f.message.serial_consistency_level, cl_override) @@ -179,8 +178,7 @@ def test_default_legacy(self): session.default_timeout = 3.7 session.default_consistency_level = ConsistencyLevel.ALL session.default_serial_consistency_level = ConsistencyLevel.SERIAL - # _create_response_future(self, query, parameters, trace, custom_payload, timeout, execution_profile=EXEC_PROFILE_DEFAULT): - rf = session._create_response_future("query", [], False, {}, _NOT_SET) + rf = session.execute_async("query") expected_profile = ExecutionProfile(cluster.load_balancing_policy, cluster.default_retry_policy, session.default_consistency_level, session.default_serial_consistency_level, session.default_timeout, session.row_factory) @@ -194,10 +192,10 @@ def test_default_profile(self): self.assertEqual(cluster._config_mode, _ConfigMode.PROFILES) default_profile = cluster.profile_manager.profiles[EXEC_PROFILE_DEFAULT] - rf = session._create_response_future("query", [], False, {}, _NOT_SET) + rf = session.execute_async("query") self._verify_response_future_profile(rf, default_profile) - rf = session._create_response_future("query", [], False, {}, _NOT_SET, 'non-default') + rf = session.execute_async("query", execution_profile='non-default') self._verify_response_future_profile(rf, non_default_profile) def test_statement_params_override_legacy(self): @@ -214,7 +212,7 @@ def test_statement_params_override_legacy(self): self.assertNotEqual(ss._serial_consistency_level, session.default_serial_consistency_level) self.assertNotEqual(my_timeout, session.default_timeout) - rf = session._create_response_future(ss, [], False, {}, my_timeout) + rf = session.execute_async(ss, timeout=my_timeout) expected_profile = ExecutionProfile(load_balancing_policy=cluster.load_balancing_policy, retry_policy=ss.retry_policy, request_timeout=my_timeout, consistency_level=ss.consistency_level, serial_consistency_level=ss._serial_consistency_level) @@ -227,7 +225,7 @@ def test_statement_params_override_profile(self): self.assertEqual(cluster._config_mode, _ConfigMode.PROFILES) - rf = session._create_response_future("query", [], False, {}, _NOT_SET, 'non-default') + rf = session.execute_async("query", execution_profile='non-default') ss = SimpleStatement("query", retry_policy=DowngradingConsistencyRetryPolicy(), consistency_level=ConsistencyLevel.ALL, serial_consistency_level=ConsistencyLevel.SERIAL) @@ -238,7 +236,7 @@ def test_statement_params_override_profile(self): self.assertNotEqual(ss._serial_consistency_level, rf.message.serial_consistency_level) self.assertNotEqual(my_timeout, rf.timeout) - rf = session._create_response_future(ss, [], False, {}, my_timeout, 'non-default') + rf = session.execute_async(ss, timeout=my_timeout, execution_profile='non-default') expected_profile = ExecutionProfile(non_default_profile.load_balancing_policy, ss.retry_policy, ss.consistency_level, ss._serial_consistency_level, my_timeout, non_default_profile.row_factory) self._verify_response_future_profile(rf, expected_profile) @@ -266,7 +264,7 @@ def test_no_profile_with_legacy(self): self.assertRaises(ValueError, cluster.add_execution_profile, 'name', ExecutionProfile()) # don't accept profile - self.assertRaises(ValueError, session._create_response_future, "query", [], False, {}, _NOT_SET, execution_profile='some name here') + self.assertRaises(ValueError, session.execute_async, "query", execution_profile='some name here') def test_no_legacy_with_profile(self): cluster_init = Cluster(execution_profiles={'name': ExecutionProfile()}) @@ -293,9 +291,9 @@ def test_profile_name_value(self): session = Session(cluster, hosts=[]) self.assertEqual(cluster._config_mode, _ConfigMode.PROFILES) - rf = session._create_response_future("query", [], False, {}, _NOT_SET, 'by-name') + rf = session.execute_async("query", execution_profile='by-name') self._verify_response_future_profile(rf, internalized_profile) by_value = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(5)]) - rf = session._create_response_future("query", [], False, {}, _NOT_SET, by_value) + rf = session.execute_async("query", execution_profile=by_value) self._verify_response_future_profile(rf, by_value) From 3961f2a9b194d54f99be242bf276b3a84b5863ed Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 1 Jun 2016 16:17:13 -0500 Subject: [PATCH 0680/2005] test legacy lbp rejection PYTHON-569 --- tests/unit/test_cluster.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 6f4dd64d96..4b33f60df2 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -271,13 +271,12 @@ def test_no_legacy_with_profile(self): cluster_add = Cluster() cluster_add.add_execution_profile('name', ExecutionProfile()) # for clusters with profiles added either way... - for c in (cluster_init, cluster_init): + for cluster in (cluster_init, cluster_init): # don't allow legacy parameters set - with self.assertRaises(ValueError): - c.default_retry_policy = RetryPolicy() - # lbp is not guarded because it would never have worked - # TODO: guard? - session = Session(c, hosts=[]) + for attr, value in (('default_retry_policy', RetryPolicy()), + ('load_balancing_policy', default_lbp_factory())): + self.assertRaises(ValueError, setattr, cluster, attr, value) + session = Session(cluster, hosts=[]) for attr, value in (('default_timeout', 1), ('default_consistency_level', ConsistencyLevel.ANY), ('default_serial_consistency_level', ConsistencyLevel.SERIAL), From 33b3cda49118ca42afc6f0ae4c9046181b5d0f08 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 2 Jun 2016 13:02:08 -0500 Subject: [PATCH 0681/2005] make internal signalling use profile_mgr for host distance PYTHON-569 --- cassandra/cluster.py | 43 ++++++++++++++++++++++++++----------------- 1 file changed, 26 insertions(+), 17 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 52927e2030..21785601a2 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -240,7 +240,7 @@ def on_up(self, host): def on_down(self, host): for p in self.profiles.values(): - p.load_balancing_policy.on_up(host) + p.load_balancing_policy.on_down(host) def on_add(self, host): for p in self.profiles.values(): @@ -250,6 +250,13 @@ def on_remove(self, host): for p in self.profiles.values(): p.load_balancing_policy.on_remove(host) + @property + def default(self): + """ + internal-only; no checks are done because this entry is populated on cluster init + """ + return self.profiles[EXEC_PROFILE_DEFAULT] + EXEC_PROFILE_DEFAULT = object() @@ -415,6 +422,10 @@ def load_balancing_policy(self, lbp): self._load_balancing_policy = lbp self._config_mode = _ConfigMode.LEGACY + @property + def _default_load_balancing_policy(self): + return self.profile_manager.default.load_balancing_policy + reconnection_policy = ExponentialReconnectionPolicy(1.0, 600.0) """ An instance of :class:`.policies.ReconnectionPolicy`. Defaults to an instance @@ -422,7 +433,6 @@ def load_balancing_policy(self, lbp): a max delay of ten minutes. """ - _default_retry_policy = RetryPolicy() @property def default_retry_policy(self): @@ -1086,7 +1096,7 @@ def connect(self, keyspace=None): self.shutdown() raise - self.profile_manager.check_supported() #todo: rename this method + self.profile_manager.check_supported() # todo: rename this method if self.idle_heartbeat_interval: self._idle_heartbeat = ConnectionHeartbeat(self.idle_heartbeat_interval, self.get_connection_holders) @@ -1230,7 +1240,7 @@ def on_up(self, host): for session in self.sessions: session.remove_pool(host) - log.debug("Signalling to load balancing policy that host %s is up", host) + log.debug("Signalling to load balancing policies that host %s is up", host) self.profile_manager.on_up(host) log.debug("Signalling to control connection that host %s is up", host) @@ -1711,7 +1721,6 @@ def default_consistency_level(self): def default_consistency_level(self, cl): self._validate_set_legacy_config('default_consistency_level', cl) - _default_serial_consistency_level = None @property def default_serial_consistency_level(self): @@ -1800,7 +1809,7 @@ def default_serial_consistency_level(self, cl): _lock = None _pools = None - _load_balancer = None + _profile_manager = None _metrics = None def __init__(self, cluster, hosts): @@ -1809,7 +1818,7 @@ def __init__(self, cluster, hosts): self._lock = RLock() self._pools = {} - self._load_balancer = cluster.load_balancing_policy + self._profile_manager = cluster.profile_manager self._metrics = cluster.metrics self._protocol_version = self.cluster.protocol_version @@ -2099,7 +2108,7 @@ def add_or_renew_pool(self, host, is_host_addition): """ For internal use only. """ - distance = self._load_balancer.distance(host) + distance = self._profile_manager.distance(host) if distance == HostDistance.IGNORED: return None @@ -2153,7 +2162,7 @@ def update_created_pools(self): For internal use only. """ for host in self.cluster.metadata.all_hosts(): - distance = self._load_balancer.distance(host) + distance = self._profile_manager.distance(host) pool = self._pools.get(host) if not pool or pool.is_shutdown: # we don't eagerly set is_up on previously ignored hosts. None is included here @@ -2260,7 +2269,7 @@ def get_pools(self): def _validate_set_legacy_config(self, attr_name, value): if self.cluster._config_mode == _ConfigMode.PROFILES: - raise ValueError("Cannot set Session.%s while using Configuration Profiles. Set this in a profile instead." %(attr_name,)) + raise ValueError("Cannot set Session.%s while using Configuration Profiles. Set this in a profile instead." % (attr_name,)) setattr(self, '_' + attr_name, value) self.cluster._config_mode = _ConfigMode.LEGACY @@ -2402,7 +2411,7 @@ def _reconnect_internal(self): a connection to that host. """ errors = {} - for host in self._cluster.load_balancing_policy.make_query_plan(): + for host in self._cluster._default_load_balancing_policy.make_query_plan(): try: return self._try_connect(host) except ConnectionException as exc: @@ -2682,9 +2691,9 @@ def _update_location_info(self, host, datacenter, rack): # If the dc/rack information changes, we need to update the load balancing policy. # For that, we remove and re-add the node against the policy. Not the most elegant, and assumes # that the policy will update correctly, but in practice this should work. - self._cluster.load_balancing_policy.on_down(host) + self._cluster.profile_manager.on_down(host) host.set_location_info(datacenter, rack) - self._cluster.load_balancing_policy.on_up(host) + self._cluster.profile_manager.on_up(host) return True def _delay_for_event_type(self, event_type, delay_window): @@ -2821,14 +2830,14 @@ def _get_schema_mismatches(self, peers_result, local_result, local_address): if local_row.get("schema_version"): versions[local_row.get("schema_version")].add(local_address) - lbp = self._cluster.load_balancing_policy + pm = self._cluster.profile_manager for row in peers_result: schema_ver = row.get('schema_version') if not schema_ver: continue addr = self._rpc_from_peer_row(row) peer = self._cluster.metadata.get_host(addr) - if peer and peer.is_up and lbp.distance(peer) != HostDistance.IGNORED: + if peer and peer.is_up and pm.distance(peer) != HostDistance.IGNORED: versions[schema_ver].add(addr) if len(versions) == 1: @@ -3030,7 +3039,7 @@ class ResponseFuture(object): default_timeout = None _retry_policy = None - _load_balancer = None + _profile_manager = None _req_id = None _final_result = _NOT_SET @@ -3058,7 +3067,7 @@ def __init__(self, session, message, query, timeout, metrics=None, prepared_stat self.session = session # TODO: normalize handling of retry policy and row factory self.row_factory = row_factory or session.row_factory - self._load_balancer = load_balancer or session._load_balancer + self._load_balancer = load_balancer or session.cluster._default_load_balancing_policy self.message = message self.query = query self.timeout = timeout From 0138eacdac6de4f26fac5caae2f9c830ac59fdc8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 2 Jun 2016 14:05:39 -0500 Subject: [PATCH 0682/2005] bubble up futures in Session.update_created_pools --- cassandra/cluster.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 21785601a2..39b8a14051 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2161,21 +2161,26 @@ def update_created_pools(self): For internal use only. """ + futures = set() for host in self.cluster.metadata.all_hosts(): distance = self._profile_manager.distance(host) pool = self._pools.get(host) + future = None if not pool or pool.is_shutdown: # we don't eagerly set is_up on previously ignored hosts. None is included here # to allow us to attempt connections to hosts that have gone from ignored to something # else. if distance != HostDistance.IGNORED and host.is_up in (True, None): - self.add_or_renew_pool(host, False) + future = self.add_or_renew_pool(host, False) elif distance != pool.host_distance: # the distance has changed if distance == HostDistance.IGNORED: - self.remove_pool(host) + future = self.remove_pool(host) else: pool.host_distance = distance + if future: + futures.add(future) + return futures def on_down(self, host): """ From 15c8236fef22644061b71fc87bc9f826fdedbdaa Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 2 Jun 2016 14:06:13 -0500 Subject: [PATCH 0683/2005] make add_execution_profile synchronous with pool creation PYTHON-569 --- cassandra/cluster.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 39b8a14051..534106d17e 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -20,7 +20,7 @@ import atexit from collections import defaultdict, Mapping -from concurrent.futures import ThreadPoolExecutor +from concurrent.futures import ThreadPoolExecutor, wait as wait_futures import logging from random import random import socket @@ -906,15 +906,20 @@ def __init__(self, street, zipcode): session.user_type_registered(keyspace, user_type, klass) UserType.evict_udt_class(keyspace, user_type) - def add_execution_profile(self, name, profile): + def add_execution_profile(self, name, profile, pool_wait_timeout=5): if not isinstance(profile, ExecutionProfile): raise TypeError("profile must be an instance of ExecutionProfile") if self._config_mode == _ConfigMode.LEGACY: raise ValueError("Cannot add execution profiles when legacy parameters are set explicitly. TODO: link to doc") self.profile_manager.profiles[name] = profile profile.load_balancing_policy.populate(self, self.metadata.all_hosts()) + futures = set() for session in self.sessions: - session.update_created_pools() + futures.update(session.update_created_pools()) + _, not_done = wait_futures(futures, pool_wait_timeout) + if not_done: + raise OperationTimedOut("Failed to create all new connection pools in the %ss timeout.") + def get_min_requests_per_connection(self, host_distance): return self._min_requests_per_connection[host_distance] From 234f84ae05189256f423c9be447940bbf06c4607 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 2 Jun 2016 14:06:59 -0500 Subject: [PATCH 0684/2005] basic config profile integration tests PYTHON-569 --- tests/integration/standard/test_cluster.py | 75 +++++++++++++++++++++- 1 file changed, 73 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 0bca7bddc6..53ed95e738 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -18,19 +18,20 @@ import unittest # noqa from collections import deque +from copy import copy from mock import patch import time from uuid import uuid4 import logging import cassandra -from cassandra.cluster import Cluster, NoHostAvailable +from cassandra.cluster import Cluster, NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.concurrent import execute_concurrent from cassandra.policies import (RoundRobinPolicy, ExponentialReconnectionPolicy, RetryPolicy, SimpleConvictionPolicy, HostDistance, WhiteListRoundRobinPolicy, AddressTranslator) from cassandra.protocol import MAX_SUPPORTED_VERSION -from cassandra.query import SimpleStatement, TraceUnavailable +from cassandra.query import SimpleStatement, TraceUnavailable, tuple_factory from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, get_node, CASSANDRA_VERSION, execute_until_pass, execute_with_long_wait_retry, get_node, MockLoggingHandler from tests.integration.util import assert_quiescent_pool_state @@ -618,6 +619,76 @@ def test_pool_management(self): cluster.shutdown() + def test_profile_load_balancing(self): + query = "select release_version from system.local" + node1 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1'])) + with Cluster(execution_profiles={'node1': node1}) as cluster: + session = cluster.connect() + + # default is DCA RR for all hosts + expected_hosts = set(cluster.metadata.all_hosts()) + queried_hosts = set() + for _ in expected_hosts: + rs = session.execute(query) + queried_hosts.add(rs.response_future._current_host) + self.assertEqual(queried_hosts, expected_hosts) + + # by name we should only hit the one + expected_hosts = set(h for h in cluster.metadata.all_hosts() if h.address == '127.0.0.1') + queried_hosts = set() + for _ in cluster.metadata.all_hosts(): + rs = session.execute(query, execution_profile='node1') + queried_hosts.add(rs.response_future._current_host) + self.assertEqual(queried_hosts, expected_hosts) + + # use a copied instance and override the row factory + # assert last returned value can be accessed as a namedtuple so we can prove something different + named_tuple_row = rs[0] + self.assertIsInstance(named_tuple_row, tuple) + self.assertTrue(named_tuple_row.release_version) + + tmp_profile = copy(node1) + tmp_profile.row_factory = tuple_factory + queried_hosts = set() + for _ in cluster.metadata.all_hosts(): + rs = session.execute(query, execution_profile=tmp_profile) + queried_hosts.add(rs.response_future._current_host) + self.assertEqual(queried_hosts, expected_hosts) + tuple_row = rs[0] + self.assertIsInstance(tuple_row, tuple) + with self.assertRaises(AttributeError): + tuple_row.release_version + + # make sure original profile is not impacted + self.assertTrue(session.execute(query, execution_profile='node1')[0].release_version) + + def test_profile_pool_management(self): + node1 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1'])) + node2 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.2'])) + with Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: node1, 'node2': node2}) as cluster: + session = cluster.connect() + pools = session.get_pool_state() + # there are more hosts, but we connected to the ones in the lbp aggregate + self.assertGreater(len(cluster.metadata.all_hosts()), 2) + self.assertEqual(set(h.address for h in pools), set(('127.0.0.1', '127.0.0.2'))) + + # dynamically update pools on add + node3 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.3'])) + cluster.add_execution_profile('node3', node3) + pools = session.get_pool_state() + self.assertEqual(set(h.address for h in pools), set(('127.0.0.1', '127.0.0.2', '127.0.0.3'))) + + def test_add_profile_timeout(self): + node1 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1'])) + with Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: node1}) as cluster: + session = cluster.connect() + pools = session.get_pool_state() + self.assertGreater(len(cluster.metadata.all_hosts()), 2) + self.assertEqual(set(h.address for h in pools), set(('127.0.0.1',))) + + node2 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.2'])) + self.assertRaises(cassandra.OperationTimedOut, cluster.add_execution_profile, 'node2', node2, pool_wait_timeout=0.0000001) + class LocalHostAdressTranslator(AddressTranslator): From 4102026a306ccd49c66a7bb8e1e7b93fc0b8a202 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 2 Jun 2016 15:08:29 -0500 Subject: [PATCH 0685/2005] revise unit tests following cluster refactor PYTHON-569 --- tests/unit/test_control_connection.py | 5 +++-- tests/unit/test_response_future.py | 10 +++++----- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index e5a6dcc105..e16562b475 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -22,7 +22,7 @@ from cassandra import OperationTimedOut, SchemaTargetType, SchemaChangeType from cassandra.protocol import ResultMessage, RESULT_KIND_ROWS -from cassandra.cluster import ControlConnection, _Scheduler +from cassandra.cluster import ControlConnection, _Scheduler, ProfileManager, EXEC_PROFILE_DEFAULT, ExecutionProfile from cassandra.pool import Host from cassandra.policies import (SimpleConvictionPolicy, RoundRobinPolicy, ConstantReconnectionPolicy, IdentityTranslator) @@ -59,7 +59,7 @@ def rebuild_token_map(self, partitioner, token_map): class MockCluster(object): max_schema_agreement_wait = 5 - load_balancing_policy = RoundRobinPolicy() + profile_manager = ProfileManager() reconnection_policy = ConstantReconnectionPolicy(2) address_translator = IdentityTranslator() down_host = None @@ -72,6 +72,7 @@ def __init__(self): self.removed_hosts = [] self.scheduler = Mock(spec=_Scheduler) self.executor = Mock(spec=ThreadPoolExecutor) + self.profile_manager.profiles[EXEC_PROFILE_DEFAULT] = ExecutionProfile(RoundRobinPolicy()) def add_host(self, address, datacenter, rack, signal=False, refresh_nodes=True): host = Host(address, SimpleConvictionPolicy, datacenter, rack) diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index ba94e1c30b..ad5bb3e93b 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -40,7 +40,7 @@ def make_basic_session(self): def make_session(self): session = self.make_basic_session() - session._load_balancer.make_query_plan.return_value = ['ip1', 'ip2'] + session.cluster._default_load_balancing_policy.make_query_plan.return_value = ['ip1', 'ip2'] session._pools.get.return_value.is_shutdown = False return session @@ -54,7 +54,7 @@ def make_mock_response(self, results): def test_result_message(self): session = self.make_basic_session() - session._load_balancer.make_query_plan.return_value = ['ip1', 'ip2'] + session.cluster._default_load_balancing_policy.make_query_plan.return_value = ['ip1', 'ip2'] pool = session._pools.get.return_value pool.is_shutdown = False @@ -279,7 +279,7 @@ def test_all_retries_fail(self): def test_all_pools_shutdown(self): session = self.make_basic_session() - session._load_balancer.make_query_plan.return_value = ['ip1', 'ip2'] + session.cluster._default_load_balancing_policy.make_query_plan.return_value = ['ip1', 'ip2'] session._pools.get.return_value.is_shutdown = True rf = ResponseFuture(session, Mock(), Mock(), 1) @@ -288,7 +288,7 @@ def test_all_pools_shutdown(self): def test_first_pool_shutdown(self): session = self.make_basic_session() - session._load_balancer.make_query_plan.return_value = ['ip1', 'ip2'] + session.cluster._default_load_balancing_policy.make_query_plan.return_value = ['ip1', 'ip2'] # first return a pool with is_shutdown=True, then is_shutdown=False session._pools.get.side_effect = [Mock(is_shutdown=True), Mock(is_shutdown=False)] @@ -302,7 +302,7 @@ def test_first_pool_shutdown(self): def test_timeout_getting_connection_from_pool(self): session = self.make_basic_session() - session._load_balancer.make_query_plan.return_value = ['ip1', 'ip2'] + session.cluster._default_load_balancing_policy.make_query_plan.return_value = ['ip1', 'ip2'] # the first pool will raise an exception on borrow_connection() exc = NoConnectionsAvailable() From 4a37da29885a964316754ba47267932b5f515f59 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 3 Jun 2016 11:25:35 -0500 Subject: [PATCH 0686/2005] execution profile docs PYTHON-569 --- cassandra/cluster.py | 63 ++++++++++++- docs/api/cassandra/cluster.rst | 7 ++ docs/execution_profiles.rst | 159 +++++++++++++++++++++++++++++++++ docs/index.rst | 14 +-- 4 files changed, 235 insertions(+), 8 deletions(-) create mode 100644 docs/execution_profiles.rst diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 534106d17e..8eca2bfe70 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -198,11 +198,49 @@ def default_lbp_factory(): class ExecutionProfile(object): load_balancing_policy = None + """ + An instance of :class:`.policies.LoadBalancingPolicy` or one of its subclasses. + + Used in determining host distance for establishing connections, and routing requests. + + Defaults to ``TokenAwarePolicy(DCAwareRoundRobinPolicy())`` if not specified + """ + retry_policy = None - consistency_level = None + """ + An instance of :class:`.policies.RetryPolicy` instance used when :class:`.Statement` objects do not have a + :attr:`~.Statement.retry_policy` explicitly set. + + Defaults to :class:`.RetryPolicy` if not specified + """ + + consistency_level = ConsistencyLevel.LOCAL_ONE + """ + :class:`.ConsistencyLevel` used when not specified on a :class:`.Statement`. + """ + serial_consistency_level = None - request_timeout = None - row_factory = None + """ + Serial :class:`.ConsistencyLevel` used when not specified on a :class:`.Statement` (for LWT conditional statements). + """ + + request_timeout = 10.0 + """ + Request timeout used when not overridden in :meth:`.Session.execute` + """ + + row_factory = staticmethod(tuple_factory) + """ + A callable to format results, accepting ``(colnames, rows)`` where ``colnames`` is a list of column names, and + ``rows`` is a list of tuples, with each tuple representing a row of parsed values. + + Some example implementations: + + - :func:`cassandra.query.tuple_factory` - return a result row as a tuple + - :func:`cassandra.query.named_tuple_factory` - return a result row as a named tuple + - :func:`cassandra.query.dict_factory` - return a result row as a dict + - :func:`cassandra.query.ordered_dict_factory` - return a result row as an OrderedDict + """ def __init__(self, load_balancing_policy=None, retry_policy=None, consistency_level=ConsistencyLevel.LOCAL_ONE, serial_consistency_level=None, @@ -259,6 +297,12 @@ def default(self): EXEC_PROFILE_DEFAULT = object() +""" +Key for the ``Cluster`` default execution profile, used when no other profile is selected in +``Session.execute(execution_profile)``. + +Use this as the key in ``Cluster(execution_profiles)`` to override the default profile. +""" class _ConfigMode(object): @@ -907,6 +951,19 @@ def __init__(self, street, zipcode): UserType.evict_udt_class(keyspace, user_type) def add_execution_profile(self, name, profile, pool_wait_timeout=5): + """ + Adds an :class:`.ExecutionProfile` to the cluster. This makes it available for use by ``name`` in :meth:`.Session.execute` + and :meth:`.Session.execute_async`. + + Normally profiles will be injected at cluster initialization via ``Cluster(execution_profiles)``. This method + provides a way of adding them dynamically. + + Adding a new profile updates the connection pools according to the specified ``load_balancing_policy``. By default, + this method will wait up to five seconds for the pool creation to complete, so the profile can be used immediately + upon return. This behavior can be controlled using ``pool_wait_timeout`` (see + `concurrent.futures.wait `_ + for timeout semantics). + """ if not isinstance(profile, ExecutionProfile): raise TypeError("profile must be an instance of ExecutionProfile") if self._config_mode == _ConfigMode.LEGACY: diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 5e0ca04f03..bebea2ca0c 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -74,6 +74,8 @@ .. automethod:: unregister_listener + .. automethod:: add_execution_profile + .. automethod:: set_max_requests_per_connection .. automethod:: get_max_requests_per_connection @@ -106,6 +108,11 @@ .. automethod:: set_meta_refresh_enabled +.. autoclass:: ExecutionProfile + :members: + +.. autodata:: EXEC_PROFILE_DEFAULT + :annotation: .. autoclass:: Session () diff --git a/docs/execution_profiles.rst b/docs/execution_profiles.rst new file mode 100644 index 0000000000..66678c1c1c --- /dev/null +++ b/docs/execution_profiles.rst @@ -0,0 +1,159 @@ +Execution Profiles (experimental) +================================= + +Execution profiles are an experimental API aimed at making it easier to execute requests in different ways within +a single connected ``Session``. Execution profiles are being introduced the exploding number of configuration options, +especially as the database platform evolves more complex workloads. + +The Execution Profile API is being introduced now, in an experimental capacity in order to take advantage of it in +existing projects, and to guage interest and feedback in the community. For now, the legacy configuration remains +intact, but legacy and Execution Profile APIs cannot be used simultaneously on the same client ``Cluster``. + +This document explains how Execution Profiles relate to existing settings, and shows how to use the new profiles for +request execution. + +Mapping Legacy Parameters to Profiles +------------------------------------- + +Execution profiles can inherit from :class:`.cluster.ExecutionProfile`, and currently provide the following options, +previously input from the noted attributes: + +- load_balancing_policy - :attr:`.Cluster.load_balancing_policy` +- request_timeout - :attr:`.Session.default_timeout`, optional :meth:`.Session.execute` parameter +- retry_policy - :attr:`.Cluster.default_retry_policy`, optional :attr:`.Statement.retry_policy` attribute +- consistency_level - :attr:`.Session.default_consistency_level`, optional :attr:`.Statement.consistency_level` attribute +- serial_consistency_level - :attr:`.Session.default_serial_consistency_level`, optional :attr:`.Statement.serial_consistency_level` attribute +- row_factory - :attr:`.Session.row_factory` attribute + +When using the new API, these parameters can be defined by instances of :class:`.cluster.ExecutionProfile`. + +Using Execution Profiles +------------------------ +Default +~~~~~~~ + +.. code:: python + + from cassandra.cluster import Cluster + cluster = Cluster() + session = cluster.connect() + local_query = 'SELECT rpc_address FROM system.local' + for _ in cluster.metadata.all_hosts(): + print session.execute(local_query)[0] + + +.. parsed-literal:: + + Row(rpc_address='127.0.0.2') + Row(rpc_address='127.0.0.1') + + +The default execution profile is built from Cluster parameters and default Session attributes. This profile matches existing default +parameters. + +Initializing cluster with profiles +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code:: python + + from cassandra.cluster import ExecutionProfile + from cassandra.policies import WhiteListRoundRobinPolicy + + node1_profile = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1'])) + node2_profile = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.2'])) + + profiles = {'node1': node1_profile, 'node2': node2_profile} + session = Cluster(execution_profiles=profiles).connect() + for _ in cluster.metadata.all_hosts(): + print session.execute(local_query, execution_profile='node1')[0] + + +.. parsed-literal:: + + Row(rpc_address='127.0.0.1') + Row(rpc_address='127.0.0.1') + + +.. code:: python + + for _ in cluster.metadata.all_hosts(): + print session.execute(local_query, execution_profile='node2')[0] + + +.. parsed-literal:: + + Row(rpc_address='127.0.0.2') + Row(rpc_address='127.0.0.2') + + +.. code:: python + + for _ in cluster.metadata.all_hosts(): + print session.execute(local_query)[0] + + +.. parsed-literal:: + + Row(rpc_address='127.0.0.2') + Row(rpc_address='127.0.0.1') + +Note that, even when custom profiles are injected, the default ``TokenAwarePolicy(DCAwareRoundRobinPolicy())`` is still +present. To override the default, specify a policy with the :data:`~.cluster.EXEC_PROFILE_DEFAULT` key. + +.. code:: python + + from cassandra.cluster import EXEC_PROFILE_DEFAULT + profile = ExecutionProfile(request_timeout=30) + cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: profile}) + + +Adding named profiles +~~~~~~~~~~~~~~~~~~~~~ + +New profiles can be added constructing from scratch, or deriving from default: + +.. code:: python + + from cassandra.cluster import ExecutionProfile + from cassandra.policies import WhiteListRoundRobinPolicy + locked_execution = ExecutionProfile() + locked_execution.load_balancing_policy = WhiteListRoundRobinPolicy(['127.0.0.1']) + node1_profile = 'node1_whitelist' + cluster.add_execution_profile(node1_profile, locked_execution) + + for _ in cluster.metadata.all_hosts(): + print session.execute(local_query, execution_profile=node1_profile)[0] + + +.. parsed-literal:: + + Row(rpc_address='127.0.0.1') + Row(rpc_address='127.0.0.1') + +See :meth:`.Cluster.add_execution_profile` for details and optional parameters. + +Passing a profile instance without mapping +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +We also have the ability to pass profile instances to be used for execution, but not added to the mapping: + +.. code:: python + + from copy import copy + from cassandra.query import tuple_factory + + tmp = copy(node1_profile) + tmp.request_timeout = 100 + tmp.row_factory = tuple_factory + + print session.execute(local_query, execution_profile=tmp)[0] + print session.execute(local_query, execution_profile='node1')[0] + +.. parsed-literal:: + + ('127.0.0.1',) + Row(rpc_address='127.0.0.1') + +As shown above, the ``tmp`` profile shares a load balancing policy with one managed by the cluster. If this technique +is not used, the application would need to initialize and maintain the policy state manually. + diff --git a/docs/index.rst b/docs/index.rst index c68e8653cb..3adec84446 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -16,7 +16,7 @@ Contents How to install the driver. :doc:`getting_started` - A guide through the first steps of connecting to Cassandra and executing queries. + A guide through the first steps of connecting to Cassandra and executing queries :doc:`object_mapper` Introduction to the integrated object mapper, cqlengine @@ -25,22 +25,25 @@ Contents The API documentation. :doc:`upgrading` - A guide to upgrading versions of the driver. + A guide to upgrading versions of the driver + +:doc:`execution_profiles` + An introduction to a more flexible way of configuring request execution :doc:`performance` Tips for getting good performance. :doc:`query_paging` - Notes on paging large query results. + Notes on paging large query results :doc:`lwt` Working with results of conditional requests :doc:`user_defined_types` - Working with Cassandra 2.1's user-defined types. + Working with Cassandra 2.1's user-defined types :doc:`security` - An overview of the security features of the driver. + An overview of the security features of the driver :doc:`dates_and_times` Some discussion on the driver's approach to working with timestamp, date, time types @@ -55,6 +58,7 @@ Contents installation getting_started upgrading + execution_profiles performance query_paging lwt From 915919e065a34826ed694e7ddb93f3a936d53f13 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Tue, 7 Jun 2016 08:26:20 +0200 Subject: [PATCH 0687/2005] Typo. --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f88e84c962..42eab9dc4e 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -222,7 +222,7 @@ Bug Fixes 2.7.2 ===== -Setpember 14, 2015 +September 14, 2015 Bug Fixes --------- From 9ea856459dc900b9517d9ecbb94b65e4e4ce8de7 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 9 Mar 2016 16:54:32 -0600 Subject: [PATCH 0688/2005] Initial Appveryor enablement --- appveyor.yml | 26 ++++++++++++++ appveyor/appveyor.ps1 | 66 +++++++++++++++++++++++++++++++++++ appveyor/run_test.ps1 | 35 +++++++++++++++++++ tests/unit/test_concurrent.py | 9 ++--- 4 files changed, 132 insertions(+), 4 deletions(-) create mode 100644 appveyor.yml create mode 100644 appveyor/appveyor.ps1 create mode 100644 appveyor/run_test.ps1 diff --git a/appveyor.yml b/appveyor.yml new file mode 100644 index 0000000000..dd3f1a97dc --- /dev/null +++ b/appveyor.yml @@ -0,0 +1,26 @@ +environment: + matrix: + - PYTHON: "C:\\Python27-x64" + cassandra_version: 3.0.5 + ci_type: standard + - PYTHON: "C:\\Python34" + cassandra_version: 3.0.5 + ci_type: unit +os: Visual Studio 2015 +platform: + - x64 +install: + - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%" + - ps: .\appveyor\appveyor.ps1 +build_script: + - cmd: | + "%VS140COMNTOOLS%\..\..\VC\vcvarsall.bat" x86_amd64 + python setup.py install --no-cython +test_script: + - ps: .\appveyor\run_test.ps1 +cache: + - C:\Users\appveyor\.m2 + - C:\ProgramData\chocolatey\bin + - C:\ProgramData\chocolatey\lib + - C:\Users\appveyor\jce_policy-1.7.0.zip + - C:\Users\appveyor\jce_policy-1.8.0.zip \ No newline at end of file diff --git a/appveyor/appveyor.ps1 b/appveyor/appveyor.ps1 new file mode 100644 index 0000000000..af7ce5cc6d --- /dev/null +++ b/appveyor/appveyor.ps1 @@ -0,0 +1,66 @@ +$env:JAVA_HOME="C:\Program Files\Java\jdk1.8.0" +$env:PATH="$($env:JAVA_HOME)\bin;$($env:PATH)" +$env:CCM_PATH="C:\Users\appveyor\ccm" +$env:CASSANDRA_VERSION=$env:cassandra_version +python --version +python -c "import platform; print(platform.architecture())" +# Install Ant +Start-Process cinst -ArgumentList @("-y","ant") -Wait -NoNewWindow +# Workaround for ccm, link ant.exe -> ant.bat +If (!(Test-Path C:\ProgramData\chocolatey\bin\ant.bat)) { + cmd /c mklink C:\ProgramData\chocolatey\bin\ant.bat C:\ProgramData\chocolatey\bin\ant.exe +} + + +$jce_indicator = "$target\README.txt" +# Install Java Cryptographic Extensions, needed for SSL. +If (!(Test-Path $jce_indicator)) { + $zip = "C:\Users\appveyor\jce_policy-$($env:java_version).zip" + $target = "$($env:JAVA_HOME)\jre\lib\security" + # If this file doesn't exist we know JCE hasn't been installed. + $url = "https://www.dropbox.com/s/po4308hlwulpvep/UnlimitedJCEPolicyJDK7.zip?dl=1" + $extract_folder = "UnlimitedJCEPolicy" + If ($env:java_version -eq "1.8.0") { + $url = "https://www.dropbox.com/s/al1e6e92cjdv7m7/jce_policy-8.zip?dl=1" + $extract_folder = "UnlimitedJCEPolicyJDK8" + } + # Download zip to staging area if it doesn't exist, we do this because + # we extract it to the directory based on the platform and we want to cache + # this file so it can apply to all platforms. + if(!(Test-Path $zip)) { + (new-object System.Net.WebClient).DownloadFile($url, $zip) + } + + Add-Type -AssemblyName System.IO.Compression.FileSystem + [System.IO.Compression.ZipFile]::ExtractToDirectory($zip, $target) + + $jcePolicyDir = "$target\$extract_folder" + Move-Item $jcePolicyDir\* $target\ -force + Remove-Item $jcePolicyDir +} + +# Install Python Dependencies for CCM. +Start-Process python -ArgumentList "-m pip install psutil pyYaml six numpy" -Wait -NoNewWindow + +# Clone ccm from git and use master. +If (!(Test-Path $env:CCM_PATH)) { + Start-Process git -ArgumentList "clone https://github.com/pcmanus/ccm.git $($env:CCM_PATH)" -Wait -NoNewWindow +} + + +# Copy ccm -> ccm.py so windows knows to run it. +If (!(Test-Path $env:CCM_PATH\ccm.py)) { + Copy-Item "$env:CCM_PATH\ccm" "$env:CCM_PATH\ccm.py" +} + +$env:PYTHONPATH="$($env:CCM_PATH);$($env:PYTHONPATH)" +$env:PATH="$($env:CCM_PATH);$($env:PATH)" + +# Predownload cassandra version for CCM if it isn't already downloaded. +If (!(Test-Path C:\Users\appveyor\.ccm\repository\$env:cassandra_version)) { + Start-Process python -ArgumentList "$($env:CCM_PATH)\ccm.py create -v $($env:cassandra_version) -n 1 predownload" -Wait -NoNewWindow + Start-Process python -ArgumentList "$($env:CCM_PATH)\ccm.py remove predownload" -Wait -NoNewWindow +} + +Start-Process python -ArgumentList "-m pip install -r test-requirements.txt" -Wait -NoNewWindow +Start-Process python -ArgumentList "-m pip install nose-ignore-docstring" -Wait -NoNewWindow \ No newline at end of file diff --git a/appveyor/run_test.ps1 b/appveyor/run_test.ps1 new file mode 100644 index 0000000000..34ba227c0a --- /dev/null +++ b/appveyor/run_test.ps1 @@ -0,0 +1,35 @@ +Set-ExecutionPolicy Unrestricted +Set-ExecutionPolicy -ExecutionPolicy Unrestricted -Scope Process -force +Set-ExecutionPolicy -ExecutionPolicy Unrestricted -Scope CurrentUser -force +Get-ExecutionPolicy -List +echo $env:Path +echo $env:JAVA_HOME +echo $env:PYTHONPATH +echo $env:CASSANDRA_VERSION +echo $env:ci_type +python --version +python -c "import platform; print(platform.architecture())" + +$wc = New-Object 'System.Net.WebClient' +nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml .\tests\unit +echo "uploading unit results" +$wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\unit_results.xml)) + +if($env:ci_type -eq 'standard' -Or $env:ci_type -eq 'long'){ + echo "Running CQLEngine integration tests" + nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=cqlengine_results.xml .\tests\integration\cqlengine + $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\cqlengine_results.xml)) + echo "uploading CQLEngine test results" + + echo "Running standard integration tests" + nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml .\tests\integration\standard + $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\standard_results.xml)) + echo "uploading standard integration test results" +} + +if($env:ci_type -eq 'long'){ + nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=cqlengine_results.xml .\tests\integration\cqlengine + $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\cqlengine_results.xml)) + echo "uploading standard integration test results" +} +exit 0 diff --git a/tests/unit/test_concurrent.py b/tests/unit/test_concurrent.py index a1f953220f..a535bf2260 100644 --- a/tests/unit/test_concurrent.py +++ b/tests/unit/test_concurrent.py @@ -211,10 +211,11 @@ def insert_and_validate_list_generator(self, reverse, slowdown): t = TimedCallableInvoker(our_handler, slowdown=slowdown) t.start() - results = execute_concurrent(mock_session, statements_and_params, results_generator=True) - - self.validate_result_ordering(results) - t.stop() + try: + results = execute_concurrent(mock_session, statements_and_params, results_generator=True) + self.validate_result_ordering(results) + finally: + t.stop() def validate_result_ordering(self, results): """ From 17fbbefa9100b50eb4675992c12f7a26a470078e Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 7 Jun 2016 13:48:26 -0500 Subject: [PATCH 0689/2005] Adding test documention, tweaking tests for PYTHON-560 --- .../cqlengine/query/test_queryset.py | 27 ++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index a64765206e..0776d67943 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -221,8 +221,13 @@ def test_queryset_with_distinct(self): def test_defining_only_fields(self): """ Tests defining only fields - """ + @since 3.5 + @jira_ticket PYTHON-560 + @expected_result deferred fields should not be returned + + @test_category object_mapper + """ # simple only definition q = TestModel.objects.only(['attempt_id', 'description']) self.assertEqual(q._select_fields(), ['attempt_id', 'description']) @@ -239,7 +244,12 @@ def test_defining_only_fields(self): q = q.defer(['description']) self.assertEqual(q._select_fields(), ['attempt_id']) - # PYTHON-560 + # Eliminate all results confirm exception is thrown + q = TestModel.objects.only(['description']) + q = q.defer(['description']) + with self.assertRaises(query.QueryException): + q._select_fields() + q = TestModel.objects.filter(test_id=0).only(['test_id', 'attempt_id', 'description']) self.assertEqual(q._select_fields(), ['attempt_id', 'description']) @@ -252,10 +262,15 @@ def test_defining_only_fields(self): q = TestModel.objects.filter(test_id=0).only(['test_id']) q._select_fields() - def test_defining_defer_fields(self): """ Tests defining defer fields + + @since 3.5 + @jira_ticket PYTHON-560 + @expected_result deferred fields should not be returned + + @test_category object_mapper """ # simple defer definition @@ -275,6 +290,12 @@ def test_defining_defer_fields(self): q = q.only(['description', 'test_id']) self.assertEqual(q._select_fields(), ['test_id']) + # Eliminate all results confirm exception is thrown + q = TestModel.objects.defer(['description', 'attempt_id']) + q = q.only(['description']) + with self.assertRaises(query.QueryException): + q._select_fields() + # implicit defer q = TestModel.objects.filter(test_id=0) self.assertEqual(q._select_fields(), ['attempt_id', 'description', 'expected_result', 'test_result']) From 9128e16996ddffabf65ba1e6f9b169b8acfac3af Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 9 Jun 2016 10:12:13 -0400 Subject: [PATCH 0690/2005] Add API to get the host metadata associated with the control connection node --- cassandra/cluster.py | 8 ++++++++ docs/api/cassandra/cluster.rst | 2 ++ .../standard/test_control_connection.py | 17 ++++++++++++++++- 3 files changed, 26 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index d311988dc8..9d1bef058d 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1360,6 +1360,14 @@ def _target_type_from_refresh_args(keyspace, table, usertype, function, aggregat return SchemaTargetType.KEYSPACE return None + def get_control_connection_host(self): + """ + Returns the control connection host metadata. + """ + connection = getattr(self.control_connection, '_connection') + host = connection.host if connection else None + return self.metadata.get_host(host) if host else None + def refresh_schema_metadata(self, max_schema_agreement_wait=None): """ Synchronously refresh all schema metadata. diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index df0d58ab58..c5938a3beb 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -89,6 +89,8 @@ .. automethod:: set_max_connections_per_host + .. automethod:: get_control_connection_host + .. automethod:: refresh_schema_metadata .. automethod:: refresh_keyspace_metadata diff --git a/tests/integration/standard/test_control_connection.py b/tests/integration/standard/test_control_connection.py index 07c5bac992..ade053a07f 100644 --- a/tests/integration/standard/test_control_connection.py +++ b/tests/integration/standard/test_control_connection.py @@ -39,7 +39,6 @@ def setUp(self): "Native protocol 3,0+ is required for UDTs using %r" % (PROTOCOL_VERSION,)) self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) - self.session = self.cluster.connect() def tearDown(self): try: @@ -65,6 +64,7 @@ def test_drop_keyspace(self): @test_category connection """ + self.session = self.cluster.connect() self.session.execute(""" CREATE KEYSPACE keyspacetodrop WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' } @@ -76,3 +76,18 @@ def test_drop_keyspace(self): self.session.execute("DROP KEYSPACE keyspacetodrop") cc_id_post_drop = id(self.cluster.control_connection._connection) self.assertEqual(cc_id_post_drop, cc_id_pre_drop) + + def test_get_control_connection_host(self): + """ + Test to validate Cluster.get_control_connection_host() metadata + """ + + host = self.cluster.get_control_connection_host() + self.assertEqual(host, None) + + self.session = self.cluster.connect() + cc_host = self.cluster.control_connection._connection.host + + host = self.cluster.get_control_connection_host() + self.assertEqual(host.address, cc_host) + self.assertEqual(host.is_up, True) From d5379e7044a7629efb8483f64ec1c17f3d26e272 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 9 Jun 2016 11:49:06 -0500 Subject: [PATCH 0691/2005] refactor Session exec profile retrieval PYTHON-569 --- cassandra/cluster.py | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 8eca2bfe70..12dd920ef3 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2000,21 +2000,17 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time row_factory = self.row_factory load_balancing_policy = self.cluster.load_balancing_policy else: - profiles = self.cluster.profile_manager.profiles - try: - exec_profile = execution_profile if isinstance(execution_profile, ExecutionProfile) else profiles[execution_profile] - except KeyError: - raise ValueError("Invalid execution_profile: '%s'; valid profiles are %s" % (execution_profile, profiles.keys())) + execution_profile = self._get_execution_profile(execution_profile) if timeout is _NOT_SET: - timeout = exec_profile.request_timeout + timeout = execution_profile.request_timeout - cl = query.consistency_level if query.consistency_level is not None else exec_profile.consistency_level - serial_cl = query.serial_consistency_level if query.serial_consistency_level is not None else exec_profile.serial_consistency_level + cl = query.consistency_level if query.consistency_level is not None else execution_profile.consistency_level + serial_cl = query.serial_consistency_level if query.serial_consistency_level is not None else execution_profile.serial_consistency_level - retry_policy = query.retry_policy or exec_profile.retry_policy - row_factory = exec_profile.row_factory - load_balancing_policy = exec_profile.load_balancing_policy + retry_policy = query.retry_policy or execution_profile.retry_policy + row_factory = execution_profile.row_factory + load_balancing_policy = execution_profile.load_balancing_policy fetch_size = query.fetch_size if fetch_size is FETCH_SIZE_UNSET and self._protocol_version >= 2: @@ -2059,6 +2055,13 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time self, message, query, timeout, metrics=self._metrics, prepared_statement=prepared_statement, retry_policy=retry_policy, row_factory=row_factory, load_balancer=load_balancing_policy) + def _get_execution_profile(self, ep): + profiles = self.cluster.profile_manager.profiles + try: + return ep if isinstance(ep, ExecutionProfile) else profiles[ep] + except KeyError: + raise ValueError("Invalid execution_profile: '%s'; valid profiles are %s" % (ep, profiles.keys())) + def prepare(self, query, custom_payload=None): """ Prepares a query string, returning a :class:`~cassandra.query.PreparedStatement` From 2469be41e3c9c809d6040164c02f2a4535a5dd43 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 9 Jun 2016 13:07:10 -0500 Subject: [PATCH 0692/2005] add a way to clone/update a profile PYTHON-569 --- cassandra/cluster.py | 16 ++++++++++++++++ docs/api/cassandra/cluster.rst | 2 ++ 2 files changed, 18 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 12dd920ef3..857eb68079 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -21,6 +21,7 @@ import atexit from collections import defaultdict, Mapping from concurrent.futures import ThreadPoolExecutor, wait as wait_futures +from copy import copy import logging from random import random import socket @@ -2062,6 +2063,21 @@ def _get_execution_profile(self, ep): except KeyError: raise ValueError("Invalid execution_profile: '%s'; valid profiles are %s" % (ep, profiles.keys())) + def execution_profile_clone_update(self, ep, **kwargs): + """ + Returns a clone of the ``ep`` profile. ``kwargs`` can be specified to update attributes + of the returned profile. + + This is a shollow clone, so any objects referenced by the profile are shared. This means Load Balancing Policy + is maintained by inclusion in the active profiles. It also means updating any other rich objects will be seen + by the active profile. In cases where this is not desirable, be sure to replace the instance instead of manipulating + the shared object. + """ + clone = copy(self._get_execution_profile(ep)) + for attr, value in kwargs.items(): + setattr(clone, attr, value) + return clone + def prepare(self, query, custom_payload=None): """ Prepares a query string, returning a :class:`~cassandra.query.PreparedStatement` diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index bebea2ca0c..6a92c8d846 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -146,6 +146,8 @@ .. automethod:: set_keyspace(keyspace) + .. automethod:: execution_profile_clone_update + .. autoclass:: ResponseFuture () .. autoattribute:: query From 1164984d67e68158f2aedf44bed93b40313e77ac Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 9 Jun 2016 13:29:50 -0500 Subject: [PATCH 0693/2005] Cluster.add_execution_profile will not accept duplicate profile keys PYTHON-569 --- cassandra/cluster.py | 4 +++- tests/unit/test_cluster.py | 19 ++++++++++++++++++- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 857eb68079..b149d05dd3 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -954,7 +954,7 @@ def __init__(self, street, zipcode): def add_execution_profile(self, name, profile, pool_wait_timeout=5): """ Adds an :class:`.ExecutionProfile` to the cluster. This makes it available for use by ``name`` in :meth:`.Session.execute` - and :meth:`.Session.execute_async`. + and :meth:`.Session.execute_async`. This method will raise if the profile already exists. Normally profiles will be injected at cluster initialization via ``Cluster(execution_profiles)``. This method provides a way of adding them dynamically. @@ -969,6 +969,8 @@ def add_execution_profile(self, name, profile, pool_wait_timeout=5): raise TypeError("profile must be an instance of ExecutionProfile") if self._config_mode == _ConfigMode.LEGACY: raise ValueError("Cannot add execution profiles when legacy parameters are set explicitly. TODO: link to doc") + if name in self.profile_manager.profiles: + raise ValueError("Profile %s already exists") self.profile_manager.profiles[name] = profile profile.load_balancing_policy.populate(self, self.metadata.all_hosts()) futures = set() diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 4b33f60df2..b3f7b99040 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -261,7 +261,7 @@ def test_no_profile_with_legacy(self): ('row_factory', tuple_factory)): cluster._config_mode = _ConfigMode.UNCOMMITTED setattr(session, attr, value) - self.assertRaises(ValueError, cluster.add_execution_profile, 'name', ExecutionProfile()) + self.assertRaises(ValueError, cluster.add_execution_profile, 'name' + attr, ExecutionProfile()) # don't accept profile self.assertRaises(ValueError, session.execute_async, "query", execution_profile='some name here') @@ -296,3 +296,20 @@ def test_profile_name_value(self): by_value = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(5)]) rf = session.execute_async("query", execution_profile=by_value) self._verify_response_future_profile(rf, by_value) + + + def test_no_profiles_same_name(self): + # can override default in init + cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(), 'one': ExecutionProfile()}) + + # cannot update default + self.assertRaises(ValueError, cluster.add_execution_profile, EXEC_PROFILE_DEFAULT, ExecutionProfile()) + + # cannot update named init + self.assertRaises(ValueError, cluster.add_execution_profile, 'one', ExecutionProfile()) + + # can add new name + cluster.add_execution_profile('two', ExecutionProfile()) + + # cannot add a profile added dynamically + self.assertRaises(ValueError, cluster.add_execution_profile, 'two', ExecutionProfile()) From 3c8073f1e9adee976b47b8e877b4661f4a3a1da9 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 9 Jun 2016 13:55:05 -0500 Subject: [PATCH 0694/2005] unit test for Session.execution_profile_clone_update PYTHON-569 --- tests/unit/test_cluster.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index b3f7b99040..96d9a9442b 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -297,6 +297,35 @@ def test_profile_name_value(self): rf = session.execute_async("query", execution_profile=by_value) self._verify_response_future_profile(rf, by_value) + def test_exec_profile_clone(self): + + cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(), 'one': ExecutionProfile()}) + session = Session(cluster, hosts=[]) + + profile_attrs = {'request_timeout': 1, + 'consistency_level': ConsistencyLevel.ANY, + 'serial_consistency_level': ConsistencyLevel.SERIAL, + 'row_factory': tuple_factory, + 'retry_policy': RetryPolicy(), + 'load_balancing_policy': default_lbp_factory()} + reference_attributes = ('retry_policy', 'load_balancing_policy') + + # default and one named + for profile in (EXEC_PROFILE_DEFAULT, 'one'): + active = cluster.profile_manager.profiles[profile] + clone = session.execution_profile_clone_update(profile) + self.assertIsNot(clone, active) + + all_updated = session.execution_profile_clone_update(clone, **profile_attrs) + self.assertIsNot(all_updated, clone) + for attr, value in profile_attrs.items(): + self.assertEqual(getattr(clone, attr), getattr(active, attr)) + if attr in reference_attributes: + self.assertIs(getattr(clone, attr), getattr(active, attr)) + self.assertNotEqual(getattr(all_updated, attr), getattr(active, attr)) + + # cannot clone nonexistent profile + self.assertRaises(ValueError, session.execution_profile_clone_update, 'DOES NOT EXIST', **profile_attrs) def test_no_profiles_same_name(self): # can override default in init From e75f44ee7c1c42284c07573ccc5c0cb67b74d5a0 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 9 Jun 2016 14:06:15 -0500 Subject: [PATCH 0695/2005] doc updates from peer input --- docs/execution_profiles.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/execution_profiles.rst b/docs/execution_profiles.rst index 66678c1c1c..d5799b4b01 100644 --- a/docs/execution_profiles.rst +++ b/docs/execution_profiles.rst @@ -2,11 +2,11 @@ Execution Profiles (experimental) ================================= Execution profiles are an experimental API aimed at making it easier to execute requests in different ways within -a single connected ``Session``. Execution profiles are being introduced the exploding number of configuration options, -especially as the database platform evolves more complex workloads. +a single connected ``Session``. Execution profiles are being introduced to deal with the exploding number of +configuration options, especially as the database platform evolves more complex workloads. The Execution Profile API is being introduced now, in an experimental capacity in order to take advantage of it in -existing projects, and to guage interest and feedback in the community. For now, the legacy configuration remains +existing projects, and to gauge interest and feedback in the community. For now, the legacy configuration remains intact, but legacy and Execution Profile APIs cannot be used simultaneously on the same client ``Cluster``. This document explains how Execution Profiles relate to existing settings, and shows how to use the new profiles for From a4199dcb786b5c9328407e96657850d45f036078 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 9 Jun 2016 14:13:19 -0500 Subject: [PATCH 0696/2005] update EP docs following API tweaks PYTHON-569 --- docs/execution_profiles.rst | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/docs/execution_profiles.rst b/docs/execution_profiles.rst index d5799b4b01..a825ecd28e 100644 --- a/docs/execution_profiles.rst +++ b/docs/execution_profiles.rst @@ -114,10 +114,7 @@ New profiles can be added constructing from scratch, or deriving from default: .. code:: python - from cassandra.cluster import ExecutionProfile - from cassandra.policies import WhiteListRoundRobinPolicy - locked_execution = ExecutionProfile() - locked_execution.load_balancing_policy = WhiteListRoundRobinPolicy(['127.0.0.1']) + locked_execution = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1'])) node1_profile = 'node1_whitelist' cluster.add_execution_profile(node1_profile, locked_execution) @@ -139,13 +136,10 @@ We also have the ability to pass profile instances to be used for execution, but .. code:: python - from copy import copy from cassandra.query import tuple_factory - tmp = copy(node1_profile) - tmp.request_timeout = 100 - tmp.row_factory = tuple_factory - + tmp = session.execution_profile_clone_update('node1', request_timeout=100, row_factory=tuple_factory) + print session.execute(local_query, execution_profile=tmp)[0] print session.execute(local_query, execution_profile='node1')[0] @@ -154,6 +148,5 @@ We also have the ability to pass profile instances to be used for execution, but ('127.0.0.1',) Row(rpc_address='127.0.0.1') -As shown above, the ``tmp`` profile shares a load balancing policy with one managed by the cluster. If this technique -is not used, the application would need to initialize and maintain the policy state manually. - +The new profile is a shallow copy, so the ``tmp`` profile shares a load balancing policy with one managed by the cluster. +If reference objects are to be updated in the clone, one would typically set those attributes to a new instance. From 7c9a4e08c84053412148b2f1ebcd0fc6d3eb44ee Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 9 Jun 2016 15:22:10 -0500 Subject: [PATCH 0697/2005] Decouple workload and start logic --- tests/integration/__init__.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 64efcaa4c2..62a58896a4 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -281,11 +281,10 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[]): # This will enable the Mirroring query handler which will echo our custom payload k,v pairs back if PROTOCOL_VERSION >= 4: jvm_args = [" -Dcassandra.custom_query_handler_class=org.apache.cassandra.cql3.CustomPayloadMirroringQueryHandler"] - + if(len(workloads) > 0): + for node in CCM_CLUSTER.nodes.values(): + node.set_workloads(workloads) if start: - if(len(workloads) > 0): - for node in CCM_CLUSTER.nodes.values(): - node.set_workloads(workloads) log.debug("Starting CCM cluster: {0}".format(cluster_name)) CCM_CLUSTER.start(wait_for_binary_proto=True, wait_other_notice=True, jvm_args=jvm_args) # Added to wait for slow nodes to start up From d6b7ddfdd0c8d27639c0652cc11485650068febf Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 9 Jun 2016 17:35:59 -0400 Subject: [PATCH 0698/2005] Fix _connection attribute access --- cassandra/cluster.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 9d1bef058d..5e4ca8a712 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1364,7 +1364,7 @@ def get_control_connection_host(self): """ Returns the control connection host metadata. """ - connection = getattr(self.control_connection, '_connection') + connection = self.control_connection._connection host = connection.host if connection else None return self.metadata.get_host(host) if host else None @@ -2913,7 +2913,7 @@ def _on_timeout(self): if self.is_schema_agreed: errors = {self._current_host.address: "Client request timeout. See Session.execute[_async](timeout)"} else: - connection = getattr(self.session.cluster.control_connection, '_connection') + connection = self.session.cluster.control_connection._connection host = connection.host if connection else 'unknown' errors = {host: "Request timed out while waiting for schema agreement. See Session.execute[_async](timeout) and Cluster.max_schema_agreement_wait."} From 89e2200281d4326a972360c60959ddc87e699441 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 10 Jun 2016 16:44:22 -0500 Subject: [PATCH 0699/2005] PYTHON-569 adding test documentation, and few more tests --- cassandra/cluster.py | 3 + .../integration/cqlengine/test_consistency.py | 3 +- tests/integration/standard/test_cluster.py | 109 +++++++++++++++++- 3 files changed, 112 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index ece1d1ef66..6f7d733265 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1754,6 +1754,7 @@ def row_factory(self, rf): self._validate_set_legacy_config('row_factory', rf) _default_timeout = 10.0 + @property def default_timeout(self): """ @@ -1775,6 +1776,7 @@ def default_timeout(self, timeout): self._validate_set_legacy_config('default_timeout', timeout) _default_consistency_level = ConsistencyLevel.LOCAL_ONE + @property def default_consistency_level(self): """ @@ -1795,6 +1797,7 @@ def default_consistency_level(self, cl): self._validate_set_legacy_config('default_consistency_level', cl) _default_serial_consistency_level = None + @property def default_serial_consistency_level(self): """ diff --git a/tests/integration/cqlengine/test_consistency.py b/tests/integration/cqlengine/test_consistency.py index 61cf3f0d50..d52a43e3de 100644 --- a/tests/integration/cqlengine/test_consistency.py +++ b/tests/integration/cqlengine/test_consistency.py @@ -68,7 +68,6 @@ def test_update_uses_consistency(self): args = m.call_args self.assertEqual(CL.ALL, args[0][0].consistency_level) - def test_batch_consistency(self): with mock.patch.object(self.session, 'execute') as m: @@ -114,7 +113,7 @@ def test_delete(self): def test_default_consistency(self): # verify global assumed default - self.assertEqual(Session.default_consistency_level, ConsistencyLevel.LOCAL_ONE) + self.assertEqual(Session._default_consistency_level, ConsistencyLevel.LOCAL_ONE) # verify that this session default is set according to connection.setup # assumes tests/cqlengine/__init__ setup uses CL.ONE diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 53ed95e738..d5945cb167 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -43,7 +43,6 @@ def setup_module(): class ClusterTests(unittest.TestCase): - def test_host_resolution(self): """ Test to insure A records are resolved appropriately. @@ -620,6 +619,15 @@ def test_pool_management(self): cluster.shutdown() def test_profile_load_balancing(self): + """ + Tests that profile load balancing policies are honored. + + @since 3.5 + @jira_ticket PYTHON-569 + @expected_result Execution Policy should be used when applicable. + + @test_category config_profiles + """ query = "select release_version from system.local" node1 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1'])) with Cluster(execution_profiles={'node1': node1}) as cluster: @@ -662,7 +670,96 @@ def test_profile_load_balancing(self): # make sure original profile is not impacted self.assertTrue(session.execute(query, execution_profile='node1')[0].release_version) + def test_profile_lb_swap(self): + """ + Tests that profile load balancing policies are not shared + + Creates two LBP, runs a few queries, and validates that each LBP is execised + seperately between EP's + + @since 3.5 + @jira_ticket PYTHON-569 + @expected_result LBP should not be shared. + + @test_category config_profiles + """ + query = "select release_version from system.local" + rr1 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy()) + rr2 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy()) + exec_profiles = {'rr1': rr1, 'rr2': rr2} + with Cluster(execution_profiles=exec_profiles) as cluster: + session = cluster.connect() + + # default is DCA RR for all hosts + expected_hosts = set(cluster.metadata.all_hosts()) + rr1_queried_hosts = set() + rr2_queried_hosts = set() + + rs = session.execute(query, execution_profile='rr1') + rr1_queried_hosts.add(rs.response_future._current_host) + rs = session.execute(query, execution_profile='rr2') + rr2_queried_hosts.add(rs.response_future._current_host) + + self.assertEqual(rr2_queried_hosts, rr1_queried_hosts) + + def test_clone_shared_lbp(self): + """ + Tests that profile load balancing policies are shared on clone + + Creates one LBP clones it, and ensures that the LBP is shared between + the two EP's + + @since 3.5 + @jira_ticket PYTHON-569 + @expected_result LBP is shared + + @test_category config_profiles + """ + query = "select release_version from system.local" + rr1 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy()) + exec_profiles = {'rr1': rr1} + with Cluster(execution_profiles=exec_profiles) as cluster: + session = cluster.connect() + rr1_clone = session.execution_profile_clone_update('rr1', row_factory=tuple_factory) + cluster.add_execution_profile("rr1_clone", rr1_clone) + rr1_queried_hosts = set() + rr1_clone_queried_hosts = set() + rs = session.execute(query, execution_profile='rr1') + rr1_queried_hosts.add(rs.response_future._current_host) + rs = session.execute(query, execution_profile='rr1_clone') + rr1_clone_queried_hosts.add(rs.response_future._current_host) + self.assertNotEqual(rr1_clone_queried_hosts, rr1_queried_hosts) + + def test_missing_exec_prof(self): + """ + Tests to verify that using an unknown profile raises a ValueError + + @since 3.5 + @jira_ticket PYTHON-569 + @expected_result ValueError + + @test_category config_profiles + """ + query = "select release_version from system.local" + rr1 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy()) + rr2 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy()) + exec_profiles = {'rr1': rr1, 'rr2': rr2} + with Cluster(execution_profiles=exec_profiles) as cluster: + session = cluster.connect() + with self.assertRaises(ValueError): + session.execute(query, execution_profile='rr3') + def test_profile_pool_management(self): + """ + Tests that changes to execution profiles correctly impact our cluster's pooling + + @since 3.5 + @jira_ticket PYTHON-569 + @expected_result pools should be correctly updated as EP's are added and removed + + @test_category config_profiles + """ + node1 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1'])) node2 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.2'])) with Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: node1, 'node2': node2}) as cluster: @@ -679,6 +776,16 @@ def test_profile_pool_management(self): self.assertEqual(set(h.address for h in pools), set(('127.0.0.1', '127.0.0.2', '127.0.0.3'))) def test_add_profile_timeout(self): + """ + Tests that EP Timeouts are honored. + + @since 3.5 + @jira_ticket PYTHON-569 + @expected_result EP timeouts should override defaults + + @test_category config_profiles + """ + node1 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1'])) with Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: node1}) as cluster: session = cluster.connect() From 2796ee5ec7dd8f62705f2d06e5e753a40346f819 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 10 Jun 2016 18:12:20 -0500 Subject: [PATCH 0700/2005] PYTHON-583 Adding documentation, enhancing integration test --- .../integration/standard/test_control_connection.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tests/integration/standard/test_control_connection.py b/tests/integration/standard/test_control_connection.py index ade053a07f..d486f35f8e 100644 --- a/tests/integration/standard/test_control_connection.py +++ b/tests/integration/standard/test_control_connection.py @@ -80,6 +80,12 @@ def test_drop_keyspace(self): def test_get_control_connection_host(self): """ Test to validate Cluster.get_control_connection_host() metadata + + @since 3.5.0 + @jira_ticket PYTHON-583 + @expected_result the control connection metadata should accurately reflect cluster state. + + @test_category metadata """ host = self.cluster.get_control_connection_host() @@ -91,3 +97,9 @@ def test_get_control_connection_host(self): host = self.cluster.get_control_connection_host() self.assertEqual(host.address, cc_host) self.assertEqual(host.is_up, True) + + # reconnect and make sure that the new host is reflected correctly + self.cluster.control_connection._reconnect() + new_host = self.cluster.get_control_connection_host() + self.assertNotEqual(host, new_host) + From 0e5e9eddb82974d85b1eaa0fbc007ffc165692e8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 15 Jun 2016 09:49:36 -0500 Subject: [PATCH 0701/2005] Make C.add_exec..policy on_up hosts Fixes issues like DCA LBP not selecting a default local_dc PYTHON-569 --- cassandra/cluster.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 6f7d733265..064408bfb1 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -22,17 +22,17 @@ from collections import defaultdict, Mapping from concurrent.futures import ThreadPoolExecutor, wait as wait_futures from copy import copy +from functools import partial, wraps +from itertools import groupby, count import logging from random import random +import six +from six.moves import filter, range, queue as Queue import socket import sys import time from threading import Lock, RLock, Thread, Event -import six -from six.moves import range -from six.moves import queue as Queue - import weakref from weakref import WeakValueDictionary try: @@ -40,9 +40,6 @@ except ImportError: from cassandra.util import WeakSet # NOQA -from functools import partial, wraps -from itertools import groupby, count - from cassandra import (ConsistencyLevel, AuthenticationFailed, OperationTimedOut, UnsupportedOperation, SchemaTargetType, DriverException) @@ -973,6 +970,9 @@ def add_execution_profile(self, name, profile, pool_wait_timeout=5): raise ValueError("Profile %s already exists") self.profile_manager.profiles[name] = profile profile.load_balancing_policy.populate(self, self.metadata.all_hosts()) + # on_up after populate allows things like DCA LBP to choose default local dc + for host in filter(lambda h: h.is_up, self.metadata.all_hosts()): + profile.load_balancing_policy.on_up(host) futures = set() for session in self.sessions: futures.update(session.update_created_pools()) From 03ca2f5aae45802264e82c675b881d60f1269407 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 15 Jun 2016 12:46:25 -0500 Subject: [PATCH 0702/2005] PYTHON-569 adding test for TA LBP with execution profiles --- tests/integration/standard/test_cluster.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index d5945cb167..62244b93f3 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -702,6 +702,23 @@ def test_profile_lb_swap(self): self.assertEqual(rr2_queried_hosts, rr1_queried_hosts) + def test_ta_lbp(self): + """ + Test that execution profiles containing token aware LBP can be added + + @since 3.5 + @jira_ticket PYTHON-569 + @expected_result Queries can run + + @test_category config_profiles + """ + query = "select release_version from system.local" + ta1 = ExecutionProfile() + with Cluster() as cluster: + session = cluster.connect() + cluster.add_execution_profile("ta1", ta1) + rs = session.execute(query, execution_profile='ta1') + def test_clone_shared_lbp(self): """ Tests that profile load balancing policies are shared on clone From 0a006083e881aee95999d0c8c8a731132895983f Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 14 Jun 2016 19:13:26 -0400 Subject: [PATCH 0703/2005] Fix asyncore re-initialization in case of a fork --- cassandra/io/asyncorereactor.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 8481ab7e56..af7de4a805 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -278,6 +278,8 @@ def initialize_reactor(cls): @classmethod def handle_fork(cls): + global _dispatcher_map + _dispatcher_map = {} if cls._loop: cls._loop._cleanup() cls._loop = None From e0f2fb11d1a715300a4c05cf936c53368dd17200 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 16 Jun 2016 13:28:00 -0500 Subject: [PATCH 0704/2005] Excluding cython null test for C* 2.1 --- tests/integration/standard/test_cython_protocol_handlers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_cython_protocol_handlers.py b/tests/integration/standard/test_cython_protocol_handlers.py index 87f2bc8424..3560709faa 100644 --- a/tests/integration/standard/test_cython_protocol_handlers.py +++ b/tests/integration/standard/test_cython_protocol_handlers.py @@ -12,7 +12,7 @@ from cassandra.cluster import Cluster, NoHostAvailable from cassandra.protocol import ProtocolHandler, LazyProtocolHandler, NumpyProtocolHandler, ConfigurationException from cassandra.cython_deps import HAVE_CYTHON, HAVE_NUMPY -from tests.integration import use_singledc, PROTOCOL_VERSION, notprotocolv1, drop_keyspace_shutdown_cluster, VERIFY_CYTHON, BasicSharedKeyspaceUnitTestCase, execute_with_retry_tolerant, greaterthancass20 +from tests.integration import use_singledc, PROTOCOL_VERSION, notprotocolv1, drop_keyspace_shutdown_cluster, VERIFY_CYTHON, BasicSharedKeyspaceUnitTestCase, execute_with_retry_tolerant, greaterthancass21 from tests.integration.datatype_utils import update_datatypes from tests.integration.standard.utils import ( create_table_with_all_types, get_all_primitive_params, get_primitive_datatypes) @@ -232,7 +232,7 @@ def setUp(self): self.session.row_factory = tuple_factory @numpytest - @greaterthancass20 + @greaterthancass21 def test_null_types(self): """ Test to validate that the numpy protocol handler can deal with null values. From 38e85b34ba97e35a3dac8c61840fe1f82ab19599 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 16 Jun 2016 15:42:09 -0500 Subject: [PATCH 0705/2005] typo --- cassandra/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 064408bfb1..99509d2233 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1945,7 +1945,7 @@ def execute_async(self, query, parameters=None, trace=False, custom_payload=None Execute the given query and return a :class:`~.ResponseFuture` object which callbacks may be attached to for asynchronous response delivery. You may also call :meth:`~.ResponseFuture.result()` - on the :class:`.ResponseFuture` to syncronously block for results at + on the :class:`.ResponseFuture` to synchronously block for results at any time. If `trace` is set to :const:`True`, you may get the query trace descriptors using From 215ab4286a27257b5f3a09de6dbfffff4c0e5e72 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 16 Jun 2016 17:39:31 -0500 Subject: [PATCH 0706/2005] Updating build yaml for C* 3.7 --- build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.yaml b/build.yaml index e9f4b0c863..42b88e8a79 100644 --- a/build.yaml +++ b/build.yaml @@ -8,7 +8,7 @@ cassandra: - 2.1 - 2.2 - 3.0 - - 3.4 + - 3.7 env: EVENT_LOOP_MANAGER: - libev From 0c881641d243982c79c0ff23b0cfece76d263768 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 17 Jun 2016 13:42:51 -0500 Subject: [PATCH 0707/2005] readme update --- README.rst | 4 ---- 1 file changed, 4 deletions(-) diff --git a/README.rst b/README.rst index 88d50dabdb..b0726a5a75 100644 --- a/README.rst +++ b/README.rst @@ -70,10 +70,6 @@ and the IRC channel. For IRC, use the #datastax-drivers channel on irc.freenode.net. If you don't have an IRC client, you can use `freenode's web-based client `_. -Features to be Added --------------------- -* C extension for encoding/decoding messages - License ------- Copyright 2013-2016 DataStax From 1ab4b7865b5dc2921ce8389313904105b1ebb515 Mon Sep 17 00:00:00 2001 From: sjohnson4590 Date: Fri, 17 Jun 2016 14:10:07 -0500 Subject: [PATCH 0708/2005] fixed typo in CONTRIBUTING.rst Drvier to Driver --- CONTRIBUTING.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 6d39d8df69..1c8ca7001c 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -5,7 +5,7 @@ Contributions are welcome in the form of bug reports or pull requests. Bug Reports ----------- -Quality bug reports are welcome at the `DataStax Python Drvier JIRA `_. +Quality bug reports are welcome at the `DataStax Python Driver JIRA `_. There are plenty of `good resources `_ describing how to create good bug reports. They will not be repeated in detail here, but in general, the bug report include where appropriate: From 46024dab44075c4438077f6e8b29060bae19edaf Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 17 Jun 2016 17:50:01 -0400 Subject: [PATCH 0709/2005] 3.5.0 changelog and version --- CHANGELOG.rst | 20 ++++++++++++++++++-- cassandra/__init__.py | 2 +- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 42eab9dc4e..c6e2dc8adb 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,9 +1,25 @@ +3.5.0 +===== +June X, 2016 + +Features +-------- + +* Optional Configuration Profiles for the core driver (PYTHON-569) +* API to get the host metadata associated with the control connection node (PYTHON-583) + +Bug Fixes +--------- + +* Clean up Asyncore socket map when fork is detected (PYTHON-577) +* cqlengine: QuerySet only() is not respected when there are deferred fields (PYTHON-560) + 3.4.1 ===== May 26, 2016 Bug Fixes --------- +--------- * Gevent connection closes on IO timeout (PYTHON-573) * "dictionary changed size during iteration" with Python 3 (PYTHON-572) @@ -26,7 +42,7 @@ Features * Write docs around working with datetime and timezones (PYTHON-394) Bug Fixes --------- +--------- * High CPU utilization when using asyncore event loop (PYTHON-239) * Fix CQL Export for non-ASCII Identifiers (PYTHON-447) * Make stress scripts Python 2.6 compatible (PYTHON-434) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 67f5b147ff..c8212c70e3 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 4, 1, 'post0') +__version_info__ = (3, 5, 0) __version__ = '.'.join(map(str, __version_info__)) From 079d2a3f2426d01f4bb8a722f8c3e590f7747639 Mon Sep 17 00:00:00 2001 From: Lutz Steinborn Date: Sat, 18 Jun 2016 15:57:48 +0200 Subject: [PATCH 0710/2005] cassandra_init() added **kwargs --- docs/cqlengine/third_party.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/cqlengine/third_party.rst b/docs/cqlengine/third_party.rst index c4c99dbf54..997a8a2559 100644 --- a/docs/cqlengine/third_party.rst +++ b/docs/cqlengine/third_party.rst @@ -17,7 +17,7 @@ Here's how, in substance, CQLengine can be plugged to `Celery from cqlengine.connection import ( cluster as cql_cluster, session as cql_session) - def cassandra_init(): + def cassandra_init(**kwargs): """ Initialize a clean Cassandra connection. """ if cql_cluster is not None: cql_cluster.shutdown() @@ -52,7 +52,7 @@ This is the code required for proper connection handling of CQLengine for a pass else: @postfork - def cassandra_init(): + def cassandra_init(**kwargs): """ Initialize a new Cassandra session in the context. Ensures that a new session is returned for every new request. From b48ec060fae50568e0384609f08178c750270fc3 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 20 Jun 2016 14:09:28 -0500 Subject: [PATCH 0711/2005] Updating UDT alter test to proper syntax --- tests/integration/standard/test_udts.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index 8d4411a17e..441b518a6f 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -676,7 +676,7 @@ def test_type_alteration(self): self.assertIn(type_name, s.cluster.metadata.keyspaces['udttests'].user_types) s.execute('CREATE TABLE %s (k int PRIMARY KEY, v frozen<%s>)' % (self.table_name, type_name)) - s.execute('INSERT INTO %s (k, v) VALUES (0, 1)' % (self.table_name,)) + s.execute('INSERT INTO %s (k, v) VALUES (0, {v0 : 1})' % (self.table_name,)) s.cluster.register_user_type('udttests', type_name, dict) @@ -688,14 +688,14 @@ def test_type_alteration(self): val = s.execute('SELECT v FROM %s' % self.table_name)[0][0] self.assertEqual(val['v0'], 1) self.assertIsNone(val['v1']) - s.execute("INSERT INTO %s (k, v) VALUES (0, (2, 'sometext'))" % (self.table_name,)) + s.execute("INSERT INTO %s (k, v) VALUES (0, {v0 : 2, v1 : 'sometext'})" % (self.table_name,)) val = s.execute('SELECT v FROM %s' % self.table_name)[0][0] self.assertEqual(val['v0'], 2) self.assertEqual(val['v1'], 'sometext') # alter field type s.execute('ALTER TYPE %s ALTER v1 TYPE blob' % (type_name,)) - s.execute("INSERT INTO %s (k, v) VALUES (0, (3, 0xdeadbeef))" % (self.table_name,)) + s.execute("INSERT INTO %s (k, v) VALUES (0, {v0 : 3, v1 : 0xdeadbeef})" % (self.table_name,)) val = s.execute('SELECT v FROM %s' % self.table_name)[0][0] self.assertEqual(val['v0'], 3) self.assertEqual(val['v1'], six.b('\xde\xad\xbe\xef')) From ed13cb3dfd5d247d1f2bdd66645534dd105bc79a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 20 Jun 2016 15:48:16 -0500 Subject: [PATCH 0712/2005] documentor hooks --- docs.yaml | 8 ++++++++ docs/.nav | 13 +++++++++++++ docs/index.rst | 7 ------- 3 files changed, 21 insertions(+), 7 deletions(-) create mode 100644 docs.yaml create mode 100644 docs/.nav diff --git a/docs.yaml b/docs.yaml new file mode 100644 index 0000000000..aa30ed5df3 --- /dev/null +++ b/docs.yaml @@ -0,0 +1,8 @@ +title: DataStax Python Driver for Apache Cassandra +summary: DataStax Python Driver for Apache Cassandra Documentation +output: docs/_build/ +sections: + - title: N/A + prefix: / + type: sphinx + directory: docs diff --git a/docs/.nav b/docs/.nav new file mode 100644 index 0000000000..1af261dd19 --- /dev/null +++ b/docs/.nav @@ -0,0 +1,13 @@ +installation +getting_started +object_mapper +api +upgrading +execution_profiles +performance +query_paging +lwt +user_defined_types +security +dates_and_times +faq diff --git a/docs/index.rst b/docs/index.rst index 3adec84446..762daad60c 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -83,10 +83,3 @@ Please report any bugs and make any feature requests on the `JIRA `_ issue tracker. If you would like to contribute, please feel free to open a pull request. - -Indices and Tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` From 050555ac979ac3812c9c91572cb20d73066e33c8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 21 Jun 2016 11:27:25 -0500 Subject: [PATCH 0713/2005] ticket title tweak --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index c6e2dc8adb..dda7decbae 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,7 +5,7 @@ June X, 2016 Features -------- -* Optional Configuration Profiles for the core driver (PYTHON-569) +* Optional Execution Profiles for the core driver (PYTHON-569) * API to get the host metadata associated with the control connection node (PYTHON-583) Bug Fixes From 064e45d077b51914a61990208a41658ed9566012 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 24 Jun 2016 16:21:41 -0500 Subject: [PATCH 0714/2005] doc punctuation --- docs/execution_profiles.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/execution_profiles.rst b/docs/execution_profiles.rst index a825ecd28e..9ccaec5746 100644 --- a/docs/execution_profiles.rst +++ b/docs/execution_profiles.rst @@ -5,7 +5,7 @@ Execution profiles are an experimental API aimed at making it easier to execute a single connected ``Session``. Execution profiles are being introduced to deal with the exploding number of configuration options, especially as the database platform evolves more complex workloads. -The Execution Profile API is being introduced now, in an experimental capacity in order to take advantage of it in +The Execution Profile API is being introduced now, in an experimental capacity, in order to take advantage of it in existing projects, and to gauge interest and feedback in the community. For now, the legacy configuration remains intact, but legacy and Execution Profile APIs cannot be used simultaneously on the same client ``Cluster``. From e4c70e78deb38af3c6271970b8e65d6df0022382 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 24 Jun 2016 16:28:38 -0500 Subject: [PATCH 0715/2005] add CDC to recognized table options PYTHON-593 CASSANDRA-8844 --- cassandra/metadata.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 2c3d137086..1cd801eed2 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2104,6 +2104,7 @@ class SchemaParserV3(SchemaParserV22): recognized_table_options = ( 'bloom_filter_fp_chance', 'caching', + 'cdc', 'comment', 'compaction', 'compression', From 337a341a6ae8a41b2e8915b403d91ccc0423cb7a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 27 Jun 2016 09:18:24 -0500 Subject: [PATCH 0716/2005] changelog update for 593 --- CHANGELOG.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index dda7decbae..7814e2dbf1 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,13 +4,12 @@ June X, 2016 Features -------- - * Optional Execution Profiles for the core driver (PYTHON-569) * API to get the host metadata associated with the control connection node (PYTHON-583) +* Expose CDC option in table metadata CQL (PYTHON-593) Bug Fixes --------- - * Clean up Asyncore socket map when fork is detected (PYTHON-577) * cqlengine: QuerySet only() is not respected when there are deferred fields (PYTHON-560) From 60dfbcd6fdf2faaa444f8f950b5188106df29976 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 27 Jun 2016 11:43:09 -0500 Subject: [PATCH 0717/2005] changelog date --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 7814e2dbf1..273657131a 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,6 @@ 3.5.0 ===== -June X, 2016 +June 27, 2016 Features -------- From 91039ef6bda23a657e00b578647c6b758779219f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 27 Jun 2016 15:20:54 -0500 Subject: [PATCH 0718/2005] add 3.5 to docs --- docs.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs.yaml b/docs.yaml index aa30ed5df3..a0e7fb3218 100644 --- a/docs.yaml +++ b/docs.yaml @@ -6,3 +6,6 @@ sections: prefix: / type: sphinx directory: docs +versions: + - name: 3.5.0 + ref: 3.5.0 From f47a61e96eb6f69f2bc87f0c5501780ade6368c5 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 28 Jun 2016 13:53:31 -0500 Subject: [PATCH 0719/2005] post-release version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index c8212c70e3..d6b2b52165 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 5, 0) +__version_info__ = (3, 5, 0, 'post0') __version__ = '.'.join(map(str, __version_info__)) From 2bdbfa0951ca6f3d6c31b9ece5984cb3b1e8aab2 Mon Sep 17 00:00:00 2001 From: Tyler Hobbs Date: Tue, 28 Jun 2016 16:21:12 -0500 Subject: [PATCH 0720/2005] Fix unsupported protocol version error message --- cassandra/connection.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index f43edc4b5d..7ec72ba798 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -149,8 +149,8 @@ class ProtocolVersionUnsupported(ConnectionException): Server rejected startup message due to unsupported protocol version """ def __init__(self, host, startup_version): - super(ProtocolVersionUnsupported, self).__init__("Unsupported protocol version on %s: %d", - (host, startup_version)) + msg = "Unsupported protocol version on %s: %d" % (host, startup_version) + super(ProtocolVersionUnsupported, self).__init__(msg, host) self.startup_version = startup_version From 81c5aef702dfb25296b4841a99c360166b2d0812 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 29 Jun 2016 14:44:18 -0500 Subject: [PATCH 0721/2005] fix unit tests using non-existent mock function PYTHON-591 --- tests/unit/test_host_connection_pool.py | 17 +++++++++-------- tests/unit/test_response_future.py | 2 +- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/tests/unit/test_host_connection_pool.py b/tests/unit/test_host_connection_pool.py index fb0ca21711..5fe230f402 100644 --- a/tests/unit/test_host_connection_pool.py +++ b/tests/unit/test_host_connection_pool.py @@ -165,7 +165,8 @@ def test_spawn_when_at_max(self): def test_return_defunct_connection(self): host = Mock(spec=Host, address='ip1') session = self.make_session() - conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100) + conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, + max_request_id=100, signaled_error=False) session.cluster.connection_factory.return_value = conn pool = HostConnectionPool(host, HostDistance.LOCAL, session) @@ -177,14 +178,14 @@ def test_return_defunct_connection(self): pool.return_connection(conn) # the connection should be closed a new creation scheduled - conn.close.assert_called_once() - session.submit.assert_called_once() + self.assertTrue(session.submit.call_args) self.assertFalse(pool.is_shutdown) def test_return_defunct_connection_on_down_host(self): host = Mock(spec=Host, address='ip1') session = self.make_session() - conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100, signaled_error=False) + conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, + max_request_id=100, signaled_error=False) session.cluster.connection_factory.return_value = conn pool = HostConnectionPool(host, HostDistance.LOCAL, session) @@ -196,15 +197,15 @@ def test_return_defunct_connection_on_down_host(self): pool.return_connection(conn) # the connection should be closed a new creation scheduled - session.cluster.signal_connection_failure.assert_called_once() - conn.close.assert_called_once() + self.assertTrue(session.cluster.signal_connection_failure.call_args) + self.assertTrue(conn.close.call_args) self.assertFalse(session.submit.called) self.assertTrue(pool.is_shutdown) def test_return_closed_connection(self): host = Mock(spec=Host, address='ip1') session = self.make_session() - conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=True, max_request_id=100) + conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=True, max_request_id=100, signaled_error=False) session.cluster.connection_factory.return_value = conn pool = HostConnectionPool(host, HostDistance.LOCAL, session) @@ -216,7 +217,7 @@ def test_return_closed_connection(self): pool.return_connection(conn) # a new creation should be scheduled - session.submit.assert_called_once() + self.assertTrue(session.submit.call_args) self.assertFalse(pool.is_shutdown) def test_host_instantiations(self): diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index ad5bb3e93b..ef68512230 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -480,7 +480,7 @@ def test_prepared_query_not_found(self): result = Mock(spec=PreparedQueryNotFound, info='a' * 16) rf._set_result(result) - session.submit.assert_called_once() + self.assertTrue(session.submit.call_args) args, kwargs = session.submit.call_args self.assertEqual(rf._reprepare, args[-2]) self.assertIsInstance(args[-1], PrepareMessage) From 3e876e0a712216560880133d1e638ceefc0baf66 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 29 Jun 2016 14:45:15 -0500 Subject: [PATCH 0722/2005] relax mock test requirement now that they readded 2.6 support PYTHON-591 --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index 4c917da6c6..500795357c 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,7 +1,7 @@ -r requirements.txt scales nose -mock<=1.0.1 +mock!=1.1.* ccm>=2.0 unittest2 PyYAML From 9b52af704b58bff5e18749755533d853850316c4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 30 Jun 2016 15:00:41 -0500 Subject: [PATCH 0723/2005] correct parameter order on cqlenigine exec count assertion --- tests/integration/cqlengine/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/cqlengine/__init__.py b/tests/integration/cqlengine/__init__.py index e61698e82c..3f163ded64 100644 --- a/tests/integration/cqlengine/__init__.py +++ b/tests/integration/cqlengine/__init__.py @@ -96,7 +96,7 @@ def wrapped_function(*args, **kwargs): else: test_case = args[0] # Check to see if the count is what you expect - test_case.assertEqual(count.get_counter(), expected, msg="Expected number of cassandra.cqlengine.connection.execute calls doesn't match actual number invoked Expected: {0}, Invoked {1}".format(count.get_counter(), expected)) + test_case.assertEqual(count.get_counter(), expected, msg="Expected number of cassandra.cqlengine.connection.execute calls ({0}) doesn't match actual number invoked ({1})".format(expected, count.get_counter())) return to_return # Name of the wrapped function must match the original or unittest will error out. wrapped_function.__name__ = fn.__name__ From 2242171befdc1ebe6c79f76ef4065a17c24aad3d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 30 Jun 2016 15:02:23 -0500 Subject: [PATCH 0724/2005] don't quote bool for cql quoted bool is optional in table option DDL, and disallowed in CQL DML PYTHON-596 --- cassandra/encoder.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cassandra/encoder.py b/cassandra/encoder.py index 6d8b6ce8a2..98d562d1bc 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -40,8 +40,7 @@ def cql_quote(term): # The ordering of this method is important for the result of this method to # be a native str type (for both Python 2 and 3) - # Handle quoting of native str and bool types - if isinstance(term, (str, bool)): + if isinstance(term, str): return "'%s'" % str(term).replace("'", "''") # This branch of the if statement will only be used by Python 2 to catch # unicode strings, text_type is used to prevent type errors with Python 3. From bd4bc29a97577d7b8901c78745217d3c229a60bd Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 30 Jun 2016 15:03:19 -0500 Subject: [PATCH 0725/2005] cqle: remove special handling for bool in ValueQuoter now that cql_quote does not quote bool types, this is not needed PYTHON-596 --- cassandra/cqlengine/statements.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 3867704a77..44ae165e8b 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -35,9 +35,7 @@ def __init__(self, value): def __unicode__(self): from cassandra.encoder import cql_quote - if isinstance(self.value, bool): - return 'true' if self.value else 'false' - elif isinstance(self.value, (list, tuple)): + if isinstance(self.value, (list, tuple)): return '[' + ', '.join([cql_quote(v) for v in self.value]) + ']' elif isinstance(self.value, dict): return '{' + ', '.join([cql_quote(k) + ':' + cql_quote(v) for k, v in self.value.items()]) + '}' From e96e005a859d4592fb3da8c696e03df2b4e1b7ed Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 30 Jun 2016 15:04:56 -0500 Subject: [PATCH 0726/2005] add a test using boolean values with __in operator PYTHON-596 --- .../cqlengine/query/test_queryset.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 0776d67943..21dc5c4b93 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -894,7 +894,6 @@ def test_success_case(self): class TestInOperator(BaseQuerySetUsage): - @execute_count(1) def test_kwarg_success_case(self): """ Tests the in operator works with the kwarg query method """ @@ -907,6 +906,23 @@ def test_query_expression_success_case(self): q = TestModel.filter(TestModel.test_id.in_([0, 1])) assert q.count() == 8 + @execute_count(5) + def test_bool(self): + """ + PYTHON-596 + """ + class bool_model(Model): + k = columns.Integer(primary_key=True) + b = columns.Boolean(primary_key=True) + v = columns.Integer(default=3) + sync_table(bool_model) + + bool_model.create(k=0, b=True) + bool_model.create(k=0, b=False) + self.assertEqual(len(bool_model.objects.all()), 2) + self.assertEqual(len(bool_model.objects.filter(k=0, b=True)), 1) + self.assertEqual(len(bool_model.objects.filter(k=0, b=False)), 1) + @greaterthancass20 class TestContainsOperator(BaseQuerySetUsage): From bca0cee969b29c71daff9bc7830c5d8773b7f621 Mon Sep 17 00:00:00 2001 From: Richard O'Dwyer Date: Wed, 29 Jun 2016 16:53:23 +0100 Subject: [PATCH 0727/2005] Support ordering on column objects --- cassandra/cqlengine/columns.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 14b70915a7..c14b6d575c 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -168,6 +168,36 @@ def __init__(self, self.position = Column.instance_counter Column.instance_counter += 1 + def __ne__(self, other): + if isinstance(other, Column): + return self.position != other.position + return NotImplemented + + def __eq__(self, other): + if isinstance(other, Column): + return self.position == other.position + return NotImplemented + + def __lt__(self, other): + if isinstance(other, Column): + return self.position < other.position + return NotImplemented + + def __le__(self, other): + if isinstance(other, Column): + return self.position <= other.position + return NotImplemented + + def __gt__(self, other): + if isinstance(other, Column): + return self.position > other.position + return NotImplemented + + def __ge__(self, other): + if isinstance(other, Column): + return self.position >= other.position + return NotImplemented + def validate(self, value): """ Returns a cleaned and validated value. Raises a ValidationError From cfd3ddc9b84cbe9d3f392c9cf50f5dde6ecd2c2a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 1 Jul 2016 10:06:31 -0500 Subject: [PATCH 0728/2005] test cqlengine.Columns comparison PYTHON-595 --- tests/unit/cqlengine/__init__.py | 14 ++++++ tests/unit/cqlengine/test_columns.py | 68 ++++++++++++++++++++++++++++ 2 files changed, 82 insertions(+) create mode 100644 tests/unit/cqlengine/__init__.py create mode 100644 tests/unit/cqlengine/test_columns.py diff --git a/tests/unit/cqlengine/__init__.py b/tests/unit/cqlengine/__init__.py new file mode 100644 index 0000000000..87fc3685e0 --- /dev/null +++ b/tests/unit/cqlengine/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2013-2016 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/tests/unit/cqlengine/test_columns.py b/tests/unit/cqlengine/test_columns.py new file mode 100644 index 0000000000..181c103515 --- /dev/null +++ b/tests/unit/cqlengine/test_columns.py @@ -0,0 +1,68 @@ +# Copyright 2013-2016 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +from cassandra.cqlengine.columns import Column + + +class ColumnTest(unittest.TestCase): + + def test_comparisons(self): + c0 = Column() + c1 = Column() + self.assertEqual(c1.position - c0.position, 1) + + # __ne__ + self.assertNotEqual(c0, c1) + self.assertNotEqual(c0, object()) + + # __eq__ + self.assertEqual(c0, c0) + self.assertFalse(c0 == object()) + + # __lt__ + self.assertLess(c0, c1) + try: + c0 < object() # this raises for Python 3 + except TypeError: + pass + + # __le__ + self.assertLessEqual(c0, c1) + self.assertLessEqual(c0, c0) + try: + c0 <= object() # this raises for Python 3 + except TypeError: + pass + + # __gt__ + self.assertGreater(c1, c0) + try: + c1 > object() # this raises for Python 3 + except TypeError: + pass + + # __ge__ + self.assertGreaterEqual(c1, c0) + self.assertGreaterEqual(c1, c1) + try: + c1 >= object() # this raises for Python 3 + except TypeError: + pass + + From 4128d4942cbf9401de145a717c018406781bf941 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 1 Jul 2016 13:48:03 -0500 Subject: [PATCH 0729/2005] limit recursion when failing to execute in conncurrent PYTHON-585 --- cassandra/concurrent.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index 48cbab3e24..a08c0292e3 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -94,6 +94,8 @@ def execute_concurrent(session, statements_and_parameters, concurrency=100, rais class _ConcurrentExecutor(object): + max_error_recursion = 100 + def __init__(self, session, statements_and_params): self.session = session self._enum_statements = enumerate(iter(statements_and_params)) @@ -102,6 +104,7 @@ def __init__(self, session, statements_and_params): self._results_queue = [] self._current = 0 self._exec_count = 0 + self._exec_depth = 0 def execute(self, concurrency, fail_fast): self._fail_fast = fail_fast @@ -125,6 +128,7 @@ def _execute_next(self): pass def _execute(self, idx, statement, params): + self._exec_depth += 1 try: future = self.session.execute_async(statement, params, timeout=None) args = (future, idx) @@ -135,7 +139,15 @@ def _execute(self, idx, statement, params): # exc_info with fail_fast to preserve stack trace info when raising on the client thread # (matches previous behavior -- not sure why we wouldn't want stack trace in the other case) e = sys.exc_info() if self._fail_fast and six.PY2 else exc - self._put_result(e, idx, False) + + # If we're not failing fast and all executions are raising, there is a chance of recursing + # here as subsequent requests are attempted. If we hit this threshold, schedule this result/retry + # and let the event loop thread return. + if self._exec_depth < self.max_error_recursion: + self._put_result(e, idx, False) + else: + self.session.submit(self._put_result, e, idx, False) + self._exec_depth -= 1 def _on_success(self, result, future, idx): future.clear_callbacks() From 34b1c7638cc00eba00fe0ffee673c8fba54c01d8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 1 Jul 2016 14:01:48 -0500 Subject: [PATCH 0730/2005] test demonstrating execut concurrent recursion limiting PYTHON-585 --- tests/unit/test_concurrent.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/tests/unit/test_concurrent.py b/tests/unit/test_concurrent.py index a535bf2260..948f6f2502 100644 --- a/tests/unit/test_concurrent.py +++ b/tests/unit/test_concurrent.py @@ -17,12 +17,15 @@ import unittest2 as unittest except ImportError: import unittest # noqa + from itertools import cycle from mock import Mock import time import threading from six.moves.queue import PriorityQueue +import sys +from cassandra.cluster import Cluster, Session from cassandra.concurrent import execute_concurrent, execute_concurrent_with_args @@ -229,3 +232,19 @@ def validate_result_ordering(self, results): current_time_added = list(result)[0] self.assertLess(last_time_added, current_time_added) last_time_added = current_time_added + + def test_recursion_limited(self): + """ + Verify that recursion is controlled when raise_on_first_error=False and something is wrong with the query. + + PYTHON-585 + """ + max_recursion = sys.getrecursionlimit() + s = Session(Cluster(), []) + self.assertRaises(TypeError, execute_concurrent_with_args, s, "doesn't matter", [('param',)] * max_recursion, raise_on_first_error=True) + + results = execute_concurrent_with_args(s, "doesn't matter", [('param',)] * max_recursion, raise_on_first_error=False) # previously + self.assertEqual(len(results), max_recursion) + for r in results: + self.assertFalse(r[0]) + self.assertIsInstance(r[1], TypeError) From 577d67ca05a5e38febb82afcf502a70b2d0133c2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 1 Jul 2016 14:25:06 -0500 Subject: [PATCH 0731/2005] legacy html rewrites for docs.yaml --- docs.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs.yaml b/docs.yaml index a0e7fb3218..b337d5dd7b 100644 --- a/docs.yaml +++ b/docs.yaml @@ -9,3 +9,5 @@ sections: versions: - name: 3.5.0 ref: 3.5.0 +redirects: + - \A\/(.*)/\Z: /\1.html From 72780f4ee30e7a134f2f92846cf4287881c577e8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 5 Jul 2016 15:40:27 -0500 Subject: [PATCH 0732/2005] return from connect when at least one pool is connected PYTHON-105 --- cassandra/cluster.py | 26 +++++++++---------- tests/integration/standard/test_cluster.py | 12 ++++----- tests/integration/standard/test_connection.py | 2 +- tests/integration/standard/test_metrics.py | 15 ++++++----- 4 files changed, 29 insertions(+), 26 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 99509d2233..cfce064844 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -20,7 +20,7 @@ import atexit from collections import defaultdict, Mapping -from concurrent.futures import ThreadPoolExecutor, wait as wait_futures +from concurrent.futures import ThreadPoolExecutor, FIRST_COMPLETED, wait as wait_futures from copy import copy from functools import partial, wraps from itertools import groupby, count @@ -1127,7 +1127,7 @@ def protocol_downgrade(self, host_addr, previous_version): else: raise DriverException("Cannot downgrade protocol version (%d) below minimum supported version: %d" % (new_version, MIN_SUPPORTED_VERSION)) - def connect(self, keyspace=None): + def connect(self, keyspace=None, wait_for_all_pools=False): """ Creates and returns a new :class:`~.Session` object. If `keyspace` is specified, that keyspace will be the default keyspace for @@ -1167,9 +1167,9 @@ def connect(self, keyspace=None): self._idle_heartbeat = ConnectionHeartbeat(self.idle_heartbeat_interval, self.get_connection_holders) self._is_setup = True - session = self._new_session() - if keyspace: - session.set_keyspace(keyspace) + session = self._new_session(keyspace) + if wait_for_all_pools: + wait_futures(session._initial_connect_futures) return session def get_connection_holders(self): @@ -1213,8 +1213,8 @@ def __enter__(self): def __exit__(self, *args): self.shutdown() - def _new_session(self): - session = Session(self, self.metadata.all_hosts()) + def _new_session(self, keyspace): + session = Session(self, self.metadata.all_hosts(), keyspace) self._session_register_user_types(session) self.sessions.add(session) return session @@ -1888,9 +1888,10 @@ def default_serial_consistency_level(self, cl): _profile_manager = None _metrics = None - def __init__(self, cluster, hosts): + def __init__(self, cluster, hosts, keyspace=None): self.cluster = cluster self.hosts = hosts + self.keyspace = keyspace self._lock = RLock() self._pools = {} @@ -1901,14 +1902,13 @@ def __init__(self, cluster, hosts): self.encoder = Encoder() # create connection pools in parallel - futures = [] + self._initial_connect_futures = set() for host in hosts: future = self.add_or_renew_pool(host, is_host_addition=False) - if future is not None: - futures.append(future) + if future: + self._initial_connect_futures.add(future) + wait_futures(self._initial_connect_futures, return_when=FIRST_COMPLETED) - for future in futures: - future.result() def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_payload=None, execution_profile=EXEC_PROFILE_DEFAULT): """ diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 62244b93f3..2294d398b5 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -516,14 +516,14 @@ def test_idle_heartbeat(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=interval) if PROTOCOL_VERSION < 3: cluster.set_core_connections_per_host(HostDistance.LOCAL, 1) - session = cluster.connect() + session = cluster.connect(wait_for_all_pools=True) # This test relies on impl details of connection req id management to see if heartbeats # are being sent. May need update if impl is changed connection_request_ids = {} for h in cluster.get_connection_holders(): for c in h.get_connections(): - # make sure none are idle (should have startup messages) + # make sure none are idle (should have startup messages self.assertFalse(c.is_idle) with c.lock: connection_request_ids[id(c)] = deque(c.request_ids) # copy of request ids @@ -558,7 +558,7 @@ def test_idle_heartbeat(self): self.assertEqual(len(holders), len(cluster.metadata.all_hosts()) + 1) # hosts pools, 1 for cc # include additional sessions - session2 = cluster.connect() + session2 = cluster.connect(wait_for_all_pools=True) holders = cluster.get_connection_holders() self.assertIn(cluster.control_connection, holders) @@ -631,7 +631,7 @@ def test_profile_load_balancing(self): query = "select release_version from system.local" node1 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1'])) with Cluster(execution_profiles={'node1': node1}) as cluster: - session = cluster.connect() + session = cluster.connect(wait_for_all_pools=True) # default is DCA RR for all hosts expected_hosts = set(cluster.metadata.all_hosts()) @@ -688,7 +688,7 @@ def test_profile_lb_swap(self): rr2 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy()) exec_profiles = {'rr1': rr1, 'rr2': rr2} with Cluster(execution_profiles=exec_profiles) as cluster: - session = cluster.connect() + session = cluster.connect(wait_for_all_pools=True) # default is DCA RR for all hosts expected_hosts = set(cluster.metadata.all_hosts()) @@ -780,7 +780,7 @@ def test_profile_pool_management(self): node1 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1'])) node2 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.2'])) with Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: node1, 'node2': node2}) as cluster: - session = cluster.connect() + session = cluster.connect(wait_for_all_pools=True) pools = session.get_pool_state() # there are more hosts, but we connected to the ones in the lbp aggregate self.assertGreater(len(cluster.metadata.all_hosts()), 2) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 2d07b92038..69566c80ad 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -99,7 +99,7 @@ class HeartbeatTest(unittest.TestCase): def setUp(self): self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=1) - self.session = self.cluster.connect() + self.session = self.cluster.connect(wait_for_all_pools=True) def tearDown(self): self.cluster.shutdown() diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 13758b65ad..aba83de420 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -14,6 +14,8 @@ import time +from cassandra.policies import WhiteListRoundRobinPolicy, FallthroughRetryPolicy + try: import unittest2 as unittest except ImportError: @@ -33,8 +35,11 @@ def setup_module(): class MetricsTests(unittest.TestCase): def setUp(self): - self.cluster = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION) - self.session = self.cluster.connect("test3rf") + contact_point = ['127.0.0.2'] + self.cluster = Cluster(contact_points=contact_point, metrics_enabled=True, protocol_version=PROTOCOL_VERSION, + load_balancing_policy=WhiteListRoundRobinPolicy(contact_point), + default_retry_policy=FallthroughRetryPolicy()) + self.session = self.cluster.connect("test3rf", wait_for_all_pools=True) def tearDown(self): self.cluster.shutdown() @@ -44,8 +49,6 @@ def test_connection_error(self): Trigger and ensure connection_errors are counted Stop all node with the driver knowing about the "DOWN" states. """ - - # Test writes for i in range(0, 100): self.session.execute_async("INSERT INTO test (k, v) VALUES ({0}, {1})".format(i, i)) @@ -145,13 +148,13 @@ def test_unavailable(self): query = SimpleStatement("INSERT INTO test (k, v) VALUES (2, 2)", consistency_level=ConsistencyLevel.ALL) with self.assertRaises(Unavailable): self.session.execute(query) - self.assertEqual(2, self.cluster.metrics.stats.unavailables) + self.assertEqual(self.cluster.metrics.stats.unavailables, 1) # Test write query = SimpleStatement("SELECT * FROM test", consistency_level=ConsistencyLevel.ALL) with self.assertRaises(Unavailable): self.session.execute(query, timeout=None) - self.assertEqual(4, self.cluster.metrics.stats.unavailables) + self.assertEqual(self.cluster.metrics.stats.unavailables, 2) finally: get_node(1).start(wait_other_notice=True, wait_for_binary_proto=True) # Give some time for the cluster to come back up, for the next test From 62bbb2a2d3bdba6a08db0f85aab50bfd59f24344 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 6 Jul 2016 11:55:02 -0400 Subject: [PATCH 0733/2005] Fallback to select partition keys when all fields are defered --- cassandra/cqlengine/query.py | 3 +++ tests/integration/cqlengine/query/test_queryset.py | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 10d27ab580..eaa51b4c55 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -971,6 +971,9 @@ def _select_fields(self): fields = self.model._columns.keys() if self._defer_fields: fields = [f for f in fields if f not in self._defer_fields] + # select the partition keys if all model fields are set defer + if not fields: + fields = self.model._partition_keys if self._only_fields: fields = [f for f in fields if f in self._only_fields] if not fields: diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 21dc5c4b93..ee1cc751e7 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -300,6 +300,10 @@ def test_defining_defer_fields(self): q = TestModel.objects.filter(test_id=0) self.assertEqual(q._select_fields(), ['attempt_id', 'description', 'expected_result', 'test_result']) + # when all fields are defered, it fallbacks select the partition keys + q = TestModel.objects.defer(['test_id', 'attempt_id', 'description', 'expected_result', 'test_result']) + self.assertEqual(q._select_fields(), ['test_id']) + class BaseQuerySetUsage(BaseCassEngTestCase): From 800d1fb6ab929cac953463817e6f4dc9ee187c54 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 6 Jul 2016 13:49:57 -0500 Subject: [PATCH 0734/2005] use prepared metadata and skip row result meta in protocol v2+ PYTHON-71 --- cassandra/cluster.py | 16 ++--- cassandra/connection.py | 13 ++-- cassandra/protocol.py | 59 ++++++++++++------- cassandra/query.py | 31 +++++----- cassandra/row_parser.pyx | 4 +- .../standard/test_custom_protocol_handler.py | 4 +- tests/unit/test_connection.py | 8 +-- tests/unit/test_parameter_binding.py | 8 ++- tests/unit/test_response_future.py | 10 ++-- 9 files changed, 86 insertions(+), 67 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 99509d2233..f7d0704c10 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2045,11 +2045,11 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time query_string, cl, serial_cl, fetch_size, timestamp=timestamp) elif isinstance(query, BoundStatement): + prepared_statement = query.prepared_statement message = ExecuteMessage( - query.prepared_statement.query_id, query.values, cl, + prepared_statement.query_id, query.values, cl, serial_cl, fetch_size, - timestamp=timestamp) - prepared_statement = query.prepared_statement + timestamp=timestamp, skip_meta=bool(prepared_statement.result_metadata)) elif isinstance(query, BatchStatement): if self._protocol_version < 2: raise UnsupportedOperation( @@ -2124,14 +2124,14 @@ def prepare(self, query, custom_payload=None): future = ResponseFuture(self, message, query=None, timeout=self.default_timeout) try: future.send_request() - query_id, column_metadata, pk_indexes = future.result() + query_id, bind_metadata, pk_indexes, result_metadata = future.result() except Exception: log.exception("Error preparing query:") raise prepared_statement = PreparedStatement.from_message( - query_id, column_metadata, pk_indexes, self.cluster.metadata, query, self.keyspace, - self._protocol_version) + query_id, bind_metadata, pk_indexes, self.cluster.metadata, query, self.keyspace, + self._protocol_version, result_metadata) prepared_statement.custom_payload = future.custom_payload self.cluster.add_prepared(query_id, prepared_statement) @@ -3254,7 +3254,9 @@ def _query(self, host, message=None, cb=None): # TODO get connectTimeout from cluster settings connection, request_id = pool.borrow_connection(timeout=2.0) self._connection = connection - connection.send_msg(message, request_id, cb=cb, encoder=self._protocol_handler.encode_message, decoder=self._protocol_handler.decode_message) + result_meta = self.prepared_statement.result_metadata if self.prepared_statement else [] + connection.send_msg(message, request_id, cb=cb, encoder=self._protocol_handler.encode_message, decoder=self._protocol_handler.decode_message, + result_metadata=result_meta) return request_id except NoConnectionsAvailable as exc: log.debug("All connections for host %s are at capacity, moving to the next host", host) diff --git a/cassandra/connection.py b/cassandra/connection.py index 7ec72ba798..ccdebebca9 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -404,7 +404,7 @@ def try_callback(cb): id(self), self.host, exc_info=True) # run first callback from this thread to ensure pool state before leaving - cb, _ = requests.popitem()[1] + cb, _, _ = requests.popitem()[1] try_callback(cb) if not requests: @@ -414,7 +414,7 @@ def try_callback(cb): # The default callback and retry logic is fairly expensive -- we don't # want to tie up the event thread when there are many requests def err_all_callbacks(): - for cb, _ in requests.values(): + for cb, _, _ in requests.values(): try_callback(cb) if len(requests) < Connection.CALLBACK_ERR_THREAD_THRESHOLD: err_all_callbacks() @@ -445,7 +445,7 @@ def handle_pushed(self, response): except Exception: log.exception("Pushed event handler errored, ignoring:") - def send_msg(self, msg, request_id, cb, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message): + def send_msg(self, msg, request_id, cb, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message, result_metadata=None): if self.is_defunct: raise ConnectionShutdown("Connection to %s is defunct" % self.host) elif self.is_closed: @@ -453,7 +453,7 @@ def send_msg(self, msg, request_id, cb, encoder=ProtocolHandler.encode_message, # queue the decoder function with the request # this allows us to inject custom functions per request to encode, decode messages - self._requests[request_id] = (cb, decoder) + self._requests[request_id] = (cb, decoder, result_metadata) self.push(encoder(msg, request_id, self.protocol_version, compressor=self.compressor)) return request_id @@ -578,8 +578,9 @@ def process_msg(self, header, body): if stream_id < 0: callback = None decoder = ProtocolHandler.decode_message + result_metadata = None else: - callback, decoder = self._requests.pop(stream_id, None) + callback, decoder, result_metadata = self._requests.pop(stream_id) with self.lock: self.request_ids.append(stream_id) @@ -587,7 +588,7 @@ def process_msg(self, header, body): try: response = decoder(header.version, self.user_type_map, stream_id, - header.flags, header.opcode, body, self.decompressor) + header.flags, header.opcode, body, self.decompressor, result_metadata) except Exception as exc: log.exception("Error decoding response from Cassandra. " "%s; buffer: %r", header, self._iobuf.getvalue()) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 4c63d557d5..e9e4450f5a 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -126,7 +126,7 @@ def __init__(self, code, message, info): self.info = info @classmethod - def recv_body(cls, f, protocol_version, user_type_map): + def recv_body(cls, f, *args): code = read_int(f) msg = read_string(f) subcls = error_classes.get(code, cls) @@ -378,7 +378,7 @@ class ReadyMessage(_MessageType): name = 'READY' @classmethod - def recv_body(cls, f, protocol_version, user_type_map): + def recv_body(cls, *args): return cls() @@ -390,7 +390,7 @@ def __init__(self, authenticator): self.authenticator = authenticator @classmethod - def recv_body(cls, f, protocol_version, user_type_map): + def recv_body(cls, f, *args): authname = read_string(f) return cls(authenticator=authname) @@ -422,7 +422,7 @@ def __init__(self, challenge): self.challenge = challenge @classmethod - def recv_body(cls, f, protocol_version, user_type_map): + def recv_body(cls, f, *args): return cls(read_binary_longstring(f)) @@ -445,7 +445,7 @@ def __init__(self, token): self.token = token @classmethod - def recv_body(cls, f, protocol_version, user_type_map): + def recv_body(cls, f, *args): return cls(read_longstring(f)) @@ -466,7 +466,7 @@ def __init__(self, cql_versions, options): self.options = options @classmethod - def recv_body(cls, f, protocol_version, user_type_map): + def recv_body(cls, f, *args): options = read_stringmultimap(f) cql_versions = options.pop('CQL_VERSION') return cls(cql_versions=cql_versions, options=options) @@ -474,7 +474,7 @@ def recv_body(cls, f, protocol_version, user_type_map): # used for QueryMessage and ExecuteMessage _VALUES_FLAG = 0x01 -_SKIP_METADATA_FLAG = 0x01 +_SKIP_METADATA_FLAG = 0x02 _PAGE_SIZE_FLAG = 0x04 _WITH_PAGING_STATE_FLAG = 0x08 _WITH_SERIAL_CONSISTENCY_FLAG = 0x10 @@ -577,14 +577,14 @@ def __init__(self, kind, results, paging_state=None): self.paging_state = paging_state @classmethod - def recv_body(cls, f, protocol_version, user_type_map): + def recv_body(cls, f, protocol_version, user_type_map, result_metadata): kind = read_int(f) paging_state = None if kind == RESULT_KIND_VOID: results = None elif kind == RESULT_KIND_ROWS: paging_state, results = cls.recv_results_rows( - f, protocol_version, user_type_map) + f, protocol_version, user_type_map, result_metadata) elif kind == RESULT_KIND_SET_KEYSPACE: ksname = read_string(f) results = ksname @@ -597,8 +597,9 @@ def recv_body(cls, f, protocol_version, user_type_map): return cls(kind, results, paging_state) @classmethod - def recv_results_rows(cls, f, protocol_version, user_type_map): + def recv_results_rows(cls, f, protocol_version, user_type_map, result_metadata): paging_state, column_metadata = cls.recv_results_metadata(f, user_type_map) + column_metadata = column_metadata or result_metadata rowcount = read_int(f) rows = [cls.recv_row(f, len(column_metadata)) for _ in range(rowcount)] colnames = [c[2] for c in column_metadata] @@ -607,24 +608,29 @@ def recv_results_rows(cls, f, protocol_version, user_type_map): tuple(ctype.from_binary(val, protocol_version) for ctype, val in zip(coltypes, row)) for row in rows] - return (paging_state, (colnames, parsed_rows)) + return paging_state, (colnames, parsed_rows) @classmethod def recv_results_prepared(cls, f, protocol_version, user_type_map): query_id = read_binary_string(f) - column_metadata, pk_indexes = cls.recv_prepared_metadata(f, protocol_version, user_type_map) - return (query_id, column_metadata, pk_indexes) + bind_metadata, pk_indexes, result_metadata = cls.recv_prepared_metadata(f, protocol_version, user_type_map) + return query_id, bind_metadata, pk_indexes, result_metadata @classmethod def recv_results_metadata(cls, f, user_type_map): flags = read_int(f) - glob_tblspec = bool(flags & cls._FLAGS_GLOBAL_TABLES_SPEC) colcount = read_int(f) if flags & cls._HAS_MORE_PAGES_FLAG: paging_state = read_binary_longstring(f) else: paging_state = None + + no_meta = bool(flags & cls._NO_METADATA_FLAG) + if no_meta: + return paging_state, [] + + glob_tblspec = bool(flags & cls._FLAGS_GLOBAL_TABLES_SPEC) if glob_tblspec: ksname = read_string(f) cfname = read_string(f) @@ -644,17 +650,17 @@ def recv_results_metadata(cls, f, user_type_map): @classmethod def recv_prepared_metadata(cls, f, protocol_version, user_type_map): flags = read_int(f) - glob_tblspec = bool(flags & cls._FLAGS_GLOBAL_TABLES_SPEC) colcount = read_int(f) pk_indexes = None if protocol_version >= 4: num_pk_indexes = read_int(f) pk_indexes = [read_short(f) for _ in range(num_pk_indexes)] + glob_tblspec = bool(flags & cls._FLAGS_GLOBAL_TABLES_SPEC) if glob_tblspec: ksname = read_string(f) cfname = read_string(f) - column_metadata = [] + bind_metadata = [] for _ in range(colcount): if glob_tblspec: colksname = ksname @@ -664,8 +670,13 @@ def recv_prepared_metadata(cls, f, protocol_version, user_type_map): colcfname = read_string(f) colname = read_string(f) coltype = cls.read_type(f, user_type_map) - column_metadata.append(ColumnMetadata(colksname, colcfname, colname, coltype)) - return column_metadata, pk_indexes + bind_metadata.append(ColumnMetadata(colksname, colcfname, colname, coltype)) + + if protocol_version >= 2: + _, result_metadata = cls.recv_results_metadata(f, user_type_map) + return bind_metadata, pk_indexes, result_metadata + else: + return bind_metadata, pk_indexes, None @classmethod def recv_results_schema_change(cls, f, protocol_version): @@ -727,7 +738,7 @@ class ExecuteMessage(_MessageType): def __init__(self, query_id, query_params, consistency_level, serial_consistency_level=None, fetch_size=None, - paging_state=None, timestamp=None): + paging_state=None, timestamp=None, skip_meta=False): self.query_id = query_id self.query_params = query_params self.consistency_level = consistency_level @@ -735,6 +746,7 @@ def __init__(self, query_id, query_params, consistency_level, self.fetch_size = fetch_size self.paging_state = paging_state self.timestamp = timestamp + self.skip_meta = skip_meta def send_body(self, f, protocol_version): write_string(f, self.query_id) @@ -768,6 +780,8 @@ def send_body(self, f, protocol_version): raise UnsupportedOperation( "Protocol-level timestamps may only be used with protocol version " "3 or higher. Consider setting Cluster.protocol_version to 3.") + if self.skip_meta: + flags |= _SKIP_METADATA_FLAG write_byte(f, flags) write_short(f, len(self.query_params)) for param in self.query_params: @@ -782,6 +796,7 @@ def send_body(self, f, protocol_version): write_long(f, self.timestamp) + class BatchMessage(_MessageType): opcode = 0x0D name = 'BATCH' @@ -851,7 +866,7 @@ def __init__(self, event_type, event_args): self.event_args = event_args @classmethod - def recv_body(cls, f, protocol_version, user_type_map): + def recv_body(cls, f, protocol_version, *args): event_type = read_string(f).upper() if event_type in known_event_types: read_method = getattr(cls, 'recv_' + event_type.lower()) @@ -960,7 +975,7 @@ def _write_header(f, version, flags, stream_id, opcode, length): @classmethod def decode_message(cls, protocol_version, user_type_map, stream_id, flags, opcode, body, - decompressor): + decompressor, result_metadata): """ Decodes a native protocol message body @@ -1002,7 +1017,7 @@ def decode_message(cls, protocol_version, user_type_map, stream_id, flags, opcod log.warning("Unknown protocol flags set: %02x. May cause problems.", flags) msg_class = cls.message_types_by_opcode[opcode] - msg = msg_class.recv_body(body, protocol_version, user_type_map) + msg = msg_class.recv_body(body, protocol_version, user_type_map, result_metadata) msg.stream_id = stream_id msg.trace_id = trace_id msg.custom_payload = custom_payload diff --git a/cassandra/query.py b/cassandra/query.py index 8662f0bda4..65cb6ba9e0 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -219,8 +219,7 @@ class Statement(object): _routing_key = None def __init__(self, retry_policy=None, consistency_level=None, routing_key=None, - serial_consistency_level=None, fetch_size=FETCH_SIZE_UNSET, keyspace=None, - custom_payload=None): + serial_consistency_level=None, fetch_size=FETCH_SIZE_UNSET, keyspace=None, custom_payload=None): if retry_policy and not hasattr(retry_policy, 'on_read_timeout'): # just checking one method to detect positional parameter errors raise ValueError('retry_policy should implement cassandra.policies.RetryPolicy') self.retry_policy = retry_policy @@ -362,36 +361,34 @@ class PreparedStatement(object): may affect performance (as the operation requires a network roundtrip). """ - column_metadata = None + column_metadata = None #TODO: make this bind_metadata in next major + consistency_level = None + custom_payload = None + fetch_size = FETCH_SIZE_UNSET + keyspace = None # change to prepared_keyspace in major release + protocol_version = None query_id = None query_string = None - keyspace = None # change to prepared_keyspace in major release - + result_metadata = None routing_key_indexes = None _routing_key_index_set = None - - consistency_level = None serial_consistency_level = None - protocol_version = None - - fetch_size = FETCH_SIZE_UNSET - - custom_payload = None - def __init__(self, column_metadata, query_id, routing_key_indexes, query, - keyspace, protocol_version): + keyspace, protocol_version, result_metadata): self.column_metadata = column_metadata self.query_id = query_id self.routing_key_indexes = routing_key_indexes self.query_string = query self.keyspace = keyspace self.protocol_version = protocol_version + self.result_metadata = result_metadata @classmethod - def from_message(cls, query_id, column_metadata, pk_indexes, cluster_metadata, query, prepared_keyspace, protocol_version): + def from_message(cls, query_id, column_metadata, pk_indexes, cluster_metadata, + query, prepared_keyspace, protocol_version, result_metadata): if not column_metadata: - return PreparedStatement(column_metadata, query_id, None, query, prepared_keyspace, protocol_version) + return PreparedStatement(column_metadata, query_id, None, query, prepared_keyspace, protocol_version, result_metadata) if pk_indexes: routing_key_indexes = pk_indexes @@ -416,7 +413,7 @@ def from_message(cls, query_id, column_metadata, pk_indexes, cluster_metadata, q pass # statement; just leave routing_key_indexes as None return PreparedStatement(column_metadata, query_id, routing_key_indexes, - query, prepared_keyspace, protocol_version) + query, prepared_keyspace, protocol_version, result_metadata) def bind(self, values): """ diff --git a/cassandra/row_parser.pyx b/cassandra/row_parser.pyx index ec2b83bed7..8422d544d3 100644 --- a/cassandra/row_parser.pyx +++ b/cassandra/row_parser.pyx @@ -18,13 +18,15 @@ from cassandra.deserializers import make_deserializers include "ioutils.pyx" def make_recv_results_rows(ColumnParser colparser): - def recv_results_rows(cls, f, int protocol_version, user_type_map): + def recv_results_rows(cls, f, int protocol_version, user_type_map, result_metadata): """ Parse protocol data given as a BytesIO f into a set of columns (e.g. list of tuples) This is used as the recv_results_rows method of (Fast)ResultMessage """ paging_state, column_metadata = cls.recv_results_metadata(f, user_type_map) + column_metadata = column_metadata or result_metadata + colnames = [c[2] for c in column_metadata] coltypes = [c[3] for c in column_metadata] diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index 63a8380902..c6818f7f4b 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -126,7 +126,7 @@ class CustomResultMessageRaw(ResultMessage): type_codes = my_type_codes @classmethod - def recv_results_rows(cls, f, protocol_version, user_type_map): + def recv_results_rows(cls, f, protocol_version, user_type_map, result_metadata): paging_state, column_metadata = cls.recv_results_metadata(f, user_type_map) rowcount = read_int(f) rows = [cls.recv_row(f, len(column_metadata)) for _ in range(rowcount)] @@ -155,7 +155,7 @@ class CustomResultMessageTracked(ResultMessage): checked_rev_row_set = set() @classmethod - def recv_results_rows(cls, f, protocol_version, user_type_map): + def recv_results_rows(cls, f, protocol_version, user_type_map, result_metadata): paging_state, column_metadata = cls.recv_results_metadata(f, user_type_map) rowcount = read_int(f) rows = [cls.recv_row(f, len(column_metadata)) for _ in range(rowcount)] diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index 2ac10a590f..b8cb640b46 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -112,7 +112,7 @@ def test_negative_body_length(self, *args): def test_unsupported_cql_version(self, *args): c = self.make_connection() - c._requests = {0: (c._handle_options_response, ProtocolHandler.decode_message)} + c._requests = {0: (c._handle_options_response, ProtocolHandler.decode_message, [])} c.defunct = Mock() c.cql_version = "3.0.3" @@ -135,7 +135,7 @@ def test_unsupported_cql_version(self, *args): def test_prefer_lz4_compression(self, *args): c = self.make_connection() - c._requests = {0: (c._handle_options_response, ProtocolHandler.decode_message)} + c._requests = {0: (c._handle_options_response, ProtocolHandler.decode_message, [])} c.defunct = Mock() c.cql_version = "3.0.3" @@ -158,7 +158,7 @@ def test_prefer_lz4_compression(self, *args): def test_requested_compression_not_available(self, *args): c = self.make_connection() - c._requests = {0: (c._handle_options_response, ProtocolHandler.decode_message)} + c._requests = {0: (c._handle_options_response, ProtocolHandler.decode_message, [])} c.defunct = Mock() # request lz4 compression c.compression = "lz4" @@ -188,7 +188,7 @@ def test_requested_compression_not_available(self, *args): def test_use_requested_compression(self, *args): c = self.make_connection() - c._requests = {0: (c._handle_options_response, ProtocolHandler.decode_message)} + c._requests = {0: (c._handle_options_response, ProtocolHandler.decode_message, [])} c.defunct = Mock() # request snappy compression c.compression = "snappy" diff --git a/tests/unit/test_parameter_binding.py b/tests/unit/test_parameter_binding.py index d48b5d9573..555dfe3834 100644 --- a/tests/unit/test_parameter_binding.py +++ b/tests/unit/test_parameter_binding.py @@ -91,7 +91,7 @@ def setUpClass(cls): routing_key_indexes=[1, 0], query=None, keyspace='keyspace', - protocol_version=cls.protocol_version) + protocol_version=cls.protocol_version, result_metadata=None) cls.bound = BoundStatement(prepared_statement=cls.prepared) def test_invalid_argument_type(self): @@ -130,7 +130,8 @@ def test_inherit_fetch_size(self): routing_key_indexes=[], query=None, keyspace=keyspace, - protocol_version=self.protocol_version) + protocol_version=self.protocol_version, + result_metadata=None) prepared_statement.fetch_size = 1234 bound_statement = BoundStatement(prepared_statement=prepared_statement) self.assertEqual(1234, bound_statement.fetch_size) @@ -163,7 +164,8 @@ def test_values_none(self): routing_key_indexes=[], query=None, keyspace='whatever', - protocol_version=self.protocol_version) + protocol_version=self.protocol_version, + result_metadata=None) bound = prepared_statement.bind(None) self.assertListEqual(bound.values, []) diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index ad5bb3e93b..da7ab35341 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -67,7 +67,7 @@ def test_result_message(self): rf.session._pools.get.assert_called_once_with('ip1') pool.borrow_connection.assert_called_once_with(timeout=ANY) - connection.send_msg.assert_called_once_with(rf.message, 1, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message) + connection.send_msg.assert_called_once_with(rf.message, 1, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message, result_metadata=[]) rf._set_result(self.make_mock_response([{'col': 'val'}])) result = rf.result() @@ -192,7 +192,7 @@ def test_retry_policy_says_retry(self): rf.session._pools.get.assert_called_once_with('ip1') pool.borrow_connection.assert_called_once_with(timeout=ANY) - connection.send_msg.assert_called_once_with(rf.message, 1, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message) + connection.send_msg.assert_called_once_with(rf.message, 1, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message, result_metadata=[]) result = Mock(spec=UnavailableErrorMessage, info={}) rf._set_result(result) @@ -210,7 +210,7 @@ def test_retry_policy_says_retry(self): # an UnavailableException rf.session._pools.get.assert_called_with('ip1') pool.borrow_connection.assert_called_with(timeout=ANY) - connection.send_msg.assert_called_with(rf.message, 2, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message) + connection.send_msg.assert_called_with(rf.message, 2, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message, result_metadata=[]) def test_retry_with_different_host(self): session = self.make_session() @@ -225,7 +225,7 @@ def test_retry_with_different_host(self): rf.session._pools.get.assert_called_once_with('ip1') pool.borrow_connection.assert_called_once_with(timeout=ANY) - connection.send_msg.assert_called_once_with(rf.message, 1, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message) + connection.send_msg.assert_called_once_with(rf.message, 1, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message, result_metadata=[]) self.assertEqual(ConsistencyLevel.QUORUM, rf.message.consistency_level) result = Mock(spec=OverloadedErrorMessage, info={}) @@ -243,7 +243,7 @@ def test_retry_with_different_host(self): # it should try with a different host rf.session._pools.get.assert_called_with('ip2') pool.borrow_connection.assert_called_with(timeout=ANY) - connection.send_msg.assert_called_with(rf.message, 2, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message) + connection.send_msg.assert_called_with(rf.message, 2, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message, result_metadata=[]) # the consistency level should be the same self.assertEqual(ConsistencyLevel.QUORUM, rf.message.consistency_level) From dcd47a3a943036d8e65de52b826fd2ca8b4dae85 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 6 Jul 2016 14:37:42 -0500 Subject: [PATCH 0735/2005] unset Host.is_up for optimistically set contact points --- cassandra/cluster.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 99509d2233..37197f6183 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1154,6 +1154,13 @@ def connect(self, keyspace=None): try: self.control_connection.connect() + + # we set all contact points up for connecting, but we won't infer state after this + for address in self.contact_points_resolved: + h = self.metadata.get_host(address) + if h and self.profile_manager.distance(h) == HostDistance.IGNORED: + h.is_up = None + log.debug("Control connection created") except Exception: log.exception("Control connection failed to connect, " From 8addbad42c428f0086b13b09495b4d2b1950243f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 6 Jul 2016 14:38:20 -0500 Subject: [PATCH 0736/2005] update Host.is_up for ignored hosts on events --- cassandra/cluster.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 37197f6183..748f562643 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1341,6 +1341,7 @@ def on_up(self, host): else: if not have_future: with host.lock: + host.set_up() host._currently_handling_node_up = False # for testing purposes @@ -1379,10 +1380,11 @@ def on_down(self, host, is_host_addition, expect_host_to_be_down=False): return with host.lock: - if (not host.is_up and not expect_host_to_be_down) or host.is_currently_reconnecting(): + was_up = host.is_up + host.set_down() + if (not was_up and not expect_host_to_be_down) or host.is_currently_reconnecting(): return - host.set_down() log.warning("Host %s has been marked down", host) From 31abbc81703d5be1c92bd19d753319916b58143c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 6 Jul 2016 16:09:05 -0500 Subject: [PATCH 0737/2005] Make field overriding compute the right position and pk attributes Would rather make this fail model validation, but don't want to break existing model behavior. PYTHON-576 --- cassandra/cqlengine/models.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index e940955ed4..4c5d9c86e0 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -797,14 +797,6 @@ def __new__(cls, name, bases, attrs): options = attrs.get('__options__') or {} attrs['__default_ttl__'] = options.get('default_time_to_live') - def _transform_column(col_name, col_obj): - column_dict[col_name] = col_obj - if col_obj.primary_key: - primary_keys[col_name] = col_obj - col_obj.set_column_name(col_name) - # set properties - attrs[col_name] = ColumnDescriptor(col_obj) - column_definitions = [(k, v) for k, v in attrs.items() if isinstance(v, columns.Column)] column_definitions = sorted(column_definitions, key=lambda x: x[1].position) @@ -849,6 +841,14 @@ def _get_polymorphic_base(bases): has_partition_keys = any(v.partition_key for (k, v) in column_definitions) + def _transform_column(col_name, col_obj): + column_dict[col_name] = col_obj + if col_obj.primary_key: + primary_keys[col_name] = col_obj + col_obj.set_column_name(col_name) + # set properties + attrs[col_name] = ColumnDescriptor(col_obj) + partition_key_index = 0 # transform column definitions for k, v in column_definitions: @@ -868,6 +868,12 @@ def _get_polymorphic_base(bases): if v.partition_key: v._partition_key_index = partition_key_index partition_key_index += 1 + + overriding = column_dict.get(k) + if overriding: + v.position = overriding.position + v.partition_key = overriding.partition_key + v._partition_key_index = overriding._partition_key_index _transform_column(k, v) partition_keys = OrderedDict(k for k in primary_keys.items() if k[1].partition_key) From d9ef7fc18ead9b2177ba112f097a3dc6c47fcf33 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 6 Jul 2016 16:48:27 -0500 Subject: [PATCH 0738/2005] PYTHON-105 tweak tests to wait for pools to be up --- tests/integration/long/test_ssl.py | 2 +- tests/integration/standard/test_cluster.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index 9ae84f38d6..48d74e55f0 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -86,7 +86,7 @@ def validate_ssl_options(ssl_options): raise RuntimeError("Failed to connect to SSL cluster after 5 attempts") try: cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options=ssl_options) - session = cluster.connect() + session = cluster.connect(wait_for_all_pools=True) break except Exception: ex_type, ex, tb = sys.exc_info() diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 2294d398b5..9a2d026084 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -67,11 +67,11 @@ def test_host_duplication(self): @test_category connection """ cluster = Cluster(contact_points=["localhost", "127.0.0.1", "localhost", "localhost", "localhost"], protocol_version=PROTOCOL_VERSION, connect_timeout=1) - cluster.connect() + cluster.connect(wait_for_all_pools=True) self.assertEqual(len(cluster.metadata.all_hosts()), 3) cluster.shutdown() cluster = Cluster(contact_points=["127.0.0.1", "localhost"], protocol_version=PROTOCOL_VERSION, connect_timeout=1) - cluster.connect() + cluster.connect(wait_for_all_pools=True) self.assertEqual(len(cluster.metadata.all_hosts()), 3) cluster.shutdown() @@ -805,7 +805,7 @@ def test_add_profile_timeout(self): node1 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1'])) with Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: node1}) as cluster: - session = cluster.connect() + session = cluster.connect(wait_for_all_pools=True) pools = session.get_pool_state() self.assertGreater(len(cluster.metadata.all_hosts()), 2) self.assertEqual(set(h.address for h in pools), set(('127.0.0.1',))) From 9bdd3fbaa376d15d03446679d64b62fd8dd7dc6b Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 6 Jul 2016 16:54:24 -0500 Subject: [PATCH 0739/2005] Fix pool size changing during iteration --- cassandra/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index cfce064844..a5bb1aca00 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2189,7 +2189,7 @@ def shutdown(self): else: self.is_shutdown = True - for pool in self._pools.values(): + for pool in list(self._pools.values()): pool.shutdown() def __enter__(self): From b91d199a5866a3f9500a7260082be700eea2d95e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 7 Jul 2016 14:17:45 -0500 Subject: [PATCH 0740/2005] handle None conditional PYTHON-580 --- cassandra/cqlengine/query.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 25a6073722..a2202b407a 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -1178,7 +1178,7 @@ class Row(Model): self._execute(us) null_conditional = [condition for condition in self._conditional - if condition.field not in updated_columns] + if condition.field not in updated_columns] if self._conditional else None if nulled_columns: ds = DeleteStatement(self.column_family_name, fields=nulled_columns, @@ -1298,7 +1298,7 @@ def update(self): # remove conditions on fields that have been updated self._conditional = [condition for condition in self._conditional - if condition.field not in updated_columns] + if condition.field not in updated_columns] if self._conditional else None if not null_clustering_key: self._delete_null_columns() From ccbf5aefaf0b815d6697834c1ade6ee8b4c5a192 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 7 Jul 2016 14:18:41 -0500 Subject: [PATCH 0741/2005] only compute new conditional if we're doing a delete PYTHON-580 --- cassandra/cqlengine/query.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index a2202b407a..adf33c296c 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -1177,10 +1177,10 @@ class Row(Model): if us.assignments: self._execute(us) - null_conditional = [condition for condition in self._conditional - if condition.field not in updated_columns] if self._conditional else None - if nulled_columns: + null_conditional = [condition for condition in self._conditional + if condition.field not in updated_columns] if self._conditional else None + ds = DeleteStatement(self.column_family_name, fields=nulled_columns, where=self._where, conditionals=null_conditional, if_exists=self._if_exists) self._execute(ds) @@ -1296,11 +1296,10 @@ def update(self): statement.add_where(col, EqualsOperator(), getattr(self.instance, name)) self._execute(statement) - # remove conditions on fields that have been updated - self._conditional = [condition for condition in self._conditional - if condition.field not in updated_columns] if self._conditional else None - if not null_clustering_key: + # remove conditions on fields that have been updated + self._conditional = [condition for condition in self._conditional + if condition.field not in updated_columns] if self._conditional else None self._delete_null_columns() def save(self): From 80a12baa89456edbf68c643f075093c7f586f319 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 7 Jul 2016 14:21:35 -0500 Subject: [PATCH 0742/2005] don't change DMLQuery conditionals during delete phase PYTHON-580 --- cassandra/cqlengine/query.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index adf33c296c..369df223b0 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -1178,11 +1178,10 @@ class Row(Model): self._execute(us) if nulled_columns: - null_conditional = [condition for condition in self._conditional - if condition.field not in updated_columns] if self._conditional else None - + delete_conditional = [condition for condition in self._conditional + if condition.field not in updated_columns] if self._conditional else None ds = DeleteStatement(self.column_family_name, fields=nulled_columns, - where=self._where, conditionals=null_conditional, if_exists=self._if_exists) + where=self._where, conditionals=delete_conditional, if_exists=self._if_exists) self._execute(ds) @@ -1229,11 +1228,11 @@ def batch(self, batch_obj): self._batch = batch_obj return self - def _delete_null_columns(self): + def _delete_null_columns(self, conditionals=None): """ executes a delete query to remove columns that have changed to null """ - ds = DeleteStatement(self.column_family_name, conditionals=self._conditional, if_exists=self._if_exists) + ds = DeleteStatement(self.column_family_name, conditionals=conditionals, if_exists=self._if_exists) deleted_fields = False for _, v in self.instance._values.items(): col = v.column @@ -1298,9 +1297,9 @@ def update(self): if not null_clustering_key: # remove conditions on fields that have been updated - self._conditional = [condition for condition in self._conditional - if condition.field not in updated_columns] if self._conditional else None - self._delete_null_columns() + delete_conditionals = [condition for condition in self._conditional + if condition.field not in updated_columns] if self._conditional else None + self._delete_null_columns(delete_conditionals) def save(self): """ From 21c796e09642b83020f4cedc7085c32d54797c0d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 7 Jul 2016 15:38:17 -0500 Subject: [PATCH 0743/2005] Remove nonexistent contact points from metadata after discovery PYTHON-549 --- cassandra/cluster.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index afb9188127..e149dff5a5 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2783,9 +2783,8 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, for old_host in self._cluster.metadata.all_hosts(): if old_host.address != connection.host and old_host.address not in found_hosts: should_rebuild_token_map = True - if old_host.address not in self._cluster.contact_points: - log.debug("[control connection] Removing host not found in peers metadata: %r", old_host) - self._cluster.remove_host(old_host) + log.debug("[control connection] Removing host not found in peers metadata: %r", old_host) + self._cluster.remove_host(old_host) log.debug("[control connection] Finished fetching ring info") if partitioner and should_rebuild_token_map: From fa3601531904bb33ffeb1639f35a5e2eeea960ca Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 8 Jul 2016 11:46:42 -0500 Subject: [PATCH 0744/2005] don't downgrade protocol when explicitly set PYTHON-537 --- cassandra/cluster.py | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index afb9188127..501e5d1ef5 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -356,10 +356,11 @@ class Cluster(object): """ The maximum version of the native protocol to use. - The driver will automatically downgrade version based on a negotiation with - the server, but it is most efficient to set this to the maximum supported - by your version of Cassandra. Setting this will also prevent conflicting - versions negotiated if your cluster is upgraded. + If not set in the constructor, the driver will automatically downgrade + version based on a negotiation with the server, but it is most efficient + to set this to the maximum supported by your version of Cassandra. + Setting this will also prevent conflicting versions negotiated if your + cluster is upgraded. Version 2 of the native protocol adds support for lightweight transactions, batch operations, and automatic query paging. The v2 protocol is @@ -388,6 +389,8 @@ class Cluster(object): +-------------------+-------------------+ | 2.2 | 1, 2, 3, 4 | +-------------------+-------------------+ + | 3.x | 3, 4 | + +-------------------+-------------------+ """ compression = True @@ -719,6 +722,7 @@ def token_metadata_enabled(self, enabled): _prepared_statements = None _prepared_statement_lock = None _idle_heartbeat = None + _protocol_version_explicit = False _user_types = None """ @@ -742,7 +746,7 @@ def __init__(self, ssl_options=None, sockopts=None, cql_version=None, - protocol_version=4, + protocol_version=_NOT_SET, executor_threads=2, max_schema_agreement_wait=10, control_connection_timeout=2.0, @@ -777,7 +781,11 @@ def __init__(self, for endpoint in socket.getaddrinfo(a, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM)] self.compression = compression - self.protocol_version = protocol_version + + if protocol_version is not _NOT_SET: + self.protocol_version = protocol_version + self._protocol_version_explicit = True + self.auth_provider = auth_provider if load_balancing_policy is not None: @@ -1117,6 +1125,9 @@ def _make_connection_kwargs(self, address, kwargs_dict): return kwargs_dict def protocol_downgrade(self, host_addr, previous_version): + if self._protocol_version_explicit: + raise DriverException("ProtocolError returned from server while using explicitly set client protocol_version %d" % (previous_version,)) + new_version = previous_version - 1 if new_version < self.protocol_version: if new_version >= MIN_SUPPORTED_VERSION: From 3c5266093e4cefcd7ace1bb0bfc79e92282b0f70 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 8 Jul 2016 16:49:30 -0500 Subject: [PATCH 0745/2005] Don't omit ignored nodes from schema agreement, if they're up see #615 --- cassandra/cluster.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 501e5d1ef5..1007513233 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2949,14 +2949,13 @@ def _get_schema_mismatches(self, peers_result, local_result, local_address): if local_row.get("schema_version"): versions[local_row.get("schema_version")].add(local_address) - pm = self._cluster.profile_manager for row in peers_result: schema_ver = row.get('schema_version') if not schema_ver: continue addr = self._rpc_from_peer_row(row) peer = self._cluster.metadata.get_host(addr) - if peer and peer.is_up and pm.distance(peer) != HostDistance.IGNORED: + if peer and peer.is_up is not False: versions[schema_ver].add(addr) if len(versions) == 1: From 5c06ac6f60689188c8169ca10c41e8ce5f8efa27 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 13 Jul 2016 12:34:57 -0500 Subject: [PATCH 0746/2005] PYTHON-537 adding boundry integration tests --- tests/integration/__init__.py | 49 ++++++++++++++++++---- tests/integration/standard/test_cluster.py | 39 ++++++++++++++++- 2 files changed, 79 insertions(+), 9 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 62a58896a4..26a6d5bb5d 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -137,14 +137,47 @@ def _get_cass_version_from_dse(dse_version): CCM_KWARGS['dse_credentials_file'] = DSE_CRED -if CASSANDRA_VERSION >= '2.2': - default_protocol_version = 4 -elif CASSANDRA_VERSION >= '2.1': - default_protocol_version = 3 -elif CASSANDRA_VERSION >= '2.0': - default_protocol_version = 2 -else: - default_protocol_version = 1 +def get_default_protocol(): + + if CASSANDRA_VERSION >= '2.2': + return 4 + elif CASSANDRA_VERSION >= '2.1': + return 3 + elif CASSANDRA_VERSION >= '2.0': + return 2 + else: + return 1 + + +def get_unsupported_lower_protocol(): + """ + This is used to determine the lowest protocol version that is NOT + supported by the version of C* running + """ + + if CASSANDRA_VERSION >= '3.0': + return 2 + else: + return None + + +def get_unsupported_upper_protocol(): + """ + This is used to determine the highest protocol version that is NOT + supported by the version of C* running + """ + + if CASSANDRA_VERSION >= '2.2': + return None + if CASSANDRA_VERSION >= '2.1': + return 4 + elif CASSANDRA_VERSION >= '2.0': + return 3 + else: + return None + +default_protocol_version = get_default_protocol() + PROTOCOL_VERSION = int(os.getenv('PROTOCOL_VERSION', default_protocol_version)) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 9a2d026084..4da9eb225b 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -33,7 +33,8 @@ from cassandra.protocol import MAX_SUPPORTED_VERSION from cassandra.query import SimpleStatement, TraceUnavailable, tuple_factory -from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, get_node, CASSANDRA_VERSION, execute_until_pass, execute_with_long_wait_retry, get_node, MockLoggingHandler +from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, get_node, CASSANDRA_VERSION, execute_until_pass, execute_with_long_wait_retry, get_node,\ + MockLoggingHandler, get_unsupported_lower_protocol, get_unsupported_upper_protocol from tests.integration.util import assert_quiescent_pool_state @@ -175,6 +176,42 @@ def test_protocol_negotiation(self): cluster.shutdown() + def test_invalid_protocol_negotation(self): + """ + Test for protocol negotiation when explicit versions are set + + If an explicit protocol version that is not compatible with the server version is set + an exception should be thrown. It should not attempt to negotiate + + for reference supported protocol version to server versions is as follows/ + + 1.2 -> 1 + 2.0 -> 2, 1 + 2.1 -> 3, 2, 1 + 2.2 -> 4, 3, 2, 1 + 3.X -> 4, 3 + + @since 3.6.0 + @jira_ticket PYTHON-537 + @expected_result downgrading should not be allowed when explicit protocol versions are set. + + @test_category connection + """ + + upper_bound = get_unsupported_upper_protocol() + if upper_bound is not None: + cluster = Cluster(protocol_version=upper_bound) + with self.assertRaises(NoHostAvailable): + cluster.connect() + cluster.shutdown() + + lower_bound = get_unsupported_lower_protocol() + if lower_bound is not None: + cluster = Cluster(protocol_version=lower_bound) + with self.assertRaises(NoHostAvailable): + cluster.connect() + cluster.shutdown() + def test_connect_on_keyspace(self): """ Ensure clusters that connect on a keyspace, do From 10ae19e4abe659f37345732a2628261556097baf Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 14 Jul 2016 10:47:49 -0400 Subject: [PATCH 0747/2005] Removed an inconsistent sleep use in a test --- .../cqlengine/query/test_queryset.py | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index ee1cc751e7..dcebe56e82 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -851,16 +851,12 @@ def test_tzaware_datetime_support(self): def test_success_case(self): """ Test that the min and max time uuid functions work as expected """ pk = uuid4() - TimeUUIDQueryModel.create(partition=pk, time=uuid1(), data='1') - time.sleep(0.2) - TimeUUIDQueryModel.create(partition=pk, time=uuid1(), data='2') - time.sleep(0.2) - midpoint = datetime.utcnow() - time.sleep(0.2) - TimeUUIDQueryModel.create(partition=pk, time=uuid1(), data='3') - time.sleep(0.2) - TimeUUIDQueryModel.create(partition=pk, time=uuid1(), data='4') - time.sleep(0.2) + startpoint = datetime.utcnow() + TimeUUIDQueryModel.create(partition=pk, time=uuid_from_time(startpoint + timedelta(seconds=1)), data='1') + TimeUUIDQueryModel.create(partition=pk, time=uuid_from_time(startpoint + timedelta(seconds=2)), data='2') + midpoint = startpoint + timedelta(seconds=3) + TimeUUIDQueryModel.create(partition=pk, time=uuid_from_time(startpoint + timedelta(seconds=4)), data='3') + TimeUUIDQueryModel.create(partition=pk, time=uuid_from_time(startpoint + timedelta(seconds=5)), data='4') # test kwarg filtering q = TimeUUIDQueryModel.filter(partition=pk, time__lte=functions.MaxTimeUUID(midpoint)) @@ -1373,5 +1369,3 @@ def test_defaultFetchSize(self): smiths = list(People2.filter(last_name="Smith")) self.assertEqual(len(smiths), 5) self.assertTrue(smiths[0].last_name is not None) - - From de8351c3968b338c07c392c94076df817d2908ff Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 14 Jul 2016 13:21:26 -0500 Subject: [PATCH 0748/2005] PYTHON-71 test metadata across protocol versions --- tests/integration/__init__.py | 20 +++++++++++++ tests/integration/standard/test_query.py | 36 +++++++++++++++++++++++- 2 files changed, 55 insertions(+), 1 deletion(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 26a6d5bb5d..2eee0faf99 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -149,6 +149,26 @@ def get_default_protocol(): return 1 +def get_supported_protocol_versions(): + """ + 1.2 -> 1 + 2.0 -> 2, 1 + 2.1 -> 3, 2, 1 + 2.2 -> 4, 3, 2, 1 + 3.X -> 4, 3 +` """ + if CASSANDRA_VERSION >= '3.0': + return (3, 4) + elif CASSANDRA_VERSION >= '2.2': + return (1, 2, 3, 4) + elif CASSANDRA_VERSION >= '2.1': + return (1, 2, 3) + elif CASSANDRA_VERSION >= '2.0': + return (1, 2) + else: + return (1) + + def get_unsupported_lower_protocol(): """ This is used to determine the lowest protocol version that is NOT diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 4bd742bc42..3dde078393 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -26,7 +26,7 @@ from cassandra.cluster import Cluster, NoHostAvailable from cassandra.policies import HostDistance, RoundRobinPolicy -from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, get_server_versions, greaterthanprotocolv3, MockLoggingHandler +from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, get_server_versions, greaterthanprotocolv3, MockLoggingHandler, get_supported_protocol_versions import time import re @@ -356,6 +356,39 @@ def make_query_plan(self, working_keyspace=None, query=None): return list(self._live_hosts) +class PreparedStatementMetdataTest(unittest.TestCase): + + def test_prepared_metadata_generation(self): + """ + Test to validate that result metadata is appropriately populated across protocol version + + In protocol version 1 result metadata is retrieved everytime the statement is issued. In all + other protocol versions it's set once upon the prepare, then re-used. This test ensures that it manifests + it's self the same across multiple protocol versions. + + @since 3.6.0 + @jira_ticket PYTHON-71 + @expected_result result metadata is consistent. + """ + + base_line = None + for proto_version in get_supported_protocol_versions(): + cluster = Cluster(protocol_version=proto_version) + session = cluster.connect() + select_statement = session.prepare("SELECT * FROM system.local") + if proto_version == 1: + self.assertEqual(select_statement.result_metadata, None) + else: + self.assertNotEqual(select_statement.result_metadata, None) + future = session.execute_async(select_statement) + results = future.result() + if base_line is None: + base_line = results[0].__dict__.keys() + else: + self.assertEqual(base_line, results[0].__dict__.keys()) + cluster.shutdown() + + class PreparedStatementArgTest(unittest.TestCase): def test_prepare_on_all_hosts(self): @@ -905,6 +938,7 @@ def test_mv_filtering(self): self.assertEquals(results[1].day, 2) self.assertEquals(results[1].score, 1000) self.assertEquals(results[1].user, "tjake") + #import pdb; pdb.set_trace() # Test montly high range queries prepared_query = self.session.prepare("SELECT * FROM {0}.monthlyhigh WHERE game=? AND year=? AND month=? and score >= ? and score <= ?".format(self.keyspace_name)) From 50d349f26eabb9e14e7b23641931063d3f3b5009 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 14 Jul 2016 16:26:58 -0400 Subject: [PATCH 0749/2005] Fix pk__token equality filter --- cassandra/cqlengine/query.py | 5 +++-- tests/integration/cqlengine/query/test_queryoperators.py | 6 +++++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 02101f6e06..c3d7506abb 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -545,7 +545,7 @@ def _parse_filter_arg(self, arg): if len(statement) == 1: return arg, None elif len(statement) == 2: - return statement[0], statement[1] + return (statement[0], statement[1]) if arg != 'pk__token' else (arg, None) else: raise QueryException("Can't parse '{0}'".format(arg)) @@ -954,7 +954,8 @@ class ModelQuerySet(AbstractQuerySet): def _validate_select_where(self): """ Checks that a filterset will not create invalid select statement """ # check that there's either a =, a IN or a CONTAINS (collection) relationship with a primary key or indexed field - equal_ops = [self.model._get_column_by_db_name(w.field) for w in self._where if isinstance(w.operator, EqualsOperator)] + equal_ops = [self.model._get_column_by_db_name(w.field) \ + for w in self._where if isinstance(w.operator, EqualsOperator) and not isinstance(w.value, Token)] token_comparison = any([w for w in self._where if isinstance(w.value, Token)]) if not any(w.primary_key or w.index for w in equal_ops) and not token_comparison and not self._allow_filtering: raise QueryException(('Where clauses require either =, a IN or a CONTAINS (collection) ' diff --git a/tests/integration/cqlengine/query/test_queryoperators.py b/tests/integration/cqlengine/query/test_queryoperators.py index c2a2a74206..055e8f3db2 100644 --- a/tests/integration/cqlengine/query/test_queryoperators.py +++ b/tests/integration/cqlengine/query/test_queryoperators.py @@ -72,7 +72,7 @@ def tearDown(self): super(TestTokenFunction, self).tearDown() drop_table(TokenTestModel) - @execute_count(14) + @execute_count(15) def test_token_function(self): """ Tests that token functions work properly """ assert TokenTestModel.objects().count() == 0 @@ -91,6 +91,10 @@ def test_token_function(self): assert len(seen_keys) == 10 assert all([i in seen_keys for i in range(10)]) + # pk__token equality + r = TokenTestModel.objects(pk__token=functions.Token(last_token)) + self.assertEqual(len(r), 1) + def test_compound_pk_token_function(self): class TestModel(Model): From 4ec5045581b36f3209442d16291b9ea345334d2e Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 14 Jul 2016 17:15:57 -0500 Subject: [PATCH 0750/2005] cleaning up debug statement --- tests/integration/standard/test_query.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 3dde078393..ad860781a6 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -938,7 +938,6 @@ def test_mv_filtering(self): self.assertEquals(results[1].day, 2) self.assertEquals(results[1].score, 1000) self.assertEquals(results[1].user, "tjake") - #import pdb; pdb.set_trace() # Test montly high range queries prepared_query = self.session.prepare("SELECT * FROM {0}.monthlyhigh WHERE game=? AND year=? AND month=? and score >= ? and score <= ?".format(self.keyspace_name)) From b22f3eb4bf79b2378db6e9fc1c121d23482a75bf Mon Sep 17 00:00:00 2001 From: Ben Brostoff Date: Fri, 15 Jul 2016 09:52:18 -0400 Subject: [PATCH 0751/2005] Correct typo in partition_key check --- cassandra/cqlengine/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 02101f6e06..fdd11a0db5 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -1354,7 +1354,7 @@ def delete(self): ds = DeleteStatement(self.column_family_name, timestamp=self._timestamp, conditionals=self._conditional, if_exists=self._if_exists) for name, col in self.model._primary_keys.items(): val = getattr(self.instance, name) - if val is None and not col.parition_key: + if val is None and not col.partition_key: continue ds.add_where(col, EqualsOperator(), val) self._execute(ds) From d80e9ad746cf277774ca122b91fc5721ffb25ce0 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 15 Jul 2016 15:35:20 -0500 Subject: [PATCH 0752/2005] PYTHON-576 adding primary key override test for pypy --- tests/integration/__init__.py | 2 ++ .../integration/cqlengine/model/test_model.py | 36 ++++++++++++++++++- 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 2eee0faf99..e49addd9ea 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -23,6 +23,7 @@ import sys import time import traceback +import platform from threading import Event from subprocess import call from itertools import groupby @@ -210,6 +211,7 @@ def get_unsupported_upper_protocol(): greaterthanorequalcass30 = unittest.skipUnless(CASSANDRA_VERSION >= '3.0', 'Cassandra version 3.0 or greater required') lessthancass30 = unittest.skipUnless(CASSANDRA_VERSION < '3.0', 'Cassandra version less then 3.0 required') dseonly = unittest.skipUnless(DSE_VERSION, "Test is only applicalbe to DSE clusters") +pypy = unittest.skipUnless(platform.python_implementation() == "PyPy", "Test is skipped unless it's on PyPy") def wait_for_node_socket(node, timeout): diff --git a/tests/integration/cqlengine/model/test_model.py b/tests/integration/cqlengine/model/test_model.py index e46698ff75..b31b8d5aee 100644 --- a/tests/integration/cqlengine/model/test_model.py +++ b/tests/integration/cqlengine/model/test_model.py @@ -22,7 +22,8 @@ from cassandra.cqlengine.management import sync_table, drop_table, create_keyspace_simple, drop_keyspace from cassandra.cqlengine import models from cassandra.cqlengine.models import Model, ModelDefinitionException - +from uuid import uuid1 +from tests.integration import pypy class TestModel(unittest.TestCase): """ Tests the non-io functionality of models """ @@ -172,4 +173,37 @@ class IllegalFilterColumnModel(Model): my_primary_key = columns.Integer(primary_key=True) filter = columns.Text() +@pypy +class ModelOverWriteTest(unittest.TestCase): + + def test_model_over_write(self): + """ + Test to ensure overwriting of primary keys in model inheritance is allowed + + This is currently only an issue in PyPy. When PYTHON-504 is introduced this should + be updated error out and warn the user + + @since 3.6.0 + @jira_ticket PYTHON-576 + @expected_result primary keys can be overwritten via inheritance + + @test_category object_mapper + """ + class TimeModelBase(Model): + uuid = columns.TimeUUID(primary_key=True) + + class DerivedTimeModel(TimeModelBase): + __table_name__ = 'derived_time' + uuid = columns.TimeUUID(primary_key=True, partition_key=True) + value = columns.Text(required=False) + + # In case the table already exists in keyspace + drop_table(DerivedTimeModel) + + sync_table(DerivedTimeModel) + uuid_value = uuid1() + uuid_value2 = uuid1() + DerivedTimeModel.create(uuid=uuid_value, value="first") + DerivedTimeModel.create(uuid=uuid_value2, value="second") + DerivedTimeModel.objects.filter(uuid=uuid_value) From 7a18156c452b0b9b0984108abf4e8bb10297fe94 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 15 Jul 2016 15:59:47 -0500 Subject: [PATCH 0753/2005] PYTHON-599 updating test tag --- tests/integration/cqlengine/query/test_queryset.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index dcebe56e82..2ffe2209a6 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -268,6 +268,7 @@ def test_defining_defer_fields(self): @since 3.5 @jira_ticket PYTHON-560 + @jira_ticket PYTHON-599 @expected_result deferred fields should not be returned @test_category object_mapper From b6067fbfa4ea3b1713ee87d38faade02bddbdef2 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Mon, 18 Jul 2016 14:43:53 +0200 Subject: [PATCH 0754/2005] Extent noop model update tests to primary keys. --- .../cqlengine/model/test_updates.py | 38 ++++++++++++++++++- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/tests/integration/cqlengine/model/test_updates.py b/tests/integration/cqlengine/model/test_updates.py index 242bffe12f..138b063246 100644 --- a/tests/integration/cqlengine/model/test_updates.py +++ b/tests/integration/cqlengine/model/test_updates.py @@ -79,8 +79,8 @@ def test_update_values(self): self.assertEqual(m2.count, m1.count) self.assertEqual(m2.text, m0.text) - def test_noop_model_update(self): - """ tests that calling update on a model with no changes will do nothing. """ + def test_noop_model_direct_update(self): + """ Tests that calling update on a model with no changes will do nothing. """ m0 = TestUpdateModel.create(count=5, text='monkey') with patch.object(self.session, 'execute') as execute: @@ -91,6 +91,40 @@ def test_noop_model_update(self): m0.update(count=5) assert execute.call_count == 0 + with patch.object(self.session, 'execute') as execute: + m0.update(partition=m0.partition) + assert execute.call_count == 0 + + with patch.object(self.session, 'execute') as execute: + m0.update(cluster=m0.cluster) + assert execute.call_count == 0 + + def test_noop_model_assignation_update(self): + """ Tests that assigning the same value on a model will do nothing. """ + # Create object and fetch it back to eliminate any hidden variable + # cache effect. + m0 = TestUpdateModel.create(count=5, text='monkey') + m1 = TestUpdateModel.get(partition=m0.partition, cluster=m0.cluster) + + with patch.object(self.session, 'execute') as execute: + m1.save() + assert execute.call_count == 0 + + with patch.object(self.session, 'execute') as execute: + m1.count = 5 + m1.save() + assert execute.call_count == 0 + + with patch.object(self.session, 'execute') as execute: + m1.partition = m0.partition + m1.save() + assert execute.call_count == 0 + + with patch.object(self.session, 'execute') as execute: + m1.cluster = m0.cluster + m1.save() + assert execute.call_count == 0 + def test_invalid_update_kwarg(self): """ tests that passing in a kwarg to the update method that isn't a column will fail """ m0 = TestUpdateModel.create(count=5, text='monkey') From c316d3f7c6003d138578a94bbc113b48d848afb3 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Mon, 18 Jul 2016 18:36:43 +0200 Subject: [PATCH 0755/2005] Extend Text and Ascii columns tests. --- .../cqlengine/columns/test_validation.py | 136 ++++++++++++++++-- 1 file changed, 125 insertions(+), 11 deletions(-) diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 0480fe43e8..9dc4742816 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -23,6 +23,7 @@ from cassandra import InvalidRequest from cassandra.cqlengine.columns import TimeUUID +from cassandra.cqlengine.columns import Ascii from cassandra.cqlengine.columns import Text from cassandra.cqlengine.columns import Integer from cassandra.cqlengine.columns import BigInt @@ -337,28 +338,130 @@ def test_default_zero_fields_validate(self): it.validate() -class TestText(BaseCassEngTestCase): +class TestAscii(BaseCassEngTestCase): def test_min_length(self): - # not required defaults to 0 - col = Text() - col.validate('') - col.validate('b') + """ Test arbitrary minimal lengths requirements. """ + Ascii(min_length=0).validate('') + Ascii(min_length=0).validate(None) + Ascii(min_length=0).validate('kevin') + + Ascii(min_length=1).validate('k') + + Ascii(min_length=5).validate('kevin') + Ascii(min_length=5).validate('kevintastic') - # required defaults to 1 with self.assertRaises(ValidationError): - Text(required=True).validate('') + Ascii(min_length=1).validate('') + + with self.assertRaises(ValidationError): + Ascii(min_length=1).validate(None) + + with self.assertRaises(ValidationError): + Ascii(min_length=6).validate('') + + with self.assertRaises(ValidationError): + Ascii(min_length=6).validate(None) + + with self.assertRaises(ValidationError): + Ascii(min_length=6).validate('kevin') + + def test_max_length(self): + """ Test arbitrary maximal lengths requirements. """ + Ascii(max_length=0).validate('') + Ascii(max_length=0).validate(None) + + Ascii(max_length=1).validate('') + Ascii(max_length=1).validate(None) + Ascii(max_length=1).validate('b') + + Ascii(max_length=5).validate('') + Ascii(max_length=5).validate(None) + Ascii(max_length=5).validate('b') + Ascii(max_length=5).validate('blake') + + with self.assertRaises(ValidationError): + Ascii(max_length=0).validate('b') + + with self.assertRaises(ValidationError): + Ascii(max_length=5).validate('blaketastic') + + def test_type_checking(self): + Ascii().validate('string') + Ascii().validate(u'unicode') + Ascii().validate(bytearray('bytearray', encoding='ascii')) + + with self.assertRaises(ValidationError): + Ascii().validate(5) + + with self.assertRaises(ValidationError): + Ascii().validate(True) + + def test_unaltering_validation(self): + """ Test the validation step doesn't re-interpret values. """ + self.assertEquals(Ascii().validate(''), '') + self.assertEquals(Ascii().validate(None), None) + self.assertEquals(Ascii().validate('yo'), 'yo') + + def test_non_required_validation(self): + """ Tests that validation is ok on none and blank values if required is False. """ + Ascii().validate('') + Ascii().validate(None) + + def test_required_validation(self): + """ Tests that validation raise on none and blank values if value required. """ + with self.assertRaises(ValidationError): + Ascii(required=True).validate('') - #test arbitrary lengths + with self.assertRaises(ValidationError): + Ascii(required=True).validate(None) + + +class TestText(BaseCassEngTestCase): + + def test_min_length(self): + """ Test arbitrary minimal lengths requirements. """ Text(min_length=0).validate('') + Text(min_length=0).validate(None) + Text(min_length=0).validate('blake') + + Text(min_length=1).validate('b') + Text(min_length=5).validate('blake') Text(min_length=5).validate('blaketastic') + + with self.assertRaises(ValidationError): + Text(min_length=1).validate('') + + with self.assertRaises(ValidationError): + Text(min_length=1).validate(None) + + with self.assertRaises(ValidationError): + Text(min_length=6).validate('') + + with self.assertRaises(ValidationError): + Text(min_length=6).validate(None) + with self.assertRaises(ValidationError): Text(min_length=6).validate('blake') def test_max_length(self): + """ Test arbitrary maximal lengths requirements. """ + Text(max_length=0).validate('') + Text(max_length=0).validate(None) + + Text(max_length=1).validate('') + Text(max_length=1).validate(None) + Text(max_length=1).validate('b') + Text(max_length=5).validate('') + Text(max_length=5).validate(None) + Text(max_length=5).validate('b') Text(max_length=5).validate('blake') + + with self.assertRaises(ValidationError): + Text(max_length=0).validate('b') + with self.assertRaises(ValidationError): Text(max_length=5).validate('blaketastic') @@ -367,20 +470,31 @@ def test_type_checking(self): Text().validate(u'unicode') Text().validate(bytearray('bytearray', encoding='ascii')) - with self.assertRaises(ValidationError): - Text(required=True).validate(None) - with self.assertRaises(ValidationError): Text().validate(5) with self.assertRaises(ValidationError): Text().validate(True) + def test_unaltering_validation(self): + """ Test the validation step doesn't re-interpret values. """ + self.assertEquals(Text().validate(''), '') + self.assertEquals(Text().validate(None), None) + self.assertEquals(Text().validate('yo'), 'yo') + def test_non_required_validation(self): """ Tests that validation is ok on none and blank values if required is False """ Text().validate('') Text().validate(None) + def test_required_validation(self): + """ Tests that validation raise on none and blank values if value required. """ + with self.assertRaises(ValidationError): + Text(required=True).validate('') + + with self.assertRaises(ValidationError): + Text(required=True).validate(None) + class TestExtraFieldsRaiseException(BaseCassEngTestCase): class TestModel(Model): From f1acf62fb44e2c0ad55a0b2d59dc48acf3cc0b94 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 18 Jul 2016 12:59:34 -0500 Subject: [PATCH 0756/2005] PYTHON-596 adding a test for server error --- .../cqlengine/query/test_queryset.py | 30 ++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 2ffe2209a6..ea303373b8 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -910,7 +910,13 @@ def test_query_expression_success_case(self): @execute_count(5) def test_bool(self): """ - PYTHON-596 + Adding coverage to cqlengine for bool types. + + @since 3.6 + @jira_ticket PYTHON-596 + @expected_result bool results should be filtered appropriately + + @test_category object_mapper """ class bool_model(Model): k = columns.Integer(primary_key=True) @@ -924,6 +930,28 @@ class bool_model(Model): self.assertEqual(len(bool_model.objects.filter(k=0, b=True)), 1) self.assertEqual(len(bool_model.objects.filter(k=0, b=False)), 1) + @execute_count(3) + def test_bool_filter(self): + """ + Test to ensure that we don't translate boolean objects to String unnecessarily in filter clauses + + @since 3.6 + @jira_ticket PYTHON-596 + @expected_result We should not receive a server error + + @test_category object_mapper + """ + class bool_model2(Model): + k = columns.Boolean(primary_key=True) + b = columns.Integer(primary_key=True) + v = columns.Text() + drop_table(bool_model2) + sync_table(bool_model2) + + bool_model2.create(k=True, b=1, v='a') + bool_model2.create(k=False, b=1, v='b') + self.assertEqual(len(list(bool_model2.objects(k__in=(True, False)))), 2) + @greaterthancass20 class TestContainsOperator(BaseQuerySetUsage): From 1ff384107c2a7d440cdb34cc8021b9ad6a0f41b3 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 18 Jul 2016 14:52:00 -0500 Subject: [PATCH 0757/2005] PYTHON-551 added test for ingored host state --- tests/integration/standard/test_cluster.py | 32 ++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 4da9eb225b..4c8339a2cf 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -42,8 +42,40 @@ def setup_module(): use_singledc() +class IgnoredHostPolicy(RoundRobinPolicy): + + def __init__(self, ignored_hosts): + self.ignored_hosts = ignored_hosts + RoundRobinPolicy.__init__(self) + + def distance(self, host): + if(str(host) in self.ignored_hosts): + return HostDistance.IGNORED + else: + return HostDistance.LOCAL + + class ClusterTests(unittest.TestCase): + def test_ignored_host_up(self): + """ + Test to ensure that is_up is not set by default on ignored hosts + + @since 3.6 + @jira_ticket PYTHON-551 + @expected_result ignored hosts should have None set for is_up + + @test_category connection + """ + ingored_host_policy = IgnoredHostPolicy(["127.0.0.2", "127.0.0.3"]) + cluster = Cluster(protocol_version=PROTOCOL_VERSION, load_balancing_policy=ingored_host_policy) + session = cluster.connect() + for host in cluster.metadata.all_hosts(): + if str(host) == "127.0.0.1": + self.assertTrue(host.is_up) + else: + self.assertIsNone(host.is_up) + def test_host_resolution(self): """ Test to insure A records are resolved appropriately. From 351d33296a7c489327e657ea9efe50e2f8cbda97 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 18 Jul 2016 13:02:17 -0400 Subject: [PATCH 0758/2005] table default ttl now relies on Cassandra only --- cassandra/cqlengine/models.py | 5 ++- cassandra/cqlengine/query.py | 2 +- tests/integration/cqlengine/test_ttl.py | 43 ++++++++++++++++++++++--- 3 files changed, 42 insertions(+), 8 deletions(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 4c5d9c86e0..6d16bc7384 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -352,7 +352,7 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): _table_name = None # used internally to cache a derived table name def __init__(self, **values): - self._ttl = self.__default_ttl__ + self._ttl = None self._timestamp = None self._conditional = None self._batch = None @@ -691,7 +691,6 @@ def save(self): self._set_persisted() - self._ttl = self.__default_ttl__ self._timestamp = None return self @@ -738,7 +737,6 @@ def update(self, **values): self._set_persisted() - self._ttl = self.__default_ttl__ self._timestamp = None return self @@ -794,6 +792,7 @@ def __new__(cls, name, bases, attrs): # short circuit __discriminator_value__ inheritance attrs['__discriminator_value__'] = attrs.get('__discriminator_value__') + # TODO __default__ttl__ should be removed in the next major release options = attrs.get('__options__') or {} attrs['__default_ttl__'] = options.get('default_time_to_live') diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index fdd11a0db5..9ef5b7dd81 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -299,7 +299,7 @@ def __init__(self, model): self._count = None self._batch = None - self._ttl = getattr(model, '__default_ttl__', None) + self._ttl = None self._consistency = None self._timestamp = None self._if_not_exists = False diff --git a/tests/integration/cqlengine/test_ttl.py b/tests/integration/cqlengine/test_ttl.py index ba2c1e0935..3e16292781 100644 --- a/tests/integration/cqlengine/test_ttl.py +++ b/tests/integration/cqlengine/test_ttl.py @@ -18,6 +18,7 @@ except ImportError: import unittest # noqa +from cassandra import InvalidRequest from cassandra.cqlengine.management import sync_table, drop_table from tests.integration.cqlengine.base import BaseCassEngTestCase from cassandra.cqlengine.models import Model @@ -158,6 +159,16 @@ def test_ttl_included_with_blind_update(self): @unittest.skipIf(CASSANDRA_VERSION < '2.0', "default_time_to_Live was introduce in C* 2.0, currently running {0}".format(CASSANDRA_VERSION)) class TTLDefaultTest(BaseDefaultTTLTest): + def get_default_ttl(self, table_name): + session = get_session() + try: + default_ttl = session.execute("SELECT default_time_to_live FROM system_schema.tables " + "WHERE keyspace_name = 'cqlengine_test' AND table_name = '{0}'".format(table_name)) + except InvalidRequest: + default_ttl = session.execute("SELECT default_time_to_live FROM system.schema_columnfamilies " + "WHERE keyspace_name = 'cqlengine_test' AND columnfamily_name = '{0}'".format(table_name)) + return default_ttl[0]['default_time_to_live'] + def test_default_ttl_not_set(self): session = get_session() @@ -166,6 +177,9 @@ def test_default_ttl_not_set(self): self.assertIsNone(o._ttl) + default_ttl = self.get_default_ttl('test_ttlmodel') + self.assertEqual(default_ttl, 0) + with mock.patch.object(session, 'execute') as m: TestTTLModel.objects(id=tid).update(text="aligators") @@ -174,23 +188,44 @@ def test_default_ttl_not_set(self): def test_default_ttl_set(self): session = get_session() + o = TestDefaultTTLModel.create(text="some text on ttl") tid = o.id - self.assertEqual(o._ttl, TestDefaultTTLModel.__default_ttl__) + # Should not be set, it's handled by Cassandra + self.assertIsNone(o._ttl) + + default_ttl = self.get_default_ttl('test_default_ttlmodel') + self.assertEqual(default_ttl, 20) with mock.patch.object(session, 'execute') as m: - TestDefaultTTLModel.objects(id=tid).update(text="aligators expired") + TestTTLModel.objects(id=tid).update(text="aligators expired") + # Should not be set either query = m.call_args[0][0].query_string - self.assertIn("USING TTL", query) + self.assertNotIn("USING TTL", query) + + def test_default_ttl_modify(self): + session = get_session() + + default_ttl = self.get_default_ttl('test_default_ttlmodel') + self.assertEqual(default_ttl, 20) + + TestDefaultTTLModel.__options__ = {'default_time_to_live': 10} + sync_table(TestDefaultTTLModel) + + default_ttl = self.get_default_ttl('test_default_ttlmodel') + self.assertEqual(default_ttl, 10) + + # Restore default TTL + TestDefaultTTLModel.__options__ = {'default_time_to_live': 20} + sync_table(TestDefaultTTLModel) def test_override_default_ttl(self): session = get_session() o = TestDefaultTTLModel.create(text="some text on ttl") tid = o.id - self.assertEqual(o._ttl, TestDefaultTTLModel.__default_ttl__) o.ttl(3600) self.assertEqual(o._ttl, 3600) From 02320a9906f5d2e3b9407622c2ecccd1df7c0f74 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 18 Jul 2016 15:21:21 -0400 Subject: [PATCH 0759/2005] Add some docs for default and per-query TTL --- docs/api/cassandra/cqlengine/models.rst | 6 +++-- docs/cqlengine/queryset.rst | 36 +++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/docs/api/cassandra/cqlengine/models.rst b/docs/api/cassandra/cqlengine/models.rst index d6f3391974..fd081fb190 100644 --- a/docs/api/cassandra/cqlengine/models.rst +++ b/docs/api/cassandra/cqlengine/models.rst @@ -32,8 +32,10 @@ Model .. autoattribute:: __keyspace__ - .. _ttl-change: - .. autoattribute:: __default_ttl__ + .. attribute:: __default_ttl__ + :annotation: = None + + Will be deprecated in release 4.0. You can set the default ttl by configuring the table ``__options__``. See :ref:`ttl-change` for more details. .. autoattribute:: __discriminator_value__ diff --git a/docs/cqlengine/queryset.rst b/docs/cqlengine/queryset.rst index ff328b0ce4..c9c33932f8 100644 --- a/docs/cqlengine/queryset.rst +++ b/docs/cqlengine/queryset.rst @@ -343,6 +343,42 @@ None means no timeout. Setting the timeout on the model is meaningless and will raise an AssertionError. +.. _ttl-change: + +Default TTL and Per Query TTL +============================= + +Model default TTL now relies on the *default_time_to_live* feature, introduced in Cassandra 2.0. It is not handled anymore in the CQLEngine Model (cassandra-driver >=3.6). You can set the default TTL of a table like this: + + Example: + + .. code-block:: python + + class User(Model): + __options__ = {'default_time_to_live': 20} + + user_id = columns.UUID(primary_key=True) + ... + +You can set TTL per-query if needed. Here are a some examples: + + Example: + + .. code-block:: python + + class User(Model): + __options__ = {'default_time_to_live': 20} + + user_id = columns.UUID(primary_key=True) + ... + + user = User.objects.create(user_id=1) # Default TTL 20 will be set automatically on the server + + user.ttl(30).update(age=21) # Update the TTL to 30 + User.objects.ttl(10).create(user_id=1) # TTL 10 + User(user_id=1, age=21).ttl(10).save() # TTL 10 + + Named Tables =================== From f0cfc6937821efc5f5c04f2d0a059a87fdfe883b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 19 Jul 2016 14:03:26 -0500 Subject: [PATCH 0760/2005] Remove superfluous quotes from non-cql typenames PYTHON-579 --- cassandra/cqltypes.py | 12 ++++++------ cassandra/metadata.py | 1 - cassandra/type_codes.py | 1 - 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 7eb0a2df58..22606eb508 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -107,7 +107,7 @@ def __new__(metacls, name, bases, dct): cls = type.__new__(metacls, name, bases, dct) if not name.startswith('_'): _casstypes[name] = cls - if not cls.typename.startswith("'org"): + if not cls.typename.startswith(apache_cassandra_type_prefix): _cqltypes[cls.typename] = cls return cls @@ -853,7 +853,7 @@ def cql_parameterized_type(cls): class UserType(TupleType): - typename = "'org.apache.cassandra.db.marshal.UserType'" + typename = "org.apache.cassandra.db.marshal.UserType" _cache = {} _module = sys.modules[__name__] @@ -956,7 +956,7 @@ def _make_udt_tuple_type(cls, name, field_names): class CompositeType(_ParameterizedType): - typename = "'org.apache.cassandra.db.marshal.CompositeType'" + typename = "org.apache.cassandra.db.marshal.CompositeType" num_subtypes = 'UNKNOWN' @classmethod @@ -986,7 +986,7 @@ def deserialize_safe(cls, byts, protocol_version): class DynamicCompositeType(CompositeType): - typename = "'org.apache.cassandra.db.marshal.DynamicCompositeType'" + typename = "org.apache.cassandra.db.marshal.DynamicCompositeType" class ColumnToCollectionType(_ParameterizedType): @@ -995,12 +995,12 @@ class ColumnToCollectionType(_ParameterizedType): Cassandra includes this. We don't actually need or want the extra information. """ - typename = "'org.apache.cassandra.db.marshal.ColumnToCollectionType'" + typename = "org.apache.cassandra.db.marshal.ColumnToCollectionType" num_subtypes = 'UNKNOWN' class ReversedType(_ParameterizedType): - typename = "'org.apache.cassandra.db.marshal.ReversedType'" + typename = "org.apache.cassandra.db.marshal.ReversedType" num_subtypes = 1 @classmethod diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 1cd801eed2..05f02c70d6 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1058,7 +1058,6 @@ def is_cql_compatible(self): """ comparator = getattr(self, 'comparator', None) if comparator: - # no such thing as DCT in CQL incompatible = issubclass(self.comparator, types.DynamicCompositeType) # no compact storage with more than one column beyond PK if there diff --git a/cassandra/type_codes.py b/cassandra/type_codes.py index 2f0ce8f5a0..daf882e46c 100644 --- a/cassandra/type_codes.py +++ b/cassandra/type_codes.py @@ -59,4 +59,3 @@ SetType = 0x0022 UserType = 0x0030 TupleType = 0x0031 - From f55c004871c3fd331308cd50ebf47117dafaee63 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 19 Jul 2016 14:07:48 -0500 Subject: [PATCH 0761/2005] make DCT return typestring with aliases included PYTHON-579 --- cassandra/cqltypes.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 22606eb508..aff79411e2 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -988,6 +988,14 @@ def deserialize_safe(cls, byts, protocol_version): class DynamicCompositeType(CompositeType): typename = "org.apache.cassandra.db.marshal.DynamicCompositeType" + @classmethod + def cass_parameterized_type_with(cls, subtypes, full=False, **kwargs): + if not full: # short-circuit for unparsed type + return super(DynamicCompositeType, cls).cass_parameterized_type_with(subtypes) + # DCT is always formatted "full", and will always have subtypes (otherwise it is normalized to CompositeType by the server) + sublist = ', '.join('%s=>%s' % (alias, typ.cass_parameterized_type(full=True)) for alias, typ in zip(cls.fieldnames, subtypes)) + return '%s(%s)' % (cls.typename, sublist) + class ColumnToCollectionType(_ParameterizedType): """ From ca4b65c81b2d535086052f6ca966f9d052e6ed75 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 19 Jul 2016 15:44:24 -0500 Subject: [PATCH 0762/2005] PYTHON-549 adding test for bad host exclusion --- tests/integration/standard/test_metadata.py | 22 +++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index c317e50c3e..c895772a8c 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -79,6 +79,28 @@ def test_host_release_version(self): self.assertTrue(host.release_version.startswith(CASSANDRA_VERSION)) +class MetaDataRemovalTest(unittest.TestCase): + + def setUp(self): + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, contact_points=['127.0.0.1','127.0.0.2', '127.0.0.3', '126.0.0.186']) + self.cluster.connect() + + def tearDown(self): + self.cluster.shutdown() + + def test_bad_contact_point(self): + """ + Checks to ensure that hosts that are not resolvable are excluded from the contact point list. + + @since 3.6 + @jira_ticket PYTHON-549 + @expected_result Invalid hosts on the contact list should be excluded + + @test_category metadata + """ + self.assertEqual(len(self.cluster.metadata.all_hosts()), 3) + + class SchemaMetadataTests(BasicSegregatedKeyspaceUnitTestCase): def test_schema_metadata_disable(self): From 16feaac50db9cc4ae432ba3a55658cb3b6c4fe66 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 19 Jul 2016 18:05:19 -0500 Subject: [PATCH 0763/2005] Modifying PR-622 to handle client side validation --- tests/integration/cqlengine/model/test_updates.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/integration/cqlengine/model/test_updates.py b/tests/integration/cqlengine/model/test_updates.py index 138b063246..bc39d142cf 100644 --- a/tests/integration/cqlengine/model/test_updates.py +++ b/tests/integration/cqlengine/model/test_updates.py @@ -91,13 +91,11 @@ def test_noop_model_direct_update(self): m0.update(count=5) assert execute.call_count == 0 - with patch.object(self.session, 'execute') as execute: + with self.assertRaises(ValidationError): m0.update(partition=m0.partition) - assert execute.call_count == 0 - with patch.object(self.session, 'execute') as execute: + with self.assertRaises(ValidationError): m0.update(cluster=m0.cluster) - assert execute.call_count == 0 def test_noop_model_assignation_update(self): """ Tests that assigning the same value on a model will do nothing. """ From 0e94a1d1396366179281a069d7afbecdbad16a43 Mon Sep 17 00:00:00 2001 From: Matt Stibbs Date: Wed, 20 Jul 2016 09:18:46 +0100 Subject: [PATCH 0764/2005] Add missing comma to code sample --- docs/getting_started.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 2d9c7ea461..c7cbc25970 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -179,7 +179,7 @@ Named place-holders use the ``%(name)s`` form: """ INSERT INTO users (name, credits, user_id, username) VALUES (%(name)s, %(credits)s, %(user_id)s, %(name)s) - """ + """, {'name': "John O'Reilly", 'credits': 42, 'user_id': uuid.uuid1()} ) From a6e6613674bda97004a26f3f34f4d4a25ec8b858 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 20 Jul 2016 10:47:36 -0500 Subject: [PATCH 0765/2005] DCT is not really a specialization of CompositeType --- cassandra/cqltypes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 7eb0a2df58..cd7560a77e 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -985,7 +985,7 @@ def deserialize_safe(cls, byts, protocol_version): return tuple(result) -class DynamicCompositeType(CompositeType): +class DynamicCompositeType(_ParameterizedType): typename = "'org.apache.cassandra.db.marshal.DynamicCompositeType'" From bd1e7e7537a359d232bbd4117eff90c466da019f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 20 Jul 2016 10:52:32 -0500 Subject: [PATCH 0766/2005] handle missing column aliases for composite types This fixes some edge cases where some metadata goes missing in an upgrade from C* 2.0 --> 2.1. PYTHON-562 --- cassandra/metadata.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 1cd801eed2..9e92f93851 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1777,12 +1777,9 @@ def _build_table_metadata(self, row, col_rows=None, trigger_rows=None): comparator = types.lookup_casstype(row["comparator"]) table_meta.comparator = comparator - if issubclass(comparator, types.CompositeType): - column_name_types = comparator.subtypes - is_composite_comparator = True - else: - column_name_types = (comparator,) - is_composite_comparator = False + is_dct_comparator = issubclass(comparator, types.DynamicCompositeType) + is_composite_comparator = issubclass(comparator, types.CompositeType) + column_name_types = comparator.subtypes if is_composite_comparator else (comparator,) num_column_name_components = len(column_name_types) last_col = column_name_types[-1] @@ -1796,7 +1793,8 @@ def _build_table_metadata(self, row, col_rows=None, trigger_rows=None): if column_aliases is not None: column_aliases = json.loads(column_aliases) - else: + + if not column_aliases: # json load failed or column_aliases empty PYTHON-562 column_aliases = [r.get('column_name') for r in clustering_rows] if is_composite_comparator: @@ -1819,10 +1817,10 @@ def _build_table_metadata(self, row, col_rows=None, trigger_rows=None): # Some thrift tables define names in composite types (see PYTHON-192) if not column_aliases and hasattr(comparator, 'fieldnames'): - column_aliases = comparator.fieldnames + column_aliases = filter(None, comparator.fieldnames) else: is_compact = True - if column_aliases or not col_rows: + if column_aliases or not col_rows or is_dct_comparator: has_value = True clustering_size = num_column_name_components else: @@ -1867,7 +1865,7 @@ def _build_table_metadata(self, row, col_rows=None, trigger_rows=None): if len(column_aliases) > i: column_name = column_aliases[i] else: - column_name = "column%d" % i + column_name = "column%d" % (i + 1) data_type = column_name_types[i] cql_type = _cql_from_cass_type(data_type) From 2302711075d47b634f2923931125ef986b5a0327 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Wed, 20 Jul 2016 17:13:17 +0200 Subject: [PATCH 0767/2005] Subclass Ascii column from Text to add min_length and max_length parameters. --- cassandra/cqlengine/columns.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index c14b6d575c..cb9368b957 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -309,13 +309,6 @@ def to_database(self, value): Bytes = Blob -class Ascii(Column): - """ - Stores a US-ASCII character string - """ - db_type = 'ascii' - - class Inet(Column): """ Stores an IP address in IPv4 or IPv6 format @@ -354,6 +347,13 @@ def validate(self, value): return value +class Ascii(Text): + """ + Stores a US-ASCII character string + """ + db_type = 'ascii' + + class Integer(Column): """ Stores a 32-bit signed integer value From 6c2da51f8acad63cdfb4adef6e374dd9d6c99869 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Wed, 20 Jul 2016 17:30:31 +0200 Subject: [PATCH 0768/2005] Allow length constraints to be set to zero. --- cassandra/cqlengine/columns.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index cb9368b957..057a4f8176 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -338,10 +338,10 @@ def validate(self, value): return if not isinstance(value, (six.string_types, bytearray)) and value is not None: raise ValidationError('{0} {1} is not a string'.format(self.column_name, type(value))) - if self.max_length: + if self.max_length is not None: if len(value) > self.max_length: raise ValidationError('{0} is longer than {1} characters'.format(self.column_name, self.max_length)) - if self.min_length: + if self.min_length is not None: if len(value) < self.min_length: raise ValidationError('{0} is shorter than {1} characters'.format(self.column_name, self.min_length)) return value From 27a1ab3bf5a443e5a1a726aa362a8a2a0ed38d63 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Wed, 20 Jul 2016 17:31:10 +0200 Subject: [PATCH 0769/2005] Do not allow negative string length constraints. --- cassandra/cqlengine/columns.py | 12 ++++++++++++ .../integration/cqlengine/columns/test_validation.py | 12 ++++++++++++ 2 files changed, 24 insertions(+) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 057a4f8176..d13ad49a7e 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -330,6 +330,18 @@ def __init__(self, min_length=None, max_length=None, **kwargs): """ self.min_length = min_length or (1 if kwargs.get('required', False) else None) self.max_length = max_length + + if self.min_length is not None: + if self.min_length < 0: + raise ValueError( + 'Minimum length is not allowed to be negative.') + + self.max_length = max_length + if self.max_length is not None: + if self.max_length < 0: + raise ValueError( + 'Maximum length is not allowed to be negative.') + super(Text, self).__init__(**kwargs) def validate(self, value): diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 9dc4742816..b3ba45471a 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -366,6 +366,9 @@ def test_min_length(self): with self.assertRaises(ValidationError): Ascii(min_length=6).validate('kevin') + with self.assertRaises(ValueError): + Ascii(min_length=-1) + def test_max_length(self): """ Test arbitrary maximal lengths requirements. """ Ascii(max_length=0).validate('') @@ -386,6 +389,9 @@ def test_max_length(self): with self.assertRaises(ValidationError): Ascii(max_length=5).validate('blaketastic') + with self.assertRaises(ValueError): + Ascii(max_length=-1) + def test_type_checking(self): Ascii().validate('string') Ascii().validate(u'unicode') @@ -445,6 +451,9 @@ def test_min_length(self): with self.assertRaises(ValidationError): Text(min_length=6).validate('blake') + with self.assertRaises(ValueError): + Text(min_length=-1) + def test_max_length(self): """ Test arbitrary maximal lengths requirements. """ Text(max_length=0).validate('') @@ -465,6 +474,9 @@ def test_max_length(self): with self.assertRaises(ValidationError): Text(max_length=5).validate('blaketastic') + with self.assertRaises(ValueError): + Text(max_length=-1) + def test_type_checking(self): Text().validate('string') Text().validate(u'unicode') From 0f981634bfa58f9858c7d97b6dd52c0ce1326a23 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Wed, 20 Jul 2016 17:33:21 +0200 Subject: [PATCH 0770/2005] Only enforce minimal length requirement if min_length is unset. --- cassandra/cqlengine/columns.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index d13ad49a7e..13c21575e3 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -328,7 +328,9 @@ def __init__(self, min_length=None, max_length=None, **kwargs): Defaults to 1 if this is a ``required`` column. Otherwise, None. :param int max_length: Sets the maximum length of this string, for validation purposes. """ - self.min_length = min_length or (1 if kwargs.get('required', False) else None) + self.min_length = ( + 1 if not min_length and kwargs.get('required', False) + else min_length) self.max_length = max_length if self.min_length is not None: From 5e6660165e6f62b8d75d455a89f0660dd5ad4c12 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Wed, 20 Jul 2016 17:45:32 +0200 Subject: [PATCH 0771/2005] Let None strings to be checked against minimal and maximal length constraints. --- cassandra/cqlengine/columns.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 13c21575e3..407596eb8b 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -338,7 +338,6 @@ def __init__(self, min_length=None, max_length=None, **kwargs): raise ValueError( 'Minimum length is not allowed to be negative.') - self.max_length = max_length if self.max_length is not None: if self.max_length < 0: raise ValueError( @@ -348,15 +347,13 @@ def __init__(self, min_length=None, max_length=None, **kwargs): def validate(self, value): value = super(Text, self).validate(value) - if value is None: - return if not isinstance(value, (six.string_types, bytearray)) and value is not None: raise ValidationError('{0} {1} is not a string'.format(self.column_name, type(value))) if self.max_length is not None: - if len(value) > self.max_length: + if value and len(value) > self.max_length: raise ValidationError('{0} is longer than {1} characters'.format(self.column_name, self.max_length)) - if self.min_length is not None: - if len(value) < self.min_length: + if self.min_length: + if (self.min_length and not value) or len(value) < self.min_length: raise ValidationError('{0} is shorter than {1} characters'.format(self.column_name, self.min_length)) return value From bba8d163097b777387054b67268145696ed2005a Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Wed, 20 Jul 2016 19:06:08 +0200 Subject: [PATCH 0772/2005] Check string length range constraints. --- cassandra/cqlengine/columns.py | 6 ++ .../cqlengine/columns/test_validation.py | 60 +++++++++++++++++++ 2 files changed, 66 insertions(+) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 407596eb8b..36996a8679 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -343,6 +343,12 @@ def __init__(self, min_length=None, max_length=None, **kwargs): raise ValueError( 'Maximum length is not allowed to be negative.') + if self.min_length is not None and self.max_length is not None: + if self.max_length < self.min_length: + raise ValueError( + 'Maximum length must be greater or equal ' + 'to minimum length.') + super(Text, self).__init__(**kwargs) def validate(self, value): diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index b3ba45471a..fefa9efbe8 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -392,6 +392,18 @@ def test_max_length(self): with self.assertRaises(ValueError): Ascii(max_length=-1) + def test_length_range(self): + Ascii(min_length=0, max_length=0) + Ascii(min_length=0, max_length=1) + Ascii(min_length=10, max_length=10) + Ascii(min_length=10, max_length=11) + + with self.assertRaises(ValueError): + Ascii(min_length=10, max_length=9) + + with self.assertRaises(ValueError): + Ascii(min_length=1, max_length=0) + def test_type_checking(self): Ascii().validate('string') Ascii().validate(u'unicode') @@ -416,12 +428,30 @@ def test_non_required_validation(self): def test_required_validation(self): """ Tests that validation raise on none and blank values if value required. """ + Ascii(required=True).validate('k') + with self.assertRaises(ValidationError): Ascii(required=True).validate('') with self.assertRaises(ValidationError): Ascii(required=True).validate(None) + # With min_length set. + Ascii(required=True, min_length=0).validate('k') + Ascii(required=True, min_length=1).validate('k') + + with self.assertRaises(ValidationError): + Ascii(required=True, min_length=2).validate('k') + + # With max_length set. + Ascii(required=True, max_length=1).validate('k') + + with self.assertRaises(ValidationError): + Ascii(required=True, max_length=2).validate('kevin') + + with self.assertRaises(ValueError): + Ascii(required=True, max_length=0) + class TestText(BaseCassEngTestCase): @@ -477,6 +507,18 @@ def test_max_length(self): with self.assertRaises(ValueError): Text(max_length=-1) + def test_length_range(self): + Text(min_length=0, max_length=0) + Text(min_length=0, max_length=1) + Text(min_length=10, max_length=10) + Text(min_length=10, max_length=11) + + with self.assertRaises(ValueError): + Text(min_length=10, max_length=9) + + with self.assertRaises(ValueError): + Text(min_length=1, max_length=0) + def test_type_checking(self): Text().validate('string') Text().validate(u'unicode') @@ -501,12 +543,30 @@ def test_non_required_validation(self): def test_required_validation(self): """ Tests that validation raise on none and blank values if value required. """ + Text(required=True).validate('b') + with self.assertRaises(ValidationError): Text(required=True).validate('') with self.assertRaises(ValidationError): Text(required=True).validate(None) + # With min_length set. + Text(required=True, min_length=0).validate('b') + Text(required=True, min_length=1).validate('b') + + with self.assertRaises(ValidationError): + Text(required=True, min_length=2).validate('b') + + # With max_length set. + Text(required=True, max_length=1).validate('b') + + with self.assertRaises(ValidationError): + Text(required=True, max_length=2).validate('blake') + + with self.assertRaises(ValueError): + Text(required=True, max_length=0) + class TestExtraFieldsRaiseException(BaseCassEngTestCase): class TestModel(Model): From 3619462740a4c4369f80ea314d33c1df383dab4f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 20 Jul 2016 13:02:40 -0500 Subject: [PATCH 0773/2005] there is no return from libev Loop_start This is leftover from when the class was copied over from asyncore. --- cassandra/io/libevreactor.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index a3e96a9a03..89e08cb149 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -102,10 +102,10 @@ def maybe_start(self): def _run_loop(self): while True: - end_condition = self._loop.start() + self._loop.start() # there are still active watchers, no deadlock with self._lock: - if not self._shutdown and (end_condition or self._live_conns): + if not self._shutdown and self._live_conns: log.debug("Restarting event loop") continue else: From 9d36dd97f68a02aec13b3345b09e8dff1c880515 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 20 Jul 2016 13:03:52 -0500 Subject: [PATCH 0774/2005] No need to null the loop reference during cleanup This removes a small (but inconsequential) race on shutdown, where the loop thread could access None when trying to start. PYTHON-578 --- cassandra/io/libevreactor.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 89e08cb149..9b19d1f318 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -135,7 +135,6 @@ def _cleanup(self): "Please call Cluster.shutdown() to avoid this.") log.debug("Event loop thread was joined") - self._loop = None def add_timer(self, timer): self._timers.add_timer(timer) From 13475197d32a28e881c35a919d373ed73d5668a6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 20 Jul 2016 13:19:14 -0500 Subject: [PATCH 0775/2005] remove race where event loop deletes watcher during cleanup PYTHON-578 --- cassandra/io/libevreactor.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 9b19d1f318..39f871a135 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -121,10 +121,7 @@ def _cleanup(self): for conn in self._live_conns | self._new_conns | self._closed_conns: conn.close() - if conn._write_watcher: - conn._write_watcher.stop() - if conn._read_watcher: - conn._read_watcher.stop() + map(lambda w: w.stop(), (w for w in (conn._write_watcher, conn._read_watcher) if w)) self.notify() # wake the timer watcher log.debug("Waiting for event loop thread to join...") From 76140dc0629f171f842d0941b0679a4d351f797d Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 20 Jul 2016 16:02:55 -0500 Subject: [PATCH 0776/2005] PYTHON-579 added check for DCT metadata subtypes --- tests/integration/standard/test_metadata.py | 26 ++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index c895772a8c..c02b067476 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -2057,7 +2057,31 @@ def test_bad_user_aggregate(self): self.assertIn("/*\nWarning:", m.export_as_string()) -class MaterializedViewMetadataTestSimple(BasicSharedKeyspaceUnitTestCase): +class DynamicCompositeTypeTest(BasicSharedKeyspaceUnitTestCase): + + def test_dct_alias(self): + """ + Tests to make sure DCT's have correct string formatting + + Constructs a DCT and check the format as generated. To insure it matches what is expected + + @since 3.6.0 + @jira_ticket PYTHON-579 + @expected_result DCT subtypes should always have fully qualified names + + @test_category metadata + """ + self.session.execute("CREATE TABLE {0}.{1} (" + "k int PRIMARY KEY," + "c1 'DynamicCompositeType(s => UTF8Type, i => Int32Type)'," + "c2 Text)".format(self.ks_name, self.function_table_name)) + dct_table = self.cluster.metadata.keyspaces.get(self.ks_name).tables.get(self.function_table_name) + + # Format can very slightly between versions, strip out whitespace for consistency sake + self.assertTrue("c1'org.apache.cassandra.db.marshal.DynamicCompositeType(s=>org.apache.cassandra.db.marshal.UTF8Type,i=>org.apache.cassandra.db.marshal.Int32Type)'" in dct_table.as_cql_query().replace(" ", "")) + + +class Materia3lizedViewMetadataTestSimple(BasicSharedKeyspaceUnitTestCase): def setUp(self): if CASS_SERVER_VERSION < (3, 0): From b4b42a18e54290bfd15e901a7ee99f75e86fb4f0 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 20 Jul 2016 16:13:46 -0500 Subject: [PATCH 0777/2005] make num_subtypes UNKNOWN by default --- cassandra/cqltypes.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 4bd06f02cc..38a382e168 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -682,6 +682,8 @@ class VarcharType(UTF8Type): class _ParameterizedType(_CassandraType): + num_subtypes = 'UNKNOWN' + @classmethod def deserialize(cls, byts, protocol_version): if not cls.subtypes: @@ -802,7 +804,6 @@ def serialize_safe(cls, themap, protocol_version): class TupleType(_ParameterizedType): typename = 'tuple' - num_subtypes = 'UNKNOWN' @classmethod def deserialize_safe(cls, byts, protocol_version): @@ -957,7 +958,6 @@ def _make_udt_tuple_type(cls, name, field_names): class CompositeType(_ParameterizedType): typename = "org.apache.cassandra.db.marshal.CompositeType" - num_subtypes = 'UNKNOWN' @classmethod def cql_parameterized_type(cls): @@ -1004,7 +1004,6 @@ class ColumnToCollectionType(_ParameterizedType): information. """ typename = "org.apache.cassandra.db.marshal.ColumnToCollectionType" - num_subtypes = 'UNKNOWN' class ReversedType(_ParameterizedType): From 67ea5d48cbeb4db789ef8d2b00a7a7b573fa30b6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 20 Jul 2016 16:17:59 -0500 Subject: [PATCH 0778/2005] make connection timeout infinite by default removes the need for eventlet and gevent checking for timeouts PYTHON-600 --- cassandra/connection.py | 1 + cassandra/io/eventletreactor.py | 14 -------------- cassandra/io/geventreactor.py | 18 +++--------------- 3 files changed, 4 insertions(+), 29 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index ccdebebca9..11da8a4afe 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -345,6 +345,7 @@ def _connect_socket(self): self._socket = self._ssl_impl.wrap_socket(self._socket, **self.ssl_options) self._socket.settimeout(self.connect_timeout) self._socket.connect(sockaddr) + self._socket.settimeout(None) if self._check_hostname: ssl.match_hostname(self._socket.getpeercert(), self.host) sockerr = None diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index dfaea8bfb4..cf1616d45b 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -16,13 +16,10 @@ # Originally derived from MagnetoDB source: # https://github.com/stackforge/magnetodb/blob/2015.1.0b1/magnetodb/common/cassandra/io/eventletreactor.py -from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, EINVAL import eventlet from eventlet.green import socket -import ssl from eventlet.queue import Queue import logging -import os from threading import Event import time @@ -34,15 +31,6 @@ log = logging.getLogger(__name__) -def is_timeout(err): - return ( - err in (EINPROGRESS, EALREADY, EWOULDBLOCK) or - (err == EINVAL and os.name in ('nt', 'ce')) or - (isinstance(err, ssl.SSLError) and err.args[0] == 'timed out') or - isinstance(err, socket.timeout) - ) - - class EventletConnection(Connection): """ An implementation of :class:`.Connection` that utilizes ``eventlet``. @@ -145,8 +133,6 @@ def handle_read(self): buf = self._socket.recv(self.in_buffer_size) self._iobuf.write(buf) except socket.error as err: - if is_timeout(err): - continue log.debug("Exception during socket recv for %s: %s", self, err) self.defunct(err) diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index 65572a664c..bf0a4cc181 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -18,26 +18,16 @@ import gevent.ssl import logging -import os import time from six.moves import range -from errno import EINVAL - from cassandra.connection import Connection, ConnectionShutdown, Timer, TimerManager log = logging.getLogger(__name__) -def is_timeout(err): - return ( - (err == EINVAL and os.name in ('nt', 'ce')) or - isinstance(err, socket.timeout) - ) - - class GeventConnection(Connection): """ An implementation of :class:`.Connection` that utilizes ``gevent``. @@ -131,11 +121,9 @@ def handle_read(self): buf = self._socket.recv(self.in_buffer_size) self._iobuf.write(buf) except socket.error as err: - if not is_timeout(err): - log.debug("Exception in read for %s: %s", self, err) - self.defunct(err) - return # leave the read loop - continue + log.debug("Exception in read for %s: %s", self, err) + self.defunct(err) + return # leave the read loop if self._iobuf.tell(): self.process_io_buffer() From 039149aa4377abe223ab455582132c277b3e7851 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Thu, 21 Jul 2016 13:56:00 +0200 Subject: [PATCH 0779/2005] Replace deprecated assertEquals by assertEqual. --- .../integration/cqlengine/columns/test_validation.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index fefa9efbe8..4f4affe5e8 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -417,9 +417,9 @@ def test_type_checking(self): def test_unaltering_validation(self): """ Test the validation step doesn't re-interpret values. """ - self.assertEquals(Ascii().validate(''), '') - self.assertEquals(Ascii().validate(None), None) - self.assertEquals(Ascii().validate('yo'), 'yo') + self.assertEqual(Ascii().validate(''), '') + self.assertEqual(Ascii().validate(None), None) + self.assertEqual(Ascii().validate('yo'), 'yo') def test_non_required_validation(self): """ Tests that validation is ok on none and blank values if required is False. """ @@ -532,9 +532,9 @@ def test_type_checking(self): def test_unaltering_validation(self): """ Test the validation step doesn't re-interpret values. """ - self.assertEquals(Text().validate(''), '') - self.assertEquals(Text().validate(None), None) - self.assertEquals(Text().validate('yo'), 'yo') + self.assertEqual(Text().validate(''), '') + self.assertEqual(Text().validate(None), None) + self.assertEqual(Text().validate('yo'), 'yo') def test_non_required_validation(self): """ Tests that validation is ok on none and blank values if required is False """ From c509f5902fd9cabbcb8d025251705396b0de663b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 21 Jul 2016 11:25:51 -0500 Subject: [PATCH 0780/2005] override cql_parameterized_type for generating DCT CQL PYTHON-579 --- cassandra/cqltypes.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 38a382e168..b6a720e6c9 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -989,12 +989,9 @@ class DynamicCompositeType(_ParameterizedType): typename = "org.apache.cassandra.db.marshal.DynamicCompositeType" @classmethod - def cass_parameterized_type_with(cls, subtypes, full=False, **kwargs): - if not full: # short-circuit for unparsed type - return super(DynamicCompositeType, cls).cass_parameterized_type_with(subtypes) - # DCT is always formatted "full", and will always have subtypes (otherwise it is normalized to CompositeType by the server) - sublist = ', '.join('%s=>%s' % (alias, typ.cass_parameterized_type(full=True)) for alias, typ in zip(cls.fieldnames, subtypes)) - return '%s(%s)' % (cls.typename, sublist) + def cql_parameterized_type(cls): + sublist = ', '.join('%s=>%s' % (alias, typ.cass_parameterized_type(full=True)) for alias, typ in zip(cls.fieldnames, cls.subtypes)) + return "'%s(%s)'" % (cls.typename, sublist) class ColumnToCollectionType(_ParameterizedType): From 541fac775e2b245881f6b6302f23ecc15a8fd01e Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 21 Jul 2016 12:21:31 -0400 Subject: [PATCH 0781/2005] Records metrics per cluster --- cassandra/metrics.py | 15 +++++++++- tests/integration/standard/test_metrics.py | 35 ++++++++++++++++++++++ 2 files changed, 49 insertions(+), 1 deletion(-) diff --git a/cassandra/metrics.py b/cassandra/metrics.py index cf1f25c15d..6150271723 100644 --- a/cassandra/metrics.py +++ b/cassandra/metrics.py @@ -114,7 +114,9 @@ class Metrics(object): def __init__(self, cluster_proxy): log.debug("Starting metric capture") - self.stats = scales.collection('/cassandra', + # TODO, modify the path to /cassandra/{clusterid} in 4.0 + self.stats_id = str(id(cluster_proxy)) + self.stats = scales.collection('/_cassandra/{0}'.format(self.stats_id), scales.PmfStat('request_timer'), scales.IntStat('connection_errors'), scales.IntStat('write_timeouts'), @@ -132,6 +134,11 @@ def __init__(self, cluster_proxy): scales.Stat('open_connections', lambda: sum(sum(p.open_count for p in s._pools.values()) for s in cluster_proxy.sessions))) + # TODO, to be removed in 4.0 + # /cassandra contains the metrics of the first cluster registered + if 'cassandra' not in scales._Stats.stats: + scales._Stats.stats['cassandra'] = scales._Stats.stats['_cassandra'][self.stats_id] + self.request_timer = self.stats.request_timer self.connection_errors = self.stats.connection_errors self.write_timeouts = self.stats.write_timeouts @@ -164,3 +171,9 @@ def on_ignore(self): def on_retry(self): self.stats.retries += 1 + + def get_stats(self): + """ + Returns the metrics for the registered cluster instance. + """ + return scales.getStats()['_cassandra'][self.stats_id] diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index aba83de420..d33bb0e21f 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -173,3 +173,38 @@ def test_unavailable(self): # def test_retry(self): # # TODO: Look for ways to generate retries # pass + + def test_metrics_per_cluster(self): + """ + Test that metrics are per cluster. + """ + + cluster2 = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION, + default_retry_policy=FallthroughRetryPolicy()) + session2 = cluster2.connect("test3rf", wait_for_all_pools=True) + + query = SimpleStatement("SELECT * FROM test", consistency_level=ConsistencyLevel.ALL) + self.session.execute(query) + + # Pause node so it shows as unreachable to coordinator + get_node(1).pause() + + try: + # Test write + query = SimpleStatement("INSERT INTO test (k, v) VALUES (2, 2)", consistency_level=ConsistencyLevel.ALL) + with self.assertRaises(WriteTimeout): + self.session.execute(query, timeout=None) + finally: + get_node(1).resume() + + stats_cluster1 = self.cluster.metrics.get_stats() + stats_cluster2 = cluster2.metrics.get_stats() + + self.assertEqual(1, self.cluster.metrics.stats.write_timeouts) + self.assertEqual(0, cluster2.metrics.stats.write_timeouts) + + self.assertNotEqual(0.0, self.cluster.metrics.request_timer['mean']) + self.assertEqual(0.0, cluster2.metrics.request_timer['mean']) + + self.assertNotEqual(0.0, stats_cluster1['request_timer']['mean']) + self.assertEqual(0.0, stats_cluster2['request_timer']['mean']) From 117e9ef885733392c86e4faaa1a437f60f46ac12 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 21 Jul 2016 12:19:30 -0500 Subject: [PATCH 0782/2005] update legacy test with corrected DCT --- tests/integration/standard/test_metadata.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index c02b067476..aa652058ec 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1155,14 +1155,12 @@ def test_legacy_tables(self): CREATE TABLE legacy.composite_comp_with_col ( key blob, - b blob, - s text, - t timeuuid, + column1 'org.apache.cassandra.db.marshal.DynamicCompositeType(b=>org.apache.cassandra.db.marshal.BytesType, s=>org.apache.cassandra.db.marshal.UTF8Type, t=>org.apache.cassandra.db.marshal.TimeUUIDType)', "b@6869746d65776974686d75736963" blob, "b@6d616d6d616a616d6d61" blob, - PRIMARY KEY (key, b, s, t) + PRIMARY KEY (key, column1) ) WITH COMPACT STORAGE - AND CLUSTERING ORDER BY (b ASC, s ASC, t ASC) + AND CLUSTERING ORDER BY (column1 ASC) AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' AND comment = 'Stores file meta data' AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'} @@ -1283,12 +1281,11 @@ def test_legacy_tables(self): CREATE TABLE legacy.composite_comp_no_col ( key blob, - column1 'org.apache.cassandra.db.marshal.DynamicCompositeType(org.apache.cassandra.db.marshal.BytesType, org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.db.marshal.TimeUUIDType)', - column2 timeuuid, + column1 'org.apache.cassandra.db.marshal.DynamicCompositeType(b=>org.apache.cassandra.db.marshal.BytesType, s=>org.apache.cassandra.db.marshal.UTF8Type, t=>org.apache.cassandra.db.marshal.TimeUUIDType)', value blob, - PRIMARY KEY (key, column1, column1, column2) + PRIMARY KEY (key, column1) ) WITH COMPACT STORAGE - AND CLUSTERING ORDER BY (column1 ASC, column1 ASC, column2 ASC) + AND CLUSTERING ORDER BY (column1 ASC) AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' AND comment = 'Stores file meta data' AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'} From 1a72f87554f20a0e89bb15ea5389b8d8c0e2255e Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Fri, 22 Jul 2016 11:21:16 +0200 Subject: [PATCH 0783/2005] Raise ValidationError on illegal characters provided to ASCII column. --- cassandra/cqlengine/columns.py | 19 +++++++++++++++++++ .../cqlengine/columns/test_validation.py | 12 ++++++++++++ 2 files changed, 31 insertions(+) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 36996a8679..0bb52d6bff 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -370,6 +370,25 @@ class Ascii(Text): """ db_type = 'ascii' + def validate(self, value): + """ Only allow ASCII and None values. + + Check against US-ASCII, a.k.a. 7-bit ASCII, a.k.a. ISO646-US, a.k.a. + the Basic Latin block of the Unicode character set. + + Source: https://github.com/apache/cassandra/blob + /3dcbe90e02440e6ee534f643c7603d50ca08482b/src/java/org/apache/cassandra + /serializers/AsciiSerializer.java#L29 + """ + value = super(Ascii, self).validate(value) + if value: + charset = value if isinstance( + value, (bytearray, )) else map(ord, value) + if not set(range(128)).issuperset(charset): + raise ValidationError( + '{!r} is not an ASCII string.'.format(value)) + return value + class Integer(Column): """ diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 4f4affe5e8..71a7e89a68 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -415,6 +415,14 @@ def test_type_checking(self): with self.assertRaises(ValidationError): Ascii().validate(True) + Ascii().validate("!#$%&\'()*+,-./") + + with self.assertRaises(ValidationError): + Ascii().validate('Beyonc' + chr(233)) + + with self.assertRaises(ValidationError): + Ascii().validate(u'Beyonc' + unichr(233)) + def test_unaltering_validation(self): """ Test the validation step doesn't re-interpret values. """ self.assertEqual(Ascii().validate(''), '') @@ -530,6 +538,10 @@ def test_type_checking(self): with self.assertRaises(ValidationError): Text().validate(True) + Text().validate("!#$%&\'()*+,-./") + Text().validate('Beyonc' + chr(233)) + Text().validate(u'Beyonc' + unichr(233)) + def test_unaltering_validation(self): """ Test the validation step doesn't re-interpret values. """ self.assertEqual(Text().validate(''), '') From 1116a6943f73bc6c25a63ab2b492e990ebb657fe Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 22 Jul 2016 14:02:56 -0500 Subject: [PATCH 0784/2005] use numpy masked arrays to represent empty cells PYTHON-553 --- cassandra/numpy_parser.pyx | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/cassandra/numpy_parser.pyx b/cassandra/numpy_parser.pyx index 1334e747c4..ed755d00a4 100644 --- a/cassandra/numpy_parser.pyx +++ b/cassandra/numpy_parser.pyx @@ -13,7 +13,7 @@ # limitations under the License. """ -This module provider an optional protocol parser that returns +This module provides an optional protocol parser that returns NumPy arrays. ============================================================================= @@ -25,7 +25,7 @@ as numpy is an optional dependency. include "ioutils.pyx" cimport cython -from libc.stdint cimport uint64_t +from libc.stdint cimport uint64_t, uint8_t from cpython.ref cimport Py_INCREF, PyObject from cassandra.bytesio cimport BytesIOReader @@ -35,7 +35,6 @@ from cassandra import cqltypes from cassandra.util import is_little_endian import numpy as np -# import pandas as pd cdef extern from "numpyFlags.h": # Include 'numpyFlags.h' into the generated C code to disable the @@ -52,11 +51,13 @@ ctypedef struct ArrDesc: Py_uintptr_t buf_ptr int stride # should be large enough as we allocate contiguous arrays int is_object + Py_uintptr_t mask_ptr arrDescDtype = np.dtype( [ ('buf_ptr', np.uintp) , ('stride', np.dtype('i')) , ('is_object', np.dtype('i')) + , ('mask_ptr', np.uintp) ], align=True) _cqltype_to_numpy = { @@ -70,6 +71,7 @@ _cqltype_to_numpy = { obj_dtype = np.dtype('O') +cdef uint8_t mask_true = 0x01 cdef class NumpyParser(ColumnParser): """Decode a ResultMessage into a bunch of NumPy arrays""" @@ -116,7 +118,11 @@ def make_arrays(ParseDesc desc, array_size): arr = make_array(coltype, array_size) array_descs[i]['buf_ptr'] = arr.ctypes.data array_descs[i]['stride'] = arr.strides[0] - array_descs[i]['is_object'] = coltype not in _cqltype_to_numpy + array_descs[i]['is_object'] = arr.dtype is obj_dtype + try: + array_descs[i]['mask_ptr'] = arr.mask.ctypes.data + except AttributeError: + array_descs[i]['mask_ptr'] = 0 arrays.append(arr) return array_descs, arrays @@ -126,8 +132,12 @@ def make_array(coltype, array_size): """ Allocate a new NumPy array of the given column type and size. """ - dtype = _cqltype_to_numpy.get(coltype, obj_dtype) - return np.empty((array_size,), dtype=dtype) + try: + a = np.ma.empty((array_size,), dtype=_cqltype_to_numpy[coltype]) + a.mask = np.zeros((array_size,), dtype=np.bool) + except KeyError: + a = np.empty((array_size,), dtype=obj_dtype) + return a #### Parse rows into NumPy arrays @@ -140,7 +150,6 @@ cdef inline int unpack_row( cdef Py_ssize_t i, rowsize = desc.rowsize cdef ArrDesc arr cdef Deserializer deserializer - for i in range(rowsize): get_buf(reader, &buf) arr = arrays[i] @@ -150,13 +159,14 @@ cdef inline int unpack_row( val = from_binary(deserializer, &buf, desc.protocol_version) Py_INCREF(val) ( arr.buf_ptr)[0] = val - elif buf.size < 0: - raise ValueError("Cannot handle NULL value") - else: + elif buf.size >= 0: memcpy( arr.buf_ptr, buf.ptr, buf.size) + else: + memcpy(arr.mask_ptr, &mask_true, 1) # Update the pointer into the array for the next time arrays[i].buf_ptr += arr.stride + arrays[i].mask_ptr += 1 return 0 From 250a1343557c4a5f18555a29e75d4e376f3373b7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 22 Jul 2016 16:30:02 -0500 Subject: [PATCH 0785/2005] make util.Date and util.Time lt robust with other types --- cassandra/util.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cassandra/util.py b/cassandra/util.py index f4bc1b1c94..7f17e85d18 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -973,6 +973,8 @@ def __eq__(self, other): microsecond=self.nanosecond // Time.MICRO) == other def __lt__(self, other): + if not isinstance(other, Time): + return NotImplemented return self.nanosecond_time < other.nanosecond_time def __repr__(self): @@ -1061,6 +1063,8 @@ def __eq__(self, other): return False def __lt__(self, other): + if not isinstance(other, Date): + return NotImplemented return self.days_from_epoch < other.days_from_epoch def __repr__(self): From 1b328c1164e3ca4c349d4a72c9315a5901cfd7e2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 22 Jul 2016 16:30:28 -0500 Subject: [PATCH 0786/2005] update numpy null test for new null handling PYTHON-553 --- .../standard/test_cython_protocol_handlers.py | 91 ++++++++----------- 1 file changed, 38 insertions(+), 53 deletions(-) diff --git a/tests/integration/standard/test_cython_protocol_handlers.py b/tests/integration/standard/test_cython_protocol_handlers.py index 3560709faa..7dc3db300e 100644 --- a/tests/integration/standard/test_cython_protocol_handlers.py +++ b/tests/integration/standard/test_cython_protocol_handlers.py @@ -7,10 +7,12 @@ except ImportError: import unittest -from cassandra import DriverException, Timeout, AlreadyExists +from itertools import count + from cassandra.query import tuple_factory from cassandra.cluster import Cluster, NoHostAvailable -from cassandra.protocol import ProtocolHandler, LazyProtocolHandler, NumpyProtocolHandler, ConfigurationException +from cassandra.concurrent import execute_concurrent_with_args +from cassandra.protocol import ProtocolHandler, LazyProtocolHandler, NumpyProtocolHandler from cassandra.cython_deps import HAVE_CYTHON, HAVE_NUMPY from tests.integration import use_singledc, PROTOCOL_VERSION, notprotocolv1, drop_keyspace_shutdown_cluster, VERIFY_CYTHON, BasicSharedKeyspaceUnitTestCase, execute_with_retry_tolerant, greaterthancass21 from tests.integration.datatype_utils import update_datatypes @@ -207,66 +209,49 @@ def verify_iterator_data(assertEqual, results): class NumpyNullTest(BasicSharedKeyspaceUnitTestCase): - # A dictionary containing table key to type. - # Boolean dictates whether or not the type can be deserialized with null value - NUMPY_TYPES = {"v1": ('bigint', False), - "v2": ('double', False), - "v3": ('float', False), - "v4": ('int', False), - "v5": ('smallint', False), - "v6": ("ascii", True), - "v7": ("blob", True), - "v8": ("boolean", True), - "v9": ("decimal", True), - "v10": ("inet", True), - "v11": ("text", True), - "v12": ("timestamp", True), - "v13": ("timeuuid", True), - "v14": ("uuid", True), - "v15": ("varchar", True), - "v16": ("varint", True), - } - - def setUp(self): - self.session.client_protocol_handler = NumpyProtocolHandler - self.session.row_factory = tuple_factory - @numpytest @greaterthancass21 def test_null_types(self): """ Test to validate that the numpy protocol handler can deal with null values. @since 3.3.0 + - updated 3.6.0: now numeric types used masked array @jira_ticket PYTHON-550 @expected_result Numpy can handle non mapped types' null values. @test_category data_types:serialization """ - - self.create_table_of_types() - self.session.execute("INSERT INTO {0}.{1} (k) VALUES (1)".format(self.keyspace_name, self.function_table_name)) - self.validate_types() - - def create_table_of_types(self): - """ - Builds a table containing all the numpy types - """ - base_ddl = '''CREATE TABLE {0}.{1} (k int PRIMARY KEY'''.format(self.keyspace_name, self.function_table_name, type) - for key, value in NumpyNullTest.NUMPY_TYPES.items(): - base_ddl = base_ddl+", {0} {1}".format(key, value[0]) - base_ddl = base_ddl+")" - execute_with_retry_tolerant(self.session, base_ddl, (DriverException, NoHostAvailable, Timeout), (ConfigurationException, AlreadyExists)) - - def validate_types(self): - """ - Selects each type from the table and expects either an exception or None depending on type - """ - for key, value in NumpyNullTest.NUMPY_TYPES.items(): - select = "SELECT {0} from {1}.{2}".format(key,self.keyspace_name, self.function_table_name) - if value[1]: - rs = execute_with_retry_tolerant(self.session, select, (NoHostAvailable), ()) - self.assertEqual(rs[0].get('v1'), None) + s = self.session + s.row_factory = tuple_factory + s.client_protocol_handler = NumpyProtocolHandler + + table = "%s.%s" % (self.keyspace_name, self.function_table_name) + create_table_with_all_types(table, s, 10) + + begin_unset = max(s.execute('select primkey from %s' % (table,))[0]['primkey']) + 1 + keys_null = range(begin_unset, begin_unset + 10) + + # scatter some emptry rows in here + insert = "insert into %s (primkey) values (%%s)" % (table,) + execute_concurrent_with_args(s, insert, ((k,) for k in keys_null)) + + result = s.execute("select * from %s" % (table,))[0] + + from numpy.ma import masked, MaskedArray + result_keys = result.pop('primkey') + mapped_index = [v[1] for v in sorted(zip(result_keys, count()))] + + had_masked = had_none = False + for col_array in result.values(): + # these have to be different branches (as opposed to comparing against an 'unset value') + # because None and `masked` have different identity and equals semantics + if isinstance(col_array, MaskedArray): + had_masked = True + [self.assertIsNot(col_array[i], masked) for i in mapped_index[:begin_unset]] + [self.assertIs(col_array[i], masked) for i in mapped_index[begin_unset:]] else: - with self.assertRaises(ValueError): - execute_with_retry_tolerant(self.session, select, (NoHostAvailable), ()) - + had_none = True + [self.assertIsNotNone(col_array[i]) for i in mapped_index[:begin_unset]] + [self.assertIsNone(col_array[i]) for i in mapped_index[begin_unset:]] + self.assertTrue(had_masked) + self.assertTrue(had_none) From ef7953ad842fdc77cd72221adfc1aa9a71c23f98 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 22 Jul 2016 18:10:54 -0500 Subject: [PATCH 0787/2005] PYTHON-600 adding long running ssl connection test --- tests/integration/__init__.py | 6 +++-- tests/integration/long/test_ssl.py | 40 ++++++++++++++++++++++++++++-- 2 files changed, 42 insertions(+), 4 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index e49addd9ea..ce420994c8 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -296,6 +296,7 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[]): log.debug("Using external CCM cluster {0}".format(CCM_CLUSTER.name)) else: log.debug("Using unnamed external cluster") + setup_keyspace(ipformat=ipformat, wait=False) return if is_current_cluster(cluster_name, nodes): @@ -442,9 +443,10 @@ def drop_keyspace_shutdown_cluster(keyspace_name, session, cluster): cluster.shutdown() -def setup_keyspace(ipformat=None): +def setup_keyspace(ipformat=None, wait=True): # wait for nodes to startup - time.sleep(10) + if wait: + time.sleep(10) if not ipformat: cluster = Cluster(protocol_version=PROTOCOL_VERSION) diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index 48d74e55f0..99875afe48 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -17,7 +17,7 @@ except ImportError: import unittest -import os, sys, traceback, logging, ssl +import os, sys, traceback, logging, ssl, time from cassandra.cluster import Cluster, NoHostAvailable from cassandra import ConsistencyLevel from cassandra.query import SimpleStatement @@ -132,11 +132,47 @@ def test_can_connect_with_ssl_ca(self): @test_category connection:ssl """ + # find absolute path to client CA_CERTS + abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS) + ssl_options = {'ca_certs': abs_path_ca_cert_path,'ssl_version': ssl.PROTOCOL_TLSv1} + validate_ssl_options(ssl_options=ssl_options) + + def test_can_connect_with_ssl_long_running(self): + """ + Test to validate that long running ssl connections continue to function past thier timeout window + + @since 3.6.0 + @jira_ticket PYTHON-600 + @expected_result The client can connect via SSL and preform some basic operations over a period of longer then a minute + + @test_category connection:ssl + """ + # find absolute path to client CA_CERTS abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS) ssl_options = {'ca_certs': abs_path_ca_cert_path, 'ssl_version': ssl.PROTOCOL_TLSv1} - validate_ssl_options(ssl_options=ssl_options) + tries = 0 + while True: + if tries > 5: + raise RuntimeError("Failed to connect to SSL cluster after 5 attempts") + try: + cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options=ssl_options) + session = cluster.connect(wait_for_all_pools=True) + break + except Exception: + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb + tries += 1 + + # attempt a few simple commands. + + for i in range(8): + rs = session.execute("SELECT * FROM system.local") + time.sleep(10) + + cluster.shutdown() def test_can_connect_with_ssl_ca_host_match(self): """ From 8390894e0b5f546caceecf7ffdd96c04d6739ffa Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 22 Jul 2016 19:23:34 -0400 Subject: [PATCH 0788/2005] Changed per-cluster metrics path and added ability to define an explicit stats name --- cassandra/metrics.py | 24 +++++++++++++++++----- tests/integration/standard/test_metrics.py | 11 +++++++++- 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/cassandra/metrics.py b/cassandra/metrics.py index 6150271723..d56d785919 100644 --- a/cassandra/metrics.py +++ b/cassandra/metrics.py @@ -111,12 +111,14 @@ class Metrics(object): the driver currently has open. """ + _stats_counter = 0 + def __init__(self, cluster_proxy): log.debug("Starting metric capture") - # TODO, modify the path to /cassandra/{clusterid} in 4.0 - self.stats_id = str(id(cluster_proxy)) - self.stats = scales.collection('/_cassandra/{0}'.format(self.stats_id), + self.stats_name = 'cassandra-{0}'.format(str(self._stats_counter)) + Metrics._stats_counter += 1 + self.stats = scales.collection(self.stats_name, scales.PmfStat('request_timer'), scales.IntStat('connection_errors'), scales.IntStat('write_timeouts'), @@ -137,7 +139,7 @@ def __init__(self, cluster_proxy): # TODO, to be removed in 4.0 # /cassandra contains the metrics of the first cluster registered if 'cassandra' not in scales._Stats.stats: - scales._Stats.stats['cassandra'] = scales._Stats.stats['_cassandra'][self.stats_id] + scales._Stats.stats['cassandra'] = scales._Stats.stats[self.stats_name] self.request_timer = self.stats.request_timer self.connection_errors = self.stats.connection_errors @@ -176,4 +178,16 @@ def get_stats(self): """ Returns the metrics for the registered cluster instance. """ - return scales.getStats()['_cassandra'][self.stats_id] + return scales.getStats()[self.stats_name] + + def set_stats_name(self, stats_name): + """ + Set the metrics stats name. + The stats_name is a string used to access the metris through scales: scales.getStats()[] + Default is 'cassandra-'. + """ + + stats = scales._Stats.stats[self.stats_name] + del scales._Stats.stats[self.stats_name] + self.stats_name = stats_name + scales._Stats.stats[self.stats_name] = stats diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index d33bb0e21f..595bb285be 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -26,7 +26,7 @@ from cassandra.cluster import Cluster, NoHostAvailable from tests.integration import get_cluster, get_node, use_singledc, PROTOCOL_VERSION, execute_until_pass - +from greplin import scales def setup_module(): use_singledc() @@ -197,14 +197,23 @@ def test_metrics_per_cluster(self): finally: get_node(1).resume() + # Change the scales stats_name of the cluster2 + cluster2.metrics.set_stats_name('cluster2-metrics') + stats_cluster1 = self.cluster.metrics.get_stats() stats_cluster2 = cluster2.metrics.get_stats() + # Test direct access to stats self.assertEqual(1, self.cluster.metrics.stats.write_timeouts) self.assertEqual(0, cluster2.metrics.stats.write_timeouts) + # Test direct access to a child stats self.assertNotEqual(0.0, self.cluster.metrics.request_timer['mean']) self.assertEqual(0.0, cluster2.metrics.request_timer['mean']) + # Test access via metrics.get_stats() self.assertNotEqual(0.0, stats_cluster1['request_timer']['mean']) self.assertEqual(0.0, stats_cluster2['request_timer']['mean']) + + # Test access by stats_name + self.assertEqual(0.0, scales.getStats()['cluster2-metrics']['request_timer']['mean']) From c1dd8a3dea8640efe76f500efbf7e1b3071fbf04 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 25 Jul 2016 11:07:13 -0400 Subject: [PATCH 0789/2005] add simple validation in set_stats_name --- cassandra/metrics.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cassandra/metrics.py b/cassandra/metrics.py index d56d785919..644e44850f 100644 --- a/cassandra/metrics.py +++ b/cassandra/metrics.py @@ -187,6 +187,9 @@ def set_stats_name(self, stats_name): Default is 'cassandra-'. """ + if stats_name in scales._Stats.stats: + raise ValueError('"{0}" already exists in stats.'.format(stats_name)) + stats = scales._Stats.stats[self.stats_name] del scales._Stats.stats[self.stats_name] self.stats_name = stats_name From 5a23f1b1cc70ad82969b7380ebe267771075e76d Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 25 Jul 2016 17:23:25 -0500 Subject: [PATCH 0790/2005] PYTHON-561 adding test for duplicate metric names --- cassandra/metrics.py | 3 + tests/integration/__init__.py | 7 ++- tests/integration/standard/test_metrics.py | 64 ++++++++++++++++++++-- tests/integration/standard/utils.py | 1 + 4 files changed, 68 insertions(+), 7 deletions(-) diff --git a/cassandra/metrics.py b/cassandra/metrics.py index 644e44850f..d0c5b9e39c 100644 --- a/cassandra/metrics.py +++ b/cassandra/metrics.py @@ -187,6 +187,9 @@ def set_stats_name(self, stats_name): Default is 'cassandra-'. """ + if self.stats_name == stats_name: + return + if stats_name in scales._Stats.stats: raise ValueError('"{0}" already exists in stats.'.format(stats_name)) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index ce420994c8..bd9fe103cd 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -538,8 +538,8 @@ def create_keyspace(cls, rf): execute_with_long_wait_retry(cls.session, ddl) @classmethod - def common_setup(cls, rf, keyspace_creation=True, create_class_table=False): - cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + def common_setup(cls, rf, keyspace_creation=True, create_class_table=False, metrics=False): + cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION, metrics_enabled=metrics) cls.session = cls.cluster.connect() cls.ks_name = cls.__name__.lower() if keyspace_creation: @@ -592,6 +592,7 @@ def get_message_count(self, level, sub_string): count+=1 return count + class BasicExistingKeyspaceUnitTestCase(BasicKeyspaceUnitTestCase): """ This is basic unit test defines class level teardown and setup methods. It assumes that keyspace is already defined, or created as part of the test. @@ -646,7 +647,7 @@ class BasicSharedKeyspaceUnitTestCaseWTable(BasicSharedKeyspaceUnitTestCase): """ @classmethod def setUpClass(self): - self.common_setup(2, True) + self.common_setup(3, True, True, True) class BasicSharedKeyspaceUnitTestCaseRF3(BasicSharedKeyspaceUnitTestCase): diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 595bb285be..0dd9cbb65a 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -27,6 +27,7 @@ from cassandra.cluster import Cluster, NoHostAvailable from tests.integration import get_cluster, get_node, use_singledc, PROTOCOL_VERSION, execute_until_pass from greplin import scales +from tests.integration import BasicSharedKeyspaceUnitTestCaseWTable def setup_module(): use_singledc() @@ -174,16 +175,24 @@ def test_unavailable(self): # # TODO: Look for ways to generate retries # pass + +class MetricsNamespaceTest(BasicSharedKeyspaceUnitTestCaseWTable): + def test_metrics_per_cluster(self): """ - Test that metrics are per cluster. + Test to validate that metrics can be scopped to invdividual clusters + @since 3.6.0 + @jira_ticket PYTHON-561 + @expected_result metrics should be scopped to a cluster level + + @test_category metrics """ cluster2 = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION, default_retry_policy=FallthroughRetryPolicy()) - session2 = cluster2.connect("test3rf", wait_for_all_pools=True) + cluster2.connect(self.ks_name, wait_for_all_pools=True) - query = SimpleStatement("SELECT * FROM test", consistency_level=ConsistencyLevel.ALL) + query = SimpleStatement("SELECT * FROM {0}.{0}".format(self.ks_name), consistency_level=ConsistencyLevel.ALL) self.session.execute(query) # Pause node so it shows as unreachable to coordinator @@ -191,7 +200,7 @@ def test_metrics_per_cluster(self): try: # Test write - query = SimpleStatement("INSERT INTO test (k, v) VALUES (2, 2)", consistency_level=ConsistencyLevel.ALL) + query = SimpleStatement("INSERT INTO {0}.{0} (k, v) VALUES (2, 2)".format(self.ks_name), consistency_level=ConsistencyLevel.ALL) with self.assertRaises(WriteTimeout): self.session.execute(query, timeout=None) finally: @@ -217,3 +226,50 @@ def test_metrics_per_cluster(self): # Test access by stats_name self.assertEqual(0.0, scales.getStats()['cluster2-metrics']['request_timer']['mean']) + + cluster2.shutdown() + + def test_duplicate_metrics_per_cluster(self): + """ + Test to validate that cluster metrics names can't overlap. + @since 3.6.0 + @jira_ticket PYTHON-561 + @expected_result metric names should not be allowed to be same. + + @test_category metrics + """ + cluster2 = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION, + default_retry_policy=FallthroughRetryPolicy()) + + cluster3 = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION, + default_retry_policy=FallthroughRetryPolicy()) + + # Ensure duplicate metric names are not allowed + cluster2.metrics.set_stats_name("appcluster") + cluster2.metrics.set_stats_name("appcluster") + with self.assertRaises(ValueError): + cluster3.metrics.set_stats_name("appcluster") + cluster3.metrics.set_stats_name("devops") + + session2 = cluster2.connect(self.ks_name, wait_for_all_pools=True) + session3 = cluster3.connect(self.ks_name, wait_for_all_pools=True) + + # Basic validation that naming metrics doesn't impact their segration or accuracy + for i in range(10): + query = SimpleStatement("SELECT * FROM {0}.{0}".format(self.ks_name), consistency_level=ConsistencyLevel.ALL) + session2.execute(query) + + for i in range(5): + query = SimpleStatement("SELECT * FROM {0}.{0}".format(self.ks_name), consistency_level=ConsistencyLevel.ALL) + session3.execute(query) + + self.assertEqual(cluster2.metrics.get_stats().values()[2]['count'], 10) + self.assertEqual(cluster3.metrics.get_stats().values()[2]['count'], 5) + + # Check scales to ensure they are appropriately named + self.assertTrue("appcluster" in scales._Stats.stats.keys()) + self.assertTrue("devops" in scales._Stats.stats.keys()) + + + + diff --git a/tests/integration/standard/utils.py b/tests/integration/standard/utils.py index 4011047fc8..917b3a7f6e 100644 --- a/tests/integration/standard/utils.py +++ b/tests/integration/standard/utils.py @@ -4,6 +4,7 @@ from tests.integration.datatype_utils import PRIMITIVE_DATATYPES, get_sample + def create_table_with_all_types(table_name, session, N): """ Method that given a table_name and session construct a table that contains From 526472759afeda0bf01df0f03a9bb301a139607c Mon Sep 17 00:00:00 2001 From: Bohdan Tantsiura Date: Tue, 26 Jul 2016 12:24:13 +0300 Subject: [PATCH 0791/2005] Fix variable names in ResponseFuture.clear_callbacks --- cassandra/cluster.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 3e1c8aa583..536ae71c14 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3777,8 +3777,8 @@ def add_callbacks(self, callback, errback, def clear_callbacks(self): with self._callback_lock: - self._callback = [] - self._errback = [] + self._callbacks = [] + self._errbacks = [] def __str__(self): result = "(no result yet)" if self._final_result is _NOT_SET else self._final_result From c023d0181e42ea98d61612dcfd8e3fed6f442758 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 27 Jul 2016 12:03:46 -0400 Subject: [PATCH 0792/2005] Add ContextQuery to allow switching the model keyspace easily --- cassandra/cqlengine/management.py | 36 ++++++- cassandra/cqlengine/query.py | 20 ++++ .../cqlengine/test_context_query.py | 99 +++++++++++++++++++ 3 files changed, 150 insertions(+), 5 deletions(-) create mode 100644 tests/integration/cqlengine/test_context_query.py diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 6978964ad0..fde1d97c59 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -21,7 +21,7 @@ from cassandra import metadata from cassandra.cqlengine import CQLEngineException -from cassandra.cqlengine import columns +from cassandra.cqlengine import columns, query from cassandra.cqlengine.connection import execute, get_cluster from cassandra.cqlengine.models import Model from cassandra.cqlengine.named import NamedTable @@ -119,9 +119,9 @@ def _get_index_name_by_column(table, column_name): return index_metadata.name -def sync_table(model): +def sync_table(model, keyspaces=None): """ - Inspects the model and creates / updates the corresponding table and columns. + Inspects the model and creates / updates the corresponding table and columns for all keyspaces. Any User Defined Types used in the table are implicitly synchronized. @@ -135,6 +135,20 @@ def sync_table(model): *There are plans to guard schema-modifying functions with an environment-driven conditional.* """ + + if keyspaces: + if not isinstance(keyspaces, (list, tuple)): + raise ValueError('keyspaces must be a list or a tuple.') + + for keyspace in keyspaces: + with query.ContextQuery(model, keyspace=keyspace) as m: + _sync_table(m) + else: + _sync_table(model) + + +def _sync_table(model): + if not _allow_schema_modification(): return @@ -431,15 +445,27 @@ def _update_options(model): return False -def drop_table(model): +def drop_table(model, keyspaces=None): """ - Drops the table indicated by the model, if it exists. + Drops the table indicated by the model, if it exists, for all keyspaces. **This function should be used with caution, especially in production environments. Take care to execute schema modifications in a single context (i.e. not concurrently with other clients).** *There are plans to guard schema-modifying functions with an environment-driven conditional.* """ + + if keyspaces: + if not isinstance(keyspaces, (list, tuple)): + raise ValueError('keyspaces must be a list or a tuple.') + + for keyspace in keyspaces: + with query.ContextQuery(model, keyspace=keyspace) as m: + _drop_table(m) + else: + _drop_table(model) + +def _drop_table(model): if not _allow_schema_modification(): return diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index bed1999b3e..1adde8bd30 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -259,6 +259,26 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.execute() +class ContextQuery(object): + + def __init__(self, model, keyspace=None): + from cassandra.cqlengine import models + + if not issubclass(model, models.Model): + raise CQLEngineException("Models must be derived from base Model.") + + ks = keyspace if keyspace else model.__keyspace__ + new_type = type(model.__name__, (model,), {'__keyspace__': ks}) + + self.model = new_type + + def __enter__(self): + return self.model + + def __exit__(self, exc_type, exc_val, exc_tb): + return + + class AbstractQuerySet(object): def __init__(self, model): diff --git a/tests/integration/cqlengine/test_context_query.py b/tests/integration/cqlengine/test_context_query.py new file mode 100644 index 0000000000..ee947666e6 --- /dev/null +++ b/tests/integration/cqlengine/test_context_query.py @@ -0,0 +1,99 @@ +# Copyright 2013-2016 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from cassandra.cqlengine import columns +from cassandra.cqlengine.management import drop_keyspace, sync_table, create_keyspace_simple +from cassandra.cqlengine.models import Model +from cassandra.cqlengine.query import ContextQuery +from tests.integration.cqlengine.base import BaseCassEngTestCase + + +class TestModel(Model): + + __keyspace__ = 'ks1' + + partition = columns.Integer(primary_key=True) + cluster = columns.Integer(primary_key=True) + count = columns.Integer() + text = columns.Text() + + +class ContextQueryTests(BaseCassEngTestCase): + + KEYSPACES = ('ks1', 'ks2', 'ks3', 'ks4') + + @classmethod + def setUpClass(cls): + super(ContextQueryTests, cls).setUpClass() + for ks in cls.KEYSPACES: + create_keyspace_simple(ks, 1) + sync_table(TestModel, keyspaces=cls.KEYSPACES) + + @classmethod + def tearDownClass(cls): + super(ContextQueryTests, cls).tearDownClass() + for ks in cls.KEYSPACES: + drop_keyspace(ks) + + def setUp(self): + super(ContextQueryTests, self).setUp() + for ks in self.KEYSPACES: + with ContextQuery(TestModel, keyspace=ks) as tm: + for obj in tm.all(): + obj.delete() + + def test_context_manager(self): + + for ks in self.KEYSPACES: + with ContextQuery(TestModel, keyspace=ks) as tm: + self.assertEqual(tm.__keyspace__, ks) + + def test_default_keyspace(self): + + # model keyspace write/read + for i in range(5): + TestModel.objects.create(partition=i, cluster=i) + + with ContextQuery(TestModel) as tm: + self.assertEqual(5, len(tm.objects.all())) + + with ContextQuery(TestModel, keyspace='ks1') as tm: + self.assertEqual(5, len(tm.objects.all())) + + for ks in self.KEYSPACES[1:]: + with ContextQuery(TestModel, keyspace=ks) as tm: + self.assertEqual(0, len(tm.objects.all())) + + def test_context_keyspace(self): + + for i in range(5): + with ContextQuery(TestModel, keyspace='ks4') as tm: + tm.objects.create(partition=i, cluster=i) + + with ContextQuery(TestModel, keyspace='ks4') as tm: + self.assertEqual(5, len(tm.objects.all())) + + self.assertEqual(0, len(TestModel.objects.all())) + + for ks in self.KEYSPACES[:2]: + with ContextQuery(TestModel, keyspace=ks) as tm: + self.assertEqual(0, len(tm.objects.all())) + + # simple data update + with ContextQuery(TestModel, keyspace='ks4') as tm: + obj = tm.objects.get(partition=1) + obj.update(count=42) + + self.assertEqual(42, tm.objects.get(partition=1).count) + From 760708cace20106e9c1b1fb3dd498fb8b8329b0d Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 27 Jul 2016 12:28:40 -0400 Subject: [PATCH 0793/2005] improve docs of sync_table and drop_table --- cassandra/cqlengine/management.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index fde1d97c59..cc2a34599f 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -121,7 +121,9 @@ def _get_index_name_by_column(table, column_name): def sync_table(model, keyspaces=None): """ - Inspects the model and creates / updates the corresponding table and columns for all keyspaces. + Inspects the model and creates / updates the corresponding table and columns. + + If `keyspaces` is specified, the table will be synched for all specified keyspaces. Note that the `Model.__keyspace__` is ignored in that case. Any User Defined Types used in the table are implicitly synchronized. @@ -447,7 +449,9 @@ def _update_options(model): def drop_table(model, keyspaces=None): """ - Drops the table indicated by the model, if it exists, for all keyspaces. + Drops the table indicated by the model, if it exists. + + If `keyspaces` is specified, the table will be dropped for all specified keyspaces. Note that the `Model.__keyspace__` is ignored in that case. **This function should be used with caution, especially in production environments. Take care to execute schema modifications in a single context (i.e. not concurrently with other clients).** From 31aebdf203b9f98d91b03c8490c753151d749eee Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 27 Jul 2016 15:51:47 -0400 Subject: [PATCH 0794/2005] 3.6.0 changelog --- CHANGELOG.rst | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 273657131a..3db920828b 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,34 @@ +3.6.0 +===== +August 1, 2016 + +Features +-------- +* Handle null values in NumpyProtocolHandler (PYTHON-553) +* Collect greplin scales stats per cluster (PYTHON-561) +* Update mock unit test dependency requirement (PYTHON-591) +* Handle Missing CompositeType metadata following C* upgrade (PYTHON-562) +* Improve Host.is_up state for HostDistance.IGNORED hosts (PYTHON-551) +* Utilize v2 protocol's ability to skip result set metadata for prepared statement execution (PYTHON-71) +* Return from Cluster.connect() when first contact point connection(pool) is opened (PYTHON-105) +* cqlengine: Add ContextQuery to allow cqlengine models to switch the keyspace context easily (PYTHON-598) + +Bug Fixes +--------- +* Fix geventreactor with SSL support (PYTHON-600) +* Don't downgrade protocol version if explicitly set (PYTHON-537) +* Nonexistent contact point tries to connect indefinitely (PYTHON-549) +* Execute_concurrent can exceed max recursion depth in failure mode (PYTHON-585) +* Libev loop shutdown race (PYTHON-578) +* Include aliases in DCT type string (PYTHON-579) +* cqlengine: Comparison operators for Columns (PYTHON-595) +* cqlengine: disentangle default_time_to_live table option from model query default TTL (PYTHON-538) +* cqlengine: pk__token column name issue with the equality operator (PYTHON-584) +* cqlengine: Fix "__in" filtering operator converts True to string "True" automatically (PYTHON-596) +* cqlengine: Avoid LWTExceptions when updating columns that are part of the condition (PYTHON-580) +* cqlengine: Cannot execute a query when the filter contains all columns (PYTHON-599) +* cqlengine: routing key computation issue when a primary key column is overriden by model inheritance (PYTHON-576) + 3.5.0 ===== June 27, 2016 From 7f5b945e7f9baca0b0f616c4369d4aff08c63526 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Thu, 28 Jul 2016 14:21:23 +0200 Subject: [PATCH 0795/2005] Set default value of CQLengine columns on instantiation. --- cassandra/cqlengine/models.py | 7 +++ .../model/test_class_construction.py | 2 +- .../cqlengine/model/test_model_io.py | 43 +++++++++++++++++++ 3 files changed, 51 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 6d16bc7384..36bcbe6f13 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -368,6 +368,13 @@ def __init__(self, **values): value_mngr.explicit = name in values self._values[name] = value_mngr + # Set default values on instantiation. Thanks to this, we don't have + # to wait anylonger for a call to validate() to have CQLengine set + # default columns values. + for column_id, column_obj in self._columns.items(): + if column_id not in values and column_obj.has_default: + setattr(self, column_id, column_obj.get_default()) + def __repr__(self): return '{0}({1})'.format(self.__class__.__name__, ', '.join('{0}={1!r}'.format(k, getattr(self, k)) diff --git a/tests/integration/cqlengine/model/test_class_construction.py b/tests/integration/cqlengine/model/test_class_construction.py index 8147e41079..6efe7be961 100644 --- a/tests/integration/cqlengine/model/test_class_construction.py +++ b/tests/integration/cqlengine/model/test_class_construction.py @@ -47,7 +47,7 @@ class TestModel(Model): inst = TestModel() self.assertHasAttr(inst, 'id') self.assertHasAttr(inst, 'text') - self.assertIsNone(inst.id) + self.assertIsNotNone(inst.id) self.assertIsNone(inst.text) def test_db_map(self): diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index 3faf62febc..afb2804b6b 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -468,6 +468,49 @@ def test_previous_value_tracking_on_instantiation(self): self.assertTrue(self.instance._values['count'].previous_value is None) self.assertTrue(self.instance.count is None) + def test_previous_value_tracking_on_instantiation_with_default(self): + + class TestDefaultValueTracking(Model): + id = columns.Integer(partition_key=True) + int1 = columns.Integer(default=123) + int2 = columns.Integer(default=456) + int3 = columns.Integer(default=lambda: random.randint(0, 1000)) + int4 = columns.Integer(default=lambda: random.randint(0, 1000)) + int5 = columns.Integer() + int6 = columns.Integer() + + instance = TestDefaultValueTracking( + id=1, + int1=9999, + int3=7777, + int5=5555) + + self.assertEquals(instance.id, 1) + self.assertEquals(instance.int1, 9999) + self.assertEquals(instance.int2, 456) + self.assertEquals(instance.int3, 7777) + self.assertIsNotNone(instance.int4) + self.assertIsInstance(instance.int4, int) + self.assertGreaterEqual(instance.int4, 0) + self.assertLessEqual(instance.int4, 1000) + self.assertEquals(instance.int5, 5555) + self.assertTrue(instance.int6 is None) + + # All previous values are unset as the object hasn't been persisted + # yet. + self.assertTrue(instance._values['id'].previous_value is None) + self.assertTrue(instance._values['int1'].previous_value is None) + self.assertTrue(instance._values['int2'].previous_value is None) + self.assertTrue(instance._values['int3'].previous_value is None) + self.assertTrue(instance._values['int4'].previous_value is None) + self.assertTrue(instance._values['int5'].previous_value is None) + self.assertTrue(instance._values['int6'].previous_value is None) + + # All explicitely set columns, and those with default values are + # flagged has changed. + self.assertTrue(set(instance.get_changed_columns()) == set([ + 'id', 'int1', 'int2', 'int3', 'int4', 'int5'])) + def test_save_to_none(self): """ Test update of column value of None with save() function. From a4cbed88b9ee054cfab5c0686906948b5b332d5c Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Thu, 28 Jul 2016 14:40:02 +0200 Subject: [PATCH 0796/2005] Add some more unit-tests around value setting on instantiation. --- .../model/test_class_construction.py | 21 +++++++++++++++++++ .../cqlengine/model/test_model_io.py | 18 +++++++++++++++- 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/tests/integration/cqlengine/model/test_class_construction.py b/tests/integration/cqlengine/model/test_class_construction.py index 6efe7be961..0115d0b7da 100644 --- a/tests/integration/cqlengine/model/test_class_construction.py +++ b/tests/integration/cqlengine/model/test_class_construction.py @@ -50,6 +50,27 @@ class TestModel(Model): self.assertIsNotNone(inst.id) self.assertIsNone(inst.text) + def test_values_on_instantiation(self): + """ + Tests defaults and user-provided values on instantiation. + """ + + class TestPerson(Model): + first_name = columns.Text(primary_key=True, default='kevin') + last_name = columns.Text(default='deldycke') + + # Check that defaults are available at instantiation. + inst1 = TestPerson() + self.assertHasAttr(inst1, 'first_name') + self.assertHasAttr(inst1, 'last_name') + self.assertEquals(inst1.first_name, 'kevin') + self.assertEquals(inst1.last_name, 'deldycke') + + # Check that values on instantiation overrides defaults. + inst2 = TestPerson(first_name='bob', last_name='joe') + self.assertEquals(inst2.first_name, 'bob') + self.assertEquals(inst2.last_name, 'joe') + def test_db_map(self): """ Tests that the db_map is properly defined diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index afb2804b6b..bd16f6da5b 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -72,7 +72,7 @@ def tearDownClass(cls): def test_model_save_and_load(self): """ - Tests that models can be saved and retrieved + Tests that models can be saved and retrieved, using the create method. """ tm = TestModel.create(count=8, text='123456789') self.assertIsInstance(tm, TestModel) @@ -83,6 +83,22 @@ def test_model_save_and_load(self): for cname in tm._columns.keys(): self.assertEqual(getattr(tm, cname), getattr(tm2, cname)) + def test_model_instantiation_save_and_load(self): + """ + Tests that models can be saved and retrieved, this time using the + natural model instantiation. + """ + tm = TestModel(count=8, text='123456789') + # Tests that values are available on instantiation. + self.assertIsNotNone(tm['id']) + self.assertEquals(tm.count, 8) + self.assertEquals(tm.text, '123456789') + tm.save() + tm2 = TestModel.objects(id=tm.id).first() + + for cname in tm._columns.keys(): + self.assertEquals(getattr(tm, cname), getattr(tm2, cname)) + def test_model_read_as_dict(self): """ Tests that columns of an instance can be read as a dict. From 14446e1764c79c6f46244d04f1ec5413c4c2bfe9 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Thu, 28 Jul 2016 14:40:29 +0200 Subject: [PATCH 0797/2005] Fix code layout. --- tests/integration/cqlengine/model/test_model_io.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index bd16f6da5b..775508ace4 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -38,8 +38,6 @@ from tests.integration.cqlengine import DEFAULT_KEYSPACE - - class TestModel(Model): id = columns.UUID(primary_key=True, default=lambda: uuid4()) From a4352c37bacdc09e3177003643bc23b731b66fb1 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Thu, 28 Jul 2016 15:01:18 +0200 Subject: [PATCH 0798/2005] Simplify default value setting. --- cassandra/cqlengine/models.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 36bcbe6f13..41dfc77770 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -361,20 +361,17 @@ def __init__(self, **values): self._values = {} for name, column in self._columns.items(): - value = values.get(name) + # Set default values on instantiation. Thanks to this, we don't have + # to wait anylonger for a call to validate() to have CQLengine set + # default columns values. + column_default = column.get_default() if column.has_default else None + value = values.get(name, column_default) if value is not None or isinstance(column, columns.BaseContainerColumn): value = column.to_python(value) value_mngr = column.value_manager(self, column, value) value_mngr.explicit = name in values self._values[name] = value_mngr - # Set default values on instantiation. Thanks to this, we don't have - # to wait anylonger for a call to validate() to have CQLengine set - # default columns values. - for column_id, column_obj in self._columns.items(): - if column_id not in values and column_obj.has_default: - setattr(self, column_id, column_obj.get_default()) - def __repr__(self): return '{0}({1})'.format(self.__class__.__name__, ', '.join('{0}={1!r}'.format(k, getattr(self, k)) From a346172387b4bcb421503ba7c3b08e2616e941f8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 28 Jul 2016 11:45:08 -0500 Subject: [PATCH 0799/2005] don't copy a queryset's result constructor This is not needed as it is created per-execution. It was causing problems with deepcopy in Python 2.6. --- cassandra/cqlengine/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index bed1999b3e..951bb50bf4 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -332,7 +332,7 @@ def __call__(self, *args, **kwargs): def __deepcopy__(self, memo): clone = self.__class__(self.model) for k, v in self.__dict__.items(): - if k in ['_con', '_cur', '_result_cache', '_result_idx', '_result_generator']: # don't clone these + if k in ['_con', '_cur', '_result_cache', '_result_idx', '_result_generator', '_construct_result']: # don't clone these, which are per-request-execution clone.__dict__[k] = None elif k == '_batch': # we need to keep the same batch instance across From d67eb868f45e5a31c149c39ada73d445fe1abf22 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 28 Jul 2016 11:46:14 -0500 Subject: [PATCH 0800/2005] fix test syntax to work in Python 2.6 --- tests/integration/cqlengine/query/test_named.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/cqlengine/query/test_named.py b/tests/integration/cqlengine/query/test_named.py index 9cddbece17..55129cb985 100644 --- a/tests/integration/cqlengine/query/test_named.py +++ b/tests/integration/cqlengine/query/test_named.py @@ -342,7 +342,7 @@ def test_named_table_with_mv(self): # Populate the base table with data prepared_insert = self.session.prepare("""INSERT INTO {0}.scores (user, game, year, month, day, score) VALUES (?, ?, ? ,? ,?, ?)""".format(ks)) - parameters = {('pcmanus', 'Coup', 2015, 5, 1, 4000), + parameters = (('pcmanus', 'Coup', 2015, 5, 1, 4000), ('jbellis', 'Coup', 2015, 5, 3, 1750), ('yukim', 'Coup', 2015, 5, 3, 2250), ('tjake', 'Coup', 2015, 5, 3, 500), @@ -353,7 +353,7 @@ def test_named_table_with_mv(self): ('jbellis', 'Coup', 2015, 6, 20, 3500), ('jbellis', 'Checkers', 2015, 6, 20, 1200), ('jbellis', 'Chess', 2015, 6, 21, 3500), - ('pcmanus', 'Chess', 2015, 1, 25, 3200)} + ('pcmanus', 'Chess', 2015, 1, 25, 3200)) prepared_insert.consistency_level = ConsistencyLevel.ALL execute_concurrent_with_args(self.session, prepared_insert, parameters) From 0f18ded6fcd3c8f4c132929e10129e607efa3c3c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 28 Jul 2016 12:04:58 -0500 Subject: [PATCH 0801/2005] assertEquals --> assertEqual the former is deprecated --- .../model/test_class_construction.py | 8 +- .../cqlengine/model/test_model_io.py | 16 +-- tests/integration/standard/test_metadata.py | 62 +++++------ tests/integration/standard/test_query.py | 104 +++++++++--------- 4 files changed, 95 insertions(+), 95 deletions(-) diff --git a/tests/integration/cqlengine/model/test_class_construction.py b/tests/integration/cqlengine/model/test_class_construction.py index 0115d0b7da..e447056376 100644 --- a/tests/integration/cqlengine/model/test_class_construction.py +++ b/tests/integration/cqlengine/model/test_class_construction.py @@ -63,13 +63,13 @@ class TestPerson(Model): inst1 = TestPerson() self.assertHasAttr(inst1, 'first_name') self.assertHasAttr(inst1, 'last_name') - self.assertEquals(inst1.first_name, 'kevin') - self.assertEquals(inst1.last_name, 'deldycke') + self.assertEqual(inst1.first_name, 'kevin') + self.assertEqual(inst1.last_name, 'deldycke') # Check that values on instantiation overrides defaults. inst2 = TestPerson(first_name='bob', last_name='joe') - self.assertEquals(inst2.first_name, 'bob') - self.assertEquals(inst2.last_name, 'joe') + self.assertEqual(inst2.first_name, 'bob') + self.assertEqual(inst2.last_name, 'joe') def test_db_map(self): """ diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index 775508ace4..c5fd5e37ca 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -89,13 +89,13 @@ def test_model_instantiation_save_and_load(self): tm = TestModel(count=8, text='123456789') # Tests that values are available on instantiation. self.assertIsNotNone(tm['id']) - self.assertEquals(tm.count, 8) - self.assertEquals(tm.text, '123456789') + self.assertEqual(tm.count, 8) + self.assertEqual(tm.text, '123456789') tm.save() tm2 = TestModel.objects(id=tm.id).first() for cname in tm._columns.keys(): - self.assertEquals(getattr(tm, cname), getattr(tm2, cname)) + self.assertEqual(getattr(tm, cname), getattr(tm2, cname)) def test_model_read_as_dict(self): """ @@ -499,15 +499,15 @@ class TestDefaultValueTracking(Model): int3=7777, int5=5555) - self.assertEquals(instance.id, 1) - self.assertEquals(instance.int1, 9999) - self.assertEquals(instance.int2, 456) - self.assertEquals(instance.int3, 7777) + self.assertEqual(instance.id, 1) + self.assertEqual(instance.int1, 9999) + self.assertEqual(instance.int2, 456) + self.assertEqual(instance.int3, 7777) self.assertIsNotNone(instance.int4) self.assertIsInstance(instance.int4, int) self.assertGreaterEqual(instance.int4, 0) self.assertLessEqual(instance.int4, 1000) - self.assertEquals(instance.int5, 5555) + self.assertEqual(instance.int5, 5555) self.assertTrue(instance.int6 is None) # All previous values are unset as the object hasn't been persisted diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index aa652058ec..ae3d7b138b 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -2226,37 +2226,37 @@ def test_create_view_metadata(self): self.assertIsNotNone(score_table.columns['score']) # Validate basic mv information - self.assertEquals(mv.keyspace_name, self.keyspace_name) - self.assertEquals(mv.name, "monthlyhigh") - self.assertEquals(mv.base_table_name, "scores") + self.assertEqual(mv.keyspace_name, self.keyspace_name) + self.assertEqual(mv.name, "monthlyhigh") + self.assertEqual(mv.base_table_name, "scores") self.assertFalse(mv.include_all_columns) # Validate that all columns are preset and correct mv_columns = list(mv.columns.values()) - self.assertEquals(len(mv_columns), 6) + self.assertEqual(len(mv_columns), 6) game_column = mv_columns[0] self.assertIsNotNone(game_column) - self.assertEquals(game_column.name, 'game') - self.assertEquals(game_column, mv.partition_key[0]) + self.assertEqual(game_column.name, 'game') + self.assertEqual(game_column, mv.partition_key[0]) year_column = mv_columns[1] self.assertIsNotNone(year_column) - self.assertEquals(year_column.name, 'year') - self.assertEquals(year_column, mv.partition_key[1]) + self.assertEqual(year_column.name, 'year') + self.assertEqual(year_column, mv.partition_key[1]) month_column = mv_columns[2] self.assertIsNotNone(month_column) - self.assertEquals(month_column.name, 'month') - self.assertEquals(month_column, mv.partition_key[2]) + self.assertEqual(month_column.name, 'month') + self.assertEqual(month_column, mv.partition_key[2]) def compare_columns(a, b, name): - self.assertEquals(a.name, name) - self.assertEquals(a.name, b.name) - self.assertEquals(a.table, b.table) - self.assertEquals(a.cql_type, b.cql_type) - self.assertEquals(a.is_static, b.is_static) - self.assertEquals(a.is_reversed, b.is_reversed) + self.assertEqual(a.name, name) + self.assertEqual(a.name, b.name) + self.assertEqual(a.table, b.table) + self.assertEqual(a.cql_type, b.cql_type) + self.assertEqual(a.is_static, b.is_static) + self.assertEqual(a.is_reversed, b.is_reversed) score_column = mv_columns[3] compare_columns(score_column, mv.clustering_key[0], 'score') @@ -2333,7 +2333,7 @@ def test_base_table_column_addition_mv(self): self.assertIn("fouls", mv_alltime.columns) mv_alltime_fouls_comumn = self.cluster.metadata.keyspaces[self.keyspace_name].views["alltimehigh"].columns['fouls'] - self.assertEquals(mv_alltime_fouls_comumn.cql_type, 'int') + self.assertEqual(mv_alltime_fouls_comumn.cql_type, 'int') def test_base_table_type_alter_mv(self): """ @@ -2374,7 +2374,7 @@ def test_base_table_type_alter_mv(self): self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 1) score_column = self.cluster.metadata.keyspaces[self.keyspace_name].tables['scores'].columns['score'] - self.assertEquals(score_column.cql_type, 'blob') + self.assertEqual(score_column.cql_type, 'blob') # until CASSANDRA-9920+CASSANDRA-10500 MV updates are only available later with an async event for i in range(10): @@ -2383,7 +2383,7 @@ def test_base_table_type_alter_mv(self): break time.sleep(.2) - self.assertEquals(score_mv_column.cql_type, 'blob') + self.assertEqual(score_mv_column.cql_type, 'blob') def test_metadata_with_quoted_identifiers(self): """ @@ -2436,31 +2436,31 @@ def test_metadata_with_quoted_identifiers(self): self.assertIsNotNone(t1_table.columns['the Value']) # Validate basic mv information - self.assertEquals(mv.keyspace_name, self.keyspace_name) - self.assertEquals(mv.name, "mv1") - self.assertEquals(mv.base_table_name, "t1") + self.assertEqual(mv.keyspace_name, self.keyspace_name) + self.assertEqual(mv.name, "mv1") + self.assertEqual(mv.base_table_name, "t1") self.assertFalse(mv.include_all_columns) # Validate that all columns are preset and correct mv_columns = list(mv.columns.values()) - self.assertEquals(len(mv_columns), 3) + self.assertEqual(len(mv_columns), 3) theKey_column = mv_columns[0] self.assertIsNotNone(theKey_column) - self.assertEquals(theKey_column.name, 'theKey') - self.assertEquals(theKey_column, mv.partition_key[0]) + self.assertEqual(theKey_column.name, 'theKey') + self.assertEqual(theKey_column, mv.partition_key[0]) cluster_column = mv_columns[1] self.assertIsNotNone(cluster_column) - self.assertEquals(cluster_column.name, 'the;Clustering') - self.assertEquals(cluster_column.name, mv.clustering_key[0].name) - self.assertEquals(cluster_column.table, mv.clustering_key[0].table) - self.assertEquals(cluster_column.is_static, mv.clustering_key[0].is_static) - self.assertEquals(cluster_column.is_reversed, mv.clustering_key[0].is_reversed) + self.assertEqual(cluster_column.name, 'the;Clustering') + self.assertEqual(cluster_column.name, mv.clustering_key[0].name) + self.assertEqual(cluster_column.table, mv.clustering_key[0].table) + self.assertEqual(cluster_column.is_static, mv.clustering_key[0].is_static) + self.assertEqual(cluster_column.is_reversed, mv.clustering_key[0].is_reversed) value_column = mv_columns[2] self.assertIsNotNone(value_column) - self.assertEquals(value_column.name, 'the Value') + self.assertEqual(value_column.name, 'the Value') @dseonly diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index ad860781a6..5e6f55e64e 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -914,73 +914,73 @@ def test_mv_filtering(self): query_statement = SimpleStatement("SELECT * FROM {0}.alltimehigh WHERE game='Coup'".format(self.keyspace_name), consistency_level=ConsistencyLevel.QUORUM) results = self.session.execute(query_statement) - self.assertEquals(results[0].game, 'Coup') - self.assertEquals(results[0].year, 2015) - self.assertEquals(results[0].month, 5) - self.assertEquals(results[0].day, 1) - self.assertEquals(results[0].score, 4000) - self.assertEquals(results[0].user, "pcmanus") + self.assertEqual(results[0].game, 'Coup') + self.assertEqual(results[0].year, 2015) + self.assertEqual(results[0].month, 5) + self.assertEqual(results[0].day, 1) + self.assertEqual(results[0].score, 4000) + self.assertEqual(results[0].user, "pcmanus") # Test prepared statement and daily high filtering prepared_query = self.session.prepare("SELECT * FROM {0}.dailyhigh WHERE game=? AND year=? AND month=? and day=?".format(self.keyspace_name)) bound_query = prepared_query.bind(("Coup", 2015, 6, 2)) results = self.session.execute(bound_query) - self.assertEquals(results[0].game, 'Coup') - self.assertEquals(results[0].year, 2015) - self.assertEquals(results[0].month, 6) - self.assertEquals(results[0].day, 2) - self.assertEquals(results[0].score, 2000) - self.assertEquals(results[0].user, "pcmanus") - - self.assertEquals(results[1].game, 'Coup') - self.assertEquals(results[1].year, 2015) - self.assertEquals(results[1].month, 6) - self.assertEquals(results[1].day, 2) - self.assertEquals(results[1].score, 1000) - self.assertEquals(results[1].user, "tjake") + self.assertEqual(results[0].game, 'Coup') + self.assertEqual(results[0].year, 2015) + self.assertEqual(results[0].month, 6) + self.assertEqual(results[0].day, 2) + self.assertEqual(results[0].score, 2000) + self.assertEqual(results[0].user, "pcmanus") + + self.assertEqual(results[1].game, 'Coup') + self.assertEqual(results[1].year, 2015) + self.assertEqual(results[1].month, 6) + self.assertEqual(results[1].day, 2) + self.assertEqual(results[1].score, 1000) + self.assertEqual(results[1].user, "tjake") # Test montly high range queries prepared_query = self.session.prepare("SELECT * FROM {0}.monthlyhigh WHERE game=? AND year=? AND month=? and score >= ? and score <= ?".format(self.keyspace_name)) bound_query = prepared_query.bind(("Coup", 2015, 6, 2500, 3500)) results = self.session.execute(bound_query) - self.assertEquals(results[0].game, 'Coup') - self.assertEquals(results[0].year, 2015) - self.assertEquals(results[0].month, 6) - self.assertEquals(results[0].day, 20) - self.assertEquals(results[0].score, 3500) - self.assertEquals(results[0].user, "jbellis") - - self.assertEquals(results[1].game, 'Coup') - self.assertEquals(results[1].year, 2015) - self.assertEquals(results[1].month, 6) - self.assertEquals(results[1].day, 9) - self.assertEquals(results[1].score, 2700) - self.assertEquals(results[1].user, "jmckenzie") - - self.assertEquals(results[2].game, 'Coup') - self.assertEquals(results[2].year, 2015) - self.assertEquals(results[2].month, 6) - self.assertEquals(results[2].day, 1) - self.assertEquals(results[2].score, 2500) - self.assertEquals(results[2].user, "iamaleksey") + self.assertEqual(results[0].game, 'Coup') + self.assertEqual(results[0].year, 2015) + self.assertEqual(results[0].month, 6) + self.assertEqual(results[0].day, 20) + self.assertEqual(results[0].score, 3500) + self.assertEqual(results[0].user, "jbellis") + + self.assertEqual(results[1].game, 'Coup') + self.assertEqual(results[1].year, 2015) + self.assertEqual(results[1].month, 6) + self.assertEqual(results[1].day, 9) + self.assertEqual(results[1].score, 2700) + self.assertEqual(results[1].user, "jmckenzie") + + self.assertEqual(results[2].game, 'Coup') + self.assertEqual(results[2].year, 2015) + self.assertEqual(results[2].month, 6) + self.assertEqual(results[2].day, 1) + self.assertEqual(results[2].score, 2500) + self.assertEqual(results[2].user, "iamaleksey") # Test filtered user high scores query_statement = SimpleStatement("SELECT * FROM {0}.filtereduserhigh WHERE game='Chess'".format(self.keyspace_name), consistency_level=ConsistencyLevel.QUORUM) results = self.session.execute(query_statement) - self.assertEquals(results[0].game, 'Chess') - self.assertEquals(results[0].year, 2015) - self.assertEquals(results[0].month, 6) - self.assertEquals(results[0].day, 21) - self.assertEquals(results[0].score, 3500) - self.assertEquals(results[0].user, "jbellis") - - self.assertEquals(results[1].game, 'Chess') - self.assertEquals(results[1].year, 2015) - self.assertEquals(results[1].month, 1) - self.assertEquals(results[1].day, 25) - self.assertEquals(results[1].score, 3200) - self.assertEquals(results[1].user, "pcmanus") + self.assertEqual(results[0].game, 'Chess') + self.assertEqual(results[0].year, 2015) + self.assertEqual(results[0].month, 6) + self.assertEqual(results[0].day, 21) + self.assertEqual(results[0].score, 3500) + self.assertEqual(results[0].user, "jbellis") + + self.assertEqual(results[1].game, 'Chess') + self.assertEqual(results[1].year, 2015) + self.assertEqual(results[1].month, 1) + self.assertEqual(results[1].day, 25) + self.assertEqual(results[1].score, 3200) + self.assertEqual(results[1].user, "pcmanus") class UnicodeQueryTest(BasicSharedKeyspaceUnitTestCase): From 760a7cde3d751d42890a281bc845b779e483be19 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 28 Jul 2016 12:23:15 -0500 Subject: [PATCH 0802/2005] Fixing py3 compatibility issue in test_metrics --- tests/integration/standard/test_metrics.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 0dd9cbb65a..18f35c15f1 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -263,8 +263,8 @@ def test_duplicate_metrics_per_cluster(self): query = SimpleStatement("SELECT * FROM {0}.{0}".format(self.ks_name), consistency_level=ConsistencyLevel.ALL) session3.execute(query) - self.assertEqual(cluster2.metrics.get_stats().values()[2]['count'], 10) - self.assertEqual(cluster3.metrics.get_stats().values()[2]['count'], 5) + self.assertEqual(cluster2.metrics.get_stats()['request_timer']['count'], 10) + self.assertEqual(cluster3.metrics.get_stats()['request_timer']['count'], 5) # Check scales to ensure they are appropriately named self.assertTrue("appcluster" in scales._Stats.stats.keys()) From 7856f702a15ff0adb4590dbf5faa3ce97fa9a86a Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 28 Jul 2016 12:42:09 -0500 Subject: [PATCH 0803/2005] Fixing py3 compatiblity issue with AsciiTests --- tests/integration/cqlengine/columns/test_validation.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 71a7e89a68..062ae37c10 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -17,6 +17,7 @@ except ImportError: import unittest # noqa +import sys from datetime import datetime, timedelta, date, tzinfo from decimal import Decimal as D from uuid import uuid4, uuid1 @@ -420,8 +421,9 @@ def test_type_checking(self): with self.assertRaises(ValidationError): Ascii().validate('Beyonc' + chr(233)) - with self.assertRaises(ValidationError): - Ascii().validate(u'Beyonc' + unichr(233)) + if sys.version_info < (3, 1): + with self.assertRaises(ValidationError): + Ascii().validate(u'Beyonc' + unichr(233)) def test_unaltering_validation(self): """ Test the validation step doesn't re-interpret values. """ From 84f428c73a38e8813057d04cee552dcc7e828b27 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 28 Jul 2016 13:32:57 -0500 Subject: [PATCH 0804/2005] Restricting TypeTestsProtocol to C* V 2.2 --- tests/integration/standard/test_types.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index 736e7957e2..f959d4d9f9 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -31,7 +31,7 @@ from tests.unit.cython.utils import cythontest from tests.integration import use_singledc, PROTOCOL_VERSION, execute_until_pass, notprotocolv1, \ - BasicSharedKeyspaceUnitTestCase, greaterthancass20, lessthancass30 + BasicSharedKeyspaceUnitTestCase, greaterthancass21, lessthancass30 from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, COLLECTION_TYPES, \ get_sample, get_collection_sample @@ -796,7 +796,7 @@ def test_cython_decimal(self): class TypeTestsProtocol(BasicSharedKeyspaceUnitTestCase): - @greaterthancass20 + @greaterthancass21 @lessthancass30 def test_nested_types_with_protocol_version(self): """ From d7811310d93a4bd0e4300216935a35a13a1f762d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 28 Jul 2016 14:46:23 -0500 Subject: [PATCH 0805/2005] format strings for python 2.6 --- tests/integration/standard/test_query.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 5e6f55e64e..719f2b1fc9 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -191,7 +191,7 @@ def test_incomplete_query_trace(self): self.assertTrue(self._wait_for_trace_to_populate(trace.trace_id)) # Delete trace duration from the session (this is what the driver polls for "complete") - delete_statement = SimpleStatement("DELETE duration FROM system_traces.sessions WHERE session_id = {}".format(trace.trace_id), consistency_level=ConsistencyLevel.ALL) + delete_statement = SimpleStatement("DELETE duration FROM system_traces.sessions WHERE session_id = {0}".format(trace.trace_id), consistency_level=ConsistencyLevel.ALL) self.session.execute(delete_statement) self.assertTrue(self._wait_for_trace_to_delete(trace.trace_id)) @@ -225,7 +225,7 @@ def _wait_for_trace_to_delete(self, trace_id): return count != retry_max def _is_trace_present(self, trace_id): - select_statement = SimpleStatement("SElECT duration FROM system_traces.sessions WHERE session_id = {}".format(trace_id), consistency_level=ConsistencyLevel.ALL) + select_statement = SimpleStatement("SElECT duration FROM system_traces.sessions WHERE session_id = {0}".format(trace_id), consistency_level=ConsistencyLevel.ALL) ssrs = self.session.execute(select_statement) if(ssrs[0].duration is None): return False From aacabfa04d0887953c3349766236825cab430606 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 28 Jul 2016 14:46:37 -0500 Subject: [PATCH 0806/2005] don't exlcude CQL just for DCT, now that it's fixed --- cassandra/metadata.py | 8 +++----- tests/integration/standard/test_metadata.py | 9 +-------- 2 files changed, 4 insertions(+), 13 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index a5d1bbfa94..dedaa2de7b 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1058,13 +1058,11 @@ def is_cql_compatible(self): """ comparator = getattr(self, 'comparator', None) if comparator: - incompatible = issubclass(self.comparator, types.DynamicCompositeType) - # no compact storage with more than one column beyond PK if there # are clustering columns - incompatible |= (self.is_compact_storage and - len(self.columns) > len(self.primary_key) + 1 and - len(self.clustering_key) >= 1) + incompatible = (self.is_compact_storage and + len(self.columns) > len(self.primary_key) + 1 and + len(self.clustering_key) >= 1) return not incompatible return True diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index ae3d7b138b..598dd83971 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1273,12 +1273,6 @@ def test_legacy_tables(self): AND read_repair_chance = 0.0 AND speculative_retry = 'NONE'; -/* -Warning: Table legacy.composite_comp_no_col omitted because it has constructs not compatible with CQL (was created via legacy API). - -Approximate structure, for reference: -(this should not be used to reproduce this schema) - CREATE TABLE legacy.composite_comp_no_col ( key blob, column1 'org.apache.cassandra.db.marshal.DynamicCompositeType(b=>org.apache.cassandra.db.marshal.BytesType, s=>org.apache.cassandra.db.marshal.UTF8Type, t=>org.apache.cassandra.db.marshal.TimeUUIDType)', @@ -1297,8 +1291,7 @@ def test_legacy_tables(self): AND memtable_flush_period_in_ms = 0 AND min_index_interval = 128 AND read_repair_chance = 0.0 - AND speculative_retry = 'NONE'; -*/""" + AND speculative_retry = 'NONE';""" ccm = get_cluster() ccm.run_cli(cli_script) From f315867330892676fefc19e6ec2d07054c624d91 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 28 Jul 2016 15:26:52 -0500 Subject: [PATCH 0807/2005] PYTHON-598 Adding test documentation, fixing py3 compatibility issue --- .../cqlengine/columns/test_validation.py | 3 +- .../cqlengine/test_context_query.py | 28 +++++++++++++++++++ 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 062ae37c10..4980415208 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -542,7 +542,8 @@ def test_type_checking(self): Text().validate("!#$%&\'()*+,-./") Text().validate('Beyonc' + chr(233)) - Text().validate(u'Beyonc' + unichr(233)) + if sys.version_info < (3, 1): + Text().validate(u'Beyonc' + unichr(233)) def test_unaltering_validation(self): """ Test the validation step doesn't re-interpret values. """ diff --git a/tests/integration/cqlengine/test_context_query.py b/tests/integration/cqlengine/test_context_query.py index ee947666e6..b3941319e9 100644 --- a/tests/integration/cqlengine/test_context_query.py +++ b/tests/integration/cqlengine/test_context_query.py @@ -54,13 +54,33 @@ def setUp(self): obj.delete() def test_context_manager(self): + """ + Validates that when a context query is constructed that the + keyspace of the returned model is toggled appropriately + @since 3.6 + @jira_ticket PYTHON-598 + @expected_result default keyspace should be used + + @test_category query + """ + # model keyspace write/read for ks in self.KEYSPACES: with ContextQuery(TestModel, keyspace=ks) as tm: self.assertEqual(tm.__keyspace__, ks) + self.assertEqual(TestModel._get_keyspace(), 'ks1') + def test_default_keyspace(self): + """ + Tests the use of context queries with the default model keyspsace + @since 3.6 + @jira_ticket PYTHON-598 + @expected_result default keyspace should be used + + @test_category query + """ # model keyspace write/read for i in range(5): TestModel.objects.create(partition=i, cluster=i) @@ -76,7 +96,15 @@ def test_default_keyspace(self): self.assertEqual(0, len(tm.objects.all())) def test_context_keyspace(self): + """ + Tests the use of context queries with non default keyspaces + + @since 3.6 + @jira_ticket PYTHON-598 + @expected_result queries should be routed to appropriate keyspaces + @test_category query + """ for i in range(5): with ContextQuery(TestModel, keyspace='ks4') as tm: tm.objects.create(partition=i, cluster=i) From 80ff3b785e7edf7ad10d8b1ff4f896a60af6c2c6 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 29 Jul 2016 12:15:33 -0400 Subject: [PATCH 0808/2005] Add BatchQuery and ContextQuery in the docs --- cassandra/cqlengine/query.py | 30 ++++++++++++++++++++++++++ docs/api/cassandra/cqlengine/query.rst | 5 +++++ 2 files changed, 35 insertions(+) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index bee0219f51..4f4f15830f 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -136,6 +136,8 @@ class BatchQuery(object): Handles the batching of queries http://www.datastax.com/docs/1.2/cql_cli/cql/BATCH + + See :doc:`/cqlengine/batches` for more details. """ warn_multiple_exec = True @@ -173,6 +175,11 @@ def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on self._context_entered = False def add_query(self, query): + """ + Adds a query to the batch. + + :param query: The query + """ if not isinstance(query, BaseCQLStatement): raise CQLEngineException('only BaseCQLStatements can be added to a batch query') self.queries.append(query) @@ -202,6 +209,9 @@ def add_callback(self, fn, *args, **kwargs): self._callbacks.append((fn, args, kwargs)) def execute(self): + """ + Executes the batch. + """ if self._executed and self.warn_multiple_exec: msg = "Batch executed multiple times." if self._context_entered: @@ -260,8 +270,28 @@ def __exit__(self, exc_type, exc_val, exc_tb): class ContextQuery(object): + """ + A Context manager to allow a Model to switch context easily. Presently, the context only + specifies a keyspace for model IO. + + For example: + + .. code-block:: python + + with ContextQuery(Automobile, keyspace='test2') as A: + A.objects.create(manufacturer='honda', year=2008, model='civic') + print len(A.objects.all()) # 1 result + + with ContextQuery(Automobile, keyspace='test4') as A: + print len(A.objects.all()) # 0 result + + """ def __init__(self, model, keyspace=None): + """ + :param model: A model + :param keyspace: (optional) A keyspace name + """ from cassandra.cqlengine import models if not issubclass(model, models.Model): diff --git a/docs/api/cassandra/cqlengine/query.rst b/docs/api/cassandra/cqlengine/query.rst index 461ec9b969..ad6d850e10 100644 --- a/docs/api/cassandra/cqlengine/query.rst +++ b/docs/api/cassandra/cqlengine/query.rst @@ -54,6 +54,11 @@ The methods here are used to filter, order, and constrain results. .. automethod:: update +.. autoclass:: BatchQuery + :members: + +.. autoclass:: ContextQuery + .. autoclass:: DoesNotExist .. autoclass:: MultipleObjectsReturned From 7907d96d8c529852a6f8331149fa7306079abe1c Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 29 Jul 2016 12:29:31 -0400 Subject: [PATCH 0809/2005] some docs changes --- cassandra/cqlengine/query.py | 12 ++---------- docs/api/cassandra/cqlengine/query.rst | 3 +++ 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 4f4f15830f..e996baea3e 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -175,11 +175,6 @@ def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on self._context_entered = False def add_query(self, query): - """ - Adds a query to the batch. - - :param query: The query - """ if not isinstance(query, BaseCQLStatement): raise CQLEngineException('only BaseCQLStatements can be added to a batch query') self.queries.append(query) @@ -209,9 +204,6 @@ def add_callback(self, fn, *args, **kwargs): self._callbacks.append((fn, args, kwargs)) def execute(self): - """ - Executes the batch. - """ if self._executed and self.warn_multiple_exec: msg = "Batch executed multiple times." if self._context_entered: @@ -271,7 +263,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): class ContextQuery(object): """ - A Context manager to allow a Model to switch context easily. Presently, the context only + A Context manager to allow a Model to switch context easily. Presently, the context only specifies a keyspace for model IO. For example: @@ -289,7 +281,7 @@ class ContextQuery(object): def __init__(self, model, keyspace=None): """ - :param model: A model + :param model: A model. This should be a class type, not an instance. :param keyspace: (optional) A keyspace name """ from cassandra.cqlengine import models diff --git a/docs/api/cassandra/cqlengine/query.rst b/docs/api/cassandra/cqlengine/query.rst index ad6d850e10..c0c8f285cf 100644 --- a/docs/api/cassandra/cqlengine/query.rst +++ b/docs/api/cassandra/cqlengine/query.rst @@ -57,6 +57,9 @@ The methods here are used to filter, order, and constrain results. .. autoclass:: BatchQuery :members: + .. automethod:: add_query + .. automethod:: execute + .. autoclass:: ContextQuery .. autoclass:: DoesNotExist From 22598545f221785dd68f939cc4a377e3dbab3f44 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 29 Jul 2016 15:05:54 -0400 Subject: [PATCH 0810/2005] 3.6.0 version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index d6b2b52165..1a02d8a892 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 5, 0, 'post0') +__version_info__ = (3, 6, 0) __version__ = '.'.join(map(str, __version_info__)) From 43cfe6078e92dde6b9505ccefe1fd3812535585d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 1 Aug 2016 13:43:56 -0500 Subject: [PATCH 0811/2005] silence sphinx warning about emphasis/strong without end --- cassandra/cqlengine/query.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index e996baea3e..77b724983a 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -196,8 +196,8 @@ def add_callback(self, fn, *args, **kwargs): :param fn: Callable object :type fn: callable - :param *args: Positional arguments to be passed to the callback at the time of execution - :param **kwargs: Named arguments to be passed to the callback at the time of execution + :param \*args: Positional arguments to be passed to the callback at the time of execution + :param \*\*kwargs: Named arguments to be passed to the callback at the time of execution """ if not callable(fn): raise ValueError("Value for argument 'fn' is {0} and is not a callable object.".format(type(fn))) From 1dd933747977413eaca30b322a6bd42a95e8fa0f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 1 Aug 2016 13:44:15 -0500 Subject: [PATCH 0812/2005] post-release version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 1a02d8a892..015891038d 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 6, 0) +__version_info__ = (3, 6, 0, 'post0') __version__ = '.'.join(map(str, __version_info__)) From a3bd73855dced11dabe92596358a5dab89d2e020 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 1 Aug 2016 14:58:15 -0500 Subject: [PATCH 0813/2005] add python-609, which was merged before release --- CHANGELOG.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 3db920828b..94b8f98b9f 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -12,6 +12,7 @@ Features * Utilize v2 protocol's ability to skip result set metadata for prepared statement execution (PYTHON-71) * Return from Cluster.connect() when first contact point connection(pool) is opened (PYTHON-105) * cqlengine: Add ContextQuery to allow cqlengine models to switch the keyspace context easily (PYTHON-598) +* Standardize Validation between Ascii and Text types in Cqlengine (PYTHON-609) Bug Fixes --------- From d244ece986e336ffa97d00ac36f859e2e29372af Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 2 Aug 2016 09:20:21 -0500 Subject: [PATCH 0814/2005] misc fixes for jenkins --- tests/integration/long/test_schema.py | 6 ++--- tests/integration/standard/test_query.py | 21 ++++++++++-------- tests/integration/standard/test_udts.py | 28 ++++++++++++------------ 3 files changed, 29 insertions(+), 26 deletions(-) diff --git a/tests/integration/long/test_schema.py b/tests/integration/long/test_schema.py index 87ca5f3c8a..349a158af8 100644 --- a/tests/integration/long/test_schema.py +++ b/tests/integration/long/test_schema.py @@ -37,7 +37,7 @@ class SchemaTests(unittest.TestCase): @classmethod def setup_class(cls): cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) - cls.session = cls.cluster.connect() + cls.session = cls.cluster.connect(wait_for_all_pools=True) @classmethod def teardown_class(cls): @@ -98,7 +98,7 @@ def test_for_schema_disagreements_same_keyspace(self): """ cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() + session = cluster.connect(wait_for_all_pools=True) for i in range(30): try: @@ -131,7 +131,7 @@ def test_for_schema_disagreement_attribute(self): """ # This should yield a schema disagreement cluster = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=0.001) - session = cluster.connect() + session = cluster.connect(wait_for_all_pools=True) rs = session.execute("CREATE KEYSPACE test_schema_disagreement WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}") self.check_and_wait_for_agreement(session, rs, False) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 719f2b1fc9..1b09286840 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -406,15 +406,18 @@ def test_prepare_on_all_hosts(self): clus = Cluster( load_balancing_policy=white_list, protocol_version=PROTOCOL_VERSION, prepare_on_all_hosts=False, reprepare_on_up=False) - session = clus.connect() - mock_handler = MockLoggingHandler() - logger = logging.getLogger(cluster.__name__) - logger.addHandler(mock_handler) - select_statement = session.prepare("SELECT * FROM system.local") - session.execute(select_statement) - session.execute(select_statement) - session.execute(select_statement) - self.assertEqual(2, mock_handler.get_message_count('debug', "Re-preparing")) + try: + session = clus.connect(wait_for_all_pools=True) + mock_handler = MockLoggingHandler() + logger = logging.getLogger(cluster.__name__) + logger.addHandler(mock_handler) + select_statement = session.prepare("SELECT * FROM system.local") + session.execute(select_statement) + session.execute(select_statement) + session.execute(select_statement) + self.assertEqual(2, mock_handler.get_message_count('debug', "Re-preparing")) + finally: + clus.shutdown() class PrintStatementTests(unittest.TestCase): diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index 441b518a6f..81c86069fd 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -56,7 +56,7 @@ def test_can_insert_unprepared_registered_udts(self): """ c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect(self.keyspace_name) + s = c.connect(self.keyspace_name, wait_for_all_pools=True) s.execute("CREATE TYPE user (age int, name text)") s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen)") @@ -100,7 +100,7 @@ def test_can_register_udt_before_connecting(self): """ c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect() + s = c.connect(wait_for_all_pools=True) s.execute(""" CREATE KEYSPACE udt_test_register_before_connecting @@ -128,7 +128,7 @@ def test_can_register_udt_before_connecting(self): c.register_user_type("udt_test_register_before_connecting", "user", User1) c.register_user_type("udt_test_register_before_connecting2", "user", User2) - s = c.connect() + s = c.connect(wait_for_all_pools=True) s.set_keyspace("udt_test_register_before_connecting") s.execute("INSERT INTO mytable (a, b) VALUES (%s, %s)", (0, User1(42, 'bob'))) @@ -158,7 +158,7 @@ def test_can_insert_prepared_unregistered_udts(self): """ c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect(self.keyspace_name) + s = c.connect(self.keyspace_name, wait_for_all_pools=True) s.execute("CREATE TYPE user (age int, name text)") s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen)") @@ -202,7 +202,7 @@ def test_can_insert_prepared_registered_udts(self): """ c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect(self.keyspace_name) + s = c.connect(self.keyspace_name, wait_for_all_pools=True) s.execute("CREATE TYPE user (age int, name text)") User = namedtuple('user', ('age', 'name')) @@ -252,7 +252,7 @@ def test_can_insert_udts_with_nulls(self): """ c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect(self.keyspace_name) + s = c.connect(self.keyspace_name, wait_for_all_pools=True) s.execute("CREATE TYPE user (a text, b int, c uuid, d blob)") User = namedtuple('user', ('a', 'b', 'c', 'd')) @@ -282,7 +282,7 @@ def test_can_insert_udts_with_varying_lengths(self): """ c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect(self.keyspace_name) + s = c.connect(self.keyspace_name, wait_for_all_pools=True) MAX_TEST_LENGTH = 254 @@ -366,7 +366,7 @@ def test_can_insert_nested_registered_udts(self): """ c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect(self.keyspace_name) + s = c.connect(self.keyspace_name, wait_for_all_pools=True) s.row_factory = dict_factory MAX_NESTING_DEPTH = 16 @@ -397,7 +397,7 @@ def test_can_insert_nested_unregistered_udts(self): """ c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect(self.keyspace_name) + s = c.connect(self.keyspace_name, wait_for_all_pools=True) s.row_factory = dict_factory MAX_NESTING_DEPTH = 16 @@ -437,7 +437,7 @@ def test_can_insert_nested_registered_udts_with_different_namedtuples(self): """ c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect(self.keyspace_name) + s = c.connect(self.keyspace_name, wait_for_all_pools=True) s.row_factory = dict_factory MAX_NESTING_DEPTH = 16 @@ -468,7 +468,7 @@ def test_raise_error_on_nonexisting_udts(self): """ c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect(self.keyspace_name) + s = c.connect(self.keyspace_name, wait_for_all_pools=True) User = namedtuple('user', ('age', 'name')) with self.assertRaises(UserTypeDoesNotExist): @@ -488,7 +488,7 @@ def test_can_insert_udt_all_datatypes(self): """ c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect(self.keyspace_name) + s = c.connect(self.keyspace_name, wait_for_all_pools=True) # create UDT alpha_type_list = [] @@ -533,7 +533,7 @@ def test_can_insert_udt_all_collection_datatypes(self): """ c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect(self.keyspace_name) + s = c.connect(self.keyspace_name, wait_for_all_pools=True) # create UDT alpha_type_list = [] @@ -600,7 +600,7 @@ def test_can_insert_nested_collections(self): raise unittest.SkipTest("Support for nested collections was introduced in Cassandra 2.1.3") c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect(self.keyspace_name) + s = c.connect(self.keyspace_name, wait_for_all_pools=True) s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple name = self._testMethodName From c6b1b8d417a4af4685895a94294d18ed7dda9559 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 3 Aug 2016 12:19:19 -0500 Subject: [PATCH 0815/2005] Wait for first *successful* connect on initial not just any completed (possibly errored) attempt PYTHON-617 --- cassandra/cluster.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 536ae71c14..23cd265f6e 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1927,8 +1927,10 @@ def __init__(self, cluster, hosts, keyspace=None): future = self.add_or_renew_pool(host, is_host_addition=False) if future: self._initial_connect_futures.add(future) - wait_futures(self._initial_connect_futures, return_when=FIRST_COMPLETED) + futures = wait_futures(self._initial_connect_futures, return_when=FIRST_COMPLETED) + while futures.not_done and not any(f.result() for f in futures.done): + futures = wait_futures(futures.not_done, return_when=FIRST_COMPLETED) def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_payload=None, execution_profile=EXEC_PROFILE_DEFAULT): """ From 47ec211116dcebf529c5aa3635a457ac2af3c3f6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 4 Aug 2016 16:27:08 -0500 Subject: [PATCH 0816/2005] allow beta proto version flag PYTHON-614 --- cassandra/cluster.py | 12 +++++++++++- cassandra/connection.py | 7 +++++-- cassandra/protocol.py | 8 ++++++-- 3 files changed, 22 insertions(+), 5 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 536ae71c14..ee06bcfe56 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -393,6 +393,12 @@ class Cluster(object): +-------------------+-------------------+ """ + allow_beta_protocol_version = False + """ + Setting true injects a flag in all messages that makes the server accept and use "beta" protocol version. + Used for testing new protocol features incrementally before the new version is complete. + """ + compression = True """ Controls compression for communications between the driver and Cassandra. @@ -760,7 +766,8 @@ def __init__(self, status_event_refresh_window=2, prepare_on_all_hosts=True, reprepare_on_up=True, - execution_profiles=None): + execution_profiles=None, + allow_beta_protocol_version=False): """ ``executor_threads`` defines the number of threads in a pool for handling asynchronous tasks such as extablishing connection pools or refreshing metadata. @@ -785,6 +792,7 @@ def __init__(self, if protocol_version is not _NOT_SET: self.protocol_version = protocol_version self._protocol_version_explicit = True + self.allow_beta_protocol_version = allow_beta_protocol_version self.auth_provider = auth_provider @@ -1121,6 +1129,7 @@ def _make_connection_kwargs(self, address, kwargs_dict): kwargs_dict.setdefault('cql_version', self.cql_version) kwargs_dict.setdefault('protocol_version', self.protocol_version) kwargs_dict.setdefault('user_type_map', self._user_types) + kwargs_dict.setdefault('allow_beta_protocol_version', self.allow_beta_protocol_version) return kwargs_dict @@ -2084,6 +2093,7 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time message.update_custom_payload(query.custom_payload) message.update_custom_payload(custom_payload) + message.allow_beta_protocol_version = self.cluster.allow_beta_protocol_version return ResponseFuture( self, message, query, timeout, metrics=self._metrics, diff --git a/cassandra/connection.py b/cassandra/connection.py index 11da8a4afe..79910825f9 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -238,6 +238,8 @@ class Connection(object): is_control_connection = False signaled_error = False # used for flagging at the pool level + allow_beta_protocol_version = False + _iobuf = None _current_frame = None @@ -251,7 +253,7 @@ class Connection(object): def __init__(self, host='127.0.0.1', port=9042, authenticator=None, ssl_options=None, sockopts=None, compression=True, cql_version=None, protocol_version=MAX_SUPPORTED_VERSION, is_control_connection=False, - user_type_map=None, connect_timeout=None): + user_type_map=None, connect_timeout=None, allow_beta_protocol_version=False): self.host = host self.port = port self.authenticator = authenticator @@ -263,6 +265,7 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self.is_control_connection = is_control_connection self.user_type_map = user_type_map self.connect_timeout = connect_timeout + self.allow_beta_protocol_version = allow_beta_protocol_version self._push_watchers = defaultdict(set) self._requests = {} self._iobuf = io.BytesIO() @@ -455,7 +458,7 @@ def send_msg(self, msg, request_id, cb, encoder=ProtocolHandler.encode_message, # queue the decoder function with the request # this allows us to inject custom functions per request to encode, decode messages self._requests[request_id] = (cb, decoder, result_metadata) - self.push(encoder(msg, request_id, self.protocol_version, compressor=self.compressor)) + self.push(encoder(msg, request_id, self.protocol_version, compressor=self.compressor, allow_beta_protocol_version=self.allow_beta_protocol_version)) return request_id def wait_for_response(self, msg, timeout=None): diff --git a/cassandra/protocol.py b/cassandra/protocol.py index e9e4450f5a..e63966fe54 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -56,7 +56,7 @@ class InternalError(Exception): ColumnMetadata = namedtuple("ColumnMetadata", ['keyspace_name', 'table_name', 'name', 'type']) MIN_SUPPORTED_VERSION = 1 -MAX_SUPPORTED_VERSION = 4 +MAX_SUPPORTED_VERSION = 5 HEADER_DIRECTION_TO_CLIENT = 0x80 HEADER_DIRECTION_MASK = 0x80 @@ -65,6 +65,7 @@ class InternalError(Exception): TRACING_FLAG = 0x02 CUSTOM_PAYLOAD_FLAG = 0x04 WARNING_FLAG = 0x08 +USE_BETA_FLAG = 0x10 _message_types_by_opcode = {} @@ -932,7 +933,7 @@ class _ProtocolHandler(object): """ @classmethod - def encode_message(cls, msg, stream_id, protocol_version, compressor): + def encode_message(cls, msg, stream_id, protocol_version, compressor, allow_beta_protocol_version): """ Encodes a message using the specified frame parameters, and compressor @@ -958,6 +959,9 @@ def encode_message(cls, msg, stream_id, protocol_version, compressor): if msg.tracing: flags |= TRACING_FLAG + if allow_beta_protocol_version: + flags |= USE_BETA_FLAG + buff = io.BytesIO() cls._write_header(buff, protocol_version, flags, stream_id, msg.opcode, len(body)) buff.write(body) From 58cba38e1f918ea5771d62a5e2934fea13d9b7c5 Mon Sep 17 00:00:00 2001 From: Tyler Hobbs Date: Fri, 5 Aug 2016 18:09:40 -0500 Subject: [PATCH 0817/2005] Support v5 error code map for failure messages Done for PYTHON-619 --- cassandra/__init__.py | 30 ++++++++++++--- cassandra/protocol.py | 90 +++++++++++++++++++++++++++++++------------ 2 files changed, 90 insertions(+), 30 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 015891038d..2fc22a044c 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -325,16 +325,34 @@ class CoordinationFailure(RequestExecutionException): The number of replicas that sent a failure message """ - def __init__(self, summary_message, consistency=None, required_responses=None, received_responses=None, failures=None): + error_code_map = None + """ + A map of inet addresses to error codes representing replicas that sent + a failure message. Only set when `protocol_version` is 5 or higher. + """ + + def __init__(self, summary_message, consistency=None, required_responses=None, + received_responses=None, failures=None, error_code_map=None): self.consistency = consistency self.required_responses = required_responses self.received_responses = received_responses self.failures = failures - Exception.__init__(self, summary_message + ' info=' + - repr({'consistency': consistency_value_to_name(consistency), - 'required_responses': required_responses, - 'received_responses': received_responses, - 'failures': failures})) + self.error_code_map = error_code_map + + info_dict = { + 'consistency': consistency_value_to_name(consistency), + 'required_responses': required_responses, + 'received_responses': received_responses, + 'failures': failures + } + + if error_code_map is not None: + # make error codes look like "0x002a" + formatted_map = dict((addr, '0x%04x' % err_code) + for (addr, err_code) in error_code_map.items()) + info_dict['error_code_map'] = formatted_map + + Exception.__init__(self, summary_message + ' info=' + repr(info_dict)) class ReadFailure(CoordinationFailure): diff --git a/cassandra/protocol.py b/cassandra/protocol.py index e63966fe54..53c1d694b1 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -127,11 +127,11 @@ def __init__(self, code, message, info): self.info = info @classmethod - def recv_body(cls, f, *args): + def recv_body(cls, f, protocol_version, *args): code = read_int(f) msg = read_string(f) subcls = error_classes.get(code, cls) - extra_info = subcls.recv_error_info(f) + extra_info = subcls.recv_error_info(f, protocol_version) return subcls(code=code, message=msg, info=extra_info) def summary_msg(self): @@ -146,7 +146,7 @@ def __str__(self): __repr__ = __str__ @staticmethod - def recv_error_info(f): + def recv_error_info(f, protocol_version): pass def to_exception(self): @@ -192,7 +192,7 @@ class UnavailableErrorMessage(RequestExecutionException): error_code = 0x1000 @staticmethod - def recv_error_info(f): + def recv_error_info(f, protocol_version): return { 'consistency': read_consistency_level(f), 'required_replicas': read_int(f), @@ -223,7 +223,7 @@ class WriteTimeoutErrorMessage(RequestExecutionException): error_code = 0x1100 @staticmethod - def recv_error_info(f): + def recv_error_info(f, protocol_version): return { 'consistency': read_consistency_level(f), 'received_responses': read_int(f), @@ -240,7 +240,7 @@ class ReadTimeoutErrorMessage(RequestExecutionException): error_code = 0x1200 @staticmethod - def recv_error_info(f): + def recv_error_info(f, protocol_version): return { 'consistency': read_consistency_level(f), 'received_responses': read_int(f), @@ -257,13 +257,27 @@ class ReadFailureMessage(RequestExecutionException): error_code = 0x1300 @staticmethod - def recv_error_info(f): + def recv_error_info(f, protocol_version): + consistency = read_consistency_level(f) + received_responses = read_int(f) + required_responses = read_int(f) + + if protocol_version >= 5: + error_code_map = read_error_code_map(f) + failures = len(error_code_map) + else: + error_code_map = None + failures = read_int(f) + + data_retrieved = bool(read_byte(f)) + return { - 'consistency': read_consistency_level(f), - 'received_responses': read_int(f), - 'required_responses': read_int(f), - 'failures': read_int(f), - 'data_retrieved': bool(read_byte(f)), + 'consistency': consistency, + 'received_responses': received_responses, + 'required_responses': required_responses, + 'failures': failures, + 'error_code_map': error_code_map, + 'data_retrieved': data_retrieved } def to_exception(self): @@ -275,7 +289,7 @@ class FunctionFailureMessage(RequestExecutionException): error_code = 0x1400 @staticmethod - def recv_error_info(f): + def recv_error_info(f, protocol_version): return { 'keyspace': read_string(f), 'function': read_string(f), @@ -291,13 +305,27 @@ class WriteFailureMessage(RequestExecutionException): error_code = 0x1500 @staticmethod - def recv_error_info(f): + def recv_error_info(f, protocol_version): + consistency = read_consistency_level(f) + received_responses = read_int(f) + required_responses = read_int(f) + + if protocol_version >= 5: + error_code_map = read_error_code_map(f) + failures = len(error_code_map) + else: + error_code_map = None + failures = read_int(f) + + write_type = WriteType.name_to_value[read_string(f)] + return { - 'consistency': read_consistency_level(f), - 'received_responses': read_int(f), - 'required_responses': read_int(f), - 'failures': read_int(f), - 'write_type': WriteType.name_to_value[read_string(f)], + 'consistency': consistency, + 'received_responses': received_responses, + 'required_responses': required_responses, + 'failures': failures, + 'error_code_map': error_code_map, + 'write_type': write_type } def to_exception(self): @@ -335,7 +363,7 @@ class PreparedQueryNotFound(RequestValidationException): error_code = 0x2500 @staticmethod - def recv_error_info(f): + def recv_error_info(f, protocol_version): # return the query ID return read_binary_string(f) @@ -345,7 +373,7 @@ class AlreadyExistsException(ConfigurationException): error_code = 0x2400 @staticmethod - def recv_error_info(f): + def recv_error_info(f, protocol_version): return { 'keyspace': read_string(f), 'table': read_string(f), @@ -1224,6 +1252,15 @@ def write_stringmultimap(f, strmmap): write_stringlist(f, v) +def read_error_code_map(f): + numpairs = read_int(f) + error_code_map = {} + for _ in range(numpairs): + endpoint = read_inet_addr_only(f) + error_code_map[endpoint] = read_short(f) + return error_code_map + + def read_value(f): size = read_int(f) if size < 0: @@ -1241,17 +1278,22 @@ def write_value(f, v): f.write(v) -def read_inet(f): +def read_inet_addr_only(f): size = read_byte(f) addrbytes = f.read(size) - port = read_int(f) if size == 4: addrfam = socket.AF_INET elif size == 16: addrfam = socket.AF_INET6 else: raise InternalError("bad inet address: %r" % (addrbytes,)) - return (util.inet_ntop(addrfam, addrbytes), port) + return util.inet_ntop(addrfam, addrbytes) + + +def read_inet(f): + addr = read_inet_addr_only(f) + port = read_int(f) + return (addr, port) def write_inet(f, addrtuple): From ab7b9ef0e70572498d6b72de172a810c6f3b8578 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 8 Aug 2016 10:12:10 -0500 Subject: [PATCH 0818/2005] only use partition key when deleting static columns PYTHON-608 --- cassandra/cqlengine/query.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 77b724983a..f04a4a9a3a 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -1283,19 +1283,23 @@ def _delete_null_columns(self, conditionals=None): """ ds = DeleteStatement(self.column_family_name, conditionals=conditionals, if_exists=self._if_exists) deleted_fields = False + static_only = True for _, v in self.instance._values.items(): col = v.column if v.deleted: ds.add_field(col.db_field_name) deleted_fields = True + static_only &= col.static elif isinstance(col, columns.Map): uc = MapDeleteClause(col.db_field_name, v.value, v.previous_value) if uc.get_context_size() > 0: ds.add_field(uc) deleted_fields = True + static_only |= col.static if deleted_fields: - for name, col in self.model._primary_keys.items(): + keys = self.model._partition_keys if static_only else self.model._primary_keys + for name, col in keys.items(): ds.add_where(col, EqualsOperator(), getattr(self.instance, name)) self._execute(ds) From c81618d787a54d34c97ea0a8a85c390e2ab68675 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 8 Aug 2016 10:56:42 -0500 Subject: [PATCH 0819/2005] clear USE_BETA_FLAG in response message, avoid warnings PYTHON-614 --- cassandra/protocol.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index e63966fe54..9066cddf4e 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -1017,6 +1017,8 @@ def decode_message(cls, protocol_version, user_type_map, stream_id, flags, opcod else: custom_payload = None + flags ^= USE_BETA_FLAG # will only be set if we asserted it in connection estabishment + if flags: log.warning("Unknown protocol flags set: %02x. May cause problems.", flags) From 4c39f5f26f7eb6c410b18f396ebab3ee8bb72297 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 8 Aug 2016 11:38:30 -0500 Subject: [PATCH 0820/2005] update test following beta protocol changes PYTHON-614 --- tests/integration/standard/test_cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 4c8339a2cf..91dee32e79 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -188,7 +188,7 @@ def test_protocol_negotiation(self): """ cluster = Cluster() - self.assertEqual(cluster.protocol_version, MAX_SUPPORTED_VERSION) + self.assertLessEqual(cluster.protocol_version, MAX_SUPPORTED_VERSION) session = cluster.connect() updated_protocol_version = session._protocol_version updated_cluster_version = cluster.protocol_version From 4ba8f81baefbb6270418255d696e7f8b1c275a57 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 8 Aug 2016 11:45:45 -0500 Subject: [PATCH 0821/2005] add hash function to cqlengine.columns.Column restore hashability to Column in Python 3.x PYTHON-618 --- cassandra/cqlengine/columns.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 0bb52d6bff..3b5cbe2497 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -198,6 +198,9 @@ def __ge__(self, other): return self.position >= other.position return NotImplemented + def __hash__(self): + return id(self) + def validate(self, value): """ Returns a cleaned and validated value. Raises a ValidationError From f29150482a4ec2f6884cc52722fa30d0da79ba69 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 8 Aug 2016 14:14:49 -0500 Subject: [PATCH 0822/2005] avoid quadratic token processing for RF > nodes PYTHON-379 --- cassandra/metadata.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index dedaa2de7b..f4f358f7f9 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -469,8 +469,6 @@ def __init__(self, dc_replication_factors): (str(k), int(v)) for k, v in dc_replication_factors.items()) def make_token_replica_map(self, token_to_host_owner, ring): - # note: this does not account for hosts having different racks - replica_map = defaultdict(list) dc_rf_map = dict((dc, int(rf)) for dc, rf in self.dc_replication_factors.items() if rf > 0) @@ -478,16 +476,19 @@ def make_token_replica_map(self, token_to_host_owner, ring): # belong to that DC dc_to_token_offset = defaultdict(list) dc_racks = defaultdict(set) + hosts_per_dc = defaultdict(set) for i, token in enumerate(ring): host = token_to_host_owner[token] dc_to_token_offset[host.datacenter].append(i) if host.datacenter and host.rack: dc_racks[host.datacenter].add(host.rack) + hosts_per_dc[host.datacenter].add(host) # A map of DCs to an index into the dc_to_token_offset value for that dc. # This is how we keep track of advancing around the ring for each DC. dc_to_current_index = defaultdict(int) + replica_map = defaultdict(list) for i in range(len(ring)): replicas = replica_map[ring[i]] @@ -506,12 +507,14 @@ def make_token_replica_map(self, token_to_host_owner, ring): dc_to_current_index[dc] = index replicas_remaining = dc_rf_map[dc] + replicas_this_dc = 0 skipped_hosts = [] racks_placed = set() racks_this_dc = dc_racks[dc] + hosts_this_dc = len(hosts_per_dc[dc]) for token_offset in islice(cycle(token_offsets), index, index + num_tokens): host = token_to_host_owner[ring[token_offset]] - if replicas_remaining == 0: + if replicas_remaining == 0 or replicas_this_dc == hosts_this_dc: break if host in replicas: @@ -522,6 +525,7 @@ def make_token_replica_map(self, token_to_host_owner, ring): continue replicas.append(host) + replicas_this_dc += 1 replicas_remaining -= 1 racks_placed.add(host.rack) From cdf375804987c740c12b8534ceb9ee757cee9739 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 8 Aug 2016 16:09:26 -0500 Subject: [PATCH 0823/2005] Adding test for PYTHON-608 --- .../cqlengine/query/test_updates.py | 36 +++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/tests/integration/cqlengine/query/test_updates.py b/tests/integration/cqlengine/query/test_updates.py index a3244d210b..747f6d3abe 100644 --- a/tests/integration/cqlengine/query/test_updates.py +++ b/tests/integration/cqlengine/query/test_updates.py @@ -241,3 +241,39 @@ def test_map_update_none_deletes_key(self): text_map__update={"bar": None}) obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) self.assertEqual(obj.text_map, {"foo": '1'}) + + +class StaticDeleteModel(Model): + example_id = columns.Integer(partition_key=True, primary_key=True, default=uuid4) + example_static1 = columns.Integer(static=True) + example_static2 = columns.Integer(static=True) + example_clust = columns.Integer(primary_key=True) + + +class StaticDeleteTests(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(StaticDeleteTests, cls).setUpClass() + sync_table(StaticDeleteModel) + + @classmethod + def tearDownClass(cls): + super(StaticDeleteTests, cls).tearDownClass() + drop_table(StaticDeleteModel) + + def test_static_deletion(self): + """ + Test to ensure that cluster keys are not included when removing only static columns + + @since 3.6 + @jira_ticket PYTHON-608 + @expected_result Server should not throw an exception, and the static column should be deleted + + @test_category object_mapper + """ + StaticDeleteModel.create(example_id=5, example_clust=5, example_static2=1) + sdm = StaticDeleteModel.filter(example_id=5).first() + self.assertEqual(1, sdm.example_static2) + sdm.update(example_static2=None) + self.assertIsNone(sdm.example_static2) From 6006d3581a8d5a1a31e92bf7b25c0646a86e9eb2 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 9 Aug 2016 17:59:21 -0500 Subject: [PATCH 0824/2005] PYTHON-617 adding long test to cover this specific scenario --- tests/integration/long/test_consistency.py | 43 ++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/tests/integration/long/test_consistency.py b/tests/integration/long/test_consistency.py index 538d924a65..9232bcda24 100644 --- a/tests/integration/long/test_consistency.py +++ b/tests/integration/long/test_consistency.py @@ -306,3 +306,46 @@ def rfthree_downgradingcl(self, cluster, keyspace, roundrobin): # instead we should create these elsewhere # def test_rfthree_downgradingcl_twodcs(self): # def test_rfthree_downgradingcl_twodcs_dcaware(self): + + +class ConnectivityTest(unittest.TestCase): + + def setUp(self): + self.coordinator_stats = CoordinatorStats() + + def test_pool_with_host_down(self): + """ + Test to ensure that cluster.connect() doesn't return prior to pools being initialized. + + This test will figure out which host our pool logic will connect to first. It then shuts that server down. + Previouly the cluster.connect() would return prior to the pools being initialized, and the first queries would + return a no host exception + + @since 3.7.0 + @jira_ticket PYTHON-617 + @expected_result query should complete successfully + + @test_category connection + """ + + # find the first node, we will try create connections to, shut it down. + cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster.connect() + hosts = cluster.metadata.all_hosts() + address = hosts[0].address + node_to_stop = int(address.split('.')[-1:][0]) + try: + force_stop(node_to_stop) + wait_for_down(cluster, node_to_stop) + # Attempt a query against that node. It should complete + cluster2 = Cluster(protocol_version=PROTOCOL_VERSION) + session2 = cluster2.connect() + session2.execute("SELECT * FROM system.local") + cluster2.shutdown() + finally: + start(node_to_stop) + wait_for_up(cluster, node_to_stop) + cluster.shutdown() + + + From 3ef1ab034b7637a29fedb2c1ebcb548ab27b6aa0 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 11 Aug 2016 11:37:07 -0500 Subject: [PATCH 0825/2005] PYTHON-618 adding hash test --- tests/unit/cqlengine/test_columns.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/unit/cqlengine/test_columns.py b/tests/unit/cqlengine/test_columns.py index 181c103515..c0c9e21b25 100644 --- a/tests/unit/cqlengine/test_columns.py +++ b/tests/unit/cqlengine/test_columns.py @@ -65,4 +65,7 @@ def test_comparisons(self): except TypeError: pass + def test_hash(self): + c0 = Column() + self.assertEqual(id(c0), c0.__hash__()) From e8c638ab8faacac9a137fa5e3b1bb85be4fe0b78 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 11 Aug 2016 11:57:00 -0500 Subject: [PATCH 0826/2005] don't xor beta flag when it's not set --- cassandra/protocol.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 785f353c4f..323af99c2f 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -66,6 +66,7 @@ class InternalError(Exception): CUSTOM_PAYLOAD_FLAG = 0x04 WARNING_FLAG = 0x08 USE_BETA_FLAG = 0x10 +USE_BETA_MASK = ~USE_BETA_FLAG _message_types_by_opcode = {} @@ -1045,7 +1046,7 @@ def decode_message(cls, protocol_version, user_type_map, stream_id, flags, opcod else: custom_payload = None - flags ^= USE_BETA_FLAG # will only be set if we asserted it in connection estabishment + flags &= USE_BETA_MASK # will only be set if we asserted it in connection estabishment if flags: log.warning("Unknown protocol flags set: %02x. May cause problems.", flags) From 8850ad3b6114debb7e95d4e1e5f232e03d652b0f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 11 Aug 2016 13:52:13 -0500 Subject: [PATCH 0827/2005] make sure prepared result metadata is replaced on reprepare PYTHON-621 --- cassandra/cluster.py | 4 ++ .../standard/test_prepared_statements.py | 38 ++++++++++++++++++- 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index f705d5edec..6a49ee9719 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3549,6 +3549,10 @@ def _execute_after_prepare(self, response): if isinstance(response, ResultMessage): if response.kind == RESULT_KIND_PREPARED: + # result metadata is the only thing that could have changed from an alter + _, _, _, result_metadata = response.results + self.prepared_statement.result_metadata = result_metadata + # use self._query to re-use the same host and # at the same time properly borrow the connection request_id = self._query(self._current_host) diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index 63f62f238e..6cb69097f0 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -23,7 +23,7 @@ from cassandra import ConsistencyLevel from cassandra.cluster import Cluster -from cassandra.query import PreparedStatement, UNSET_VALUE +from cassandra.query import PreparedStatement, UNSET_VALUE, tuple_factory from tests.integration import get_server_versions @@ -385,3 +385,39 @@ def test_raise_error_on_prepared_statement_execution_dropped_table(self): with self.assertRaises(InvalidRequest): self.session.execute(prepared, [0]) + + def test_invalidated_result_metadata(self): + """ + Tests to make sure cached metadata is updated when an invalidated prepared statement is reprepared. + + @since 2.7.0 + @jira_ticket PYTHON-621 + + Prior to this fix, the request would blow up with a protocol error when the result was decoded expecting a different + number of columns. + """ + s = self.session + s.result_factory = tuple_factory + + table = "test1rf.%s" % self._testMethodName.lower() + + s.execute("DROP TABLE IF EXISTS %s" % table) + s.execute("CREATE TABLE %s (k int PRIMARY KEY, a int, b int, c int)" % table) + s.execute("INSERT INTO %s (k, a, b, c) VALUES (0, 0, 0, 0)" % table) + + wildcard_prepared = s.prepare("SELECT * FROM %s" % table) + original_result_metadata = wildcard_prepared.result_metadata + self.assertEqual(len(original_result_metadata), 4) + + r = s.execute(wildcard_prepared) + self.assertEqual(r[0], (0, 0, 0, 0)) + + s.execute("ALTER TABLE %s DROP c" % table) + + # Get a bunch of requests in the pipeline with varying states of result_meta, reprepare, resolved + futures = set(s.execute_async(wildcard_prepared.bind(None)) for _ in range(200)) + for f in futures: + self.assertEqual(f.result()[0], (0, 0, 0)) + self.assertIsNot(wildcard_prepared.result_metadata, original_result_metadata) + s.execute("DROP TABLE %s" % table) + From 151b7642419bcb5c9e0fc77f2402819ea2d8c8ce Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 9 Aug 2016 11:55:02 -0500 Subject: [PATCH 0828/2005] add encoded message size to cluster.ResponseFuture PYTHON-284 --- cassandra/cluster.py | 11 +++++++++-- cassandra/connection.py | 5 +++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index f705d5edec..3daf87dfd2 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3162,6 +3162,11 @@ class ResponseFuture(object): Always ``True`` for non-DDL requests. """ + request_encoded_size = None + """ + Size of the request message sent + """ + session = None row_factory = None message = None @@ -3285,8 +3290,10 @@ def _query(self, host, message=None, cb=None): connection, request_id = pool.borrow_connection(timeout=2.0) self._connection = connection result_meta = self.prepared_statement.result_metadata if self.prepared_statement else [] - connection.send_msg(message, request_id, cb=cb, encoder=self._protocol_handler.encode_message, decoder=self._protocol_handler.decode_message, - result_metadata=result_meta) + self.request_encoded_size = connection.send_msg(message, request_id, cb=cb, + encoder=self._protocol_handler.encode_message, + decoder=self._protocol_handler.decode_message, + result_metadata=result_meta) return request_id except NoConnectionsAvailable as exc: log.debug("All connections for host %s are at capacity, moving to the next host", host) diff --git a/cassandra/connection.py b/cassandra/connection.py index 79910825f9..19b7964b2c 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -458,8 +458,9 @@ def send_msg(self, msg, request_id, cb, encoder=ProtocolHandler.encode_message, # queue the decoder function with the request # this allows us to inject custom functions per request to encode, decode messages self._requests[request_id] = (cb, decoder, result_metadata) - self.push(encoder(msg, request_id, self.protocol_version, compressor=self.compressor, allow_beta_protocol_version=self.allow_beta_protocol_version)) - return request_id + msg = encoder(msg, request_id, self.protocol_version, compressor=self.compressor, allow_beta_protocol_version=self.allow_beta_protocol_version) + self.push(msg) + return len(msg) def wait_for_response(self, msg, timeout=None): return self.wait_for_responses(msg, timeout=timeout)[0] From 686ccf114fd7b50d2ba2685bdcea3a59292ed6ec Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 11 Aug 2016 08:36:02 -0500 Subject: [PATCH 0829/2005] basic request init listener PYTHON-284 --- cassandra/cluster.py | 32 ++++++++++++++++++++++++++++++++ docs/api/cassandra/cluster.rst | 4 ++++ 2 files changed, 36 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 3daf87dfd2..49679f39c3 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1916,6 +1916,7 @@ def default_serial_consistency_level(self, cl): _pools = None _profile_manager = None _metrics = None + _request_init_callbacks = None def __init__(self, cluster, hosts, keyspace=None): self.cluster = cluster @@ -1926,6 +1927,7 @@ def __init__(self, cluster, hosts, keyspace=None): self._pools = {} self._profile_manager = cluster.profile_manager self._metrics = cluster.metrics + self._request_init_callbacks = [] self._protocol_version = self.cluster.protocol_version self.encoder = Encoder() @@ -2018,6 +2020,7 @@ def execute_async(self, query, parameters=None, trace=False, custom_payload=None """ future = self._create_response_future(query, parameters, trace, custom_payload, timeout, execution_profile) future._protocol_handler = self.client_protocol_handler + self._on_request(future) future.send_request() return future @@ -2123,6 +2126,35 @@ def execution_profile_clone_update(self, ep, **kwargs): setattr(clone, attr, value) return clone + def add_request_init_listener(self, fn, *args, **kwargs): + """ + Adds a callback with arguments to be called when any request is created. + + It will be invoked as `fn(response_future, *args, **kwargs)` after each client request is created, + and before the request is sent\*. This can be used to create extensions by adding result callbacks to the + response future. + + \* where `response_future` is the :class:`.ResponseFuture` for the request. + + Note that the init callback is done on the client thread creating the request, so you may need to consider + synchronization if you have multiple threads. Any callbacks added to the response future will be executed + on the event loop thread, so the normal advice about minimizing cycles and avoiding blocking apply (see Note in + :meth:`.ResponseFuture.add_callbacks`. + """ + self._request_init_callbacks.append((fn, args, kwargs)) + + def remove_request_init_listener(self, fn, *args, **kwargs): + """ + Removes a callback and arguments from the list. + + See :meth:`.Session.add_request_init_listener`. + """ + self._request_init_callbacks.remove((fn, args, kwargs)) + + def _on_request(self, response_future): + for fn, args, kwargs in self._request_init_callbacks: + fn(response_future, *args, **kwargs) + def prepare(self, query, custom_payload=None): """ Prepares a query string, returning a :class:`~cassandra.query.PreparedStatement` diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index b8435ee240..05d66278d0 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -150,6 +150,10 @@ .. automethod:: execution_profile_clone_update + .. automethod:: add_request_init_listener + + .. automethod:: remove_request_init_listener + .. autoclass:: ResponseFuture () .. autoattribute:: query From 5207686badcba16c07799aeb63edc6c02a2190a2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 11 Aug 2016 16:48:07 -0500 Subject: [PATCH 0830/2005] Add example request init listener PYTHON-284 --- cassandra/cluster.py | 3 + examples/README.rst | 8 +++ examples/request_init_listener.py | 107 ++++++++++++++++++++++++++++++ 3 files changed, 118 insertions(+) create mode 100644 examples/README.rst create mode 100644 examples/request_init_listener.py diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 49679f39c3..89cac44a14 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2140,6 +2140,9 @@ def add_request_init_listener(self, fn, *args, **kwargs): synchronization if you have multiple threads. Any callbacks added to the response future will be executed on the event loop thread, so the normal advice about minimizing cycles and avoiding blocking apply (see Note in :meth:`.ResponseFuture.add_callbacks`. + + See `this example `_ in the + source tree for an example. """ self._request_init_callbacks.append((fn, args, kwargs)) diff --git a/examples/README.rst b/examples/README.rst new file mode 100644 index 0000000000..889f911132 --- /dev/null +++ b/examples/README.rst @@ -0,0 +1,8 @@ +Driver Examples +=============== +This directory will contain a set of scripts demonstrating driver APIs or integration techniques. It will not be exhaustive, but will contain examples where they are too involved, or +open-ended to include inline in the docstrings. In that case, they should be referenced from the docstrings + +Features +-------- +* `request_init_listener.py `_ A script demonstrating how to register a session request listener and use it to track alternative metrics about requests (size, for example). diff --git a/examples/request_init_listener.py b/examples/request_init_listener.py new file mode 100644 index 0000000000..7d662b151d --- /dev/null +++ b/examples/request_init_listener.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python +# Copyright 2013-2016 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script shows an example "request init listener" which can be registered to track certain request metrics +# for a session. In this case we're just accumulating total request and error counts, as well as some statistics +# about the encoded request size. Note that the counts would be available using the internal 'metrics' tracking -- +# this is just demonstrating a way to track a few custom attributes. + +from __future__ import print_function +from cassandra.cluster import Cluster +from greplin import scales + +import pprint +pp = pprint.PrettyPrinter(indent=2) + + +class RequestAnalyzer(object): + """ + Class used to track request and error counts for a Session. + + Also computes statistics on encoded request size. + """ + + requests = scales.PmfStat('request size') + errors = scales.IntStat('errors') + + def __init__(self, session): + scales.init(self, '/cassandra') + # each instance will be registered with a session, and receive a callback for each request generated + session.add_request_init_listener(self.on_request) + + def on_request(self, rf): + # This callback is invoked each time a request is created, on the thread creating the request. + # We can use this to count events, or add callbacks + rf.add_callbacks(self.on_success, self.on_error, callback_args=(rf,), errback_args=(rf,)) + + def on_success(self, _, response_future): + # future callback on a successful request; just record the size + self.requests.addValue(response_future.request_encoded_size) + + def on_error(self, _, response_future): + # future callback for failed; record size and increment errors + self.requests.addValue(response_future.request_encoded_size) + self.errors += 1 + + def __str__(self): + # just extracting request count from the size stats (which are recorded on all requests) + request_sizes = dict(self.requests) + count = request_sizes.pop('count') + return "%d requests (%d errors)\nRequest size statistics:\n%s" % (count, self.errors, pp.pformat(request_sizes)) + + +# connect a session +session = Cluster().connect() + +# attach a listener to this session +ra = RequestAnalyzer(session) + +session.execute("SELECT release_version FROM system.local") +session.execute("SELECT release_version FROM system.local") + +print(ra) +# 2 requests (0 errors) +# Request size statistics: +# { '75percentile': 74, +# '95percentile': 74, +# '98percentile': 74, +# '999percentile': 74, +# '99percentile': 74, +# 'max': 74, +# 'mean': 74.0, +# 'median': 74.0, +# 'min': 74, +# 'stddev': 0.0} + +try: + # intentional error to show that count increase + session.execute("syntax err") +except Exception as e: + pass + +print() +print(ra) # note: the counts are updated, but the stats are not because scales only updates every 20s +# 3 requests (1 errors) +# Request size statistics: +# { '75percentile': 74, +# '95percentile': 74, +# '98percentile': 74, +# '999percentile': 74, +# '99percentile': 74, +# 'max': 74, +# 'mean': 74.0, +# 'median': 74.0, +# 'min': 74, +# 'stddev': 0.0} From c31fe9be265b62b165ee988901cd2cc47e7ea102 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 11 Aug 2016 17:10:31 -0500 Subject: [PATCH 0831/2005] PYTHON-379 adding timing test for token replica map generation --- tests/unit/test_metadata.py | 40 +++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index fef360dba6..07640bfd5f 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -21,6 +21,7 @@ from mock import Mock import os import six +import timeit import cassandra from cassandra.marshal import uint16_unpack, uint16_pack @@ -115,6 +116,45 @@ def test_nts_make_token_replica_map(self): self.assertItemsEqual(replica_map[MD5Token(0)], (dc1_1, dc1_2, dc2_1, dc2_2, dc3_1)) + def test_nts_token_performance(self): + """ + Tests to ensure that when rf exceeds the number of nodes available, that we dont' + needlessly iterate trying to construct tokens for nodes that don't exist. + + @since 3.7 + @jira_ticket PYTHON-379 + @expected_result timing with 1500 rf should be same/similar to 3rf if we have 3 nodes + + @test_category metadata + """ + + token_to_host_owner = {} + ring = [] + dc1hostnum = 3 + current_token = 0 + vnodes_per_host = 500 + for i in range(dc1hostnum): + + host = Host('dc1.{0}'.format(i), SimpleConvictionPolicy) + host.set_location_info('dc1', "rack1") + for vnode_num in range(vnodes_per_host): + md5_token = MD5Token(current_token+vnode_num) + token_to_host_owner[md5_token] = host + ring.append(md5_token) + current_token += 1000 + + nts = NetworkTopologyStrategy({'dc1': 3}) + start_time = timeit.default_timer() + nts.make_token_replica_map(token_to_host_owner, ring) + elapsed_base = timeit.default_timer() - start_time + + nts = NetworkTopologyStrategy({'dc1': 1500}) + start_time = timeit.default_timer() + nts.make_token_replica_map(token_to_host_owner, ring) + elapsed_bad = timeit.default_timer() - start_time + difference = elapsed_bad - elapsed_base + self.assertTrue(difference < .2 and difference > -.2) + def test_nts_make_token_replica_map_multi_rack(self): token_to_host_owner = {} From 856335e5ad6e31170c2ed932df68350bfd82a492 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 15 Aug 2016 15:02:12 -0500 Subject: [PATCH 0832/2005] don't mark a host down on event if we're still connected PYTHON-498 --- cassandra/cluster.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 7cf1bee580..8a77c618be 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -729,6 +729,7 @@ def token_metadata_enabled(self, enabled): _prepared_statement_lock = None _idle_heartbeat = None _protocol_version_explicit = False + _discount_down_events = True _user_types = None """ @@ -1401,11 +1402,23 @@ def on_down(self, host, is_host_addition, expect_host_to_be_down=False): with host.lock: was_up = host.is_up + + # ignore down signals if we have open pools to the host + # this is to avoid closing pools when a control connection host became isolated + if self._discount_down_events and self.profile_manager.distance(host) != HostDistance.IGNORED: + connected = False + for session in self.sessions: + pool_states = session.get_pool_state() + pool_state = pool_states.get(host) + if pool_state: + connected |= pool_state['open_count'] > 0 + if connected: + return + host.set_down() if (not was_up and not expect_host_to_be_down) or host.is_currently_reconnecting(): return - log.warning("Host %s has been marked down", host) self.profile_manager.on_down(host) From 015df858229ab73223b998a82bf7dcf6fc765426 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 15 Aug 2016 15:43:03 -0500 Subject: [PATCH 0833/2005] PYTHON-284 tests, fixing timing on unit test issue --- tests/integration/standard/test_metrics.py | 82 +++++++++++++++++++++- tests/unit/test_metadata.py | 2 +- 2 files changed, 82 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 18f35c15f1..858cae7fb0 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -23,11 +23,12 @@ from cassandra.query import SimpleStatement from cassandra import ConsistencyLevel, WriteTimeout, Unavailable, ReadTimeout +from cassandra.protocol import SyntaxException from cassandra.cluster import Cluster, NoHostAvailable from tests.integration import get_cluster, get_node, use_singledc, PROTOCOL_VERSION, execute_until_pass from greplin import scales -from tests.integration import BasicSharedKeyspaceUnitTestCaseWTable +from tests.integration import BasicSharedKeyspaceUnitTestCaseWTable, BasicExistingKeyspaceUnitTestCase def setup_module(): use_singledc() @@ -271,5 +272,84 @@ def test_duplicate_metrics_per_cluster(self): self.assertTrue("devops" in scales._Stats.stats.keys()) +class RequestAnalyzer(object): + """ + Class used to track request and error counts for a Session. + Also computes statistics on encoded request size. + """ + + requests = scales.PmfStat('request size') + errors = scales.IntStat('errors') + successful = scales.IntStat("success") + # Throw exceptions when invoked. + throw_on_success = False + throw_on_fail = False + + def __init__(self, session, throw_on_success=False, throw_on_fail=False): + scales.init(self, '/request') + # each instance will be registered with a session, and receive a callback for each request generated + session.add_request_init_listener(self.on_request) + self.throw_on_fail = throw_on_fail + self.throw_on_success = throw_on_success + + def on_request(self, rf): + # This callback is invoked each time a request is created, on the thread creating the request. + # We can use this to count events, or add callbacks + rf.add_callbacks(self.on_success, self.on_error, callback_args=(rf,), errback_args=(rf,)) + + def on_success(self, _, response_future): + # future callback on a successful request; just record the size + self.requests.addValue(response_future.request_encoded_size) + self.successful += 1 + if self.throw_on_success: + raise AttributeError + + def on_error(self, _, response_future): + # future callback for failed; record size and increment errors + self.requests.addValue(response_future.request_encoded_size) + self.errors += 1 + if self.throw_on_fail: + raise AttributeError + + def __str__(self): + # just extracting request count from the size stats (which are recorded on all requests) + request_sizes = dict(self.requests) + count = request_sizes.pop('count') + return "%d requests (%d errors)\nRequest size statistics:\n%s" % (count, self.errors, pp.pformat(request_sizes)) + + +class MetricsRequestSize(BasicExistingKeyspaceUnitTestCase): + def test_metrics_per_cluster(self): + """ + Test to validate that requests listeners. + + This test creates a simple metrics based request listener to track request size, it then + check to ensure that on_success and on_error methods are invoked appropriately. + @since 3.7.0 + @jira_ticket PYTHON-284 + @expected_result in_error, and on_success should be invoked apropriately + + @test_category metrics + """ + + ra = RequestAnalyzer(self.session) + for _ in range(10): + self.session.execute("SELECT release_version FROM system.local") + + for _ in range(3): + try: + self.session.execute("nonesense") + except SyntaxException: + continue + self.assertEqual(ra.errors, 3) + self.assertEqual(ra.successful, 10) + + # Make sure a poorly coded RA doesn't cause issues + RequestAnalyzer(self.session, throw_on_success=False, throw_on_fail=True) + self.session.execute("SELECT release_version FROM system.local") + try: + self.session.execute("nonesense") + except SyntaxException: + pass diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 07640bfd5f..5c0137314a 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -153,7 +153,7 @@ def test_nts_token_performance(self): nts.make_token_replica_map(token_to_host_owner, ring) elapsed_bad = timeit.default_timer() - start_time difference = elapsed_bad - elapsed_base - self.assertTrue(difference < .2 and difference > -.2) + self.assertTrue(difference < .5 and difference > -.5) def test_nts_make_token_replica_map_multi_rack(self): token_to_host_owner = {} From 3656a351025f33e3395e4ef7f58fc1c9dd626b0d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 15 Aug 2016 16:23:27 -0500 Subject: [PATCH 0834/2005] expose paging state in execution API PYTHON-200 --- cassandra/cluster.py | 39 +++++++++++++++++++++++---------------- 1 file changed, 23 insertions(+), 16 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 7cf1bee580..10ed53947a 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1943,7 +1943,7 @@ def __init__(self, cluster, hosts, keyspace=None): while futures.not_done and not any(f.result() for f in futures.done): futures = wait_futures(futures.not_done, return_when=FIRST_COMPLETED) - def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_payload=None, execution_profile=EXEC_PROFILE_DEFAULT): + def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_payload=None, execution_profile=EXEC_PROFILE_DEFAULT, paging_state=None): """ Execute the given query and synchronously wait for the response. @@ -1970,10 +1970,16 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_ `custom_payload` is a :ref:`custom_payload` dict to be passed to the server. If `query` is a Statement with its own custom_payload. The message payload will be a union of the two, with the values specified here taking precedence. + + `execution_profile` is the execution profile to use for this request. It can be a key to a profile configured + via :meth:`Cluster.add_execution_profile` or an instance (from :meth:`Session.execution_profile_clone_update`, + for example + + `paging_state` is an optiional paging state, reused from a previous :class:`ResultSet`. """ - return self.execute_async(query, parameters, trace, custom_payload, timeout, execution_profile).result() + return self.execute_async(query, parameters, trace, custom_payload, timeout, execution_profile, paging_state).result() - def execute_async(self, query, parameters=None, trace=False, custom_payload=None, timeout=_NOT_SET, execution_profile=EXEC_PROFILE_DEFAULT): + def execute_async(self, query, parameters=None, trace=False, custom_payload=None, timeout=_NOT_SET, execution_profile=EXEC_PROFILE_DEFAULT, paging_state=None): """ Execute the given query and return a :class:`~.ResponseFuture` object which callbacks may be attached to for asynchronous response @@ -1981,17 +1987,7 @@ def execute_async(self, query, parameters=None, trace=False, custom_payload=None on the :class:`.ResponseFuture` to synchronously block for results at any time. - If `trace` is set to :const:`True`, you may get the query trace descriptors using - :meth:`.ResponseFuture.get_query_trace()` or :meth:`.ResponseFuture.get_all_query_traces()` - on the future result. - - `custom_payload` is a :ref:`custom_payload` dict to be passed to the server. - If `query` is a Statement with its own custom_payload. The message payload - will be a union of the two, with the values specified here taking precedence. - - If the server sends a custom payload in the response message, - the dict can be obtained following :meth:`.ResponseFuture.result` via - :attr:`.ResponseFuture.custom_payload` + See :meth:`Session.execute` for parameter definitions. Example usage:: @@ -2018,13 +2014,13 @@ def execute_async(self, query, parameters=None, trace=False, custom_payload=None ... log.exception("Operation failed:") """ - future = self._create_response_future(query, parameters, trace, custom_payload, timeout, execution_profile) + future = self._create_response_future(query, parameters, trace, custom_payload, timeout, execution_profile, paging_state) future._protocol_handler = self.client_protocol_handler self._on_request(future) future.send_request() return future - def _create_response_future(self, query, parameters, trace, custom_payload, timeout, execution_profile=EXEC_PROFILE_DEFAULT): + def _create_response_future(self, query, parameters, trace, custom_payload, timeout, execution_profile=EXEC_PROFILE_DEFAULT, paging_state=None): """ Returns the ResponseFuture before calling send_request() on it """ prepared_statement = None @@ -2099,6 +2095,7 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time message.update_custom_payload(query.custom_payload) message.update_custom_payload(custom_payload) message.allow_beta_protocol_version = self.cluster.allow_beta_protocol_version + message.paging_state = paging_state return ResponseFuture( self, message, query, timeout, metrics=self._metrics, @@ -4004,3 +4001,13 @@ def was_applied(self): return row[0] else: return row['[applied]'] + + @property + def paging_state(self): + """ + Server paging state of the query. Can be `None` if the query was not paged. + + The driver treats paging state as opaque, but it may contain primary key data, so applications may want to + avoid sending this to untrusted parties. + """ + return self.response_future._paging_state From bb27149c6b4c982c9fda13c328798eb63af0b9d2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 16 Aug 2016 12:58:44 -0500 Subject: [PATCH 0835/2005] cqle: assign UDT default on init PYTHON-606 --- cassandra/cqlengine/usertype.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/usertype.py b/cassandra/cqlengine/usertype.py index 7d753e898d..1d62e6beaf 100644 --- a/cassandra/cqlengine/usertype.py +++ b/cassandra/cqlengine/usertype.py @@ -31,7 +31,8 @@ def __init__(self, **values): values = dict((self._db_map.get(k, k), v) for k, v in values.items()) for name, field in self._fields.items(): - value = values.get(name, None) + field_default = field.get_default() if field.has_default else None + value = values.get(name, field_default) if value is not None or isinstance(field, columns.BaseContainerColumn): value = field.to_python(value) value_mngr = field.value_manager(self, field, value) From 1c7a663e27b267c86a5b939f49e036af8891671b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 16 Aug 2016 15:28:40 -0500 Subject: [PATCH 0836/2005] better error message when row decoding fails PYTHON-361 --- cassandra/protocol.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 323af99c2f..e859421dc4 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -634,10 +634,19 @@ def recv_results_rows(cls, f, protocol_version, user_type_map, result_metadata): rows = [cls.recv_row(f, len(column_metadata)) for _ in range(rowcount)] colnames = [c[2] for c in column_metadata] coltypes = [c[3] for c in column_metadata] - parsed_rows = [ - tuple(ctype.from_binary(val, protocol_version) - for ctype, val in zip(coltypes, row)) - for row in rows] + try: + parsed_rows = [ + tuple(ctype.from_binary(val, protocol_version) + for ctype, val in zip(coltypes, row)) + for row in rows] + except Exception: + for i in range(len(row)): + try: + coltypes[i].from_binary(row[i], protocol_version) + except Exception as e: + raise DriverException('Failed decoding result column "%s" of type %s: %s' % (colnames[i], + coltypes[i].cql_parameterized_type(), + e.message)) return paging_state, (colnames, parsed_rows) @classmethod From fa5cf2d5031eaac92645e296f486f55fac0f12c6 Mon Sep 17 00:00:00 2001 From: Sandeep Tamhankar Date: Tue, 16 Aug 2016 16:57:56 -0700 Subject: [PATCH 0837/2005] Update docs.yaml to get 3.6 and 3.5 sources from the 3.6-doc and 3.5-doc branches, which contain a recent update to .nav. Put that same .nav in master since we want to maintain that nav order going forward. --- docs.yaml | 8 +++++--- docs/.nav | 10 +++++----- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/docs.yaml b/docs.yaml index b337d5dd7b..efcc6d3c8e 100644 --- a/docs.yaml +++ b/docs.yaml @@ -7,7 +7,9 @@ sections: type: sphinx directory: docs versions: + - name: 3.6.0 + ref: 3.6-doc - name: 3.5.0 - ref: 3.5.0 -redirects: - - \A\/(.*)/\Z: /\1.html + ref: 3.5-doc +-redirects: +- - \A\/(.*)/\Z: /\1.html diff --git a/docs/.nav b/docs/.nav index 1af261dd19..f4b3f2236c 100644 --- a/docs/.nav +++ b/docs/.nav @@ -1,13 +1,13 @@ installation getting_started -object_mapper -api -upgrading execution_profiles +lwt +object_mapper performance query_paging -lwt -user_defined_types security +upgrading +user_defined_types dates_and_times faq +api From be8c056d8bc052da829b9ba973af92bb4f572d9b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 17 Aug 2016 08:51:31 -0500 Subject: [PATCH 0838/2005] typo --- cassandra/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 10ed53947a..0ae6e13f6a 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1975,7 +1975,7 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_ via :meth:`Cluster.add_execution_profile` or an instance (from :meth:`Session.execution_profile_clone_update`, for example - `paging_state` is an optiional paging state, reused from a previous :class:`ResultSet`. + `paging_state` is an optional paging state, reused from a previous :class:`ResultSet`. """ return self.execute_async(query, parameters, trace, custom_payload, timeout, execution_profile, paging_state).result() From a4cf9f6afc70f6a36f4abf179b9125b74e0fe32b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 17 Aug 2016 12:50:51 -0500 Subject: [PATCH 0839/2005] refresh UDTs on keyspace update for v[12] PYTHON-106 --- cassandra/metadata.py | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index f4f358f7f9..1340c2561e 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -131,7 +131,12 @@ def refresh(self, connection, timeout, target_type=None, change_type=None, **kwa meta = parse_method(self.keyspaces, **kwargs) if meta: update_method = getattr(self, '_update_' + tt_lower) - update_method(meta) + if tt_lower == 'keyspace' and connection.protocol_version < 3: + # we didn't have 'type' target in legacy protocol versions, so we need to query those too + user_types = parser.get_types_map(self.keyspaces, **kwargs) + self._update_keyspace(meta, user_types) + else: + update_method(meta) else: drop_method = getattr(self, '_drop_' + tt_lower) drop_method(**kwargs) @@ -157,13 +162,13 @@ def _rebuild_all(self, parser): for ksname in removed_keyspaces: self._keyspace_removed(ksname) - def _update_keyspace(self, keyspace_meta): + def _update_keyspace(self, keyspace_meta, new_user_types=None): ks_name = keyspace_meta.name old_keyspace_meta = self.keyspaces.get(ks_name, None) self.keyspaces[ks_name] = keyspace_meta if old_keyspace_meta: keyspace_meta.tables = old_keyspace_meta.tables - keyspace_meta.user_types = old_keyspace_meta.user_types + keyspace_meta.user_types = new_user_types or old_keyspace_meta.user_types keyspace_meta.indexes = old_keyspace_meta.indexes keyspace_meta.functions = old_keyspace_meta.functions keyspace_meta.aggregates = old_keyspace_meta.aggregates @@ -1588,11 +1593,14 @@ def _handle_results(self, success, result): raise result def _query_build_row(self, query_string, build_func): + result = self._query_build_rows(query_string, build_func) + return result[0] if result else None + + def _query_build_rows(self, query_string, build_func): query = QueryMessage(query=query_string, consistency_level=ConsistencyLevel.ONE) response = self.connection.wait_for_response(query, self.timeout) result = dict_factory(*response.results) - if result: - return build_func(result[0]) + return [build_func(row) for row in result] class SchemaParserV22(_SchemaParser): @@ -1701,6 +1709,11 @@ def get_type(self, keyspaces, keyspace, type): where_clause = bind_params(" WHERE keyspace_name = %s AND type_name = %s", (keyspace, type), _encoder) return self._query_build_row(self._SELECT_TYPES + where_clause, self._build_user_type) + def get_types_map(self, keyspaces, keyspace): + where_clause = bind_params(" WHERE keyspace_name = %s", (keyspace,), _encoder) + types = self._query_build_rows(self._SELECT_TYPES + where_clause, self._build_user_type) + return dict((t.name, t) for t in types) + def get_function(self, keyspaces, keyspace, function): where_clause = bind_params(" WHERE keyspace_name = %%s AND function_name = %%s AND %s = %%s" % (self._function_agg_arument_type_col,), (keyspace, function.name, function.argument_types), _encoder) From 8e258b37e8c99f8b500c3d2bf84434e77f3861db Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 17 Aug 2016 14:24:25 -0500 Subject: [PATCH 0840/2005] allow Cluster.refresh_nodes to force token rebuild PYTHON-349 --- cassandra/cluster.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 7cf1bee580..4935cc1ca3 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1671,13 +1671,15 @@ def refresh_user_aggregate_metadata(self, keyspace, aggregate, max_schema_agreem schema_agreement_wait=max_schema_agreement_wait, force=True): raise DriverException("User Aggregate metadata was not refreshed. See log for details.") - def refresh_nodes(self): + def refresh_nodes(self, force_token_rebuild=False): """ Synchronously refresh the node list and token metadata + `force_token_rebuild` can be used to rebuild the token map metadata, even if no new nodes are discovered. + An Exception is raised if node refresh fails for any reason. """ - if not self.control_connection.refresh_node_list_and_token_map(): + if not self.control_connection.refresh_node_list_and_token_map(force_token_rebuild): raise DriverException("Node list was not refreshed. See log for details.") def set_meta_refresh_enabled(self, enabled): From 1f950325349d9a42c30931838337f817e1ef3437 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 17 Aug 2016 15:04:52 -0500 Subject: [PATCH 0841/2005] PYTHON-614 PYTHON-619 Adding tests. Enabling protocol v5 detection and support to test harness. --- test-requirements.txt | 1 + tests/integration/__init__.py | 28 +++++----- tests/integration/long/test_failure_types.py | 46 +++++++++------- tests/integration/standard/test_cluster.py | 57 +++++++++++++++++--- 4 files changed, 94 insertions(+), 38 deletions(-) diff --git a/test-requirements.txt b/test-requirements.txt index 500795357c..5e2552631d 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -12,3 +12,4 @@ twisted gevent>=1.0 eventlet cython>=0.21 +packaging diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index bd9fe103cd..9c6b975137 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -16,7 +16,7 @@ import unittest2 as unittest except ImportError: import unittest # noqa - +from packaging.version import Version import logging import os import socket @@ -140,11 +140,11 @@ def _get_cass_version_from_dse(dse_version): def get_default_protocol(): - if CASSANDRA_VERSION >= '2.2': + if Version(CASSANDRA_VERSION) >= Version('2.2'): return 4 - elif CASSANDRA_VERSION >= '2.1': + elif Version(CASSANDRA_VERSION) >= Version('2.1'): return 3 - elif CASSANDRA_VERSION >= '2.0': + elif Version(CASSANDRA_VERSION) >= Version('2.0'): return 2 else: return 1 @@ -157,14 +157,17 @@ def get_supported_protocol_versions(): 2.1 -> 3, 2, 1 2.2 -> 4, 3, 2, 1 3.X -> 4, 3 + 3.10 -> 5(beta),4,3 ` """ - if CASSANDRA_VERSION >= '3.0': + if Version(CASSANDRA_VERSION) >= Version('3.10'): + return (3, 4, 5) + elif Version(CASSANDRA_VERSION) >= Version('3.0'): return (3, 4) - elif CASSANDRA_VERSION >= '2.2': + elif Version(CASSANDRA_VERSION) >= Version('2.2'): return (1, 2, 3, 4) - elif CASSANDRA_VERSION >= '2.1': + elif Version(CASSANDRA_VERSION) >= Version('2.1'): return (1, 2, 3) - elif CASSANDRA_VERSION >= '2.0': + elif Version(CASSANDRA_VERSION) >= Version('2.0'): return (1, 2) else: return (1) @@ -176,7 +179,7 @@ def get_unsupported_lower_protocol(): supported by the version of C* running """ - if CASSANDRA_VERSION >= '3.0': + if Version(CASSANDRA_VERSION) >= Version('3.0'): return 2 else: return None @@ -188,11 +191,11 @@ def get_unsupported_upper_protocol(): supported by the version of C* running """ - if CASSANDRA_VERSION >= '2.2': + if Version(CASSANDRA_VERSION) >= Version('2.2'): return None - if CASSANDRA_VERSION >= '2.1': + if Version(CASSANDRA_VERSION) >= Version('2.1'): return 4 - elif CASSANDRA_VERSION >= '2.0': + elif Version(CASSANDRA_VERSION) >= Version('2.0'): return 3 else: return None @@ -205,6 +208,7 @@ def get_unsupported_upper_protocol(): notprotocolv1 = unittest.skipUnless(PROTOCOL_VERSION > 1, 'Protocol v1 not supported') lessthenprotocolv4 = unittest.skipUnless(PROTOCOL_VERSION < 4, 'Protocol versions 4 or greater not supported') greaterthanprotocolv3 = unittest.skipUnless(PROTOCOL_VERSION >= 4, 'Protocol versions less than 4 are not supported') +protocolv5 = unittest.skipUnless(5 in get_supported_protocol_versions(), 'Protocol versions less than 5 are not supported') greaterthancass20 = unittest.skipUnless(CASSANDRA_VERSION >= '2.1', 'Cassandra version 2.1 or greater required') greaterthancass21 = unittest.skipUnless(CASSANDRA_VERSION >= '2.2', 'Cassandra version 2.2 or greater required') diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py index 7654fbcca7..e2891b358f 100644 --- a/tests/integration/long/test_failure_types.py +++ b/tests/integration/long/test_failure_types.py @@ -16,7 +16,8 @@ from cassandra import ConsistencyLevel, OperationTimedOut, ReadTimeout, WriteTimeout, ReadFailure, WriteFailure,\ FunctionFailure -from cassandra.cluster import Cluster +from cassandra.protocol import MAX_SUPPORTED_VERSION +from cassandra.cluster import Cluster, NoHostAvailable from cassandra.concurrent import execute_concurrent_with_args from cassandra.query import SimpleStatement from tests.integration import use_singledc, PROTOCOL_VERSION, get_cluster, setup_keyspace, remove_cluster, get_node @@ -63,14 +64,20 @@ def setUp(self): """ Test is skipped if run with native protocol version <4 """ - + self.support_v5 = True if PROTOCOL_VERSION < 4: raise unittest.SkipTest( "Native protocol 4,0+ is required for custom payloads, currently using %r" % (PROTOCOL_VERSION,)) + try: + self.cluster = Cluster(protocol_version=MAX_SUPPORTED_VERSION, allow_beta_protocol_version=True) + self.session = self.cluster.connect() + except NoHostAvailable: + log.info("Protocol Version 5 not supported,") + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.session = self.cluster.connect() + self.support_v5 = False - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) - self.session = self.cluster.connect() self.nodes_currently_failing = [] self.node1, self.node2, self.node3 = get_cluster().nodes.values() @@ -132,21 +139,28 @@ def setFailingNodes(self, failing_nodes, keyspace): node.start(wait_for_binary_proto=True, wait_other_notice=True) self.nodes_currently_failing.remove(node) - def _perform_cql_statement(self, text, consistency_level, expected_exception): + def _perform_cql_statement(self, text, consistency_level, expected_exception, session=None): """ Simple helper method to preform cql statements and check for expected exception @param text CQl statement to execute @param consistency_level Consistency level at which it is to be executed @param expected_exception Exception expected to be throw or none """ + if session is None: + session = self.session statement = SimpleStatement(text) statement.consistency_level = consistency_level if expected_exception is None: - self.execute_helper(self.session, statement) + self.execute_helper(session, statement) else: - with self.assertRaises(expected_exception): - self.execute_helper(self.session, statement) + with self.assertRaises(expected_exception) as cm: + self.execute_helper(session, statement) + if self.support_v5 and (isinstance(cm.exception, WriteFailure) or isinstance(cm.exception, ReadFailure)): + if isinstance(cm.exception, ReadFailure): + self.assertEqual(cm.exception.error_code_map.values()[0], 1) + else: + self.assertEqual(cm.exception.error_code_map.values()[0], 0) def test_write_failures_from_coordinator(self): """ @@ -157,8 +171,8 @@ def test_write_failures_from_coordinator(self): factor of the keyspace, and the consistency level, we will expect the coordinator to send WriteFailure, or not. - @since 2.6.0 - @jira_ticket PYTHON-238 + @since 2.6.0, 3.7.0 + @jira_ticket PYTHON-238, PYTHON-619 @expected_result Appropriate write failures from the coordinator @test_category queries:basic @@ -217,8 +231,8 @@ def test_tombstone_overflow_read_failure(self): from the coordinator. - @since 2.6.0 - @jira_ticket PYTHON-238 + @since 2.6.0, 3.7.0 + @jira_ticket PYTHON-238, PYTHON-619 @expected_result Appropriate write failures from the coordinator @test_category queries:basic @@ -379,11 +393,3 @@ def test_async_timeouts(self): self.assertAlmostEqual(expected_time, total_time, delta=.05) self.assertTrue(mock_errorback.called) self.assertFalse(mock_callback.called) - - - - - - - - diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 91dee32e79..a0d9c308d3 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -33,8 +33,8 @@ from cassandra.protocol import MAX_SUPPORTED_VERSION from cassandra.query import SimpleStatement, TraceUnavailable, tuple_factory -from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, get_node, CASSANDRA_VERSION, execute_until_pass, execute_with_long_wait_retry, get_node,\ - MockLoggingHandler, get_unsupported_lower_protocol, get_unsupported_upper_protocol +from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, CASSANDRA_VERSION, execute_until_pass, execute_with_long_wait_retry, get_node,\ + MockLoggingHandler, get_unsupported_lower_protocol, get_unsupported_upper_protocol, protocolv5 from tests.integration.util import assert_quiescent_pool_state @@ -460,7 +460,7 @@ def test_refresh_schema_no_wait(self): end_time = time.time() self.assertGreaterEqual(end_time - start_time, agreement_timeout) self.assertIs(original_meta, c.metadata.keyspaces) - + # refresh wait overrides cluster value original_meta = c.metadata.keyspaces start_time = time.time() @@ -489,7 +489,7 @@ def test_refresh_schema_no_wait(self): self.assertLess(end_time - start_time, refresh_threshold) self.assertIsNot(original_meta, c.metadata.keyspaces) self.assertEqual(original_meta, c.metadata.keyspaces) - + # refresh wait overrides cluster value original_meta = c.metadata.keyspaces start_time = time.time() @@ -587,7 +587,7 @@ def test_idle_heartbeat(self): cluster.set_core_connections_per_host(HostDistance.LOCAL, 1) session = cluster.connect(wait_for_all_pools=True) - # This test relies on impl details of connection req id management to see if heartbeats + # This test relies on impl details of connection req id management to see if heartbeats # are being sent. May need update if impl is changed connection_request_ids = {} for h in cluster.get_connection_holders(): @@ -763,7 +763,7 @@ def test_profile_lb_swap(self): expected_hosts = set(cluster.metadata.all_hosts()) rr1_queried_hosts = set() rr2_queried_hosts = set() - + rs = session.execute(query, execution_profile='rr1') rr1_queried_hosts.add(rs.response_future._current_host) rs = session.execute(query, execution_profile='rr2') @@ -1054,3 +1054,48 @@ def test_duplicate(self): self.assertEqual(len(warnings), 1) self.assertTrue('multiple' in warnings[0]) logger.removeHandler(mock_handler) + + +@protocolv5 +class BetaProtocolTest(unittest.TestCase): + + @protocolv5 + def test_invalid_protocol_version_beta_option(self): + """ + Test cluster connection with protocol v5 and beta flag not set + + @since 3.7.0 + @jira_ticket PYTHON-614 + @expected_result client shouldn't connect with V5 and no beta flag set + + @test_category connection + """ + + cluster = Cluster(protocol_version=MAX_SUPPORTED_VERSION, allow_beta_protocol_version=False) + try: + with self.assertRaises(NoHostAvailable): + cluster.connect() + except Exception as e: + self.fail("Unexpected error encountered {0}".format(e.message)) + cluster.shutdown() + + @protocolv5 + def test_valid_protocol_version_beta_options_connect(self): + """ + Test cluster connection with protocol version 5 and beta flag set + + @since 3.7.0 + @jira_ticket PYTHON-614 + @expected_result client should connect with protocol v5 and beta flag set. + + @test_category connection + """ + cluster = Cluster(protocol_version=MAX_SUPPORTED_VERSION, allow_beta_protocol_version=True) + session = cluster.connect() + self.assertEqual(cluster.protocol_version, MAX_SUPPORTED_VERSION) + self.assertTrue(session.execute("select release_version from system.local")[0]) + + + + + From dab11470374818d31430855f201ddbc423b7ecdc Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 17 Aug 2016 15:16:12 -0500 Subject: [PATCH 0842/2005] Adding new packaging dependency to tox --- tox.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/tox.ini b/tox.ini index f4d21ba5df..5e2a02ff9f 100644 --- a/tox.ini +++ b/tox.ini @@ -6,6 +6,7 @@ deps = nose mock<=1.0.1 PyYAML six + packaging [testenv] deps = {[base]deps} From e345becabbec0edc19f07a47b8169a3ad37b5d37 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 17 Aug 2016 16:15:43 -0500 Subject: [PATCH 0843/2005] PYTHON-606 Adding integation tests for udt default values --- .../integration/cqlengine/model/test_udts.py | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index dc7eb134c0..bb876d800d 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -527,3 +527,38 @@ class User(UserType): u = User() u.age = 20 self.assertEqual(20, u.age) + + def test_default_values(self): + """ + Test that default types are set on object creation for UDTs + + @since 3.7.0 + @jira_ticket PYTHON-606 + @expected_result Default values should be set. + + @test_category data_types:udt + """ + + class NestedUdt(UserType): + + test_id = columns.UUID(default=uuid4) + something = columns.Text() + default_text = columns.Text(default="default text") + + class OuterModel(Model): + + name = columns.Text(primary_key=True) + first_name = columns.Text() + nested = columns.List(columns.UserDefinedType(NestedUdt)) + simple = columns.UserDefinedType(NestedUdt) + + sync_table(OuterModel) + + t = OuterModel.create(name='test1') + t.nested = [NestedUdt(something='test')] + t.simple = NestedUdt(something="") + t.save() + self.assertIsNotNone(t.nested[0].test_id) + self.assertEqual(t.nested[0].default_text, "default text") + self.assertIsNotNone(t.simple.test_id) + self.assertEqual(t.simple.default_text, "default text") From 6c352f699f42e70143b362f3634fd6495881ef2f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 19 Aug 2016 09:19:19 -0500 Subject: [PATCH 0844/2005] ec2 address translator PYTHON-198 --- cassandra/policies.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/cassandra/policies.py b/cassandra/policies.py index d926873bc0..80b807c329 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -884,3 +884,23 @@ class IdentityTranslator(AddressTranslator): """ def translate(self, addr): return addr + + +class EC2MultiRegionTranslator(AddressTranslator): + """ + Resolves private ips of the hosts in the same datacenter as the client, and public ips of hosts in other datacenters. + """ + def translate(self, addr): + """ + Reverse DNS the public broadcast_address, then lookup that hostname to get the AWS-resolved IP, which + will point to the private IP address within the same datacenter. + """ + # get family of this address so we translate to the same + family = socket.getaddrinfo(addr, 0, socket.AF_UNSPEC, socket.SOCK_STREAM)[0][0] + host = socket.getfqdn(addr) + for a in socket.getaddrinfo(host, 0, family, socket.SOCK_STREAM): + try: + return a[4][0] + except Exception: + pass + return addr From 075ac3cba8f257b585162050b79ecc3e4af944c2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 19 Aug 2016 13:53:34 -0500 Subject: [PATCH 0845/2005] address translator unit tests PYTHON-198 --- tests/unit/test_cluster.py | 3 +-- tests/unit/test_policies.py | 19 +++++++++++++++++-- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 96d9a9442b..279b0f94a1 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -16,8 +16,7 @@ except ImportError: import unittest # noqa -from copy import copy -from mock import patch, Mock +from mock import patch from cassandra import ConsistencyLevel, DriverException, Timeout, Unavailable, RequestExecutionException, ReadTimeout, WriteTimeout, CoordinationFailure, ReadFailure, WriteFailure, FunctionFailure, AlreadyExists,\ InvalidRequest, Unauthorized, AuthenticationFailed, OperationTimedOut, UnsupportedOperation, RequestValidationException, ConfigurationException diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 33a712d5a6..61f6bbb49e 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -18,7 +18,7 @@ import unittest # noqa from itertools import islice, cycle -from mock import Mock +from mock import Mock, patch from random import randint import six import sys @@ -33,7 +33,8 @@ HostDistance, ExponentialReconnectionPolicy, RetryPolicy, WriteType, DowngradingConsistencyRetryPolicy, ConstantReconnectionPolicy, - LoadBalancingPolicy, ConvictionPolicy, ReconnectionPolicy, FallthroughRetryPolicy) + LoadBalancingPolicy, ConvictionPolicy, ReconnectionPolicy, FallthroughRetryPolicy, + IdentityTranslator, EC2MultiRegionTranslator) from cassandra.pool import Host from cassandra.query import Statement @@ -1127,3 +1128,17 @@ def test_hosts_with_hostname(self): self.assertEqual(sorted(qplan), [host]) self.assertEqual(policy.distance(host), HostDistance.LOCAL) + +class AddressTranslatorTest(unittest.TestCase): + + def test_identity_translator(self): + it = IdentityTranslator() + addr = '127.0.0.1' + + @patch('socket.getfqdn', return_value='localhost') + def test_ec2_multi_region_translator(self, *_): + ec2t = EC2MultiRegionTranslator() + addr = '127.0.0.1' + translated = ec2t.translate(addr) + self.assertIsNot(translated, addr) # verifies that the resolver path is followed + self.assertEqual(translated, addr) # and that it resolves to the same address From 687f3a33b24f3267d5ae818c8c70d2c093bcd22b Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 22 Aug 2016 14:04:37 -0400 Subject: [PATCH 0846/2005] Initial connection registry implementation --- cassandra/cqlengine/connection.py | 220 +++++++++++------- .../cqlengine/connections/test_connection.py | 3 +- 2 files changed, 135 insertions(+), 88 deletions(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 8edcc76d54..289a44372b 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -12,13 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -from collections import namedtuple, defaultdict +from collections import defaultdict import logging import six import threading from cassandra.cluster import Cluster, _NOT_SET, NoHostAvailable, UserTypeDoesNotExist -from cassandra.query import SimpleStatement, Statement, dict_factory +from cassandra.query import SimpleStatement, dict_factory from cassandra.cqlengine import CQLEngineException from cassandra.cqlengine.statements import BaseCQLStatement @@ -28,13 +28,8 @@ NOT_SET = _NOT_SET # required for passing timeout to Session.execute -Host = namedtuple('Host', ['name', 'port']) - -cluster = None -session = None -lazy_connect_args = None -lazy_connect_lock = threading.RLock() - +# connections registry +_connections = {} # Because type models may be registered before a connection is present, # and because sessions may be replaced, we must register UDTs here, in order @@ -46,20 +41,123 @@ class UndefinedKeyspaceException(CQLEngineException): pass +class Connection(object): + """CQLEngine Connection""" + + name = None + hosts = None + + consistency = None + retry_connect = False + lazy_connect = False + lazy_connect_lock = None + cluster_options = None + + cluster = None + session = None + + def __init__(self, name, hosts, consistency=None, + lazy_connect=False, retry_connect=False, cluster_options=None): + self.hosts = hosts + self.name = name + self.consistency = consistency + self.lazy_connect = lazy_connect + self.retry_connect = retry_connect + self.cluster_options = cluster_options if cluster_options else {} + self.lazy_connect_lock = threading.RLock() + + def setup(self): + """Setup the connection""" + + if 'username' in self.cluster_options or 'password' in self.cluster_options: + raise CQLEngineException("Username & Password are now handled by using the native driver's auth_provider") + + if self.lazy_connect: + return + + self.cluster = Cluster(self.hosts, **self.cluster_options) + try: + self.session = self.cluster.connect() + log.debug("cqlengine connection '{0}' initialized with internally created session".format(self.name)) + except NoHostAvailable: + if self.retry_connect: + log.warning("connect failed for '{0}', setting up for re-attempt on first use".format(self.name)) + self.lazy_connect = True + raise + + if self.consistency is not None: + self.session.default_consistency_level = self.consistency + + self.setup_session() + + def setup_session(self): + self.session.row_factory = dict_factory + enc = self.session.encoder + enc.mapping[tuple] = enc.cql_encode_tuple + _register_known_types(self.session.cluster) + + def handle_lazy_connect(self): + + # if lazy_connect is False, it means the cluster is setup and ready + # No need to acquire the lock + if not self.lazy_connect: + return + + with self.lazy_connect_lock: + # lazy_connect might have been set to False by another thread while waiting the lock + # In this case, do nothing. + if self.lazy_connect: + log.debug("Lazy connect for connection '{0}'".format(self.name)) + self.lazy_connect = False + self.setup() + + +def register_connection(name, hosts, consistency=None, lazy_connect=False, + retry_connect=False, cluster_options=None, default=False): + + if name in _connections: + log.warning("Registering connection '{0}' when it already exists.".format(name)) + + conn = Connection(name, hosts, consistency=consistency,lazy_connect=lazy_connect, + retry_connect=retry_connect, cluster_options=cluster_options) + + _connections[name] = conn + + if default: + _connections['_default_'] = conn + + return conn + + +def get_connection(name=None): + + if not name: + name = '_default_' + + if name not in _connections: + raise ValueError("Connection name '{0}' doesn't exist in the registry.".format(name)) + + conn = _connections[name] + conn.handle_lazy_connect() + + return conn + + def default(): """ Configures the global mapper connection to localhost, using the driver defaults (except for row_factory) """ - global cluster, session - if session: - log.warning("configuring new connection for cqlengine when one was already set") - - cluster = Cluster() - session = cluster.connect() + try: + conn = get_connection() + if conn.session: + log.warning("configuring new connection for cqlengine when one was already set") + except: + pass - _setup_session(session) + conn = register_connection('default', hosts=None, default=True) + conn.setup() log.debug("cqlengine connection initialized with default session to localhost") @@ -71,22 +169,23 @@ def set_session(s): Note: the mapper presently requires a Session :attr:`~.row_factory` set to ``dict_factory``. This may be relaxed in the future """ - global cluster, session - if session: + conn = get_connection() + + if conn.session: log.warning("configuring new connection for cqlengine when one was already set") if s.row_factory is not dict_factory: raise CQLEngineException("Failed to initialize: 'Session.row_factory' must be 'dict_factory'.") - session = s - cluster = s.cluster + conn.session = s + conn.cluster = s.cluster # Set default keyspace from given session's keyspace - if session.keyspace: + if conn.session.keyspace: from cassandra.cqlengine import models - models.DEFAULT_KEYSPACE = session.keyspace + models.DEFAULT_KEYSPACE = conn.session.keyspace - _setup_session(session) + conn.setup_session() log.debug("cqlengine connection initialized with %s", s) @@ -108,53 +207,20 @@ def setup( :param bool retry_connect: True if we should retry to connect even if there was a connection failure initially :param \*\*kwargs: Pass-through keyword arguments for :class:`cassandra.cluster.Cluster` """ - global cluster, session, lazy_connect_args - - if 'username' in kwargs or 'password' in kwargs: - raise CQLEngineException("Username & Password are now handled by using the native driver's auth_provider") from cassandra.cqlengine import models models.DEFAULT_KEYSPACE = default_keyspace - if lazy_connect: - kwargs['default_keyspace'] = default_keyspace - kwargs['consistency'] = consistency - kwargs['lazy_connect'] = False - kwargs['retry_connect'] = retry_connect - lazy_connect_args = (hosts, kwargs) - return - - cluster = Cluster(hosts, **kwargs) - try: - session = cluster.connect() - log.debug("cqlengine connection initialized with internally created session") - except NoHostAvailable: - if retry_connect: - log.warning("connect failed, setting up for re-attempt on first use") - kwargs['default_keyspace'] = default_keyspace - kwargs['consistency'] = consistency - kwargs['lazy_connect'] = False - kwargs['retry_connect'] = retry_connect - lazy_connect_args = (hosts, kwargs) - raise - if consistency is not None: - session.default_consistency_level = consistency - - _setup_session(session) - - -def _setup_session(session): - session.row_factory = dict_factory - enc = session.encoder - enc.mapping[tuple] = enc.cql_encode_tuple - _register_known_types(session.cluster) + conn = register_connection('default', hosts=hosts, consistency=consistency, lazy_connect=lazy_connect, + retry_connect=retry_connect, cluster_options=kwargs, default=True) + conn.setup() def execute(query, params=None, consistency_level=None, timeout=NOT_SET): - handle_lazy_connect() + conn = get_connection() - if not session: + if not conn.session: raise CQLEngineException("It is required to setup() cqlengine before executing queries") if isinstance(query, SimpleStatement): @@ -167,45 +233,27 @@ def execute(query, params=None, consistency_level=None, timeout=NOT_SET): log.debug(query.query_string) - result = session.execute(query, params, timeout=timeout) + result = conn.session.execute(query, params, timeout=timeout) return result def get_session(): - handle_lazy_connect() - return session + conn = get_connection() + return conn.session def get_cluster(): - handle_lazy_connect() - if not cluster: + conn = get_connection() + if not conn.cluster: raise CQLEngineException("%s.cluster is not configured. Call one of the setup or default functions first." % __name__) - return cluster - - -def handle_lazy_connect(): - global lazy_connect_args - - # if lazy_connect_args is None, it means the cluster is setup and ready - # No need to acquire the lock - if not lazy_connect_args: - return - - with lazy_connect_lock: - # lazy_connect_args might have been set to None by another thread while waiting the lock - # In this case, do nothing. - if lazy_connect_args: - log.debug("lazy connect") - hosts, kwargs = lazy_connect_args - setup(hosts, **kwargs) - lazy_connect_args = None + return conn.cluster def register_udt(keyspace, type_name, klass): udt_by_keyspace[keyspace][type_name] = klass - global cluster + cluster = get_cluster() if cluster: try: cluster.register_user_type(keyspace, type_name, klass) diff --git a/tests/integration/cqlengine/connections/test_connection.py b/tests/integration/cqlengine/connections/test_connection.py index 80771d3697..659893bd0f 100644 --- a/tests/integration/cqlengine/connections/test_connection.py +++ b/tests/integration/cqlengine/connections/test_connection.py @@ -40,7 +40,7 @@ class ConnectionTest(BaseCassEngTestCase): @classmethod def setUpClass(cls): - cls.original_cluster = connection.cluster + cls.original_cluster = connection.get_cluster() cls.keyspace1 = 'ctest1' cls.keyspace2 = 'ctest2' super(ConnectionTest, cls).setUpClass() @@ -95,4 +95,3 @@ def test_connection_session_switch(self): connection.set_session(self.session2) self.assertEqual(1, TestConnectModel.objects.count()) self.assertEqual(TestConnectModel.objects.first(), TCM2) - From f4a82ee073d481379fca4e8dbc7dc2109e7124ed Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 22 Aug 2016 15:51:01 -0400 Subject: [PATCH 0847/2005] Add QuerySet using() method --- cassandra/cqlengine/query.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index f04a4a9a3a..3fc45fcb38 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -965,6 +965,18 @@ def timeout(self, timeout): clone._timeout = timeout return clone + def using(self, keyspace=None): + """ + Change the context on-the-fly of the Model class (connection, keyspace) + """ + + clone = copy.deepcopy(self) + if keyspace: + new_type = type(self.model.__name__, (self.model,), {'__keyspace__': keyspace}) + clone.model = new_type + + return clone + class ResultObject(dict): """ From 1b53c7c154f29087eb2c99af143f86da316b3bcf Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 23 Aug 2016 10:11:25 -0500 Subject: [PATCH 0848/2005] PYTHON-361 adding test --- tests/integration/standard/test_query.py | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 1b09286840..94b69d22c1 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -13,7 +13,7 @@ # limitations under the License. import os from cassandra.concurrent import execute_concurrent - +from cassandra import DriverException try: import unittest2 as unittest @@ -25,7 +25,7 @@ BatchStatement, BatchType, dict_factory, TraceUnavailable) from cassandra.cluster import Cluster, NoHostAvailable from cassandra.policies import HostDistance, RoundRobinPolicy - +from tests.unit.cython.utils import notcython from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, get_server_versions, greaterthanprotocolv3, MockLoggingHandler, get_supported_protocol_versions import time @@ -70,6 +70,23 @@ def test_trace_prints_okay(self): for event in trace.events: str(event) + @notcython + def test_row_error_message(self): + """ + Test to validate, new column deserialization message + @since 3.7.0 + @jira_ticket PYTHON-361 + @expected_result Special failed decoding message should be present + + @test_category tracing + """ + self.session.execute("CREATE TABLE {0}.{1} (k int PRIMARY KEY, v timestamp)".format(self.keyspace_name,self.function_table_name)) + ss = SimpleStatement("INSERT INTO {0}.{1} (k, v) VALUES (1, 1000000000000000)".format(self.keyspace_name, self.function_table_name)) + self.session.execute(ss) + with self.assertRaises(DriverException) as context: + self.session.execute("SELECT * FROM {0}.{1}".format(self.keyspace_name, self.function_table_name)) + self.assertIn("Failed decoding result column", context.exception.message) + def test_trace_id_to_resultset(self): future = self.session.execute_async("SELECT * FROM system.local", trace=True) From c11b0c0dd08c5d906bdff16b96405650535c6735 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 18 Aug 2016 15:10:33 -0500 Subject: [PATCH 0849/2005] Fix to keep type queries from breaking metadata construction in C* <2.1 --- cassandra/metadata.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 1340c2561e..735d718fbc 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1598,9 +1598,16 @@ def _query_build_row(self, query_string, build_func): def _query_build_rows(self, query_string, build_func): query = QueryMessage(query=query_string, consistency_level=ConsistencyLevel.ONE) - response = self.connection.wait_for_response(query, self.timeout) - result = dict_factory(*response.results) - return [build_func(row) for row in result] + responses = self.connection.wait_for_responses((query), timeout=self.timeout, fail_on_error=False) + (success, response) = responses[0] + if success: + result = dict_factory(*response.results) + return [build_func(row) for row in result] + elif isinstance(response, InvalidRequest): + log.debug("user types table not found") + return [] + else: + raise response class SchemaParserV22(_SchemaParser): From 387bec21cf024dc9fe6fe2c08d69562bcb9a6e59 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 23 Aug 2016 14:24:55 -0500 Subject: [PATCH 0850/2005] Increaseing wiggle room for timeout test --- tests/unit/test_metadata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 5c0137314a..cd1c9118ed 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -153,7 +153,7 @@ def test_nts_token_performance(self): nts.make_token_replica_map(token_to_host_owner, ring) elapsed_bad = timeit.default_timer() - start_time difference = elapsed_bad - elapsed_base - self.assertTrue(difference < .5 and difference > -.5) + self.assertTrue(difference < 1 and difference > -1) def test_nts_make_token_replica_map_multi_rack(self): token_to_host_owner = {} From 0b1d012524b1a7377b2dc44a81655ed6bbbf9609 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 23 Aug 2016 15:42:42 -0400 Subject: [PATCH 0851/2005] Add connection selection support to mangement commands --- cassandra/cqlengine/connection.py | 29 ++--- cassandra/cqlengine/management.py | 201 +++++++++++++++++++----------- cassandra/cqlengine/query.py | 4 +- cassandra/cqlengine/usertype.py | 6 +- 4 files changed, 146 insertions(+), 94 deletions(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 289a44372b..1a8922a862 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -29,6 +29,7 @@ NOT_SET = _NOT_SET # required for passing timeout to Session.execute # connections registry +DEFAULT_CONNECTION = '_default_' _connections = {} # Because type models may be registered before a connection is present, @@ -124,18 +125,19 @@ def register_connection(name, hosts, consistency=None, lazy_connect=False, _connections[name] = conn if default: - _connections['_default_'] = conn + _connections[DEFAULT_CONNECTION] = conn + conn.setup() return conn def get_connection(name=None): if not name: - name = '_default_' + name = DEFAULT_CONNECTION if name not in _connections: - raise ValueError("Connection name '{0}' doesn't exist in the registry.".format(name)) + raise CQLEngineException("Connection name '{0}' doesn't exist in the registry.".format(name)) conn = _connections[name] conn.handle_lazy_connect() @@ -211,14 +213,13 @@ def setup( from cassandra.cqlengine import models models.DEFAULT_KEYSPACE = default_keyspace - conn = register_connection('default', hosts=hosts, consistency=consistency, lazy_connect=lazy_connect, - retry_connect=retry_connect, cluster_options=kwargs, default=True) - conn.setup() + register_connection('default', hosts=hosts, consistency=consistency, lazy_connect=lazy_connect, + retry_connect=retry_connect, cluster_options=kwargs, default=True) -def execute(query, params=None, consistency_level=None, timeout=NOT_SET): +def execute(query, params=None, consistency_level=None, timeout=NOT_SET, connection=None): - conn = get_connection() + conn = get_connection(connection) if not conn.session: raise CQLEngineException("It is required to setup() cqlengine before executing queries") @@ -238,22 +239,22 @@ def execute(query, params=None, consistency_level=None, timeout=NOT_SET): return result -def get_session(): - conn = get_connection() +def get_session(connection=None): + conn = get_connection(connection) return conn.session -def get_cluster(): - conn = get_connection() +def get_cluster(connection=None): + conn = get_connection(connection) if not conn.cluster: raise CQLEngineException("%s.cluster is not configured. Call one of the setup or default functions first." % __name__) return conn.cluster -def register_udt(keyspace, type_name, klass): +def register_udt(keyspace, type_name, klass, connection=None): udt_by_keyspace[keyspace][type_name] = klass - cluster = get_cluster() + cluster = get_cluster(connection) if cluster: try: cluster.register_user_type(keyspace, type_name, klass) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index cc2a34599f..4a8ad5a32b 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -18,11 +18,12 @@ import os import six import warnings +from itertools import product from cassandra import metadata from cassandra.cqlengine import CQLEngineException from cassandra.cqlengine import columns, query -from cassandra.cqlengine.connection import execute, get_cluster +from cassandra.cqlengine.connection import execute, get_cluster, DEFAULT_CONNECTION from cassandra.cqlengine.models import Model from cassandra.cqlengine.named import NamedTable from cassandra.cqlengine.usertype import UserType @@ -37,7 +38,34 @@ schema_columnfamilies = NamedTable('system', 'schema_columnfamilies') -def create_keyspace_simple(name, replication_factor, durable_writes=True): +def get_context(keyspaces, connections): + """Return the execution context""" + + if keyspaces: + if not isinstance(keyspaces, (list, tuple)): + raise ValueError('keyspaces must be a list or a tuple.') + + if connections: + if not isinstance(connections, (list, tuple)): + raise ValueError('connections must be a list or a tuple.') + + keyspaces = keyspaces if keyspaces else [None] + connections = connections if connections else [None] + + return product(connections, keyspaces) + + +def log_msg(msg, connection=None, keyspace=None): + """Format log message to add keyspace and connection context""" + connection_info = connection if connection else DEFAULT_CONNECTION + if keyspace: + msg = '[Connection: {0}, Keyspace: {1}] {2}'.format(connection_info, keyspace, msg) + else: + msg = '[Connection: {0}] {1}'.format(connection_info, msg) + return msg + + +def create_keyspace_simple(name, replication_factor, durable_writes=True, connections=None): """ Creates a keyspace with SimpleStrategy for replica placement @@ -51,12 +79,13 @@ def create_keyspace_simple(name, replication_factor, durable_writes=True): :param str name: name of keyspace to create :param int replication_factor: keyspace replication factor, used with :attr:`~.SimpleStrategy` :param bool durable_writes: Write log is bypassed if set to False + :param str connections: List of connection names """ _create_keyspace(name, durable_writes, 'SimpleStrategy', - {'replication_factor': replication_factor}) + {'replication_factor': replication_factor}, connections=connections) -def create_keyspace_network_topology(name, dc_replication_map, durable_writes=True): +def create_keyspace_network_topology(name, dc_replication_map, durable_writes=True, connections=None): """ Creates a keyspace with NetworkTopologyStrategy for replica placement @@ -70,25 +99,37 @@ def create_keyspace_network_topology(name, dc_replication_map, durable_writes=Tr :param str name: name of keyspace to create :param dict dc_replication_map: map of dc_names: replication_factor :param bool durable_writes: Write log is bypassed if set to False + :param str connections: List of connection names """ - _create_keyspace(name, durable_writes, 'NetworkTopologyStrategy', dc_replication_map) + _create_keyspace(name, durable_writes, 'NetworkTopologyStrategy', dc_replication_map, connections=connections) -def _create_keyspace(name, durable_writes, strategy_class, strategy_options): +def _create_keyspace(name, durable_writes, strategy_class, strategy_options, connections=None): if not _allow_schema_modification(): return - cluster = get_cluster() + if connections: + if not isinstance(connections, (list, tuple)): + raise ValueError('Connections must be a list or a tuple.') + + def __create_keyspace(name, durable_writes, strategy_class, strategy_options, connection=None): + cluster = get_cluster(connection) + + if name not in cluster.metadata.keyspaces: + log.info(log_msg("Creating keyspace %s", connection=connection), name) + ks_meta = metadata.KeyspaceMetadata(name, durable_writes, strategy_class, strategy_options) + execute(ks_meta.as_cql_query(), connection=connection) + else: + log.info(log_msg("Not creating keyspace %s because it already exists", connection=connection), name) - if name not in cluster.metadata.keyspaces: - log.info("Creating keyspace %s ", name) - ks_meta = metadata.KeyspaceMetadata(name, durable_writes, strategy_class, strategy_options) - execute(ks_meta.as_cql_query()) + if connections: + for connection in connections: + __create_keyspace(name, durable_writes, strategy_class, strategy_options, connection=connection) else: - log.info("Not creating keyspace %s because it already exists", name) + __create_keyspace(name, durable_writes, strategy_class, strategy_options) -def drop_keyspace(name): +def drop_keyspace(name, connections=None): """ Drops a keyspace, if it exists. @@ -98,14 +139,25 @@ def drop_keyspace(name): Take care to execute schema modifications in a single context (i.e. not concurrently with other clients).** :param str name: name of keyspace to drop + :param str connections: List of connection names """ if not _allow_schema_modification(): return - cluster = get_cluster() - if name in cluster.metadata.keyspaces: - execute("DROP KEYSPACE {0}".format(metadata.protect_name(name))) + if connections: + if not isinstance(connections, (list, tuple)): + raise ValueError('Connections must be a list or a tuple.') + def _drop_keyspace(name, connection=None): + cluster = get_cluster(connection) + if name in cluster.metadata.keyspaces: + execute("DROP KEYSPACE {0}".format(metadata.protect_name(name)), connection=connection) + + if connections: + for connection in connections: + _drop_keyspace(name, connection) + else: + _drop_keyspace(name) def _get_index_name_by_column(table, column_name): """ @@ -119,7 +171,7 @@ def _get_index_name_by_column(table, column_name): return index_metadata.name -def sync_table(model, keyspaces=None): +def sync_table(model, keyspaces=None, connections=None): """ Inspects the model and creates / updates the corresponding table and columns. @@ -138,19 +190,13 @@ def sync_table(model, keyspaces=None): *There are plans to guard schema-modifying functions with an environment-driven conditional.* """ - if keyspaces: - if not isinstance(keyspaces, (list, tuple)): - raise ValueError('keyspaces must be a list or a tuple.') - - for keyspace in keyspaces: - with query.ContextQuery(model, keyspace=keyspace) as m: - _sync_table(m) - else: - _sync_table(model) - + context = get_context(keyspaces, connections) + for connection, keyspace in context: + with query.ContextQuery(model, keyspace=keyspace) as m: + _sync_table(m, connection=connection) -def _sync_table(model): +def _sync_table(model, connection=None): if not _allow_schema_modification(): return @@ -165,12 +211,13 @@ def _sync_table(model): ks_name = model._get_keyspace() - cluster = get_cluster() + cluster = get_cluster(connection) try: keyspace = cluster.metadata.keyspaces[ks_name] except KeyError: - raise CQLEngineException("Keyspace '{0}' for model {1} does not exist.".format(ks_name, model)) + msg = log_msg("Keyspace '{0}' for model {1} does not exist.", connection=connection) + raise CQLEngineException(msg.format(ks_name, model)) tables = keyspace.tables @@ -179,21 +226,21 @@ def _sync_table(model): udts = [] columns.resolve_udts(col, udts) for udt in [u for u in udts if u not in syncd_types]: - _sync_type(ks_name, udt, syncd_types) + _sync_type(ks_name, udt, syncd_types, connection=connection) if raw_cf_name not in tables: - log.debug("sync_table creating new table %s", cf_name) + log.debug(log_msg("sync_table creating new table %s", keyspace=ks_name, connection=connection), cf_name) qs = _get_create_table(model) try: - execute(qs) + execute(qs, connection=connection) except CQLEngineException as ex: # 1.2 doesn't return cf names, so we have to examine the exception # and ignore if it says the column family already exists if "Cannot add already existing column family" not in unicode(ex): raise else: - log.debug("sync_table checking existing table %s", cf_name) + log.debug(log_msg("sync_table checking existing table %s", keyspace=ks_name, connection=connection), cf_name) table_meta = tables[raw_cf_name] _validate_pk(model, table_meta) @@ -207,24 +254,27 @@ def _sync_table(model): if db_name in table_columns: col_meta = table_columns[db_name] if col_meta.cql_type != col.db_type: - msg = 'Existing table {0} has column "{1}" with a type ({2}) differing from the model type ({3}).' \ - ' Model should be updated.'.format(cf_name, db_name, col_meta.cql_type, col.db_type) + msg = log_msg('Existing table {0} has column "{1}" with a type ({2}) differing from the model type ({3}).' + ' Model should be updated.', keyspace=ks_name, connection=connection) + msg = msg.format(cf_name, db_name, col_meta.cql_type, col.db_type) warnings.warn(msg) log.warning(msg) continue if col.primary_key or col.primary_key: - raise CQLEngineException("Cannot add primary key '{0}' (with db_field '{1}') to existing table {2}".format(model_name, db_name, cf_name)) + msg = log_msg("Cannot add primary key '{0}' (with db_field '{1}') to existing table {2}", keyspace=ks_name, connection=connection) + raise CQLEngineException(msg.format(model_name, db_name, cf_name)) query = "ALTER TABLE {0} add {1}".format(cf_name, col.get_column_def()) - execute(query) + execute(query, connection=connection) db_fields_not_in_model = model_fields.symmetric_difference(table_columns) if db_fields_not_in_model: - log.info("Table {0} has fields not referenced by model: {1}".format(cf_name, db_fields_not_in_model)) + msg = log_msg("Table {0} has fields not referenced by model: {1}", keyspace=ks_name, connection=connection) + log.info(msg.format(cf_name, db_fields_not_in_model)) - _update_options(model) + _update_options(model, connection=connection) table = cluster.metadata.keyspaces[ks_name].tables[raw_cf_name] @@ -240,7 +290,7 @@ def _sync_table(model): qs += ['ON {0}'.format(cf_name)] qs += ['("{0}")'.format(column.db_field_name)] qs = ' '.join(qs) - execute(qs) + execute(qs, connection=connection) def _validate_pk(model, table_meta): @@ -259,7 +309,7 @@ def _pk_string(partition, clustering): _pk_string(meta_partition, meta_clustering))) -def sync_type(ks_name, type_model): +def sync_type(ks_name, type_model, connection=None): """ Inspects the type_model and creates / updates the corresponding type. @@ -277,33 +327,33 @@ def sync_type(ks_name, type_model): if not issubclass(type_model, UserType): raise CQLEngineException("Types must be derived from base UserType.") - _sync_type(ks_name, type_model) + _sync_type(ks_name, type_model, connection=connection) -def _sync_type(ks_name, type_model, omit_subtypes=None): +def _sync_type(ks_name, type_model, omit_subtypes=None, connection=None): syncd_sub_types = omit_subtypes or set() for field in type_model._fields.values(): udts = [] columns.resolve_udts(field, udts) for udt in [u for u in udts if u not in syncd_sub_types]: - _sync_type(ks_name, udt, syncd_sub_types) + _sync_type(ks_name, udt, syncd_sub_types, connection=connection) syncd_sub_types.add(udt) type_name = type_model.type_name() type_name_qualified = "%s.%s" % (ks_name, type_name) - cluster = get_cluster() + cluster = get_cluster(connection) keyspace = cluster.metadata.keyspaces[ks_name] defined_types = keyspace.user_types if type_name not in defined_types: - log.debug("sync_type creating new type %s", type_name_qualified) + log.debug(log_msg("sync_type creating new type %s", keyspace=ks_name, connection=connection), type_name_qualified) cql = get_create_type(type_model, ks_name) - execute(cql) + execute(cql, connection=connection) cluster.refresh_user_type_metadata(ks_name, type_name) - type_model.register_for_keyspace(ks_name) + type_model.register_for_keyspace(ks_name, connection=connection) else: type_meta = defined_types[type_name] defined_fields = type_meta.field_names @@ -311,24 +361,26 @@ def _sync_type(ks_name, type_model, omit_subtypes=None): for field in type_model._fields.values(): model_fields.add(field.db_field_name) if field.db_field_name not in defined_fields: - execute("ALTER TYPE {0} ADD {1}".format(type_name_qualified, field.get_column_def())) + execute("ALTER TYPE {0} ADD {1}".format(type_name_qualified, field.get_column_def()), connection=connection) else: field_type = type_meta.field_types[defined_fields.index(field.db_field_name)] if field_type != field.db_type: - msg = 'Existing user type {0} has field "{1}" with a type ({2}) differing from the model user type ({3}).' \ - ' UserType should be updated.'.format(type_name_qualified, field.db_field_name, field_type, field.db_type) + msg = log_msg('Existing user type {0} has field "{1}" with a type ({2}) differing from the model user type ({3}).' + ' UserType should be updated.', keyspace=ks_name, connection=connection) + msg = msg.format(type_name_qualified, field.db_field_name, field_type, field.db_type) warnings.warn(msg) log.warning(msg) - type_model.register_for_keyspace(ks_name) + type_model.register_for_keyspace(ks_name, connection=connection) if len(defined_fields) == len(model_fields): - log.info("Type %s did not require synchronization", type_name_qualified) + log.info(log_msg("Type %s did not require synchronization", keyspace=ks_name, connection=connection), type_name_qualified) return db_fields_not_in_model = model_fields.symmetric_difference(defined_fields) if db_fields_not_in_model: - log.info("Type %s has fields not referenced by model: %s", type_name_qualified, db_fields_not_in_model) + msg = log_msg("Type %s has fields not referenced by model: %s", keyspace=ks_name, connection=connection) + log.info(msg, type_name_qualified, db_fields_not_in_model) def get_create_type(type_model, keyspace): @@ -377,9 +429,9 @@ def add_column(col): return ' '.join(query_strings) -def _get_table_metadata(model): +def _get_table_metadata(model, connection=None): # returns the table as provided by the native driver for a given model - cluster = get_cluster() + cluster = get_cluster(connection) ks = model._get_keyspace() table = model._raw_column_family_name() table = cluster.metadata.keyspaces[ks].tables[table] @@ -401,19 +453,22 @@ def _options_map_from_strings(option_strings): return options -def _update_options(model): +def _update_options(model, connection=None): """Updates the table options for the given model if necessary. :param model: The model to update. + :param connection: Name of the connection to use :return: `True`, if the options were modified in Cassandra, `False` otherwise. :rtype: bool """ - log.debug("Checking %s for option differences", model) + ks_name = model._get_keyspace() + msg = log_msg("Checking %s for option differences", keyspace=ks_name, connection=connection) + log.debug(msg, model) model_options = model.__options__ or {} - table_meta = _get_table_metadata(model) + table_meta = _get_table_metadata(model, connection=connection) # go to CQL string first to normalize meta from different versions existing_option_strings = set(table_meta._make_option_strings(table_meta.options)) existing_options = _options_map_from_strings(existing_option_strings) @@ -425,7 +480,8 @@ def _update_options(model): try: existing_value = existing_options[name] except KeyError: - raise KeyError("Invalid table option: '%s'; known options: %s" % (name, existing_options.keys())) + msg = log_msg("Invalid table option: '%s'; known options: %s", keyspace=ks_name, connection=connection) + raise KeyError(msg % (name, existing_options.keys())) if isinstance(existing_value, six.string_types): if value != existing_value: update_options[name] = value @@ -441,13 +497,13 @@ def _update_options(model): if update_options: options = ' AND '.join(metadata.TableMetadataV3._make_option_strings(update_options)) query = "ALTER TABLE {0} WITH {1}".format(model.column_family_name(), options) - execute(query) + execute(query, connection=connection) return True return False -def drop_table(model, keyspaces=None): +def drop_table(model, keyspaces=None, connections=None): """ Drops the table indicated by the model, if it exists. @@ -459,29 +515,24 @@ def drop_table(model, keyspaces=None): *There are plans to guard schema-modifying functions with an environment-driven conditional.* """ - if keyspaces: - if not isinstance(keyspaces, (list, tuple)): - raise ValueError('keyspaces must be a list or a tuple.') - - for keyspace in keyspaces: - with query.ContextQuery(model, keyspace=keyspace) as m: - _drop_table(m) - else: - _drop_table(model) + context = get_context(keyspaces, connections) + for connection, keyspace in context: + with query.ContextQuery(model, keyspace=keyspace) as m: + _drop_table(m, connection=connection) -def _drop_table(model): +def _drop_table(model, connection=None): if not _allow_schema_modification(): return # don't try to delete non existant tables - meta = get_cluster().metadata + meta = get_cluster(connection).metadata ks_name = model._get_keyspace() raw_cf_name = model._raw_column_family_name() try: meta.keyspaces[ks_name].tables[raw_cf_name] - execute('DROP TABLE {0};'.format(model.column_family_name())) + execute('DROP TABLE {0};'.format(model.column_family_name()), connection=connection) except KeyError: pass diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 3fc45fcb38..0eb95a6900 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -290,7 +290,7 @@ def __init__(self, model, keyspace=None): raise CQLEngineException("Models must be derived from base Model.") ks = keyspace if keyspace else model.__keyspace__ - new_type = type(model.__name__, (model,), {'__keyspace__': ks}) + new_type = type(model.__name__, (model,), {'__keyspace__': ks, '__abstract__': model.__abstract__}) self.model = new_type @@ -972,7 +972,7 @@ def using(self, keyspace=None): clone = copy.deepcopy(self) if keyspace: - new_type = type(self.model.__name__, (self.model,), {'__keyspace__': keyspace}) + new_type = type(self.model.__name__, (self.model,), {'__keyspace__': keyspace, '__abstract__': self.model.__abstract__}) clone.model = new_type return clone diff --git a/cassandra/cqlengine/usertype.py b/cassandra/cqlengine/usertype.py index 1d62e6beaf..adf3f5e95b 100644 --- a/cassandra/cqlengine/usertype.py +++ b/cassandra/cqlengine/usertype.py @@ -4,7 +4,7 @@ from cassandra.util import OrderedDict from cassandra.cqlengine import CQLEngineException from cassandra.cqlengine import columns -from cassandra.cqlengine import connection +from cassandra.cqlengine import connection as conn from cassandra.cqlengine import models @@ -112,8 +112,8 @@ def items(self): return [(k, self[k]) for k in self] @classmethod - def register_for_keyspace(cls, keyspace): - connection.register_udt(keyspace, cls.type_name(), cls) + def register_for_keyspace(cls, keyspace, connection=None): + conn.register_udt(keyspace, cls.type_name(), cls, connection=connection) @classmethod def type_name(cls): From 2d8374a2b67622e940737dd101b49855c312076f Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 24 Aug 2016 11:47:16 -0500 Subject: [PATCH 0852/2005] PYTHON-106 adding test for UDTs and protocol v1/v2 --- tests/integration/standard/test_metadata.py | 39 ++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 598dd83971..4cd0d37c5e 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -35,7 +35,7 @@ from tests.integration import get_cluster, use_singledc, PROTOCOL_VERSION, get_server_versions, execute_until_pass, \ BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, BasicExistingKeyspaceUnitTestCase, drop_keyspace_shutdown_cluster, CASSANDRA_VERSION, \ - BasicExistingSegregatedKeyspaceUnitTestCase, dseonly, DSE_VERSION + BasicExistingSegregatedKeyspaceUnitTestCase, dseonly, DSE_VERSION, get_supported_protocol_versions def setup_module(): @@ -712,6 +712,43 @@ def test_refresh_user_type_metadata(self): cluster2.shutdown() + def test_refresh_user_type_metadata_proto_2(self): + """ + Test to insure that protocol v1/v2 surface UDT metadata changes + + @since 3.7.0 + @jira_ticket PYTHON-106 + @expected_result UDT metadata in the keyspace should be updated regardless of protocol version + + @test_category metadata + """ + if 1 not in get_supported_protocol_versions() and 2 not in get_supported_protocol_versions(): + raise unittest.SkipTest("Protocol 1 or 2 is not support in Cassandra version ".format(CASSANDRA_VERSION)) + cluster2 = None + if 2 in get_supported_protocol_versions(): + cluster2 = Cluster(protocol_version=2) + session2 = cluster2.connect() + cluster1 = None + if 1 in get_supported_protocol_versions(): + cluster1 = Cluster(protocol_version=1) + session1 = cluster1.connect() + + if cluster2: + self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].user_types, {}) + session2.execute("CREATE TYPE {0}.user (age int, name text)".format(self.keyspace_name)) + self.assertIn("user", cluster2.metadata.keyspaces[self.keyspace_name].user_types) + session2.execute("DROP TYPE {0}.user".format(self.keyspace_name)) + self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].user_types, {}) + cluster2.shutdown() + + if cluster1: + self.assertEqual(cluster1.metadata.keyspaces[self.keyspace_name].user_types, {}) + session1.execute("CREATE TYPE {0}.user (age int, name text)".format(self.keyspace_name)) + self.assertIn("user", cluster1.metadata.keyspaces[self.keyspace_name].user_types) + session1.execute("DROp TYPE {0}.user".format(self.keyspace_name)) + self.assertEqual(cluster1.metadata.keyspaces[self.keyspace_name].user_types, {}) + cluster1.shutdown() + def test_refresh_user_function_metadata(self): """ test for synchronously refreshing UDF metadata in keyspace From e93fbcc5de3e7bffe301bad8b16ce7ee4ad5e685 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 25 Aug 2016 07:24:39 -0400 Subject: [PATCH 0853/2005] add connection support via using() to QuerySet and Model --- cassandra/cqlengine/connection.py | 24 +++++++---- cassandra/cqlengine/management.py | 40 +++++++------------ cassandra/cqlengine/models.py | 40 +++++++++++++++++++ cassandra/cqlengine/named.py | 11 ++++- cassandra/cqlengine/query.py | 39 +++++++++++------- .../cqlengine/query/test_queryoperators.py | 1 - 6 files changed, 106 insertions(+), 49 deletions(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 1a8922a862..a59f250c90 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -38,6 +38,16 @@ udt_by_keyspace = defaultdict(dict) +def format_log_context(msg, connection=None, keyspace=None): + """Format log message to add keyspace and connection context""" + connection_info = connection if connection else DEFAULT_CONNECTION + if keyspace: + msg = '[Connection: {0}, Keyspace: {1}] {2}'.format(connection_info, keyspace, msg) + else: + msg = '[Connection: {0}] {1}'.format(connection_info, msg) + return msg + + class UndefinedKeyspaceException(CQLEngineException): pass @@ -79,10 +89,10 @@ def setup(self): self.cluster = Cluster(self.hosts, **self.cluster_options) try: self.session = self.cluster.connect() - log.debug("cqlengine connection '{0}' initialized with internally created session".format(self.name)) + log.debug(format_log_context("connection initialized with internally created session", connection=self.name)) except NoHostAvailable: if self.retry_connect: - log.warning("connect failed for '{0}', setting up for re-attempt on first use".format(self.name)) + log.warning(format_log_context("connect failed, setting up for re-attempt on first use", connection=self.name)) self.lazy_connect = True raise @@ -108,7 +118,7 @@ def handle_lazy_connect(self): # lazy_connect might have been set to False by another thread while waiting the lock # In this case, do nothing. if self.lazy_connect: - log.debug("Lazy connect for connection '{0}'".format(self.name)) + log.debug(format_log_context("Lazy connect for connection", connection=self.name)) self.lazy_connect = False self.setup() @@ -154,7 +164,7 @@ def default(): try: conn = get_connection() if conn.session: - log.warning("configuring new connection for cqlengine when one was already set") + log.warning("configuring new default connection for cqlengine when one was already set") except: pass @@ -175,7 +185,7 @@ def set_session(s): conn = get_connection() if conn.session: - log.warning("configuring new connection for cqlengine when one was already set") + log.warning("configuring new default connection for cqlengine when one was already set") if s.row_factory is not dict_factory: raise CQLEngineException("Failed to initialize: 'Session.row_factory' must be 'dict_factory'.") @@ -189,7 +199,7 @@ def set_session(s): conn.setup_session() - log.debug("cqlengine connection initialized with %s", s) + log.debug("cqlengine default connection initialized with %s", s) def setup( @@ -232,7 +242,7 @@ def execute(query, params=None, consistency_level=None, timeout=NOT_SET, connect elif isinstance(query, six.string_types): query = SimpleStatement(query, consistency_level=consistency_level) - log.debug(query.query_string) + log.debug(format_log_context(query.query_string, connection=connection)) result = conn.session.execute(query, params, timeout=timeout) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 4a8ad5a32b..b53c15b8cb 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -23,7 +23,7 @@ from cassandra import metadata from cassandra.cqlengine import CQLEngineException from cassandra.cqlengine import columns, query -from cassandra.cqlengine.connection import execute, get_cluster, DEFAULT_CONNECTION +from cassandra.cqlengine.connection import execute, get_cluster, format_log_context from cassandra.cqlengine.models import Model from cassandra.cqlengine.named import NamedTable from cassandra.cqlengine.usertype import UserType @@ -55,16 +55,6 @@ def get_context(keyspaces, connections): return product(connections, keyspaces) -def log_msg(msg, connection=None, keyspace=None): - """Format log message to add keyspace and connection context""" - connection_info = connection if connection else DEFAULT_CONNECTION - if keyspace: - msg = '[Connection: {0}, Keyspace: {1}] {2}'.format(connection_info, keyspace, msg) - else: - msg = '[Connection: {0}] {1}'.format(connection_info, msg) - return msg - - def create_keyspace_simple(name, replication_factor, durable_writes=True, connections=None): """ Creates a keyspace with SimpleStrategy for replica placement @@ -116,11 +106,11 @@ def __create_keyspace(name, durable_writes, strategy_class, strategy_options, co cluster = get_cluster(connection) if name not in cluster.metadata.keyspaces: - log.info(log_msg("Creating keyspace %s", connection=connection), name) + log.info(format_log_context("Creating keyspace %s", connection=connection), name) ks_meta = metadata.KeyspaceMetadata(name, durable_writes, strategy_class, strategy_options) execute(ks_meta.as_cql_query(), connection=connection) else: - log.info(log_msg("Not creating keyspace %s because it already exists", connection=connection), name) + log.info(format_log_context("Not creating keyspace %s because it already exists", connection=connection), name) if connections: for connection in connections: @@ -216,7 +206,7 @@ def _sync_table(model, connection=None): try: keyspace = cluster.metadata.keyspaces[ks_name] except KeyError: - msg = log_msg("Keyspace '{0}' for model {1} does not exist.", connection=connection) + msg = format_log_context("Keyspace '{0}' for model {1} does not exist.", connection=connection) raise CQLEngineException(msg.format(ks_name, model)) tables = keyspace.tables @@ -229,7 +219,7 @@ def _sync_table(model, connection=None): _sync_type(ks_name, udt, syncd_types, connection=connection) if raw_cf_name not in tables: - log.debug(log_msg("sync_table creating new table %s", keyspace=ks_name, connection=connection), cf_name) + log.debug(format_log_context("sync_table creating new table %s", keyspace=ks_name, connection=connection), cf_name) qs = _get_create_table(model) try: @@ -240,7 +230,7 @@ def _sync_table(model, connection=None): if "Cannot add already existing column family" not in unicode(ex): raise else: - log.debug(log_msg("sync_table checking existing table %s", keyspace=ks_name, connection=connection), cf_name) + log.debug(format_log_context("sync_table checking existing table %s", keyspace=ks_name, connection=connection), cf_name) table_meta = tables[raw_cf_name] _validate_pk(model, table_meta) @@ -254,7 +244,7 @@ def _sync_table(model, connection=None): if db_name in table_columns: col_meta = table_columns[db_name] if col_meta.cql_type != col.db_type: - msg = log_msg('Existing table {0} has column "{1}" with a type ({2}) differing from the model type ({3}).' + msg = format_log_context('Existing table {0} has column "{1}" with a type ({2}) differing from the model type ({3}).' ' Model should be updated.', keyspace=ks_name, connection=connection) msg = msg.format(cf_name, db_name, col_meta.cql_type, col.db_type) warnings.warn(msg) @@ -263,7 +253,7 @@ def _sync_table(model, connection=None): continue if col.primary_key or col.primary_key: - msg = log_msg("Cannot add primary key '{0}' (with db_field '{1}') to existing table {2}", keyspace=ks_name, connection=connection) + msg = format_log_context("Cannot add primary key '{0}' (with db_field '{1}') to existing table {2}", keyspace=ks_name, connection=connection) raise CQLEngineException(msg.format(model_name, db_name, cf_name)) query = "ALTER TABLE {0} add {1}".format(cf_name, col.get_column_def()) @@ -271,7 +261,7 @@ def _sync_table(model, connection=None): db_fields_not_in_model = model_fields.symmetric_difference(table_columns) if db_fields_not_in_model: - msg = log_msg("Table {0} has fields not referenced by model: {1}", keyspace=ks_name, connection=connection) + msg = format_log_context("Table {0} has fields not referenced by model: {1}", keyspace=ks_name, connection=connection) log.info(msg.format(cf_name, db_fields_not_in_model)) _update_options(model, connection=connection) @@ -349,7 +339,7 @@ def _sync_type(ks_name, type_model, omit_subtypes=None, connection=None): defined_types = keyspace.user_types if type_name not in defined_types: - log.debug(log_msg("sync_type creating new type %s", keyspace=ks_name, connection=connection), type_name_qualified) + log.debug(format_log_context("sync_type creating new type %s", keyspace=ks_name, connection=connection), type_name_qualified) cql = get_create_type(type_model, ks_name) execute(cql, connection=connection) cluster.refresh_user_type_metadata(ks_name, type_name) @@ -365,7 +355,7 @@ def _sync_type(ks_name, type_model, omit_subtypes=None, connection=None): else: field_type = type_meta.field_types[defined_fields.index(field.db_field_name)] if field_type != field.db_type: - msg = log_msg('Existing user type {0} has field "{1}" with a type ({2}) differing from the model user type ({3}).' + msg = format_log_context('Existing user type {0} has field "{1}" with a type ({2}) differing from the model user type ({3}).' ' UserType should be updated.', keyspace=ks_name, connection=connection) msg = msg.format(type_name_qualified, field.db_field_name, field_type, field.db_type) warnings.warn(msg) @@ -374,12 +364,12 @@ def _sync_type(ks_name, type_model, omit_subtypes=None, connection=None): type_model.register_for_keyspace(ks_name, connection=connection) if len(defined_fields) == len(model_fields): - log.info(log_msg("Type %s did not require synchronization", keyspace=ks_name, connection=connection), type_name_qualified) + log.info(format_log_context("Type %s did not require synchronization", keyspace=ks_name, connection=connection), type_name_qualified) return db_fields_not_in_model = model_fields.symmetric_difference(defined_fields) if db_fields_not_in_model: - msg = log_msg("Type %s has fields not referenced by model: %s", keyspace=ks_name, connection=connection) + msg = format_log_context("Type %s has fields not referenced by model: %s", keyspace=ks_name, connection=connection) log.info(msg, type_name_qualified, db_fields_not_in_model) @@ -464,7 +454,7 @@ def _update_options(model, connection=None): :rtype: bool """ ks_name = model._get_keyspace() - msg = log_msg("Checking %s for option differences", keyspace=ks_name, connection=connection) + msg = format_log_context("Checking %s for option differences", keyspace=ks_name, connection=connection) log.debug(msg, model) model_options = model.__options__ or {} @@ -480,7 +470,7 @@ def _update_options(model, connection=None): try: existing_value = existing_options[name] except KeyError: - msg = log_msg("Invalid table option: '%s'; known options: %s", keyspace=ks_name, connection=connection) + msg = format_log_context("Invalid table option: '%s'; known options: %s", keyspace=ks_name, connection=connection) raise KeyError(msg % (name, existing_options.keys())) if isinstance(existing_value, six.string_types): if value != existing_value: diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 41dfc77770..a7caa7131c 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -231,6 +231,25 @@ def __call__(self, *args, **kwargs): raise NotImplementedError +class UsingDescriptor(object): + """ + return a query set descriptor with a connection context specified + """ + def __get__(self, instance, model): + if instance: + # instance method + def using_setter(connection=None): + if connection: + instance._connection = connection + return instance + return using_setter + + return model.objects.using + + def __call__(self, *args, **kwargs): + raise NotImplementedError + + class ColumnQueryEvaluator(query.AbstractQueryableColumn): """ Wraps a column and allows it to be used in comparator @@ -323,6 +342,8 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): if_exists = IfExistsDescriptor() + using = UsingDescriptor() + # _len is lazily created by __len__ __table_name__ = None @@ -331,6 +352,8 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): __keyspace__ = None + __connection__ = None + __discriminator_value__ = None __options__ = None @@ -351,6 +374,8 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): _table_name = None # used internally to cache a derived table name + _connection = None + def __init__(self, **values): self._ttl = None self._timestamp = None @@ -358,6 +383,7 @@ def __init__(self, **values): self._batch = None self._timeout = connection.NOT_SET self._is_persisted = False + self._connection = None self._values = {} for name, column in self._columns.items(): @@ -774,6 +800,15 @@ def _inst_batch(self, batch): batch = hybrid_classmethod(_class_batch, _inst_batch) + @classmethod + def _class_get_connection(cls): + return cls.__connection__ + + def _inst_get_connection(self): + return self._connection or self.__connection__ + + _get_connection = hybrid_classmethod(_class_get_connection, _inst_get_connection) + class ModelMetaClass(type): @@ -1002,6 +1037,11 @@ class Model(BaseModel): Sets the name of the keyspace used by this model. """ + __connection__ = None + """ + Sets the name of the default connection used by this model. + """ + __options__ = None """ *Optional* Table options applied with this model diff --git a/cassandra/cqlengine/named.py b/cassandra/cqlengine/named.py index 07b4c50b61..14d14c402e 100644 --- a/cassandra/cqlengine/named.py +++ b/cassandra/cqlengine/named.py @@ -17,6 +17,7 @@ from cassandra.cqlengine import CQLEngineException from cassandra.cqlengine.columns import Column from cassandra.cqlengine.connection import get_cluster +from cassandra.cqlengine.models import UsingDescriptor, BaseModel from cassandra.cqlengine.query import AbstractQueryableColumn, SimpleQuerySet from cassandra.cqlengine.query import DoesNotExist as _DoesNotExist from cassandra.cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned @@ -86,6 +87,13 @@ class NamedTable(object): _partition_key_index = None + __connection__ = None + _connection = None + + using = UsingDescriptor() + + _get_connection = BaseModel._get_connection + class DoesNotExist(_DoesNotExist): pass @@ -95,6 +103,7 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): def __init__(self, keyspace, name): self.keyspace = keyspace self.name = name + self._connection = None @property def _partition_keys(self): @@ -104,7 +113,7 @@ def _partition_keys(self): def _get_partition_keys(self): try: - table_meta = get_cluster().metadata.keyspaces[self.keyspace].tables[self.name] + table_meta = get_cluster(self._get_connection()).metadata.keyspaces[self.keyspace].tables[self.name] self.__partition_keys = OrderedDict((pk.name, Column(primary_key=True, partition_key=True, db_field=pk.name)) for pk in table_meta.partition_key) except Exception as e: raise CQLEngineException("Failed inspecting partition keys for {0}." diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 0eb95a6900..8752e40cc7 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -21,7 +21,7 @@ from cassandra.query import SimpleStatement from cassandra.cqlengine import columns, CQLEngineException, ValidationError, UnicodeMixin -from cassandra.cqlengine import connection +from cassandra.cqlengine import connection as conn from cassandra.cqlengine.functions import Token, BaseQueryFunction, QueryValue from cassandra.cqlengine.operators import (InOperator, EqualsOperator, GreaterThanOperator, GreaterThanOrEqualOperator, LessThanOperator, @@ -144,7 +144,7 @@ class BatchQuery(object): _consistency = None def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on_exception=False, - timeout=connection.NOT_SET): + timeout=conn.NOT_SET): """ :param batch_type: (optional) One of batch type values available through BatchType enum :type batch_type: str or None @@ -244,7 +244,7 @@ def execute(self): query_list.append('APPLY BATCH;') - tmp = connection.execute('\n'.join(query_list), parameters, self._consistency, self._timeout) + tmp = conn.execute('\n'.join(query_list), parameters, self._consistency, self._timeout) check_applied(tmp) self.queries = [] @@ -289,10 +289,12 @@ def __init__(self, model, keyspace=None): if not issubclass(model, models.Model): raise CQLEngineException("Models must be derived from base Model.") - ks = keyspace if keyspace else model.__keyspace__ - new_type = type(model.__name__, (model,), {'__keyspace__': ks, '__abstract__': model.__abstract__}) + self.model = model - self.model = new_type + if keyspace: + ks = keyspace + new_type = type(model.__name__, (model,), {'__keyspace__': ks}) + self.model = new_type def __enter__(self): return self.model @@ -345,9 +347,10 @@ def __init__(self, model): self._consistency = None self._timestamp = None self._if_not_exists = False - self._timeout = connection.NOT_SET + self._timeout = conn.NOT_SET self._if_exists = False self._fetch_size = None + self._connection = None @property def column_family_name(self): @@ -357,7 +360,7 @@ def _execute(self, statement): if self._batch: return self._batch.add_query(statement) else: - result = _execute_statement(self.model, statement, self._consistency, self._timeout) + result = _execute_statement(self.model, statement, self._consistency, self._timeout, connection=self._connection) if self._if_not_exists or self._if_exists or self._conditional: check_applied(result) return result @@ -928,6 +931,7 @@ def create(self, **kwargs): .if_not_exists(self._if_not_exists) \ .timestamp(self._timestamp) \ .if_exists(self._if_exists) \ + .using(connection=self._connection) \ .save() def delete(self): @@ -965,16 +969,19 @@ def timeout(self, timeout): clone._timeout = timeout return clone - def using(self, keyspace=None): + def using(self, keyspace=None, connection=None): """ Change the context on-the-fly of the Model class (connection, keyspace) """ clone = copy.deepcopy(self) if keyspace: - new_type = type(self.model.__name__, (self.model,), {'__keyspace__': keyspace, '__abstract__': self.model.__abstract__}) + new_type = type(self.model.__name__, (self.model,), {'__keyspace__': keyspace}) clone.model = new_type + if connection: + clone._connection = connection + return clone @@ -1261,7 +1268,7 @@ class DMLQuery(object): _if_exists = False def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, timestamp=None, - if_not_exists=False, conditional=None, timeout=connection.NOT_SET, if_exists=False): + if_not_exists=False, conditional=None, timeout=conn.NOT_SET, if_exists=False): self.model = model self.column_family_name = self.model.column_family_name() self.instance = instance @@ -1278,7 +1285,8 @@ def _execute(self, statement): if self._batch: return self._batch.add_query(statement) else: - results = _execute_statement(self.model, statement, self._consistency, self._timeout) + connection = self.instance._get_connection() if self.instance else self.model._get_connection() + results = _execute_statement(self.model, statement, self._consistency, self._timeout, connection=connection) if self._if_not_exists or self._if_exists or self._conditional: check_applied(results) return results @@ -1419,13 +1427,14 @@ def delete(self): self._execute(ds) -def _execute_statement(model, statement, consistency_level, timeout): +def _execute_statement(model, statement, consistency_level, timeout, connection=None): params = statement.get_context() s = SimpleStatement(str(statement), consistency_level=consistency_level, fetch_size=statement.fetch_size) if model._partition_key_index: key_values = statement.partition_key_values(model._partition_key_index) if not any(v is None for v in key_values): - parts = model._routing_key_from_values(key_values, connection.get_cluster().protocol_version) + parts = model._routing_key_from_values(key_values, conn.get_cluster(connection).protocol_version) s.routing_key = parts s.keyspace = model._get_keyspace() - return connection.execute(s, params, timeout=timeout) + connection = connection if connection else model._get_connection() + return conn.execute(s, params, timeout=timeout, connection=connection) diff --git a/tests/integration/cqlengine/query/test_queryoperators.py b/tests/integration/cqlengine/query/test_queryoperators.py index 055e8f3db2..5741b5a2d7 100644 --- a/tests/integration/cqlengine/query/test_queryoperators.py +++ b/tests/integration/cqlengine/query/test_queryoperators.py @@ -156,4 +156,3 @@ def test_named_table_pk_token_function(self): self.assertTrue(len(first_page) is 1) next_page = list(query.filter(pk__token__gt=functions.Token(last.key))) self.assertTrue(len(next_page) is 1) - From 894b9be1b07f5bf75e9bcc1b783263acda36debe Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 25 Aug 2016 10:27:15 -0400 Subject: [PATCH 0854/2005] Add connection support to BatchQuery --- cassandra/cqlengine/query.py | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 8752e40cc7..2fe3bb9b2e 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -143,8 +143,12 @@ class BatchQuery(object): _consistency = None + _connection = None + _connection_explicit = False + + def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on_exception=False, - timeout=conn.NOT_SET): + timeout=conn.NOT_SET, connection=None): """ :param batch_type: (optional) One of batch type values available through BatchType enum :type batch_type: str or None @@ -161,6 +165,7 @@ def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on :param timeout: (optional) Timeout for the entire batch (in seconds), if not specified fallback to default session timeout :type timeout: float or None + :param str connection: Connection name to use for the batch execution """ self.queries = [] self.batch_type = batch_type @@ -173,6 +178,9 @@ def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on self._callbacks = [] self._executed = False self._context_entered = False + self._connection = connection + if connection: + self._connection_explicit = True def add_query(self, query): if not isinstance(query, BaseCQLStatement): @@ -244,7 +252,7 @@ def execute(self): query_list.append('APPLY BATCH;') - tmp = conn.execute('\n'.join(query_list), parameters, self._consistency, self._timeout) + tmp = conn.execute('\n'.join(query_list), parameters, self._consistency, self._timeout, connection=self._connection) check_applied(tmp) self.queries = [] @@ -544,6 +552,9 @@ def batch(self, batch_obj): Note: running a select query with a batch object will raise an exception """ + if self._connection: + raise CQLEngineException("Cannot specify the connection on model in batch mode.") + if batch_obj is not None and not isinstance(batch_obj, BatchQuery): raise CQLEngineException('batch_obj must be a BatchQuery instance or None') clone = copy.deepcopy(self) @@ -974,6 +985,9 @@ def using(self, keyspace=None, connection=None): Change the context on-the-fly of the Model class (connection, keyspace) """ + if connection and self._batch: + raise CQLEngineException("Cannot specify a connection on model in batch mode.") + clone = copy.deepcopy(self) if keyspace: new_type = type(self.model.__name__, (self.model,), {'__keyspace__': keyspace}) @@ -1282,10 +1296,17 @@ def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, self._timeout = timeout def _execute(self, statement): + connection = self.instance._get_connection() if self.instance else self.model._get_connection() if self._batch: + if self._batch._connection: + if not self._batch._connection_explicit and connection and \ + connection != self._batch._connection: + raise CQLEngineException('BatchQuery queries must be executed on the same connection') + else: + # set the BatchQuery connection from the model + self._batch._connection = connection return self._batch.add_query(statement) else: - connection = self.instance._get_connection() if self.instance else self.model._get_connection() results = _execute_statement(self.model, statement, self._consistency, self._timeout, connection=connection) if self._if_not_exists or self._if_exists or self._conditional: check_applied(results) From acd5f746d2f7586d94b8d96d2873aace1fdc5bf9 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 25 Aug 2016 10:28:54 -0500 Subject: [PATCH 0855/2005] PYTHON-200 adding paging state test --- .../integration/standard/test_query_paging.py | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/tests/integration/standard/test_query_paging.py b/tests/integration/standard/test_query_paging.py index fdad1bc3ee..48e257a90d 100644 --- a/tests/integration/standard/test_query_paging.py +++ b/tests/integration/standard/test_query_paging.py @@ -69,6 +69,34 @@ def test_paging(self): self.assertEqual(100, len(list(self.session.execute(prepared)))) + def test_paging_state(self): + """ + Test to validate paging state api + @since 3.7.0 + @jira_ticket PYTHON-200 + @expected_result paging state should returned should be accurate, and allow for queries to be resumed. + + @test_category queries + """ + statements_and_params = zip(cycle(["INSERT INTO test3rf.test (k, v) VALUES (%s, 0)"]), + [(i, ) for i in range(100)]) + execute_concurrent(self.session, list(statements_and_params)) + + list_all_results = [] + self.session.default_fetch_size = 3 + + result_set = self.session.execute("SELECT * FROM test3rf.test") + while(result_set.has_more_pages): + for row in result_set.current_rows: + self.assertNotIn(row, list_all_results) + list_all_results.extend(result_set.current_rows) + page_state = result_set.paging_state + result_set = self.session.execute("SELECT * FROM test3rf.test", paging_state=page_state) + + if(len(result_set.current_rows) > 0): + list_all_results.append(result_set.current_rows) + self.assertEqual(len(list_all_results), 100) + def test_paging_verify_writes(self): statements_and_params = zip(cycle(["INSERT INTO test3rf.test (k, v) VALUES (%s, 0)"]), [(i, ) for i in range(100)]) From a334ae51efcc70f919a65b33dcd235866bb928eb Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 25 Aug 2016 12:58:01 -0500 Subject: [PATCH 0856/2005] fix removing last user type in proto [1,2] PYTHON-106 --- cassandra/metadata.py | 2 +- tests/integration/standard/test_metadata.py | 47 ++++++++++----------- 2 files changed, 23 insertions(+), 26 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 735d718fbc..67b6b85090 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -168,7 +168,7 @@ def _update_keyspace(self, keyspace_meta, new_user_types=None): self.keyspaces[ks_name] = keyspace_meta if old_keyspace_meta: keyspace_meta.tables = old_keyspace_meta.tables - keyspace_meta.user_types = new_user_types or old_keyspace_meta.user_types + keyspace_meta.user_types = new_user_types if new_user_types is not None else old_keyspace_meta.user_types keyspace_meta.indexes = old_keyspace_meta.indexes keyspace_meta.functions = old_keyspace_meta.functions keyspace_meta.aggregates = old_keyspace_meta.aggregates diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 4cd0d37c5e..3031ce1d8b 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -722,32 +722,29 @@ def test_refresh_user_type_metadata_proto_2(self): @test_category metadata """ - if 1 not in get_supported_protocol_versions() and 2 not in get_supported_protocol_versions(): - raise unittest.SkipTest("Protocol 1 or 2 is not support in Cassandra version ".format(CASSANDRA_VERSION)) - cluster2 = None - if 2 in get_supported_protocol_versions(): - cluster2 = Cluster(protocol_version=2) - session2 = cluster2.connect() - cluster1 = None - if 1 in get_supported_protocol_versions(): - cluster1 = Cluster(protocol_version=1) - session1 = cluster1.connect() - - if cluster2: - self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].user_types, {}) - session2.execute("CREATE TYPE {0}.user (age int, name text)".format(self.keyspace_name)) - self.assertIn("user", cluster2.metadata.keyspaces[self.keyspace_name].user_types) - session2.execute("DROP TYPE {0}.user".format(self.keyspace_name)) - self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].user_types, {}) - cluster2.shutdown() + supported_versions = get_supported_protocol_versions() + if 2 not in supported_versions: # 1 and 2 were dropped in the same version + raise unittest.SkipTest("Protocol versions 1 and 2 are not supported in Cassandra version ".format(CASSANDRA_VERSION)) + + for protocol_version in (1, 2): + cluster = Cluster(protocol_version=protocol_version) + session = cluster.connect() + self.assertEqual(cluster.metadata.keyspaces[self.keyspace_name].user_types, {}) + + session.execute("CREATE TYPE {0}.user (age int, name text)".format(self.keyspace_name)) + self.assertIn("user", cluster.metadata.keyspaces[self.keyspace_name].user_types) + self.assertIn("age", cluster.metadata.keyspaces[self.keyspace_name].user_types["user"].field_names) + self.assertIn("name", cluster.metadata.keyspaces[self.keyspace_name].user_types["user"].field_names) + + session.execute("ALTER TYPE {0}.user ADD flag boolean".format(self.keyspace_name)) + self.assertIn("flag", cluster.metadata.keyspaces[self.keyspace_name].user_types["user"].field_names) + + session.execute("ALTER TYPE {0}.user RENAME flag TO something".format(self.keyspace_name)) + self.assertIn("something", cluster.metadata.keyspaces[self.keyspace_name].user_types["user"].field_names) - if cluster1: - self.assertEqual(cluster1.metadata.keyspaces[self.keyspace_name].user_types, {}) - session1.execute("CREATE TYPE {0}.user (age int, name text)".format(self.keyspace_name)) - self.assertIn("user", cluster1.metadata.keyspaces[self.keyspace_name].user_types) - session1.execute("DROp TYPE {0}.user".format(self.keyspace_name)) - self.assertEqual(cluster1.metadata.keyspaces[self.keyspace_name].user_types, {}) - cluster1.shutdown() + session.execute("DROP TYPE {0}.user".format(self.keyspace_name)) + self.assertEqual(cluster.metadata.keyspaces[self.keyspace_name].user_types, {}) + cluster.shutdown() def test_refresh_user_function_metadata(self): """ From a0f86f1cdcd6ec445888ebe588e6db61062ce998 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 25 Aug 2016 15:03:30 -0400 Subject: [PATCH 0857/2005] Fix class model copy --- cassandra/cqlengine/models.py | 8 ++++++++ cassandra/cqlengine/query.py | 8 ++++---- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index a7caa7131c..ef2ecea5b4 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -29,6 +29,14 @@ log = logging.getLogger(__name__) +def _copy_model_class(model, attrs): + new_type = type(model.__name__, (model,), attrs) + new_type.__abstract__ = model.__abstract__ + new_type.__discriminator_value__ = model.__discriminator_value__ + new_type.__default_ttl__ = model.__default_ttl__ + return new_type + + class ModelException(CQLEngineException): pass diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 2fe3bb9b2e..48c8b89b88 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -300,9 +300,9 @@ def __init__(self, model, keyspace=None): self.model = model if keyspace: + from cassandra.cqlengine.models import _copy_model_class ks = keyspace - new_type = type(model.__name__, (model,), {'__keyspace__': ks}) - self.model = new_type + self.model = _copy_model_class(model, {'__keyspace__': ks}) def __enter__(self): return self.model @@ -990,8 +990,8 @@ def using(self, keyspace=None, connection=None): clone = copy.deepcopy(self) if keyspace: - new_type = type(self.model.__name__, (self.model,), {'__keyspace__': keyspace}) - clone.model = new_type + from cassandra.cqlengine.models import _copy_model_class + clone.model = type(self.model, {'__keyspace__': keyspace}) if connection: clone._connection = connection From da10c634611f725e24bbf99f8ff56b4152f74cee Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 25 Aug 2016 16:22:57 -0400 Subject: [PATCH 0858/2005] connection support in ContextQuery --- cassandra/cqlengine/models.py | 2 +- cassandra/cqlengine/query.py | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index ef2ecea5b4..306b07bdcb 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -810,7 +810,7 @@ def _inst_batch(self, batch): @classmethod def _class_get_connection(cls): - return cls.__connection__ + return cls._connection or cls.__connection__ def _inst_get_connection(self): return self._connection or self.__connection__ diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 48c8b89b88..a7dbf3be3b 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -287,7 +287,7 @@ class ContextQuery(object): """ - def __init__(self, model, keyspace=None): + def __init__(self, model, keyspace=None, connection=None): """ :param model: A model. This should be a class type, not an instance. :param keyspace: (optional) A keyspace name @@ -304,10 +304,14 @@ def __init__(self, model, keyspace=None): ks = keyspace self.model = _copy_model_class(model, {'__keyspace__': ks}) + if connection: + self.model._connection = connection + def __enter__(self): return self.model def __exit__(self, exc_type, exc_val, exc_tb): + self.model._connection = None return @@ -991,7 +995,7 @@ def using(self, keyspace=None, connection=None): clone = copy.deepcopy(self) if keyspace: from cassandra.cqlengine.models import _copy_model_class - clone.model = type(self.model, {'__keyspace__': keyspace}) + clone.model = _copy_model_class(self.model, {'__keyspace__': keyspace}) if connection: clone._connection = connection From e5d27eb5c3eddc1cf4e0dff1171f95dd2eae097c Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 25 Aug 2016 17:37:11 -0400 Subject: [PATCH 0859/2005] Add multiple models support to ContextQuery --- cassandra/cqlengine/query.py | 49 +++++++++++++++++++++++++----------- 1 file changed, 35 insertions(+), 14 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index a7dbf3be3b..38abe7c72a 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -285,33 +285,54 @@ class ContextQuery(object): with ContextQuery(Automobile, keyspace='test4') as A: print len(A.objects.all()) # 0 result + # Multiple models + with ContextQuery(Automobile, Automobile2, connection='cluster2') as (A, A2): + print len(A.objects.all()) + print len(A2.objects.all()) + """ - def __init__(self, model, keyspace=None, connection=None): + def __init__(self, *args, **kwargs): """ - :param model: A model. This should be a class type, not an instance. - :param keyspace: (optional) A keyspace name + :param *args: One or more models. A model should be a class type, not an instance. + :param **kwargs: (optional) Context parameters: can be keyspace or connection """ from cassandra.cqlengine import models - if not issubclass(model, models.Model): - raise CQLEngineException("Models must be derived from base Model.") + self.models = [] - self.model = model + if len(args) < 1: + raise CQLEngineException("No model provided.") - if keyspace: - from cassandra.cqlengine.models import _copy_model_class - ks = keyspace - self.model = _copy_model_class(model, {'__keyspace__': ks}) + keyspace = kwargs.pop('keyspace', None) + connection = kwargs.pop('connection', None) - if connection: - self.model._connection = connection + if kwargs: + raise CQLEngineException("Unknown keyword argument(s): {0}".format( + ','.join(kwargs.keys()))) + + for model in args: + if not issubclass(model, models.Model): + raise CQLEngineException("Models must be derived from base Model.") + + m = copy.deepcopy(model) if not keyspace else None + + if keyspace: + from cassandra.cqlengine.models import _copy_model_class + ks = keyspace + m = _copy_model_class(model, {'__keyspace__': ks}) + + if connection: + m._connection = connection + + self.models.append(m) def __enter__(self): - return self.model + if len(self.models) > 1: + return tuple(self.models) + return self.models[0] def __exit__(self, exc_type, exc_val, exc_tb): - self.model._connection = None return From d8739374c4718fbb7abb9aef94535f5390e02b36 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 25 Aug 2016 20:01:03 -0400 Subject: [PATCH 0860/2005] Ensure the Class is properly cloned in the ContextQuery --- cassandra/cqlengine/models.py | 4 ++-- cassandra/cqlengine/query.py | 13 +++++-------- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 306b07bdcb..286d8c1f62 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -29,7 +29,7 @@ log = logging.getLogger(__name__) -def _copy_model_class(model, attrs): +def _clone_model_class(model, attrs): new_type = type(model.__name__, (model,), attrs) new_type.__abstract__ = model.__abstract__ new_type.__discriminator_value__ = model.__discriminator_value__ @@ -810,7 +810,7 @@ def _inst_batch(self, batch): @classmethod def _class_get_connection(cls): - return cls._connection or cls.__connection__ + return cls.__connection__ def _inst_get_connection(self): return self._connection or self.__connection__ diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 38abe7c72a..8b968c6f68 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -315,15 +315,12 @@ def __init__(self, *args, **kwargs): if not issubclass(model, models.Model): raise CQLEngineException("Models must be derived from base Model.") - m = copy.deepcopy(model) if not keyspace else None + m = models._clone_model_class(model, {}) if keyspace: - from cassandra.cqlengine.models import _copy_model_class - ks = keyspace - m = _copy_model_class(model, {'__keyspace__': ks}) - + m.__keyspace__ = keyspace if connection: - m._connection = connection + m.__connection__ = connection self.models.append(m) @@ -1015,8 +1012,8 @@ def using(self, keyspace=None, connection=None): clone = copy.deepcopy(self) if keyspace: - from cassandra.cqlengine.models import _copy_model_class - clone.model = _copy_model_class(self.model, {'__keyspace__': keyspace}) + from cassandra.cqlengine.models import _clone_model_class + clone.model = _clone_model_class(self.model, {'__keyspace__': keyspace}) if connection: clone._connection = connection From 5dbd4357077954e9cc8c17ba9ac9a8fc2c53190f Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 26 Aug 2016 07:07:33 -0400 Subject: [PATCH 0861/2005] Get the connection from the model if not specified in sync_table and drop_table --- cassandra/cqlengine/management.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index b53c15b8cb..bb85a2c689 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -39,7 +39,7 @@ def get_context(keyspaces, connections): - """Return the execution context""" + """Return all the execution contexts""" if keyspaces: if not isinstance(keyspaces, (list, tuple)): @@ -69,7 +69,7 @@ def create_keyspace_simple(name, replication_factor, durable_writes=True, connec :param str name: name of keyspace to create :param int replication_factor: keyspace replication factor, used with :attr:`~.SimpleStrategy` :param bool durable_writes: Write log is bypassed if set to False - :param str connections: List of connection names + :param list connections: List of connection names """ _create_keyspace(name, durable_writes, 'SimpleStrategy', {'replication_factor': replication_factor}, connections=connections) @@ -89,7 +89,7 @@ def create_keyspace_network_topology(name, dc_replication_map, durable_writes=Tr :param str name: name of keyspace to create :param dict dc_replication_map: map of dc_names: replication_factor :param bool durable_writes: Write log is bypassed if set to False - :param str connections: List of connection names + :param list connections: List of connection names """ _create_keyspace(name, durable_writes, 'NetworkTopologyStrategy', dc_replication_map, connections=connections) @@ -129,7 +129,7 @@ def drop_keyspace(name, connections=None): Take care to execute schema modifications in a single context (i.e. not concurrently with other clients).** :param str name: name of keyspace to drop - :param str connections: List of connection names + :param list connections: List of connection names """ if not _allow_schema_modification(): return @@ -182,6 +182,7 @@ def sync_table(model, keyspaces=None, connections=None): context = get_context(keyspaces, connections) for connection, keyspace in context: + connection = connection if connection else model._get_connection() with query.ContextQuery(model, keyspace=keyspace) as m: _sync_table(m, connection=connection) @@ -507,6 +508,7 @@ def drop_table(model, keyspaces=None, connections=None): context = get_context(keyspaces, connections) for connection, keyspace in context: + connection = connection if connection else model._get_connection() with query.ContextQuery(model, keyspace=keyspace) as m: _drop_table(m, connection=connection) From 5de8b576557b8711700667ddc952512cc84ca7f7 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 26 Aug 2016 07:21:26 -0400 Subject: [PATCH 0862/2005] Add unregister_connection and set_default_connection --- cassandra/cqlengine/connection.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index a59f250c90..c88c26f45c 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -141,6 +141,28 @@ def register_connection(name, hosts, consistency=None, lazy_connect=False, return conn +def unregister_connection(name): + + if name not in _connections: + return + + if _connections[name] == _connections[DEFAULT_CONNECTION]: + del _connections[DEFAULT_CONNECTION] + log.warning("Unregistering default connection '{0}'. Use set_default_connection to set a new one.".format(name)) + + log.debug("Connection '{0}' has been removed from the registry.".format(name)) + del _connections[name] + + +def set_default_connection(name): + + if name not in _connections: + raise CQLEngineException("Connection '{0}' doesn't exist.".format(name)) + + log.debug("Connection '{0}' has been set as default.".format(name)) + _connections[DEFAULT_CONNECTION] = _connections[name] + + def get_connection(name=None): if not name: From 71d822bb35eb6eb741522186e7e317ab241e7869 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 26 Aug 2016 07:29:07 -0400 Subject: [PATCH 0863/2005] minor changes --- cassandra/cqlengine/connection.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index c88c26f45c..3d3cd89ca9 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -179,7 +179,7 @@ def get_connection(name=None): def default(): """ - Configures the global mapper connection to localhost, using the driver defaults + Configures the default connection to localhost, using the driver defaults (except for row_factory) """ @@ -198,7 +198,7 @@ def default(): def set_session(s): """ - Configures the global mapper connection with a preexisting :class:`cassandra.cluster.Session` + Configures the default connection with a preexisting :class:`cassandra.cluster.Session` Note: the mapper presently requires a Session :attr:`~.row_factory` set to ``dict_factory``. This may be relaxed in the future From 9152b4a1d45596fa1d01cd6de90bffb7ab565e1b Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 26 Aug 2016 08:13:17 -0400 Subject: [PATCH 0864/2005] minor fixes --- cassandra/cqlengine/management.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index bb85a2c689..4fdba1c4d2 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -182,7 +182,6 @@ def sync_table(model, keyspaces=None, connections=None): context = get_context(keyspaces, connections) for connection, keyspace in context: - connection = connection if connection else model._get_connection() with query.ContextQuery(model, keyspace=keyspace) as m: _sync_table(m, connection=connection) @@ -201,6 +200,7 @@ def _sync_table(model, connection=None): raw_cf_name = model._raw_column_family_name() ks_name = model._get_keyspace() + connection = connection if connection else model._get_connection() cluster = get_cluster(connection) @@ -508,14 +508,16 @@ def drop_table(model, keyspaces=None, connections=None): context = get_context(keyspaces, connections) for connection, keyspace in context: - connection = connection if connection else model._get_connection() with query.ContextQuery(model, keyspace=keyspace) as m: _drop_table(m, connection=connection) + def _drop_table(model, connection=None): if not _allow_schema_modification(): return + connection = connection if connection else model._get_connection() + # don't try to delete non existant tables meta = get_cluster(connection).metadata From fbe987718ddf3f9565ef4065377d65795efeb350 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 26 Aug 2016 15:28:00 -0500 Subject: [PATCH 0865/2005] PYTHON-498, adding host state test --- tests/integration/standard/test_cluster.py | 36 ++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index a0d9c308d3..873d562311 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -1020,6 +1020,42 @@ def test_session_no_cluster(self): self.assertTrue(cluster.is_shutdown) +class HostStateTest(unittest.TestCase): + + def test_down_event_with_active_connection(self): + """ + Test to ensure that on down calls to clusters with connections still active don't result in + a host being marked down. The second part of the test kills the connection then invokes + on_down, and ensures the state changes for host's metadata. + + @since 3.7 + @jira_ticket PYTHON-498 + @expected_result host should never be toggled down while a connection is active. + + @test_category connection + """ + with Cluster(protocol_version=PROTOCOL_VERSION) as cluster: + session = cluster.connect() + random_host = cluster.metadata.all_hosts()[0] + cluster.on_down(random_host, False) + for _ in range(10): + new_host = cluster.metadata.all_hosts()[0] + self.assertTrue(new_host.is_up) + time.sleep(.01) + + pool = session._pools.get(random_host) + pool.shutdown() + cluster.on_down(random_host, False) + was_marked_down = False + for _ in range(10): + new_host = cluster.metadata.all_hosts()[0] + if not new_host.is_up: + was_marked_down = True + break + + self.assertTrue(was_marked_down) + + class DuplicateRpcTest(unittest.TestCase): load_balancing_policy = WhiteListRoundRobinPolicy(['127.0.0.1']) From 50d3e4b3a4d075bc07e8e9fc7e1009f685a2e287 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 29 Aug 2016 14:26:15 -0500 Subject: [PATCH 0866/2005] Fix cascade failure scenario in libev connection tests, fix timing issue in new cluster tests --- tests/integration/standard/test_cluster.py | 4 ++-- tests/integration/standard/test_connection.py | 12 +++++++----- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 873d562311..39ecf1e5ae 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -1047,12 +1047,12 @@ def test_down_event_with_active_connection(self): pool.shutdown() cluster.on_down(random_host, False) was_marked_down = False - for _ in range(10): + for _ in range(20): new_host = cluster.metadata.all_hosts()[0] if not new_host.is_up: was_marked_down = True break - + time.sleep(.01) self.assertTrue(was_marked_down) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 69566c80ad..5a4a4637ac 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -113,11 +113,13 @@ def test_heart_beat_timeout(self): self.assertNotEqual(len(initial_connections), 0) self.cluster.register_listener(test_listener) # Pause the node - node.pause() - # Wait for connections associated with this host go away - self.wait_for_no_connections(host, self.cluster) - # Resume paused node - node.resume() + try: + node.pause() + # Wait for connections associated with this host go away + self.wait_for_no_connections(host, self.cluster) + # Resume paused node + finally: + node.resume() # Run a query to ensure connections are re-established current_host = "" count = 0 From f5e15123d3529282b945da22f8307fe79fdb1709 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 29 Aug 2016 16:20:42 -0500 Subject: [PATCH 0867/2005] Skipping UDT metadata test for C* < 2.1 --- tests/integration/standard/test_metadata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 3031ce1d8b..88f209d76f 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -723,7 +723,7 @@ def test_refresh_user_type_metadata_proto_2(self): @test_category metadata """ supported_versions = get_supported_protocol_versions() - if 2 not in supported_versions: # 1 and 2 were dropped in the same version + if 2 not in supported_versions or CASSANDRA_VERSION < "2.1": # 1 and 2 were dropped in the same version raise unittest.SkipTest("Protocol versions 1 and 2 are not supported in Cassandra version ".format(CASSANDRA_VERSION)) for protocol_version in (1, 2): From df93b8403ac7e5dc1cc96d45b11d8cec87040ac2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 29 Aug 2016 16:25:13 -0500 Subject: [PATCH 0868/2005] speculative execution policies PYTHON-218 --- cassandra/cluster.py | 156 +++++++++++++++++++---------- cassandra/policies.py | 56 ++++++++++- cassandra/query.py | 15 ++- tests/unit/test_response_future.py | 70 +++++++------ tests/unit/test_resultset.py | 1 - 5 files changed, 205 insertions(+), 93 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 95aff072f5..75304a9b33 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -62,7 +62,8 @@ from cassandra.metadata import Metadata, protect_name, murmur3 from cassandra.policies import (TokenAwarePolicy, DCAwareRoundRobinPolicy, SimpleConvictionPolicy, ExponentialReconnectionPolicy, HostDistance, - RetryPolicy, IdentityTranslator) + RetryPolicy, IdentityTranslator, NoSpeculativeExecutionPlan, + NoSpeculativeExecutionPolicy) from cassandra.pool import (Host, _ReconnectionHandler, _HostReconnectionHandler, HostConnectionPool, HostConnection, NoConnectionsAvailable) @@ -240,15 +241,23 @@ class ExecutionProfile(object): - :func:`cassandra.query.ordered_dict_factory` - return a result row as an OrderedDict """ + speculative_execution_policy = None + """ + An instance of :class:`.policies.SpeculativeExecutionPolicy` + + Defaults to :class:`.NoSpeculativeExecutionPolicy` if not specified + """ + def __init__(self, load_balancing_policy=None, retry_policy=None, consistency_level=ConsistencyLevel.LOCAL_ONE, serial_consistency_level=None, - request_timeout=10.0, row_factory=named_tuple_factory): + request_timeout=10.0, row_factory=named_tuple_factory, speculative_execution_policy=None): self.load_balancing_policy = load_balancing_policy or default_lbp_factory() self.retry_policy = retry_policy or RetryPolicy() self.consistency_level = consistency_level self.serial_consistency_level = serial_consistency_level self.request_timeout = request_timeout self.row_factory = row_factory + self.speculative_execution_policy = speculative_execution_policy or NoSpeculativeExecutionPolicy() class ProfileManager(object): @@ -2058,6 +2067,7 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time retry_policy = query.retry_policy or self.cluster.default_retry_policy row_factory = self.row_factory load_balancing_policy = self.cluster.load_balancing_policy + spec_exec_policy = None else: execution_profile = self._get_execution_profile(execution_profile) @@ -2070,6 +2080,8 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time retry_policy = query.retry_policy or execution_profile.retry_policy row_factory = execution_profile.row_factory load_balancing_policy = execution_profile.load_balancing_policy + spec_exec_policy = execution_profile.speculative_execution_policy + fetch_size = query.fetch_size if fetch_size is FETCH_SIZE_UNSET and self._protocol_version >= 2: @@ -2077,8 +2089,9 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time elif self._protocol_version == 1: fetch_size = None + start_time = time.time() if self._protocol_version >= 3 and self.use_client_timestamp: - timestamp = int(time.time() * 1e6) + timestamp = int(start_time * 1e6) else: timestamp = None @@ -2112,9 +2125,11 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time message.allow_beta_protocol_version = self.cluster.allow_beta_protocol_version message.paging_state = paging_state + spec_exec_plan = spec_exec_policy.new_plan(query.keyspace or self.keyspace, query) if query.is_idempotent and spec_exec_policy else None return ResponseFuture( self, message, query, timeout, metrics=self._metrics, - prepared_statement=prepared_statement, retry_policy=retry_policy, row_factory=row_factory, load_balancer=load_balancing_policy) + prepared_statement=prepared_statement, retry_policy=retry_policy, row_factory=row_factory, + load_balancer=load_balancing_policy, start_time=start_time, speculative_execution_plan=spec_exec_plan) def _get_execution_profile(self, ep): profiles = self.cluster.profile_manager.profiles @@ -3172,11 +3187,11 @@ def _log_if_failed(self, future): exc_info=exc) -def refresh_schema_and_set_result(control_conn, response_future, **kwargs): +def refresh_schema_and_set_result(control_conn, response_future, connection, **kwargs): try: log.debug("Refreshing schema in response to schema change. " "%s", kwargs) - response_future.is_schema_agreed = control_conn._refresh_schema(response_future._connection, **kwargs) + response_future.is_schema_agreed = control_conn._refresh_schema(connection, **kwargs) except Exception: log.exception("Exception refreshing schema in response to schema change:") response_future.session.submit(control_conn.refresh_schema, **kwargs) @@ -3214,6 +3229,14 @@ class ResponseFuture(object): Size of the request message sent """ + coordinator_host = None + """ + The host an actual result from ( + """ + + """ + """ + session = None row_factory = None message = None @@ -3230,7 +3253,6 @@ class ResponseFuture(object): _callbacks = None _errbacks = None _current_host = None - _current_pool = None _connection = None _query_retries = 0 _start_time = None @@ -3240,11 +3262,12 @@ class ResponseFuture(object): _warnings = None _timer = None _protocol_handler = ProtocolHandler + _spec_execution_plan = NoSpeculativeExecutionPlan() _warned_timeout = False def __init__(self, session, message, query, timeout, metrics=None, prepared_statement=None, - retry_policy=RetryPolicy(), row_factory=None, load_balancer=None): + retry_policy=RetryPolicy(), row_factory=None, load_balancer=None, start_time=None, speculative_execution_plan=None): self.session = session # TODO: normalize handling of retry policy and row factory self.row_factory = row_factory or session.row_factory @@ -3252,21 +3275,29 @@ def __init__(self, session, message, query, timeout, metrics=None, prepared_stat self.message = message self.query = query self.timeout = timeout + self._time_remaining = timeout self._retry_policy = retry_policy self._metrics = metrics self.prepared_statement = prepared_statement self._callback_lock = Lock() - if metrics is not None: - self._start_time = time.time() + self._start_time = start_time or time.time() self._make_query_plan() self._event = Event() self._errors = {} self._callbacks = [] self._errbacks = [] + self._spec_execution_plan = speculative_execution_plan or self._spec_execution_plan + self._queried_hosts = [] def _start_timer(self): - if self.timeout is not None: - self._timer = self.session.cluster.connection_class.create_timer(self.timeout, self._on_timeout) + if self._timer is None: + spec_delay = self._spec_execution_plan.next_execution(self._current_host) + if spec_delay >= 0: + if self._time_remaining is None or self._time_remaining > spec_delay: + self._timer = self.session.cluster.connection_class.create_timer(spec_delay, self._on_speculative_execute) + return + if self._time_remaining is not None: + self._timer = self.session.cluster.connection_class.create_timer(self._time_remaining, self._on_timeout) def _cancel_timer(self): if self._timer: @@ -3284,17 +3315,29 @@ def _on_timeout(self): self._set_final_exception(OperationTimedOut(errors, self._current_host)) + def _on_speculative_execute(self): + self._timer = None + if not self._event.is_set(): + if self._time_remaining is not None: + elapsed = time.time() - self._start_time + self._time_remaining -= elapsed + if self._time_remaining <= 0: + self._on_timeout() + return + if not self.send_request(error_no_hosts=False): + self._start_timer() + + def _make_query_plan(self): # convert the list/generator/etc to an iterator so that subsequent # calls to send_request (which retries may do) will resume where # they last left off self.query_plan = iter(self._load_balancer.make_query_plan(self.session.keyspace, self.query)) - def send_request(self): + def send_request(self, error_no_hosts=True): """ Internal """ # query_plan is an iterator, so this will resume where we last left # off if send_request() is called multiple times - start = time.time() for host in self.query_plan: req_id = self._query(host) if req_id is not None: @@ -3303,23 +3346,21 @@ def send_request(self): # timer is only started here, after we have at least one message queued # this is done to avoid overrun of timers with unfettered client requests # in the case of full disconnect, where no hosts will be available - if self._timer is None: - self._start_timer() - return - if self.timeout is not None and time.time() - start > self.timeout: + self._start_timer() + return True + if self.timeout is not None and time.time() - self._start_time > self.timeout: self._on_timeout() - return + return True - self._set_final_exception(NoHostAvailable( - "Unable to complete the operation against any hosts", self._errors)) + if error_no_hosts: + self._set_final_exception(NoHostAvailable( + "Unable to complete the operation against any hosts", self._errors)) + return False def _query(self, host, message=None, cb=None): if message is None: message = self.message - if cb is None: - cb = self._set_result - pool = self.session._pools.get(host) if not pool: self._errors[host] = ConnectionException("Host has been marked down or removed") @@ -3329,7 +3370,6 @@ def _query(self, host, message=None, cb=None): return None self._current_host = host - self._current_pool = pool connection = None try: @@ -3337,6 +3377,10 @@ def _query(self, host, message=None, cb=None): connection, request_id = pool.borrow_connection(timeout=2.0) self._connection = connection result_meta = self.prepared_statement.result_metadata if self.prepared_statement else [] + + if cb is None: + cb = partial(self._set_result, host, connection, pool) + self.request_encoded_size = connection.send_msg(message, request_id, cb=cb, encoder=self._protocol_handler.encode_message, decoder=self._protocol_handler.decode_message, @@ -3423,17 +3467,18 @@ def start_fetching_next_page(self): self._timer = None # clear cancelled timer; new one will be set when request is queued self.send_request() - def _reprepare(self, prepare_message): - cb = partial(self.session.submit, self._execute_after_prepare) - request_id = self._query(self._current_host, prepare_message, cb=cb) + def _reprepare(self, prepare_message, host, connection, pool): + cb = partial(self.session.submit, self._execute_after_prepare, host, connection, pool) + request_id = self._query(host, prepare_message, cb=cb) if request_id is None: # try to submit the original prepared statement on some other host self.send_request() - def _set_result(self, response): + def _set_result(self, host, connection, pool, response): try: - if self._current_pool and self._connection: - self._current_pool.return_connection(self._connection) + self.coordinator_host = host + if pool: + pool.return_connection(connection) trace_id = getattr(response, 'trace_id', None) if trace_id: @@ -3464,7 +3509,7 @@ def _set_result(self, response): self.session.submit( refresh_schema_and_set_result, self.session.cluster.control_connection, - self, **response.results) + self, connection, **response.results) else: results = getattr(response, 'results', None) if results is not None and response.kind == RESULT_KIND_ROWS: @@ -3495,14 +3540,14 @@ def _set_result(self, response): self._metrics.on_other_error() # need to retry against a different host here log.warning("Host %s is overloaded, retrying against a different " - "host", self._current_host) - self._retry(reuse_connection=False, consistency_level=None) + "host", host) + self._retry(reuse_connection=False, consistency_level=None, host=host) return elif isinstance(response, IsBootstrappingErrorMessage): if self._metrics is not None: self._metrics.on_other_error() # need to retry against a different host here - self._retry(reuse_connection=False, consistency_level=None) + self._retry(reuse_connection=False, consistency_level=None, host=host) return elif isinstance(response, PreparedQueryNotFound): if self.prepared_statement: @@ -3536,11 +3581,11 @@ def _set_result(self, response): return log.debug("Re-preparing unrecognized prepared statement against host %s: %s", - self._current_host, prepared_statement.query_string) + host, prepared_statement.query_string) prepare_message = PrepareMessage(query=prepared_statement.query_string) # since this might block, run on the executor to avoid hanging # the event loop thread - self.session.submit(self._reprepare, prepare_message) + self.session.submit(self._reprepare, prepare_message, host, connection, pool) return else: if hasattr(response, 'to_exception'): @@ -3553,20 +3598,20 @@ def _set_result(self, response): if retry_type in (RetryPolicy.RETRY, RetryPolicy.RETRY_NEXT_HOST): self._query_retries += 1 reuse = retry_type == RetryPolicy.RETRY - self._retry(reuse_connection=reuse, consistency_level=consistency) + self._retry(reuse, consistency, host) elif retry_type is RetryPolicy.RETHROW: self._set_final_exception(response.to_exception()) else: # IGNORE if self._metrics is not None: self._metrics.on_ignore() self._set_final_result(None) - self._errors[self._current_host] = response.to_exception() + self._errors[host] = response.to_exception() elif isinstance(response, ConnectionException): if self._metrics is not None: self._metrics.on_connection_error() if not isinstance(response, ConnectionShutdown): self._connection.defunct(response) - self._retry(reuse_connection=False, consistency_level=None) + self._retry(reuse_connection=False, consistency_level=None, host=host) elif isinstance(response, Exception): if hasattr(response, 'to_exception'): self._set_final_exception(response.to_exception()) @@ -3575,7 +3620,7 @@ def _set_result(self, response): else: # we got some other kind of response message msg = "Got unexpected message: %r" % (response,) - exc = ConnectionException(msg, self._current_host) + exc = ConnectionException(msg, host) self._connection.defunct(exc) self._set_final_exception(exc) except Exception as exc: @@ -3590,13 +3635,14 @@ def _set_keyspace_completed(self, errors): self._set_final_exception(ConnectionException( "Failed to set keyspace on all hosts: %s" % (errors,))) - def _execute_after_prepare(self, response): + def _execute_after_prepare(self, host, connection, pool, response): """ Handle the response to our attempt to prepare a statement. If it succeeded, run the original query again against the same host. """ - if self._current_pool and self._connection: - self._current_pool.return_connection(self._connection) + "AFTER PREPARE" + if pool: + pool.return_connection(connection) if self._final_exception: return @@ -3609,14 +3655,14 @@ def _execute_after_prepare(self, response): # use self._query to re-use the same host and # at the same time properly borrow the connection - request_id = self._query(self._current_host) + request_id = self._query(host) if request_id is None: # this host errored out, move on to the next self.send_request() else: self._set_final_exception(ConnectionException( "Got unexpected response when preparing statement " - "on host %s: %s" % (self._current_host, response))) + "on host %s: %s" % (host, response))) elif isinstance(response, ErrorMessage): if hasattr(response, 'to_exception'): self._set_final_exception(response.to_exception()) @@ -3624,14 +3670,14 @@ def _execute_after_prepare(self, response): self._set_final_exception(response) elif isinstance(response, ConnectionException): log.debug("Connection error when preparing statement on host %s: %s", - self._current_host, response) + host, response) # try again on a different host, preparing again if necessary - self._errors[self._current_host] = response + self._errors[host] = response self.send_request() else: self._set_final_exception(ConnectionException( "Got unexpected response type when preparing " - "statement on host %s: %s" % (self._current_host, response))) + "statement on host %s: %s" % (host, response))) def _set_final_result(self, response): self._cancel_timer() @@ -3661,7 +3707,7 @@ def _set_final_exception(self, response): fn, args, kwargs = errback fn(response, *args, **kwargs) - def _retry(self, reuse_connection, consistency_level): + def _retry(self, reuse_connection, consistency_level, host): if self._final_exception: # the connection probably broke while we were waiting # to retry the operation @@ -3673,15 +3719,15 @@ def _retry(self, reuse_connection, consistency_level): self.message.consistency_level = consistency_level # don't retry on the event loop thread - self.session.submit(self._retry_task, reuse_connection) + self.session.submit(self._retry_task, reuse_connection, host) - def _retry_task(self, reuse_connection): + def _retry_task(self, reuse_connection, host): if self._final_exception: # the connection probably broke while we were waiting # to retry the operation return - if reuse_connection and self._query(self._current_host) is not None: + if reuse_connection and self._query(host) is not None: return # otherwise, move onto another host @@ -3852,8 +3898,8 @@ def clear_callbacks(self): def __str__(self): result = "(no result yet)" if self._final_result is _NOT_SET else self._final_result - return "" \ - % (self.query, self._req_id, result, self._final_exception, self._current_host) + return "" \ + % (self.query, self._req_id, result, self._final_exception, self.coordinator_host) __repr__ = __str__ diff --git a/cassandra/policies.py b/cassandra/policies.py index 80b807c329..3736afcb68 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -875,7 +875,7 @@ def translate(self, addr): """ Accepts the node ip address, and returns a translated address to be used connecting to this node. """ - raise NotImplementedError + raise NotImplementedError() class IdentityTranslator(AddressTranslator): @@ -904,3 +904,57 @@ def translate(self, addr): except Exception: pass return addr + + +class SpeculativeExecutionPolicy(object): + """ + Interface for specifying speculative execution plans + """ + + def new_plan(self, keyspace, statement): + """ + Returns + + :param keyspace: + :param statement: + :return: + """ + raise NotImplementedError() + + +class SpeculativeExecutionPlan(object): + def next_execution(self, host): + raise NotImplementedError() + + +class NoSpeculativeExecutionPlan(SpeculativeExecutionPlan): + def next_execution(self, host): + return -1 + + +class NoSpeculativeExecutionPolicy(SpeculativeExecutionPolicy): + + def new_plan(self, keyspace, statement): + return self.NoSpeculativeExecutionPlan() + + +class ConstantSpeculativeExecutionPolicy(SpeculativeExecutionPolicy): + + def __init__(self, delay, max_attempts): + self.delay = delay + self.max_attempts = max_attempts + + class ConstantSpeculativeExecutionPlan(SpeculativeExecutionPlan): + def __init__(self, delay, max_attempts): + self.delay = delay + self.remaining = max_attempts + + def next_execution(self, host): + if self.remaining > 0: + self.remaining -= 1 + return self.delay + else: + return -1 + + def new_plan(self, keyspace, statement): + return self.ConstantSpeculativeExecutionPlan(self.delay, self.max_attempts) diff --git a/cassandra/query.py b/cassandra/query.py index 65cb6ba9e0..40d522d3f7 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -215,11 +215,17 @@ class Statement(object): .. versionadded:: 2.6.0 """ + is_idempotent = False + """ + Flag indicating whether this statement is safe to run multiple times in speculative execution. + """ + _serial_consistency_level = None _routing_key = None def __init__(self, retry_policy=None, consistency_level=None, routing_key=None, - serial_consistency_level=None, fetch_size=FETCH_SIZE_UNSET, keyspace=None, custom_payload=None): + serial_consistency_level=None, fetch_size=FETCH_SIZE_UNSET, keyspace=None, custom_payload=None, + is_idempotent=False): if retry_policy and not hasattr(retry_policy, 'on_read_timeout'): # just checking one method to detect positional parameter errors raise ValueError('retry_policy should implement cassandra.policies.RetryPolicy') self.retry_policy = retry_policy @@ -234,6 +240,7 @@ def __init__(self, retry_policy=None, consistency_level=None, routing_key=None, self.keyspace = keyspace if custom_payload is not None: self.custom_payload = custom_payload + self.is_idempotent = is_idempotent def _key_parts_packed(self, parts): for p in parts: @@ -328,7 +335,7 @@ class SimpleStatement(Statement): def __init__(self, query_string, retry_policy=None, consistency_level=None, routing_key=None, serial_consistency_level=None, fetch_size=FETCH_SIZE_UNSET, keyspace=None, - custom_payload=None): + custom_payload=None, is_idempotent=False): """ `query_string` should be a literal CQL statement with the exception of parameter placeholders that will be filled through the @@ -337,7 +344,7 @@ def __init__(self, query_string, retry_policy=None, consistency_level=None, rout See :class:`Statement` attributes for a description of the other parameters. """ Statement.__init__(self, retry_policy, consistency_level, routing_key, - serial_consistency_level, fetch_size, keyspace, custom_payload) + serial_consistency_level, fetch_size, keyspace, custom_payload, is_idempotent) self._query_string = query_string @property @@ -383,6 +390,7 @@ def __init__(self, column_metadata, query_id, routing_key_indexes, query, self.keyspace = keyspace self.protocol_version = protocol_version self.result_metadata = result_metadata + self.is_idempotent = False @classmethod def from_message(cls, query_id, column_metadata, pk_indexes, cluster_metadata, @@ -465,6 +473,7 @@ def __init__(self, prepared_statement, retry_policy=None, consistency_level=None self.serial_consistency_level = prepared_statement.serial_consistency_level self.fetch_size = prepared_statement.fetch_size self.custom_payload = prepared_statement.custom_payload + self.is_idempotent = prepared_statement.is_idempotent self.values = [] meta = prepared_statement.column_metadata diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index 88b08af878..f995909154 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -69,7 +69,7 @@ def test_result_message(self): connection.send_msg.assert_called_once_with(rf.message, 1, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message, result_metadata=[]) - rf._set_result(self.make_mock_response([{'col': 'val'}])) + rf._set_result(None, None, None, self.make_mock_response([{'col': 'val'}])) result = rf.result() self.assertEqual(result, [{'col': 'val'}]) @@ -81,7 +81,7 @@ def test_unknown_result_class(self): rf = self.make_response_future(session) rf.send_request() - rf._set_result(object()) + rf._set_result(None, None, None, object()) self.assertRaises(ConnectionException, rf.result) def test_set_keyspace_result(self): @@ -92,7 +92,7 @@ def test_set_keyspace_result(self): result = Mock(spec=ResultMessage, kind=RESULT_KIND_SET_KEYSPACE, results="keyspace1") - rf._set_result(result) + rf._set_result(None, None, None, result) rf._set_keyspace_completed({}) self.assertFalse(rf.result()) @@ -106,15 +106,16 @@ def test_schema_change_result(self): result = Mock(spec=ResultMessage, kind=RESULT_KIND_SCHEMA_CHANGE, results=event_results) - rf._set_result(result) - session.submit.assert_called_once_with(ANY, ANY, rf, **event_results) + connection = Mock() + rf._set_result(None, connection, None, result) + session.submit.assert_called_once_with(ANY, ANY, rf, connection, **event_results) def test_other_result_message_kind(self): session = self.make_session() rf = self.make_response_future(session) rf.send_request() result = [1, 2, 3] - rf._set_result(Mock(spec=ResultMessage, kind=999, results=result)) + rf._set_result(None, None, None, Mock(spec=ResultMessage, kind=999, results=result)) self.assertListEqual(list(rf.result()), result) def test_read_timeout_error_message(self): @@ -128,7 +129,7 @@ def test_read_timeout_error_message(self): rf.send_request() result = Mock(spec=ReadTimeoutErrorMessage, info={}) - rf._set_result(result) + rf._set_result(None, None, None, result) self.assertRaises(Exception, rf.result) @@ -143,7 +144,7 @@ def test_write_timeout_error_message(self): rf.send_request() result = Mock(spec=WriteTimeoutErrorMessage, info={}) - rf._set_result(result) + rf._set_result(None, None, None, result) self.assertRaises(Exception, rf.result) def test_unavailable_error_message(self): @@ -157,7 +158,7 @@ def test_unavailable_error_message(self): rf.send_request() result = Mock(spec=UnavailableErrorMessage, info={}) - rf._set_result(result) + rf._set_result(None, None, None, result) self.assertRaises(Exception, rf.result) def test_retry_policy_says_ignore(self): @@ -171,7 +172,7 @@ def test_retry_policy_says_ignore(self): rf.send_request() result = Mock(spec=UnavailableErrorMessage, info={}) - rf._set_result(result) + rf._set_result(None, None, None, result) self.assertFalse(rf.result()) def test_retry_policy_says_retry(self): @@ -195,20 +196,21 @@ def test_retry_policy_says_retry(self): connection.send_msg.assert_called_once_with(rf.message, 1, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message, result_metadata=[]) result = Mock(spec=UnavailableErrorMessage, info={}) - rf._set_result(result) + host = Mock() + rf._set_result(host, None, None, result) - session.submit.assert_called_once_with(rf._retry_task, True) + session.submit.assert_called_once_with(rf._retry_task, True, host) self.assertEqual(1, rf._query_retries) connection = Mock(spec=Connection) pool.borrow_connection.return_value = (connection, 2) # simulate the executor running this - rf._retry_task(True) + rf._retry_task(True, host) # it should try again with the same host since this was # an UnavailableException - rf.session._pools.get.assert_called_with('ip1') + rf.session._pools.get.assert_called_with(host) pool.borrow_connection.assert_called_with(timeout=ANY) connection.send_msg.assert_called_with(rf.message, 2, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message, result_metadata=[]) @@ -229,16 +231,17 @@ def test_retry_with_different_host(self): self.assertEqual(ConsistencyLevel.QUORUM, rf.message.consistency_level) result = Mock(spec=OverloadedErrorMessage, info={}) - rf._set_result(result) + host = Mock() + rf._set_result(host, None, None, result) - session.submit.assert_called_once_with(rf._retry_task, False) + session.submit.assert_called_once_with(rf._retry_task, False, host) # query_retries does not get incremented for Overloaded/Bootstrapping errors self.assertEqual(0, rf._query_retries) connection = Mock(spec=Connection) pool.borrow_connection.return_value = (connection, 2) # simulate the executor running this - rf._retry_task(False) + rf._retry_task(False, host) # it should try with a different host rf.session._pools.get.assert_called_with('ip2') @@ -259,21 +262,22 @@ def test_all_retries_fail(self): rf.session._pools.get.assert_called_once_with('ip1') result = Mock(spec=IsBootstrappingErrorMessage, info={}) - rf._set_result(result) + host = Mock() + rf._set_result(host, None, None, result) # simulate the executor running this - session.submit.assert_called_once_with(rf._retry_task, False) - rf._retry_task(False) + session.submit.assert_called_once_with(rf._retry_task, False, host) + rf._retry_task(False, host) # it should try with a different host rf.session._pools.get.assert_called_with('ip2') result = Mock(spec=IsBootstrappingErrorMessage, info={}) - rf._set_result(result) + rf._set_result(host, None, None, result) # simulate the executor running this - session.submit.assert_called_with(rf._retry_task, False) - rf._retry_task(False) + session.submit.assert_called_with(rf._retry_task, False, host) + rf._retry_task(False, host) self.assertRaises(NoHostAvailable, rf.result) @@ -295,7 +299,7 @@ def test_first_pool_shutdown(self): rf = self.make_response_future(session) rf.send_request() - rf._set_result(self.make_mock_response([{'col': 'val'}])) + rf._set_result(None, None, None, self.make_mock_response([{'col': 'val'}])) result = rf.result() self.assertEqual(result, [{'col': 'val'}]) @@ -319,7 +323,7 @@ def test_timeout_getting_connection_from_pool(self): rf = self.make_response_future(session) rf.send_request() - rf._set_result(self.make_mock_response([{'col': 'val'}])) + rf._set_result(None, None, None, self.make_mock_response([{'col': 'val'}])) self.assertEqual(rf.result(), [{'col': 'val'}]) # make sure the exception is recorded correctly @@ -336,7 +340,7 @@ def test_callback(self): kwargs = {'one': 1, 'two': 2} rf.add_callback(callback, arg, **kwargs) - rf._set_result(self.make_mock_response(expected_result)) + rf._set_result(None, None, None, self.make_mock_response(expected_result)) result = rf.result() self.assertEqual(result, expected_result) @@ -363,7 +367,7 @@ def test_errback(self): rf.add_errback(self.assertIsInstance, Exception) result = Mock(spec=UnavailableErrorMessage, info={}) - rf._set_result(result) + rf._set_result(None, None, None, result) self.assertRaises(Exception, rf.result) # this should get called immediately now that the error is set @@ -385,7 +389,7 @@ def test_multiple_callbacks(self): kwargs2 = {'three': 3, 'four': 4} rf.add_callback(callback2, arg2, **kwargs2) - rf._set_result(self.make_mock_response(expected_result)) + rf._set_result(None, None, None, self.make_mock_response(expected_result)) result = rf.result() self.assertEqual(result, expected_result) @@ -420,7 +424,7 @@ def test_multiple_errbacks(self): expected_exception = Unavailable("message", 1, 2, 3) result = Mock(spec=UnavailableErrorMessage, info={'something': 'here'}) result.to_exception.return_value = expected_exception - rf._set_result(result) + rf._set_result(None, None, None, result) self.assertRaises(Exception, rf.result) callback.assert_called_once_with(expected_exception, arg, **kwargs) @@ -442,7 +446,7 @@ def test_add_callbacks(self): errback=self.assertIsInstance, errback_args=(Exception,)) result = Mock(spec=UnavailableErrorMessage, info={}) - rf._set_result(result) + rf._set_result(None, None, None, result) self.assertRaises(Exception, rf.result) # test callback @@ -457,7 +461,7 @@ def test_add_callbacks(self): callback=callback, callback_args=(arg,), callback_kwargs=kwargs, errback=self.assertIsInstance, errback_args=(Exception,)) - rf._set_result(self.make_mock_response(expected_result)) + rf._set_result(None, None, None, self.make_mock_response(expected_result)) self.assertEqual(rf.result(), expected_result) callback.assert_called_once_with(expected_result, arg, **kwargs) @@ -478,7 +482,7 @@ def test_prepared_query_not_found(self): rf._connection.keyspace = "FooKeyspace" result = Mock(spec=PreparedQueryNotFound, info='a' * 16) - rf._set_result(result) + rf._set_result(None, None, None, result) self.assertTrue(session.submit.call_args) args, kwargs = session.submit.call_args @@ -502,5 +506,5 @@ def test_prepared_query_not_found_bad_keyspace(self): rf._connection.keyspace = "BarKeyspace" result = Mock(spec=PreparedQueryNotFound, info='a' * 16) - rf._set_result(result) + rf._set_result(None, None, None, result) self.assertRaises(ValueError, rf.result) diff --git a/tests/unit/test_resultset.py b/tests/unit/test_resultset.py index 2deeb30f75..c0fbad4a65 100644 --- a/tests/unit/test_resultset.py +++ b/tests/unit/test_resultset.py @@ -19,7 +19,6 @@ import unittest # noqa from mock import Mock, PropertyMock -import warnings from cassandra.cluster import ResultSet From 365f1318ccae4d04a68b3adc2350ef925f02d8b3 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 30 Aug 2016 11:26:31 -0500 Subject: [PATCH 0869/2005] Fixing timing issue with metrics test --- tests/integration/standard/test_metrics.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 858cae7fb0..efacc4c071 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -320,6 +320,17 @@ def __str__(self): class MetricsRequestSize(BasicExistingKeyspaceUnitTestCase): + def wait_for_count(self, ra, expected_count, error=False): + for _ in range(10): + if not error: + if ra.successful is expected_count: + return True + else: + if ra.errors is expected_count: + return True + time.sleep(.01) + return False + def test_metrics_per_cluster(self): """ Test to validate that requests listeners. @@ -343,8 +354,9 @@ def test_metrics_per_cluster(self): except SyntaxException: continue - self.assertEqual(ra.errors, 3) - self.assertEqual(ra.successful, 10) + self.assertTrue(self.wait_for_count(ra, 10)) + self.assertTrue(self.wait_for_count(ra, 3, error=True)) + # Make sure a poorly coded RA doesn't cause issues RequestAnalyzer(self.session, throw_on_success=False, throw_on_fail=True) From b470667fa53403b9a9a51f31ce97d5e37f6892ce Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 30 Aug 2016 12:00:24 -0500 Subject: [PATCH 0870/2005] fix docs, ResponseFuture.attempted_hosts PYTHON-218 --- cassandra/cluster.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 75304a9b33..f2daab98ac 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3231,10 +3231,12 @@ class ResponseFuture(object): coordinator_host = None """ - The host an actual result from ( + The host from which we recieved a response """ + attempted_hosts = None """ + A list of hosts tried, including all speculative executions, retries, and pages """ session = None @@ -3287,7 +3289,7 @@ def __init__(self, session, message, query, timeout, metrics=None, prepared_stat self._callbacks = [] self._errbacks = [] self._spec_execution_plan = speculative_execution_plan or self._spec_execution_plan - self._queried_hosts = [] + self.attempted_hosts = [] def _start_timer(self): if self._timer is None: @@ -3385,6 +3387,7 @@ def _query(self, host, message=None, cb=None): encoder=self._protocol_handler.encode_message, decoder=self._protocol_handler.decode_message, result_metadata=result_meta) + self.attempted_hosts.append(host) return request_id except NoConnectionsAvailable as exc: log.debug("All connections for host %s are at capacity, moving to the next host", host) @@ -3640,7 +3643,6 @@ def _execute_after_prepare(self, host, connection, pool, response): Handle the response to our attempt to prepare a statement. If it succeeded, run the original query again against the same host. """ - "AFTER PREPARE" if pool: pool.return_connection(connection) From 6888e7b21c3d53098fd091bd0f4e83aa273ce268 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 30 Aug 2016 12:09:59 -0500 Subject: [PATCH 0871/2005] fix unit test for new arguments PYTHON-218 --- tests/unit/test_response_future.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index f995909154..a0f38c4a52 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -486,9 +486,9 @@ def test_prepared_query_not_found(self): self.assertTrue(session.submit.call_args) args, kwargs = session.submit.call_args - self.assertEqual(rf._reprepare, args[-2]) - self.assertIsInstance(args[-1], PrepareMessage) - self.assertEqual(args[-1].query, "SELECT * FROM foobar") + self.assertEqual(rf._reprepare, args[-5]) + self.assertIsInstance(args[-4], PrepareMessage) + self.assertEqual(args[-4].query, "SELECT * FROM foobar") def test_prepared_query_not_found_bad_keyspace(self): session = self.make_session() From d1d5ef4ec6a3da80ef2494157a9c156ade4053c2 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 31 Aug 2016 07:37:07 -0400 Subject: [PATCH 0872/2005] Add integration tests --- cassandra/cqlengine/connection.py | 2 +- cassandra/cqlengine/models.py | 11 +- cassandra/cqlengine/query.py | 13 +- .../integration/cqlengine/test_connections.py | 389 ++++++++++++++++++ .../cqlengine/test_context_query.py | 48 +++ 5 files changed, 454 insertions(+), 9 deletions(-) create mode 100644 tests/integration/cqlengine/test_connections.py diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 3d3cd89ca9..3070e67e56 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -146,7 +146,7 @@ def unregister_connection(name): if name not in _connections: return - if _connections[name] == _connections[DEFAULT_CONNECTION]: + if DEFAULT_CONNECTION in _connections and _connections[name] == _connections[DEFAULT_CONNECTION]: del _connections[DEFAULT_CONNECTION] log.warning("Unregistering default connection '{0}'. Use set_default_connection to set a new one.".format(name)) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 286d8c1f62..1b1987396f 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -31,9 +31,12 @@ def _clone_model_class(model, attrs): new_type = type(model.__name__, (model,), attrs) - new_type.__abstract__ = model.__abstract__ - new_type.__discriminator_value__ = model.__discriminator_value__ - new_type.__default_ttl__ = model.__default_ttl__ + try: + new_type.__abstract__ = model.__abstract__ + new_type.__discriminator_value__ = model.__discriminator_value__ + new_type.__default_ttl__ = model.__default_ttl__ + except AttributeError: + pass return new_type @@ -803,6 +806,8 @@ def _class_batch(cls, batch): def _inst_batch(self, batch): assert self._timeout is connection.NOT_SET, 'Setting both timeout and batch is not supported' + if self._connection: + raise CQLEngineException("Cannot specify a connection on model in batch mode.") self._batch = batch return self diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 8b968c6f68..f47b025f08 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -302,18 +302,20 @@ def __init__(self, *args, **kwargs): self.models = [] if len(args) < 1: - raise CQLEngineException("No model provided.") + raise ValueError("No model provided.") keyspace = kwargs.pop('keyspace', None) connection = kwargs.pop('connection', None) if kwargs: - raise CQLEngineException("Unknown keyword argument(s): {0}".format( + raise ValueError("Unknown keyword argument(s): {0}".format( ','.join(kwargs.keys()))) for model in args: - if not issubclass(model, models.Model): - raise CQLEngineException("Models must be derived from base Model.") + try: + issubclass(model, models.Model) + except TypeError: + raise ValueError("Models must be derived from base Model.") m = models._clone_model_class(model, {}) @@ -390,7 +392,8 @@ def _execute(self, statement): if self._batch: return self._batch.add_query(statement) else: - result = _execute_statement(self.model, statement, self._consistency, self._timeout, connection=self._connection) + connection = self._connection if self._connection else self.model._get_connection() + result = _execute_statement(self.model, statement, self._consistency, self._timeout, connection=connection) if self._if_not_exists or self._if_exists or self._conditional: check_applied(result) return result diff --git a/tests/integration/cqlengine/test_connections.py b/tests/integration/cqlengine/test_connections.py new file mode 100644 index 0000000000..f2f88b0faa --- /dev/null +++ b/tests/integration/cqlengine/test_connections.py @@ -0,0 +1,389 @@ +# Copyright 2013-2016 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from cassandra import InvalidRequest +from cassandra.cluster import NoHostAvailable +from cassandra.cqlengine import columns, CQLEngineException +from cassandra.cqlengine import connection as conn +from cassandra.cqlengine.management import drop_keyspace, sync_table, drop_table, create_keyspace_simple +from cassandra.cqlengine.models import Model +from cassandra.cqlengine.query import ContextQuery, BatchQuery +from tests.integration.cqlengine import setup_connection, DEFAULT_KEYSPACE +from tests.integration.cqlengine.base import BaseCassEngTestCase + + +class TestModel(Model): + + __keyspace__ = 'ks1' + + partition = columns.Integer(primary_key=True) + cluster = columns.Integer(primary_key=True) + count = columns.Integer() + text = columns.Text() + + +class AnotherTestModel(Model): + + __keyspace__ = 'ks1' + + partition = columns.Integer(primary_key=True) + cluster = columns.Integer(primary_key=True) + count = columns.Integer() + text = columns.Text() + + +class ContextQueryConnectionTests(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(ContextQueryConnectionTests, cls).setUpClass() + create_keyspace_simple('ks1', 1) + + conn.unregister_connection('default') + conn.register_connection('fake_cluster', ['127.0.0.100'], lazy_connect=True, retry_connect=True, default=True) + conn.register_connection('cluster', ['127.0.0.1']) + + with ContextQuery(TestModel, connection='cluster') as tm: + sync_table(tm) + + @classmethod + def tearDownClass(cls): + super(ContextQueryConnectionTests, cls).tearDownClass() + + with ContextQuery(TestModel, connection='cluster') as tm: + drop_table(tm) + drop_keyspace('ks1', connections=['cluster']) + + + # reset the default connection + conn.unregister_connection('fake_cluster') + conn.unregister_connection('cluster') + setup_connection(DEFAULT_KEYSPACE) + + + def setUp(self): + super(BaseCassEngTestCase, self).setUp() + + def test_context_connection_priority(self): + + # Set the default connection on the Model + TestModel.__connection__ = 'cluster' + with ContextQuery(TestModel) as tm: + tm.objects.create(partition=1, cluster=1) + + # ContextQuery connection should have priority over default one + with ContextQuery(TestModel, connection='fake_cluster') as tm: + with self.assertRaises(NoHostAvailable): + tm.objects.create(partition=1, cluster=1) + + # Explicit connection should have priority over ContextQuery one + with ContextQuery(TestModel, connection='fake_cluster') as tm: + tm.objects.using(connection='cluster').create(partition=1, cluster=1) + + # Reset the default conn of the model + TestModel.__connection__ = None + + # No model connection and an invalid default connection + with ContextQuery(TestModel) as tm: + with self.assertRaises(NoHostAvailable): + tm.objects.create(partition=1, cluster=1) + + def test_context_connection_with_keyspace(self): + + # ks2 doesn't exist + with ContextQuery(TestModel, connection='cluster', keyspace='ks2') as tm: + with self.assertRaises(InvalidRequest): + tm.objects.create(partition=1, cluster=1) + + +class ManagementConnectionTests(BaseCassEngTestCase): + + keyspaces = ['ks1', 'ks2'] + conns = ['cluster'] + + @classmethod + def setUpClass(cls): + super(ManagementConnectionTests, cls).setUpClass() + conn.unregister_connection('default') + conn.register_connection('fake_cluster', ['127.0.0.100'], lazy_connect=True, retry_connect=True, default=True) + conn.register_connection('cluster', ['127.0.0.1']) + + + @classmethod + def tearDownClass(cls): + super(ManagementConnectionTests, cls).tearDownClass() + + # reset the default connection + conn.unregister_connection('fake_cluster') + conn.unregister_connection('cluster') + setup_connection(DEFAULT_KEYSPACE) + + def setUp(self): + super(BaseCassEngTestCase, self).setUp() + + def test_create_drop_keyspace(self): + + # No connection (default is fake) + with self.assertRaises(NoHostAvailable): + create_keyspace_simple(self.keyspaces[0], 1) + + # Explicit connections + for ks in self.keyspaces: + create_keyspace_simple(ks, 1, connections=self.conns) + + for ks in self.keyspaces: + drop_keyspace(ks, connections=self.conns) + + def test_create_drop_table(self): + + for ks in self.keyspaces: + create_keyspace_simple(ks, 1, connections=self.conns) + + # No connection (default is fake) + with self.assertRaises(NoHostAvailable): + sync_table(TestModel) + + # Explicit connections + sync_table(TestModel, connections=self.conns) + + # Explicit drop + drop_table(TestModel, connections=self.conns) + + # Model connection + TestModel.__connection__ = 'cluster' + sync_table(TestModel) + TestModel.__connection__ = None + + # No connection (default is fake) + with self.assertRaises(NoHostAvailable): + drop_table(TestModel) + + # Model connection + TestModel.__connection__ = 'cluster' + drop_table(TestModel) + TestModel.__connection__ = None + + # Model connection + for ks in self.keyspaces: + drop_keyspace(ks, connections=self.conns) + + +class BatchQueryConnectionTests(BaseCassEngTestCase): + + conns = ['cluster'] + + @classmethod + def setUpClass(cls): + super(BatchQueryConnectionTests, cls).setUpClass() + + create_keyspace_simple('ks1', 1) + sync_table(TestModel) + sync_table(AnotherTestModel) + + conn.unregister_connection('default') + conn.register_connection('fake_cluster', ['127.0.0.100'], lazy_connect=True, retry_connect=True, default=True) + conn.register_connection('cluster', ['127.0.0.1']) + + + @classmethod + def tearDownClass(cls): + super(BatchQueryConnectionTests, cls).tearDownClass() + + # reset the default connection + conn.unregister_connection('fake_cluster') + conn.unregister_connection('cluster') + setup_connection(DEFAULT_KEYSPACE) + + drop_keyspace('ks1') + + def setUp(self): + super(BaseCassEngTestCase, self).setUp() + + def test_basic_batch_query(self): + """Test BatchQuery requests""" + + # No connection with a QuerySet (default is a fake one) + with self.assertRaises(NoHostAvailable): + with BatchQuery() as b: + TestModel.objects.batch(b).create(partition=1, cluster=1) + + # Explicit connection with a QuerySet + with BatchQuery(connection='cluster') as b: + TestModel.objects.batch(b).create(partition=1, cluster=1) + + # Get an object from the BD + with ContextQuery(TestModel, connection='cluster') as tm: + obj = tm.objects.get(partition=1, cluster=1) + obj.__connection__ = None + + # No connection with a model (default is a fake one) + with self.assertRaises(NoHostAvailable): + with BatchQuery() as b: + obj.count = 2 + obj.batch(b).save() + + # Explicit connection with a model + with BatchQuery(connection='cluster') as b: + obj.count = 2 + obj.batch(b).save() + + def test_batch_query_different_connection(self): + """Test BatchQuery with Models that have a different connection""" + + # Testing on a model class + TestModel.__connection__ = 'cluster' + AnotherTestModel.__connection__ = 'cluster2' + + with self.assertRaises(CQLEngineException): + with BatchQuery() as b: + TestModel.objects.batch(b).create(partition=1, cluster=1) + AnotherTestModel.objects.batch(b).create(partition=1, cluster=1) + + TestModel.__connection__ = None + AnotherTestModel.__connection__ = None + + with BatchQuery(connection='cluster') as b: + TestModel.objects.batch(b).create(partition=1, cluster=1) + AnotherTestModel.objects.batch(b).create(partition=1, cluster=1) + + # Testing on a model instance + with ContextQuery(TestModel, AnotherTestModel, connection='cluster') as (tm, atm): + obj1 = tm.objects.get(partition=1, cluster=1) + obj2 = atm.objects.get(partition=1, cluster=1) + + obj1.__connection__ = 'cluster' + obj2.__connection__ = 'cluster2' + + obj1.count = 4 + obj2.count = 4 + + with self.assertRaises(CQLEngineException): + with BatchQuery() as b: + obj1.batch(b).save() + obj2.batch(b).save() + + def test_batch_query_connection_override(self): + """Test that we cannot override a BatchQuery connection per model""" + + with self.assertRaises(CQLEngineException): + with BatchQuery(connection='cluster') as b: + TestModel.batch(b).using(connection='test').save() + + with self.assertRaises(CQLEngineException): + with BatchQuery(connection='cluster') as b: + TestModel.using(connection='test').batch(b).save() + + with ContextQuery(TestModel, AnotherTestModel, connection='cluster') as (tm, atm): + obj1 = tm.objects.get(partition=1, cluster=1) + obj1.__connection__ = None + + with self.assertRaises(CQLEngineException): + with BatchQuery(connection='cluster') as b: + obj1.using(connection='test').batch(b).save() + + with self.assertRaises(CQLEngineException): + with BatchQuery(connection='cluster') as b: + obj1.batch(b).using(connection='test').save() + + +class UsingDescriptorTests(BaseCassEngTestCase): + + conns = ['cluster'] + keyspaces = ['ks1', 'ks2'] + + @classmethod + def setUpClass(cls): + super(UsingDescriptorTests, cls).setUpClass() + + conn.unregister_connection('default') + conn.register_connection('fake_cluster', ['127.0.0.100'], lazy_connect=True, retry_connect=True, default=True) + conn.register_connection('cluster', ['127.0.0.1']) + + + @classmethod + def tearDownClass(cls): + super(UsingDescriptorTests, cls).tearDownClass() + + # reset the default connection + conn.unregister_connection('fake_cluster') + conn.unregister_connection('cluster') + setup_connection(DEFAULT_KEYSPACE) + + for ks in cls.keyspaces: + drop_keyspace(ks) + + def setUp(self): + super(BaseCassEngTestCase, self).setUp() + + def _reset_data(self): + + for ks in self.keyspaces: + drop_keyspace(ks, connections=self.conns) + + for ks in self.keyspaces: + create_keyspace_simple(ks, 1, connections=self.conns) + sync_table(TestModel, keyspaces=self.keyspaces, connections=self.conns) + + def test_keyspace(self): + + self._reset_data() + + with ContextQuery(TestModel, connection='cluster') as tm: + + # keyspace Model class + tm.objects.using(keyspace='ks2').create(partition=1, cluster=1) + tm.objects.using(keyspace='ks2').create(partition=2, cluster=2) + + with self.assertRaises(TestModel.DoesNotExist): + tm.objects.get(partition=1, cluster=1) # default keyspace ks1 + obj1 = tm.objects.using(keyspace='ks2').get(partition=1, cluster=1) + + obj1.count = 2 + obj1.save() + + with self.assertRaises(NoHostAvailable): + TestModel.objects.using(keyspace='ks2').get(partition=1, cluster=1) + + obj2 = TestModel.objects.using(connection='cluster', keyspace='ks2').get(partition=1, cluster=1) + self.assertEqual(obj2.count, 2) + + # Update test + TestModel.objects(partition=2, cluster=2).using(connection='cluster', keyspace='ks2').update(count=5) + obj3 = TestModel.objects.using(connection='cluster', keyspace='ks2').get(partition=2, cluster=2) + self.assertEqual(obj3.count, 5) + + TestModel.objects(partition=2, cluster=2).using(connection='cluster', keyspace='ks2').delete() + with self.assertRaises(TestModel.DoesNotExist): + TestModel.objects.using(connection='cluster', keyspace='ks2').get(partition=2, cluster=2) + + def test_connection(self): + + self._reset_data() + + # Model class + with self.assertRaises(NoHostAvailable): + TestModel.objects.create(partition=1, cluster=1) + + TestModel.objects.using(connection='cluster').create(partition=1, cluster=1) + TestModel.objects(partition=1, cluster=1).using(connection='cluster').update(count=2) + obj1 = TestModel.objects.using(connection='cluster').get(partition=1, cluster=1) + self.assertEqual(obj1.count, 2) + + obj1.using(connection='cluster').update(count=5) + obj1 = TestModel.objects.using(connection='cluster').get(partition=1, cluster=1) + self.assertEqual(obj1.count, 5) + + obj1.using(connection='cluster').delete() + with self.assertRaises(TestModel.DoesNotExist): + TestModel.objects.using(connection='cluster').get(partition=1, cluster=1) diff --git a/tests/integration/cqlengine/test_context_query.py b/tests/integration/cqlengine/test_context_query.py index b3941319e9..0a29688d96 100644 --- a/tests/integration/cqlengine/test_context_query.py +++ b/tests/integration/cqlengine/test_context_query.py @@ -46,6 +46,7 @@ def tearDownClass(cls): for ks in cls.KEYSPACES: drop_keyspace(ks) + def setUp(self): super(ContextQueryTests, self).setUp() for ks in self.KEYSPACES: @@ -125,3 +126,50 @@ def test_context_keyspace(self): self.assertEqual(42, tm.objects.get(partition=1).count) + def test_context_multiple_models(self): + """ + Tests the use of multiple models with the context manager + + @since 3.7 + @jira_ticket PYTHON-613 + @expected_result all models are properly updated with the context + + @test_category query + """ + + with ContextQuery(TestModel, TestModel, keyspace='ks4') as (tm1, tm2): + + self.assertNotEqual(tm1, tm2) + self.assertEqual(tm1.__keyspace__, 'ks4') + self.assertEqual(tm2.__keyspace__, 'ks4') + + def test_context_invalid_parameters(self): + """ + Tests that invalid parameters are raised by the context manager + + @since 3.7 + @jira_ticket PYTHON-613 + @expected_result a ValueError is raised when passing invalid parameters + + @test_category query + """ + + with self.assertRaises(ValueError): + with ContextQuery(keyspace='ks2'): + pass + + with self.assertRaises(ValueError): + with ContextQuery(42) as tm: + pass + + with self.assertRaises(ValueError): + with ContextQuery(TestModel, 42): + pass + + with self.assertRaises(ValueError): + with ContextQuery(TestModel, unknown_param=42): + pass + + with self.assertRaises(ValueError): + with ContextQuery(TestModel, keyspace='ks2', unknown_param=42): + pass \ No newline at end of file From 07aea035bf9b00364c28b47b239047141a2ca580 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 31 Aug 2016 09:45:59 -0400 Subject: [PATCH 0873/2005] preserve connection.cluster/session since they are not explicitly private --- cassandra/cqlengine/connection.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 3070e67e56..943467b8c8 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -28,6 +28,9 @@ NOT_SET = _NOT_SET # required for passing timeout to Session.execute +cluster = None +session = None + # connections registry DEFAULT_CONNECTION = '_default_' _connections = {} @@ -79,6 +82,7 @@ def __init__(self, name, hosts, consistency=None, def setup(self): """Setup the connection""" + global cluster, session if 'username' in self.cluster_options or 'password' in self.cluster_options: raise CQLEngineException("Username & Password are now handled by using the native driver's auth_provider") @@ -99,6 +103,10 @@ def setup(self): if self.consistency is not None: self.session.default_consistency_level = self.consistency + if DEFAULT_CONNECTION in _connections and _connections[DEFAULT_CONNECTION] == self: + cluster = _connections[DEFAULT_CONNECTION].cluster + session = _connections[DEFAULT_CONNECTION].session + self.setup_session() def setup_session(self): @@ -135,19 +143,22 @@ def register_connection(name, hosts, consistency=None, lazy_connect=False, _connections[name] = conn if default: - _connections[DEFAULT_CONNECTION] = conn + set_default_connection(name) conn.setup() return conn def unregister_connection(name): + global cluster, session if name not in _connections: return if DEFAULT_CONNECTION in _connections and _connections[name] == _connections[DEFAULT_CONNECTION]: del _connections[DEFAULT_CONNECTION] + cluster = None + session = None log.warning("Unregistering default connection '{0}'. Use set_default_connection to set a new one.".format(name)) log.debug("Connection '{0}' has been removed from the registry.".format(name)) @@ -155,12 +166,15 @@ def unregister_connection(name): def set_default_connection(name): + global cluster, session if name not in _connections: raise CQLEngineException("Connection '{0}' doesn't exist.".format(name)) log.debug("Connection '{0}' has been set as default.".format(name)) _connections[DEFAULT_CONNECTION] = _connections[name] + cluster = _connections[name].cluster + session = _connections[name].session def get_connection(name=None): From 83e8d66435041b679990035b6e77590a7ea0c13f Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 31 Aug 2016 09:52:42 -0400 Subject: [PATCH 0874/2005] make DEFAULT_CONNECTION key an object --- cassandra/cqlengine/connection.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 943467b8c8..9b11dc3abd 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -32,7 +32,7 @@ session = None # connections registry -DEFAULT_CONNECTION = '_default_' +DEFAULT_CONNECTION = object() _connections = {} # Because type models may be registered before a connection is present, @@ -43,7 +43,9 @@ def format_log_context(msg, connection=None, keyspace=None): """Format log message to add keyspace and connection context""" - connection_info = connection if connection else DEFAULT_CONNECTION + connection_info = connection + if not connection_info: + connection_info = 'DEFAULT_CONNECTION' if keyspace: msg = '[Connection: {0}, Keyspace: {1}] {2}'.format(connection_info, keyspace, msg) else: From d648bfe397066e6a867cb701b3a5645036d287d8 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 31 Aug 2016 10:12:36 -0400 Subject: [PATCH 0875/2005] minor fixes --- cassandra/cqlengine/management.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 4fdba1c4d2..b5ca80d3a7 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -38,7 +38,7 @@ schema_columnfamilies = NamedTable('system', 'schema_columnfamilies') -def get_context(keyspaces, connections): +def _get_context(keyspaces, connections): """Return all the execution contexts""" if keyspaces: @@ -165,7 +165,11 @@ def sync_table(model, keyspaces=None, connections=None): """ Inspects the model and creates / updates the corresponding table and columns. - If `keyspaces` is specified, the table will be synched for all specified keyspaces. Note that the `Model.__keyspace__` is ignored in that case. + If `keyspaces` is specified, the table will be synched for all specified keyspaces. + Note that the `Model.__keyspace__` is ignored in that case. + + If `connections` is specified, the table will be synched for all specified connections. Note that the `Model.__connection__` is ignored in that case. + If not specified, it will try to get the connection from the Model. Any User Defined Types used in the table are implicitly synchronized. @@ -180,7 +184,7 @@ def sync_table(model, keyspaces=None, connections=None): *There are plans to guard schema-modifying functions with an environment-driven conditional.* """ - context = get_context(keyspaces, connections) + context = _get_context(keyspaces, connections) for connection, keyspace in context: with query.ContextQuery(model, keyspace=keyspace) as m: _sync_table(m, connection=connection) @@ -500,13 +504,17 @@ def drop_table(model, keyspaces=None, connections=None): If `keyspaces` is specified, the table will be dropped for all specified keyspaces. Note that the `Model.__keyspace__` is ignored in that case. + If `connections` is specified, the table will be synched for all specified connections. Note that the `Model.__connection__` is ignored in that case. + If not specified, it will try to get the connection from the Model. + + **This function should be used with caution, especially in production environments. Take care to execute schema modifications in a single context (i.e. not concurrently with other clients).** *There are plans to guard schema-modifying functions with an environment-driven conditional.* """ - context = get_context(keyspaces, connections) + context = _get_context(keyspaces, connections) for connection, keyspace in context: with query.ContextQuery(model, keyspace=keyspace) as m: _drop_table(m, connection=connection) From 8bbe7ce3e384bdc21a12ef834ce6ab63b75f2991 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 31 Aug 2016 09:50:45 -0500 Subject: [PATCH 0876/2005] fix race adding connection pool to session while handling keyspace change PYTHON-628 --- cassandra/cluster.py | 28 ++++++++++++++++++++++++---- cassandra/pool.py | 20 +++++++++++++------- 2 files changed, 37 insertions(+), 11 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index f2daab98ac..36b526a5bc 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2320,7 +2320,27 @@ def run_add_or_renew_pool(): return False previous = self._pools.get(host) - self._pools[host] = new_pool + with self._lock: + while new_pool._keyspace != self.keyspace: + self._lock.release() + set_keyspace_event = Event() + errors_returned = [] + + def callback(pool, errors): + errors_returned.extend(errors) + set_keyspace_event.set() + + new_pool._set_keyspace_for_all_conns(self.keyspace, callback) + set_keyspace_event.wait(self.cluster.connect_timeout) + if not set_keyspace_event.is_set() or errors_returned: + log.warning("Failed setting keyspace for pool after keyspace changed during connect: %s", errors_returned) + self.cluster.on_down(host, is_host_addition) + new_pool.shutdown() + self._lock.acquire() + return False + self._lock.acquire() + self._pools[host] = new_pool + log.debug("Added pool for host %s to session", host) if previous: previous.shutdown() @@ -2397,9 +2417,9 @@ def _set_keyspace_for_all_pools(self, keyspace, callback): called with a dictionary of all errors that occurred, keyed by the `Host` that they occurred against. """ - self.keyspace = keyspace - - remaining_callbacks = set(self._pools.values()) + with self._lock: + self.keyspace = keyspace + remaining_callbacks = set(self._pools.values()) errors = {} if not remaining_callbacks: diff --git a/cassandra/pool.py b/cassandra/pool.py index c47c275d5b..c6801de345 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -307,6 +307,7 @@ class HostConnection(object): _session = None _connection = None _lock = None + _keyspace = None def __init__(self, host, host_distance, session): self.host = host @@ -326,8 +327,9 @@ def __init__(self, host, host_distance, session): log.debug("Initializing connection for host %s", self.host) self._connection = session.cluster.connection_factory(host.address) - if session.keyspace: - self._connection.set_keyspace_blocking(session.keyspace) + self._keyspace = session.keyspace + if self._keyspace: + self._connection.set_keyspace_blocking(self._keyspace) log.debug("Finished initializing connection for host %s", self.host) def borrow_connection(self, timeout): @@ -381,8 +383,8 @@ def _replace(self, connection): log.debug("Replacing connection (%s) to %s", id(connection), self.host) try: conn = self._session.cluster.connection_factory(self.host.address) - if self._session.keyspace: - conn.set_keyspace_blocking(self._session.keyspace) + if self._keyspace: + conn.set_keyspace_blocking(self._keyspace) self._connection = conn except Exception: log.warning("Failed reconnecting %s. Retrying." % (self.host.address,)) @@ -412,6 +414,7 @@ def connection_finished_setting_keyspace(conn, error): errors = [] if not error else [error] callback(self, errors) + self._keyspace = keyspace self._connection.set_keyspace_async(keyspace, connection_finished_setting_keyspace) def get_connections(self): @@ -445,6 +448,7 @@ class HostConnectionPool(object): open_count = 0 _scheduled_for_creation = 0 _next_trash_allowed_at = 0 + _keyspace = None def __init__(self, host, host_distance, session): self.host = host @@ -459,9 +463,10 @@ def __init__(self, host, host_distance, session): self._connections = [session.cluster.connection_factory(host.address) for i in range(core_conns)] - if session.keyspace: + self._keyspace = session.keyspace + if self._keyspace: for conn in self._connections: - conn.set_keyspace_blocking(session.keyspace) + conn.set_keyspace_blocking(self._keyspace) self._trash = set() self._next_trash_allowed_at = time.time() @@ -560,7 +565,7 @@ def _add_conn_if_under_max(self): log.debug("Going to open new connection to host %s", self.host) try: conn = self._session.cluster.connection_factory(self.host.address) - if self._session.keyspace: + if self._keyspace: conn.set_keyspace_blocking(self._session.keyspace) self._next_trash_allowed_at = time.time() + _MIN_TRASH_INTERVAL with self._lock: @@ -761,6 +766,7 @@ def connection_finished_setting_keyspace(conn, error): if not remaining_callbacks: callback(self, errors) + self._keyspace = keyspace for conn in self._connections: conn.set_keyspace_async(keyspace, connection_finished_setting_keyspace) From eb9cb9ea7091c9465814dd1cf24a985513138bcd Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 31 Aug 2016 10:59:56 -0400 Subject: [PATCH 0877/2005] better syntax --- cassandra/cqlengine/connection.py | 5 ++--- cassandra/cqlengine/management.py | 4 ++-- cassandra/cqlengine/query.py | 4 ++-- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 9b11dc3abd..cd015abebb 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -43,9 +43,8 @@ def format_log_context(msg, connection=None, keyspace=None): """Format log message to add keyspace and connection context""" - connection_info = connection - if not connection_info: - connection_info = 'DEFAULT_CONNECTION' + connection_info = connection or 'DEFAULT_CONNECTION' + if keyspace: msg = '[Connection: {0}, Keyspace: {1}] {2}'.format(connection_info, keyspace, msg) else: diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index b5ca80d3a7..fe313214bb 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -204,7 +204,7 @@ def _sync_table(model, connection=None): raw_cf_name = model._raw_column_family_name() ks_name = model._get_keyspace() - connection = connection if connection else model._get_connection() + connection = connection or model._get_connection() cluster = get_cluster(connection) @@ -524,7 +524,7 @@ def _drop_table(model, connection=None): if not _allow_schema_modification(): return - connection = connection if connection else model._get_connection() + connection = connection or model._get_connection() # don't try to delete non existant tables meta = get_cluster(connection).metadata diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index f47b025f08..cf682c7a48 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -392,7 +392,7 @@ def _execute(self, statement): if self._batch: return self._batch.add_query(statement) else: - connection = self._connection if self._connection else self.model._get_connection() + connection = self._connection or self.model._get_connection() result = _execute_statement(self.model, statement, self._consistency, self._timeout, connection=connection) if self._if_not_exists or self._if_exists or self._conditional: check_applied(result) @@ -1482,5 +1482,5 @@ def _execute_statement(model, statement, consistency_level, timeout, connection= parts = model._routing_key_from_values(key_values, conn.get_cluster(connection).protocol_version) s.routing_key = parts s.keyspace = model._get_keyspace() - connection = connection if connection else model._get_connection() + connection = connection or model._get_connection() return conn.execute(s, params, timeout=timeout, connection=connection) From 793755941fc5791cc299f1d9b2ed179c9b79274e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 31 Aug 2016 15:03:07 -0500 Subject: [PATCH 0878/2005] refer to NoSpecExecPlan in the right place after refactor PYTHON-218 --- cassandra/policies.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/policies.py b/cassandra/policies.py index 3736afcb68..27b66def3b 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -935,7 +935,7 @@ def next_execution(self, host): class NoSpeculativeExecutionPolicy(SpeculativeExecutionPolicy): def new_plan(self, keyspace, statement): - return self.NoSpeculativeExecutionPlan() + return NoSpeculativeExecutionPlan() class ConstantSpeculativeExecutionPolicy(SpeculativeExecutionPolicy): From 98b9d1ad0af0815477014a81eed232995008add3 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 31 Aug 2016 15:13:45 -0500 Subject: [PATCH 0879/2005] PYTHON-218 added test for speculative execution policies --- tests/integration/__init__.py | 1 + tests/integration/standard/test_policies.py | 99 +++++++++++++++++++++ 2 files changed, 100 insertions(+) create mode 100644 tests/integration/standard/test_policies.py diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 9c6b975137..319277b698 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -31,6 +31,7 @@ from cassandra import OperationTimedOut, ReadTimeout, ReadFailure, WriteTimeout, WriteFailure, AlreadyExists from cassandra.cluster import Cluster from cassandra.protocol import ConfigurationException +from cassandra.policies import RoundRobinPolicy try: from ccmlib.cluster import Cluster as CCMCluster diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py new file mode 100644 index 0000000000..21c711f3c1 --- /dev/null +++ b/tests/integration/standard/test_policies.py @@ -0,0 +1,99 @@ +# Copyright 2013-2016 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from tests.integration import use_singledc + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa +from cassandra import OperationTimedOut +from cassandra.cluster import ExecutionProfile +from cassandra.query import SimpleStatement +from cassandra.policies import ConstantSpeculativeExecutionPolicy, RoundRobinPolicy +from tests.integration import BasicSharedKeyspaceUnitTestCase + + +def setup_module(): + use_singledc() + + +class BadRoundRobinPolicy(RoundRobinPolicy): + + def make_query_plan(self, working_keyspace=None, query=None): + pos = self._position + self._position += 1 + + hosts = [] + for _ in range(10): + hosts.extend(self._live_hosts) + + return hosts + + +class SpecExecTest(BasicSharedKeyspaceUnitTestCase): + + def setUp(self): + spec_ep_brr = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(), speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 20)) + spec_ep_rr = ExecutionProfile(speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 20)) + spec_ep_rr_lim = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(), speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 1)) + self.cluster.add_execution_profile("spec_ep_brr", spec_ep_brr) + self.cluster.add_execution_profile("spec_ep_rr", spec_ep_rr) + self.cluster.add_execution_profile("spec_ep_rr_lim", spec_ep_rr_lim) + + def test_speculative_execution(self): + """ + Test to ensure that speculative execution honors LBP, and that they retry appropriately. + + This test will use various LBP, and ConstantSpeculativeExecutionPolicy settings and ensure the proper number of hosts are queried + @since 3.7.0 + @jira_ticket PYTHON-218 + @expected_result speculative retries should honor max retries, idempotent state of queries, and underlying lbp. + + @test_category metadata + """ + self.session.execute("""USE {0}""".format(self.keyspace_name)) + self.session.execute("""create or replace function timeout (arg int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS $$ long start = System.currentTimeMillis(); while(System.currentTimeMillis() - start < arg){} return arg; $$;""") + self.session.execute("""CREATE TABLE d (k int PRIMARY KEY , i int);""") + self.session.execute("""INSERT INTO d (k,i) VALUES (0, 1000);""") + statement = SimpleStatement("""SELECT timeout(i) FROM d WHERE k =0""", is_idempotent=True) + statement_non_idem = SimpleStatement("""SELECT timeout(i) FROM d WHERE k =0""", is_idempotent=False) + + # This LBP should repeat hosts up to around 30 + result = self.session.execute(statement, execution_profile='spec_ep_brr') + self.assertEqual(21, len(result.response_future.attempted_hosts)) + + # This LBP should keep host list to 3 + result = self.session.execute(statement, execution_profile='spec_ep_rr') + self.assertEqual(3, len(result.response_future.attempted_hosts)) + # Spec_execution policy should limit retries to 1 + result = self.session.execute(statement, execution_profile='spec_ep_rr_lim') + + self.assertEqual(2, len(result.response_future.attempted_hosts)) + + # Spec_execution policy should not be used if the query is not idempotent + result = self.session.execute(statement_non_idem, execution_profile='spec_ep_brr') + self.assertEqual(1, len(result.response_future.attempted_hosts)) + + # Default policy with non_idem query + result = self.session.execute(statement_non_idem) + self.assertEqual(1, len(result.response_future.attempted_hosts)) + + # Should be able to run an idempotent query against default execution policy with no speculative_execution_policy + result = self.session.execute(statement) + self.assertEqual(1, len(result.response_future.attempted_hosts)) + + # Test timeout with spec_ex + with self.assertRaises(OperationTimedOut): + result = self.session.execute(statement, execution_profile='spec_ep_rr', timeout=.5) From 3998a9a2bf074560ece49667d33087b175ca206f Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 1 Sep 2016 17:15:58 -0500 Subject: [PATCH 0880/2005] PYTHON-493 adding test for non frozen UDTs --- tests/integration/__init__.py | 1 + tests/integration/standard/test_udts.py | 25 ++++++++++++++++++++++++- 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 319277b698..1336002ef6 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -214,6 +214,7 @@ def get_unsupported_upper_protocol(): greaterthancass20 = unittest.skipUnless(CASSANDRA_VERSION >= '2.1', 'Cassandra version 2.1 or greater required') greaterthancass21 = unittest.skipUnless(CASSANDRA_VERSION >= '2.2', 'Cassandra version 2.2 or greater required') greaterthanorequalcass30 = unittest.skipUnless(CASSANDRA_VERSION >= '3.0', 'Cassandra version 3.0 or greater required') +greaterthanorequalcass36 = unittest.skipUnless(CASSANDRA_VERSION >= '3.6', 'Cassandra version 3.6 or greater required') lessthancass30 = unittest.skipUnless(CASSANDRA_VERSION < '3.0', 'Cassandra version less then 3.0 required') dseonly = unittest.skipUnless(DSE_VERSION, "Test is only applicalbe to DSE clusters") pypy = unittest.skipUnless(platform.python_implementation() == "PyPy", "Test is skipped unless it's on PyPy") diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index 81c86069fd..0e2f46ee37 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -26,7 +26,7 @@ from cassandra.query import dict_factory from cassandra.util import OrderedMap -from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION, execute_until_pass, BasicSegregatedKeyspaceUnitTestCase, greaterthancass20 +from tests.integration import use_singledc, PROTOCOL_VERSION, execute_until_pass, BasicSegregatedKeyspaceUnitTestCase, greaterthancass20, greaterthanorequalcass36 from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, COLLECTION_TYPES, \ get_sample, get_collection_sample @@ -50,6 +50,29 @@ def setUp(self): super(UDTTests, self).setUp() self.session.set_keyspace(self.keyspace_name) + @greaterthanorequalcass36 + def test_non_frozen_udts(self): + """ + Test to ensure that non frozen udt's work with C* >3.6. + + @since 3.7.0 + @jira_ticket PYTHON-498 + @expected_result Non frozen UDT's are supported + + @test_category data_types, udt + """ + self.session.execute("USE {0}".format(self.keyspace_name)) + self.session.execute("CREATE TYPE user (state text, has_corn boolean)") + self.session.execute("CREATE TABLE {0} (a int PRIMARY KEY, b user)".format(self.function_table_name)) + User = namedtuple('user', ('state', 'has_corn')) + self.cluster.register_user_type(self.keyspace_name, "user", User) + self.session.execute("INSERT INTO {0} (a, b) VALUES (%s, %s)".format(self.function_table_name), (0, User("Nebraska", True))) + self.session.execute("UPDATE {0} SET b.has_corn = False where a = 0".format(self.function_table_name)) + result = self.session.execute("SELECT * FROM {0}".format(self.function_table_name)) + self.assertFalse(result[0].b.has_corn) + table_sql = self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].as_cql_query() + self.assertNotIn("", table_sql) + def test_can_insert_unprepared_registered_udts(self): """ Test the insertion of unprepared, registered UDTs From dfb5b7c3e236e6b995f061b4e443781d1d7b7b7a Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 2 Sep 2016 13:30:36 -0400 Subject: [PATCH 0881/2005] some docs update for documentor --- README-dev.rst | 3 ++- cassandra/cqlengine/columns.py | 2 +- cassandra/cqlengine/functions.py | 6 +++--- cassandra/cqlengine/query.py | 2 +- docs/cqlengine/queryset.rst | 2 +- docs/upgrading.rst | 2 +- 6 files changed, 9 insertions(+), 8 deletions(-) diff --git a/README-dev.rst b/README-dev.rst index deb2666fb7..e790e15bdd 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -7,6 +7,7 @@ Releasing * For beta releases, use a version like ``(2, 1, '0b1')`` * For release candidates, use a version like ``(2, 1, '0rc1')`` * When in doubt, follow PEP 440 versioning +* Update the version in ``docs.yaml`` * Commit the changelog and version changes * Tag the release. For example: ``git tag -a 1.0.0 -m 'version 1.0.0'`` @@ -25,7 +26,7 @@ Releasing * Commit and push * Update 'cassandra-test' branch to reflect new release - + * this is typically a matter of merging or rebasing onto master * test and push updated branch to origin diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 3b5cbe2497..064e4a9105 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -699,7 +699,7 @@ class BaseCollectionColumn(Column): """ Base Container type for collection-like columns. - https://cassandra.apache.org/doc/cql3/CQL.html#collections + http://cassandra.apache.org/doc/cql3/CQL-3.0.html#collections """ def __init__(self, types, **kwargs): """ diff --git a/cassandra/cqlengine/functions.py b/cassandra/cqlengine/functions.py index ccfe9de93a..ee0fd10a4c 100644 --- a/cassandra/cqlengine/functions.py +++ b/cassandra/cqlengine/functions.py @@ -86,7 +86,7 @@ class MinTimeUUID(TimeUUIDQueryFunction): """ return a fake timeuuid corresponding to the smallest possible timeuuid for the given timestamp - http://cassandra.apache.org/doc/cql3/CQL.html#timeuuidFun + http://cassandra.apache.org/doc/cql3/CQL-3.0.html#timeuuidFun """ format_string = 'MinTimeUUID(%({0})s)' @@ -95,7 +95,7 @@ class MaxTimeUUID(TimeUUIDQueryFunction): """ return a fake timeuuid corresponding to the largest possible timeuuid for the given timestamp - http://cassandra.apache.org/doc/cql3/CQL.html#timeuuidFun + http://cassandra.apache.org/doc/cql3/CQL-3.0.html#timeuuidFun """ format_string = 'MaxTimeUUID(%({0})s)' @@ -104,7 +104,7 @@ class Token(BaseQueryFunction): """ compute the token for a given partition key - http://cassandra.apache.org/doc/cql3/CQL.html#tokenFun + http://cassandra.apache.org/doc/cql3/CQL-3.0.html#tokenFun """ def __init__(self, *values): if len(values) == 1 and isinstance(values[0], (list, tuple)): diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index cf682c7a48..76c4fe75ef 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -135,7 +135,7 @@ class BatchQuery(object): """ Handles the batching of queries - http://www.datastax.com/docs/1.2/cql_cli/cql/BATCH + http://docs.datastax.com/en/cql/3.0/cql/cql_reference/batch_r.html See :doc:`/cqlengine/batches` for more details. """ diff --git a/docs/cqlengine/queryset.rst b/docs/cqlengine/queryset.rst index c9c33932f8..340bc5c58c 100644 --- a/docs/cqlengine/queryset.rst +++ b/docs/cqlengine/queryset.rst @@ -224,7 +224,7 @@ Token Function Token functon may be used only on special, virtual column pk__token, representing token of partition key (it also works for composite partition keys). Cassandra orders returned items by value of partition key token, so using cqlengine.Token we can easy paginate through all table rows. - See http://cassandra.apache.org/doc/cql3/CQL.html#tokenFun + See http://cassandra.apache.org/doc/cql3/CQL-3.0.html#tokenFun *Example* diff --git a/docs/upgrading.rst b/docs/upgrading.rst index 2fa86b8fc7..93ac213cac 100644 --- a/docs/upgrading.rst +++ b/docs/upgrading.rst @@ -250,7 +250,7 @@ See :ref:`query-paging` for full details. Protocol-Level Batch Statements ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ With version 1 of the native protocol, batching of statements required -using a `BATCH cql query `_. +using a `BATCH cql query `_. With version 2 of the native protocol, you can now batch statements at the protocol level. This allows you to use many different prepared statements within a single batch. From af0b853c9bdf98e8aab1fe2801dcf1cbf0ddf3fd Mon Sep 17 00:00:00 2001 From: Frens Jan Rumph Date: Tue, 6 Sep 2016 15:16:09 +0200 Subject: [PATCH 0882/2005] Add __len__ to BatchStatement A __len__ method on BatchStatement would allow getting the length of the batch without poking around in the 'private' _statements_and_parameters field. --- cassandra/query.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cassandra/query.py b/cassandra/query.py index 40d522d3f7..9865fb85b0 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -791,10 +791,13 @@ def _update_state(self, statement): self._maybe_set_routing_attributes(statement) self._update_custom_payload(statement) + def __len__(self): + return len(self._statements_and_parameters) + def __str__(self): consistency = ConsistencyLevel.value_to_name.get(self.consistency_level, 'Not Set') return (u'' % - (self.batch_type, len(self._statements_and_parameters), consistency)) + (self.batch_type, len(self), consistency)) __repr__ = __str__ From 039cb78c8267c0776a4a127dcf303a270073de6f Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 6 Sep 2016 15:54:34 -0400 Subject: [PATCH 0883/2005] TokenAwarePolicy can be default with PyPy implementation as well --- cassandra/cluster.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 36b526a5bc..d5fa3affbe 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -183,16 +183,10 @@ def _shutdown_clusters(): atexit.register(_shutdown_clusters) -# murmur3 implementation required for TokenAware is only available for CPython -import platform -if platform.python_implementation() == 'CPython': - def default_lbp_factory(): - if murmur3 is not None: - return TokenAwarePolicy(DCAwareRoundRobinPolicy()) - return DCAwareRoundRobinPolicy() -else: - def default_lbp_factory(): - return DCAwareRoundRobinPolicy() +def default_lbp_factory(): + if murmur3 is not None: + return TokenAwarePolicy(DCAwareRoundRobinPolicy()) + return DCAwareRoundRobinPolicy() class ExecutionProfile(object): From 9268a4bee966bef6c5f6ac8d5fbb8921a71cc894 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 7 Sep 2016 14:52:17 -0400 Subject: [PATCH 0884/2005] shutdown cluster on cqlengine connection unregistration --- cassandra/cqlengine/connection.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index cd015abebb..61baabed7c 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -127,7 +127,7 @@ def handle_lazy_connect(self): # lazy_connect might have been set to False by another thread while waiting the lock # In this case, do nothing. if self.lazy_connect: - log.debug(format_log_context("Lazy connect for connection", connection=self.name)) + log.debug(format_log_context("Lazy connect enabled", connection=self.name)) self.lazy_connect = False self.setup() @@ -162,8 +162,11 @@ def unregister_connection(name): session = None log.warning("Unregistering default connection '{0}'. Use set_default_connection to set a new one.".format(name)) - log.debug("Connection '{0}' has been removed from the registry.".format(name)) + conn = _connections[name] + if conn.cluster: + conn.cluster.shutdown() del _connections[name] + log.debug("Connection '{0}' has been removed from the registry.".format(name)) def set_default_connection(name): From 83ea44a2d4418717b46d07b0186e682c921ff06e Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 7 Sep 2016 16:33:36 -0500 Subject: [PATCH 0885/2005] PYTHON-613 extended test_queryset suite to run with non default connection set --- .../integration/cqlengine/test_connections.py | 243 +++++++++++++++++- 1 file changed, 234 insertions(+), 9 deletions(-) diff --git a/tests/integration/cqlengine/test_connections.py b/tests/integration/cqlengine/test_connections.py index f2f88b0faa..d5f069b4e5 100644 --- a/tests/integration/cqlengine/test_connections.py +++ b/tests/integration/cqlengine/test_connections.py @@ -17,10 +17,11 @@ from cassandra.cqlengine import columns, CQLEngineException from cassandra.cqlengine import connection as conn from cassandra.cqlengine.management import drop_keyspace, sync_table, drop_table, create_keyspace_simple -from cassandra.cqlengine.models import Model -from cassandra.cqlengine.query import ContextQuery, BatchQuery +from cassandra.cqlengine.models import Model, QuerySetDescriptor +from cassandra.cqlengine.query import ContextQuery, BatchQuery, ModelQuerySet from tests.integration.cqlengine import setup_connection, DEFAULT_KEYSPACE from tests.integration.cqlengine.base import BaseCassEngTestCase +from tests.integration.cqlengine.query import test_queryset class TestModel(Model): @@ -71,11 +72,24 @@ def tearDownClass(cls): conn.unregister_connection('cluster') setup_connection(DEFAULT_KEYSPACE) - def setUp(self): super(BaseCassEngTestCase, self).setUp() def test_context_connection_priority(self): + """ + Tests to ensure the proper connection priority is honored. + + Explicit connection should have higest priority, + Followed by context query connection + Default connection should be honored last. + + @since 3.7 + @jira_ticket PYTHON-613 + @expected_result priorities should be honored + + @test_category object_mapper + """ + # model keyspace write/read # Set the default connection on the Model TestModel.__connection__ = 'cluster' @@ -100,6 +114,15 @@ def test_context_connection_priority(self): tm.objects.create(partition=1, cluster=1) def test_context_connection_with_keyspace(self): + """ + Tests to ensure keyspace param is honored + + @since 3.7 + @jira_ticket PYTHON-613 + @expected_result Invalid request is thrown + + @test_category object_mapper + """ # ks2 doesn't exist with ContextQuery(TestModel, connection='cluster', keyspace='ks2') as tm: @@ -119,7 +142,6 @@ def setUpClass(cls): conn.register_connection('fake_cluster', ['127.0.0.100'], lazy_connect=True, retry_connect=True, default=True) conn.register_connection('cluster', ['127.0.0.1']) - @classmethod def tearDownClass(cls): super(ManagementConnectionTests, cls).tearDownClass() @@ -133,6 +155,15 @@ def setUp(self): super(BaseCassEngTestCase, self).setUp() def test_create_drop_keyspace(self): + """ + Tests drop and create keyspace with connections explicitly set + + @since 3.7 + @jira_ticket PYTHON-613 + @expected_result keyspaces should be created and dropped + + @test_category object_mapper + """ # No connection (default is fake) with self.assertRaises(NoHostAvailable): @@ -146,7 +177,15 @@ def test_create_drop_keyspace(self): drop_keyspace(ks, connections=self.conns) def test_create_drop_table(self): + """ + Tests drop and create Table with connections explicitly set + + @since 3.7 + @jira_ticket PYTHON-613 + @expected_result Tables should be created and dropped + @test_category object_mapper + """ for ks in self.keyspaces: create_keyspace_simple(ks, 1, connections=self.conns) @@ -195,7 +234,6 @@ def setUpClass(cls): conn.register_connection('fake_cluster', ['127.0.0.100'], lazy_connect=True, retry_connect=True, default=True) conn.register_connection('cluster', ['127.0.0.1']) - @classmethod def tearDownClass(cls): super(BatchQueryConnectionTests, cls).tearDownClass() @@ -211,7 +249,15 @@ def setUp(self): super(BaseCassEngTestCase, self).setUp() def test_basic_batch_query(self): - """Test BatchQuery requests""" + """ + Test Batch queries with connections explicitly set + + @since 3.7 + @jira_ticket PYTHON-613 + @expected_result queries should execute appropriately + + @test_category object_mapper + """ # No connection with a QuerySet (default is a fake one) with self.assertRaises(NoHostAvailable): @@ -239,7 +285,15 @@ def test_basic_batch_query(self): obj.batch(b).save() def test_batch_query_different_connection(self): - """Test BatchQuery with Models that have a different connection""" + """ + Test BatchQuery with Models that have a different connection + + @since 3.7 + @jira_ticket PYTHON-613 + @expected_result queries should execute appropriately + + @test_category object_mapper + """ # Testing on a model class TestModel.__connection__ = 'cluster' @@ -274,7 +328,15 @@ def test_batch_query_different_connection(self): obj2.batch(b).save() def test_batch_query_connection_override(self): - """Test that we cannot override a BatchQuery connection per model""" + """ + Test that we cannot override a BatchQuery connection per model + + @since 3.7 + @jira_ticket PYTHON-613 + @expected_result Proper exceptions should be raised + + @test_category object_mapper + """ with self.assertRaises(CQLEngineException): with BatchQuery(connection='cluster') as b: @@ -310,7 +372,6 @@ def setUpClass(cls): conn.register_connection('fake_cluster', ['127.0.0.100'], lazy_connect=True, retry_connect=True, default=True) conn.register_connection('cluster', ['127.0.0.1']) - @classmethod def tearDownClass(cls): super(UsingDescriptorTests, cls).tearDownClass() @@ -336,7 +397,15 @@ def _reset_data(self): sync_table(TestModel, keyspaces=self.keyspaces, connections=self.conns) def test_keyspace(self): + """ + Test keyspace segregation when same connection is used + + @since 3.7 + @jira_ticket PYTHON-613 + @expected_result Keyspace segration is honored + @test_category object_mapper + """ self._reset_data() with ContextQuery(TestModel, connection='cluster') as tm: @@ -368,7 +437,15 @@ def test_keyspace(self): TestModel.objects.using(connection='cluster', keyspace='ks2').get(partition=2, cluster=2) def test_connection(self): + """ + Test basic connection functionality + @since 3.7 + @jira_ticket PYTHON-613 + @expected_result proper connection should be used + + @test_category object_mapper + """ self._reset_data() # Model class @@ -387,3 +464,151 @@ def test_connection(self): obj1.using(connection='cluster').delete() with self.assertRaises(TestModel.DoesNotExist): TestModel.objects.using(connection='cluster').get(partition=1, cluster=1) + + +class ModelQuerySetNew(ModelQuerySet): + def __init__(self, *args, **kwargs): + super(ModelQuerySetNew, self).__init__(*args, **kwargs) + self._connection = "cluster" + + +class BaseConnectionTestNoDefault(object): + conns = ['cluster'] + + @classmethod + def setUpClass(cls): + conn.register_connection('cluster', ['127.0.0.1']) + test_queryset.TestModel.__queryset__ = ModelQuerySetNew + test_queryset.IndexedTestModel.__queryset__ = ModelQuerySetNew + test_queryset.IndexedCollectionsTestModel.__queryset__ = ModelQuerySetNew + test_queryset.TestMultiClusteringModel.__queryset__ = ModelQuerySetNew + + super(BaseConnectionTestNoDefault, cls).setUpClass() + conn.unregister_connection('default') + + @classmethod + def tearDownClass(cls): + conn.unregister_connection('cluster') + setup_connection(DEFAULT_KEYSPACE) + super(BaseConnectionTestNoDefault, cls).tearDownClass() + # reset the default connection + + def setUp(self): + super(BaseCassEngTestCase, self).setUp() + + +class TestQuerySetOperationConnection(BaseConnectionTestNoDefault, test_queryset.TestQuerySetOperation): + """ + Execute test_queryset.TestQuerySetOperation using non default connection + + @since 3.7 + @jira_ticket PYTHON-613 + @expected_result proper connection should be used + + @test_category object_mapper + """ + pass + + +class TestQuerySetDistinctNoDefault(BaseConnectionTestNoDefault, test_queryset.TestQuerySetDistinct): + """ + Execute test_queryset.TestQuerySetDistinct using non default connection + + @since 3.7 + @jira_ticket PYTHON-613 + @expected_result proper connection should be used + + @test_category object_mapper + """ + pass + + +class TestQuerySetOrderingNoDefault(BaseConnectionTestNoDefault, test_queryset.TestQuerySetOrdering): + """ + Execute test_queryset.TestQuerySetOrdering using non default connection + + @since 3.7 + @jira_ticket PYTHON-613 + @expected_result proper connection should be used + + @test_category object_mapper + """ + pass + + +class TestQuerySetCountSelectionAndIterationNoDefault(BaseConnectionTestNoDefault, test_queryset.TestQuerySetCountSelectionAndIteration): + """ + Execute test_queryset.TestQuerySetOrdering using non default connection + + @since 3.7 + @jira_ticket PYTHON-613 + @expected_result proper connection should be used + + @test_category object_mapper + """ + pass + + +class TestQuerySetSlicingNoDefault(BaseConnectionTestNoDefault, test_queryset.TestQuerySetSlicing): + """ + Execute test_queryset.TestQuerySetOrdering using non default connection + + @since 3.7 + @jira_ticket PYTHON-613 + @expected_result proper connection should be used + + @test_category object_mapper + """ + pass + + +class TestQuerySetValidationNoDefault(BaseConnectionTestNoDefault, test_queryset.TestQuerySetValidation): + """ + Execute test_queryset.TestQuerySetOrdering using non default connection + + @since 3.7 + @jira_ticket PYTHON-613 + @expected_result proper connection should be used + + @test_category object_mapper + """ + pass + + +class TestQuerySetDeleteNoDefault(BaseConnectionTestNoDefault, test_queryset.TestQuerySetDelete): + """ + Execute test_queryset.TestQuerySetDelete using non default connection + + @since 3.7 + @jira_ticket PYTHON-613 + @expected_result proper connection should be used + + @test_category object_mapper + """ + pass + + +class TestValuesListNoDefault(BaseConnectionTestNoDefault, test_queryset.TestValuesList): + """ + Execute test_queryset.TestValuesList using non default connection + + @since 3.7 + @jira_ticket PYTHON-613 + @expected_result proper connection should be used + + @test_category object_mapper + """ + pass + + +class TestObjectsPropertyNoDefault(BaseConnectionTestNoDefault, test_queryset.TestObjectsProperty): + """ + Execute test_queryset.TestObjectsProperty using non default connection + + @since 3.7 + @jira_ticket PYTHON-613 + @expected_result proper connection should be used + + @test_category object_mapper + """ + pass From baf3fd3612e0e025e3e9397898417f7b0e439074 Mon Sep 17 00:00:00 2001 From: Jim Bisso Date: Thu, 8 Sep 2016 06:41:14 -0700 Subject: [PATCH 0886/2005] Update docs.yaml We decided to limit the version labels (names) to major and minor, i.e., X.Y. --- docs.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs.yaml b/docs.yaml index efcc6d3c8e..73524a70b7 100644 --- a/docs.yaml +++ b/docs.yaml @@ -7,9 +7,9 @@ sections: type: sphinx directory: docs versions: - - name: 3.6.0 + - name: 3.6 ref: 3.6-doc - - name: 3.5.0 + - name: 3.5 ref: 3.5-doc -redirects: - - \A\/(.*)/\Z: /\1.html From 42797209d2035252953dde5dd79be266326dcf58 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 8 Sep 2016 09:52:15 -0500 Subject: [PATCH 0887/2005] Skipping invalidated metadata test, restrict policy test to C*>2.2 --- tests/integration/standard/test_policies.py | 3 ++- tests/integration/standard/test_prepared_statements.py | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index 21c711f3c1..31bcd6a968 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -22,7 +22,7 @@ from cassandra.cluster import ExecutionProfile from cassandra.query import SimpleStatement from cassandra.policies import ConstantSpeculativeExecutionPolicy, RoundRobinPolicy -from tests.integration import BasicSharedKeyspaceUnitTestCase +from tests.integration import BasicSharedKeyspaceUnitTestCase, greaterthancass21 def setup_module(): @@ -52,6 +52,7 @@ def setUp(self): self.cluster.add_execution_profile("spec_ep_rr", spec_ep_rr) self.cluster.add_execution_profile("spec_ep_rr_lim", spec_ep_rr_lim) + @greaterthancass21 def test_speculative_execution(self): """ Test to ensure that speculative execution honors LBP, and that they retry appropriately. diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index 6cb69097f0..ea7588a3b8 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -386,6 +386,8 @@ def test_raise_error_on_prepared_statement_execution_dropped_table(self): with self.assertRaises(InvalidRequest): self.session.execute(prepared, [0]) + # TODO revisit this test + @unittest.skip def test_invalidated_result_metadata(self): """ Tests to make sure cached metadata is updated when an invalidated prepared statement is reprepared. @@ -417,6 +419,7 @@ def test_invalidated_result_metadata(self): # Get a bunch of requests in the pipeline with varying states of result_meta, reprepare, resolved futures = set(s.execute_async(wildcard_prepared.bind(None)) for _ in range(200)) for f in futures: + self.assertEqual(f.result()[0], (0, 0, 0)) self.assertIsNot(wildcard_prepared.result_metadata, original_result_metadata) s.execute("DROP TABLE %s" % table) From bc534c4731e1a9b8c5e305e687ab8332ab5c6e33 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 8 Sep 2016 11:14:53 -0400 Subject: [PATCH 0888/2005] Connections docs --- cassandra/cqlengine/query.py | 9 +- docs/api/cassandra/cqlengine/connection.rst | 6 + docs/api/cassandra/cqlengine/models.rst | 6 + docs/api/cassandra/cqlengine/query.rst | 2 + docs/cqlengine/batches.rst | 4 +- docs/cqlengine/connections.rst | 126 ++++++++++++++++++++ docs/cqlengine/models.rst | 2 +- docs/cqlengine/queryset.rst | 4 +- docs/cqlengine/third_party.rst | 8 +- docs/cqlengine/upgrade_guide.rst | 2 +- docs/object_mapper.rst | 4 + 11 files changed, 158 insertions(+), 15 deletions(-) create mode 100644 docs/cqlengine/connections.rst diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index cf682c7a48..dd9bffad9f 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -274,6 +274,9 @@ class ContextQuery(object): A Context manager to allow a Model to switch context easily. Presently, the context only specifies a keyspace for model IO. + :param *args: One or more models. A model should be a class type, not an instance. + :param **kwargs: (optional) Context parameters: can be *keyspace* or *connection* + For example: .. code-block:: python @@ -293,10 +296,6 @@ class ContextQuery(object): """ def __init__(self, *args, **kwargs): - """ - :param *args: One or more models. A model should be a class type, not an instance. - :param **kwargs: (optional) Context parameters: can be keyspace or connection - """ from cassandra.cqlengine import models self.models = [] @@ -1007,7 +1006,7 @@ def timeout(self, timeout): def using(self, keyspace=None, connection=None): """ - Change the context on-the-fly of the Model class (connection, keyspace) + Change the context on-the-fly of the Model class (keyspace, connection) """ if connection and self._batch: diff --git a/docs/api/cassandra/cqlengine/connection.rst b/docs/api/cassandra/cqlengine/connection.rst index 184a6026cb..0f584fcca2 100644 --- a/docs/api/cassandra/cqlengine/connection.rst +++ b/docs/api/cassandra/cqlengine/connection.rst @@ -8,3 +8,9 @@ .. autofunction:: set_session .. autofunction:: setup + +.. autofunction:: register_connection + +.. autofunction:: unregister_connection + +.. autofunction:: set_default_connection diff --git a/docs/api/cassandra/cqlengine/models.rst b/docs/api/cassandra/cqlengine/models.rst index fd081fb190..b3695e8ac6 100644 --- a/docs/api/cassandra/cqlengine/models.rst +++ b/docs/api/cassandra/cqlengine/models.rst @@ -32,6 +32,8 @@ Model .. autoattribute:: __keyspace__ + .. autoattribute:: __connection__ + .. attribute:: __default_ttl__ :annotation: = None @@ -169,6 +171,10 @@ Model Sets the ttl values to run instance updates and inserts queries with. + .. method:: using(keyspace=None, connection=None) + + Change the context on the fly of the model instance (keyspace, connection) + .. automethod:: column_family_name Models also support dict-like access: diff --git a/docs/api/cassandra/cqlengine/query.rst b/docs/api/cassandra/cqlengine/query.rst index c0c8f285cf..ce8f764b6b 100644 --- a/docs/api/cassandra/cqlengine/query.rst +++ b/docs/api/cassandra/cqlengine/query.rst @@ -50,6 +50,8 @@ The methods here are used to filter, order, and constrain results. .. automethod:: ttl + .. automethod:: using + .. _blind_updates: .. automethod:: update diff --git a/docs/cqlengine/batches.rst b/docs/cqlengine/batches.rst index a567e31c27..29520674d7 100644 --- a/docs/cqlengine/batches.rst +++ b/docs/cqlengine/batches.rst @@ -12,7 +12,7 @@ Batch Query General Use Pattern .. code-block:: python - from cqlengine import BatchQuery + from cassandra.cqlengine import BatchQuery #using a context manager with BatchQuery() as b: @@ -102,7 +102,7 @@ Logged vs Unlogged Batches .. code-block:: python - from cqlengine.query import BatchType + from cassandra.cqlengine.query import BatchType with BatchQuery(batch_type=BatchType.Unlogged) as b: LogEntry.batch(b).create(k=1, v=1) LogEntry.batch(b).create(k=1, v=2) diff --git a/docs/cqlengine/connections.rst b/docs/cqlengine/connections.rst new file mode 100644 index 0000000000..922dbb5d29 --- /dev/null +++ b/docs/cqlengine/connections.rst @@ -0,0 +1,126 @@ +========================== +Connections (experimental) +========================== + +Connections are experimental and aimed to ease the use of multiple sessions with cqlengine. Connections can be set on a model class, per query or using a context manager. + + +Register a new connection +========================= + +To use cqlengine, you need at least a default connection. This is currently done automatically under the hood with :func:`connection.setup <.connection.setup>`. If you want to use another cluster/session, you need to register a new cqlengine connection. You register a connection with :func:`~.connection.register_connection` + + .. code-block:: python + + from cassandra.cqlengine import connection + + connection.setup(['127.0.0.1') + connection.register_connection('cluster2', ['127.0.0.2']) + +Change the default connection +============================= + +You can change the default cqlengine connection on registration: + + .. code-block:: python + + from cassandra.cqlengine import connection + + connection.register_connection('cluster2', ['127.0.0.2'] default=True) + +or on the fly using :func:`~.connection.set_default_connection` + + .. code-block:: python + + connection.set_default_connection('cluster2') + +Unregister a connection +======================= + +You can unregister a connection using :func:`~.connection.unregister_connection`: + + .. code-block:: python + + connection.unregister_connection('cluster2') + +Management +========== + +When using multiples connections, you also need to sync your models on all connections (and keyspaces) that you need operate on. Management commands have been improved to ease this part. Here is an example: + + .. code-block:: python + + from cassandra.cqlengine import management + + keyspaces = ['ks1', 'ks2'] + conns = ['cluster1', 'cluster2'] + + # registers your connections + # ... + + # create all keyspaces on all connections + for ks in keyspaces: + management.create_simple_keyspace(ks, connections=conns) + + # define your Automobile model + # ... + + # sync your models + management.sync_table(Automobile, keyspaces=keyspaces, connections=conns) + + +Connection Selection +==================== + +cqlengine will select the default connection, unless your specify a connection using one of the following methods. + +Default Model Connection +------------------------ + +You can specify a default connection per model: + + .. code-block:: python + + class Automobile(Model): + __keyspace__ = 'test' + __connection__ = 'cluster2' + manufacturer = columns.Text(primary_key=True) + year = columns.Integer(primary_key=True) + model = columns.Text(primary_key=True) + + print len(Automobile.objects.all()) # executed on the connection 'cluster2' + +QuerySet and model instance +--------------------------- + +You can use the :attr:`using() <.query.ModelQuerySet.using>` method to select a connection (or keyspace): + + .. code-block:: python + + Automobile.objects.using(connection='cluster1').create(manufacturer='honda', year=2010, model='civic') + q = Automobile.objects.filter(manufacturer='Tesla') + autos = q.using(keyspace='ks2, connection='cluster2').all() + + for auto in autos: + auto.using(connection='cluster1').save() + +Context Manager +--------------- + +You can use the ContextQuery as well to select a connection: + + .. code-block:: python + + with ContextQuery(Automobile, connection='cluster1') as A: + A.objects.filter(manufacturer='honda').all() # executed on 'cluster1' + + +BatchQuery +---------- + +With a BatchQuery, you can select the connection with the context manager. Note that all operations in the batch need to use the same connection. + + .. code-block:: python + + with BatchQuery(connection='cluster1') as b: + Automobile.objects.batch(b).create(manufacturer='honda', year=2010, model='civic') diff --git a/docs/cqlengine/models.rst b/docs/cqlengine/models.rst index dffd06fb3f..c0ba390119 100644 --- a/docs/cqlengine/models.rst +++ b/docs/cqlengine/models.rst @@ -119,7 +119,7 @@ extend the model's validation method: if self.name == 'jon': raise ValidationError('no jon\'s allowed') -*Note*: while not required, the convention is to raise a ``ValidationError`` (``from cqlengine import ValidationError``) +*Note*: while not required, the convention is to raise a ``ValidationError`` (``from cassandra.cqlengine import ValidationError``) if validation fails. .. _model_inheritance: diff --git a/docs/cqlengine/queryset.rst b/docs/cqlengine/queryset.rst index c9c33932f8..6af2e05f41 100644 --- a/docs/cqlengine/queryset.rst +++ b/docs/cqlengine/queryset.rst @@ -387,10 +387,10 @@ Named tables are a way of querying a table without creating an class. They're u .. code-block:: python - from cqlengine.connection import setup + from cassandra.cqlengine.connection import setup setup("127.0.0.1", "cqlengine_test") - from cqlengine.named import NamedTable + from cassandra.cqlengine.named import NamedTable user = NamedTable("cqlengine_test", "user") user.objects() user.objects()[0] diff --git a/docs/cqlengine/third_party.rst b/docs/cqlengine/third_party.rst index 997a8a2559..20c26df304 100644 --- a/docs/cqlengine/third_party.rst +++ b/docs/cqlengine/third_party.rst @@ -13,8 +13,8 @@ Here's how, in substance, CQLengine can be plugged to `Celery from celery import Celery from celery.signals import worker_process_init, beat_init - from cqlengine import connection - from cqlengine.connection import ( + from cassandra.cqlengine import connection + from cassandra.cqlengine.connection import ( cluster as cql_cluster, session as cql_session) def cassandra_init(**kwargs): @@ -40,8 +40,8 @@ This is the code required for proper connection handling of CQLengine for a .. code-block:: python - from cqlengine import connection - from cqlengine.connection import ( + from cassandra.cqlengine import connection + from cassandra.cqlengine.connection import ( cluster as cql_cluster, session as cql_session) try: diff --git a/docs/cqlengine/upgrade_guide.rst b/docs/cqlengine/upgrade_guide.rst index ee524cc7f8..5b0ab39360 100644 --- a/docs/cqlengine/upgrade_guide.rst +++ b/docs/cqlengine/upgrade_guide.rst @@ -40,7 +40,7 @@ Imports cqlengine is now integrated as a sub-package of the driver base package 'cassandra'. Upgrading will require adjusting imports to cqlengine. For example:: - from cqlengine import columns + from cassandra.cqlengine import columns is now:: diff --git a/docs/object_mapper.rst b/docs/object_mapper.rst index 4e38994064..4379434f31 100644 --- a/docs/object_mapper.rst +++ b/docs/object_mapper.rst @@ -19,6 +19,9 @@ Contents :doc:`cqlengine/batches` Working with batch mutations +:doc:`cqlengine/connections` + Working with multiple sessions + :ref:`API Documentation ` Index of API documentation @@ -34,6 +37,7 @@ Contents cqlengine/models cqlengine/queryset cqlengine/batches + cqlengine/connections cqlengine/third_party cqlengine/faq From 379434b099df96c4e4252fb369b8bf50cec9a4ad Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 8 Sep 2016 13:32:16 -0400 Subject: [PATCH 0889/2005] 3.7.0 changelog --- CHANGELOG.rst | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 94b8f98b9f..e6e7d9981f 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,33 @@ +3.7.0 +===== +September 13, 2016 + +Features +-------- +* Add v5 protocol failure map (PYTHON-619) +* Don't return from initial connect on first error (PYTHON-617) +* Indicate failed column when deserialization fails (PYTHON-361) +* Let Cluster.refresh_nodes force a token map rebuild (PYTHON-349) +* Refresh UDTs after "keyspace updated" event with v1/v2 protocol (PYTHON-106) +* EC2 Address Resolver (PYTHON-198) +* Speculative query retries (PYTHON-218) +* Expose paging state in API (PYTHON-200) +* Don't mark host down while one connection is active (PYTHON-498) +* Query request size information (PYTHON-284) +* Avoid quadratic ring processing with invalid replication factors (PYTHON-379) +* Improve Connection/Pool creation concurrency on startup (PYTHON-82) +* Add beta version native protocol flag (PYTHON-614) +* cqlengine: Connections: support of multiple keyspaces and sessions (PYTHON-613) + +Bug Fixes +--------- +* Race when adding a pool while setting keyspace (PYTHON-628) +* Update results_metadata when prepared statement is reprepared (PYTHON-621) +* CQL Export for Thrift Tables (PYTHON-213) +* cqlengine: default value not applied to UserDefinedType (PYTHON-606) +* cqlengine: columns are no longer hashable (PYTHON-618) +* cqlengine: remove clustering keys from where clause when deleting only static columns (PYTHON-608) + 3.6.0 ===== August 1, 2016 From a011eea8970ec3983392841b2f0d27e3a9603971 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 9 Sep 2016 16:25:28 -0500 Subject: [PATCH 0890/2005] 3.7.0 version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 2fc22a044c..8b348df891 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 6, 0, 'post0') +__version_info__ = (3, 7, 0) __version__ = '.'.join(map(str, __version_info__)) From 39f8202990cb16b11028c6fec42b53775cba3301 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 13 Sep 2016 09:50:27 -0400 Subject: [PATCH 0891/2005] some docs improvement for 3.7.0 --- cassandra/policies.py | 3 +++ docs/api/cassandra/policies.rst | 9 +++++++++ docs/query_paging.rst | 21 ++++++++++++++++++++- 3 files changed, 32 insertions(+), 1 deletion(-) diff --git a/cassandra/policies.py b/cassandra/policies.py index 27b66def3b..347907eca3 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -939,6 +939,9 @@ def new_plan(self, keyspace, statement): class ConstantSpeculativeExecutionPolicy(SpeculativeExecutionPolicy): + """ + A speculative execution policy that sends a new query every X seconds (**delay**) for a maximum of Y attempts (**max_attempts**). + """ def __init__(self, delay, max_attempts): self.delay = delay diff --git a/docs/api/cassandra/policies.rst b/docs/api/cassandra/policies.rst index c96e491a03..83a193ba19 100644 --- a/docs/api/cassandra/policies.rst +++ b/docs/api/cassandra/policies.rst @@ -68,3 +68,12 @@ Retrying Failed Operations .. autoclass:: DowngradingConsistencyRetryPolicy :members: + +Retrying Idempotent Operations +------------------------------ + +.. autoclass:: SpeculativeExecutionPolicy + :members: + +.. autoclass:: ConstantSpeculativeExecutionPolicy + :members: diff --git a/docs/query_paging.rst b/docs/query_paging.rst index 52366116e8..0b97de4839 100644 --- a/docs/query_paging.rst +++ b/docs/query_paging.rst @@ -3,7 +3,7 @@ Paging Large Queries ==================== Cassandra 2.0+ offers support for automatic query paging. Starting with -version 2.0 of the driver, if :attr:`~.Cluster.protocol_version` is greater than +version 2.0 of the driver, if :attr:`~.Cluster.protocol_version` is greater than :const:`2` (it is by default), queries returning large result sets will be automatically paged. @@ -74,3 +74,22 @@ pages. For example:: handler.finished_event.wait() if handler.error: raise handler.error + +Resume Paged Results +-------------------- + +You can resume the pagination when executing a new query by using the :attr:`.ResultSet.paging_state`. This can be useful if you want to provide some stateless pagination capabilities to your application (ie. via http). For example:: + + from cassandra.query import SimpleStatement + query = "SELECT * FROM users" + statement = SimpleStatement(query, fetch_size=10) + results = session.execute(statement) + + # save the paging_state somewhere and return current results + session['paging_stage'] = results.paging_state + + + # resume the pagination sometime later... + statement = SimpleStatement(query, fetch_size=10) + ps = session['paging_state'] + results = session.execute(statement, paging_state=ps) From 9007c5608fe6a2e69deb5fa612b90433df24de7f Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 13 Sep 2016 13:21:52 -0400 Subject: [PATCH 0892/2005] minor version updates for 3.7.0 --- README-dev.rst | 2 +- docs.yaml | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/README-dev.rst b/README-dev.rst index e790e15bdd..e7c056c19c 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -7,7 +7,7 @@ Releasing * For beta releases, use a version like ``(2, 1, '0b1')`` * For release candidates, use a version like ``(2, 1, '0rc1')`` * When in doubt, follow PEP 440 versioning -* Update the version in ``docs.yaml`` +* Add the new version in ``docs.yaml`` * Commit the changelog and version changes * Tag the release. For example: ``git tag -a 1.0.0 -m 'version 1.0.0'`` diff --git a/docs.yaml b/docs.yaml index 73524a70b7..5cc5a81f28 100644 --- a/docs.yaml +++ b/docs.yaml @@ -7,6 +7,8 @@ sections: type: sphinx directory: docs versions: + - name: 3.7 + ref: 3.7-doc - name: 3.6 ref: 3.6-doc - name: 3.5 From 1fd961a55a06a3ab739a3995d09c53a1b0e35fb5 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 13 Sep 2016 14:38:16 -0400 Subject: [PATCH 0893/2005] post-release version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 8b348df891..9727e389e7 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 7, 0) +__version_info__ = (3, 7, 0, 'post0') __version__ = '.'.join(map(str, __version_info__)) From f89df3f15f21ff2c11b698f91d541cf55961ed09 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 13 Sep 2016 17:39:38 -0400 Subject: [PATCH 0894/2005] add EC2MultiRegionTranslator in docs --- docs/api/cassandra/policies.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/api/cassandra/policies.rst b/docs/api/cassandra/policies.rst index 83a193ba19..294fb5cfa9 100644 --- a/docs/api/cassandra/policies.rst +++ b/docs/api/cassandra/policies.rst @@ -33,6 +33,9 @@ Translating Server Node Addresses .. autoclass:: IdentityTranslator :members: +.. autoclass:: EC2MultiRegionTranslator + :members: + Marking Hosts Up or Down ------------------------ From 4fba5514aba1e9d29e539f205b835775ba253f27 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 14 Sep 2016 14:15:06 -0500 Subject: [PATCH 0895/2005] Fixing various unstable tests --- tests/integration/__init__.py | 3 ++- tests/integration/standard/test_connection.py | 2 +- tests/integration/standard/test_query.py | 5 +++-- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 1336002ef6..9dbfca3e17 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -218,6 +218,7 @@ def get_unsupported_upper_protocol(): lessthancass30 = unittest.skipUnless(CASSANDRA_VERSION < '3.0', 'Cassandra version less then 3.0 required') dseonly = unittest.skipUnless(DSE_VERSION, "Test is only applicalbe to DSE clusters") pypy = unittest.skipUnless(platform.python_implementation() == "PyPy", "Test is skipped unless it's on PyPy") +notpy3 = unittest.skipIf(sys.version_info >= (3, 0), "Test not applicable for Python 3.x runtime") def wait_for_node_socket(node, timeout): @@ -546,7 +547,7 @@ def create_keyspace(cls, rf): @classmethod def common_setup(cls, rf, keyspace_creation=True, create_class_table=False, metrics=False): cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION, metrics_enabled=metrics) - cls.session = cls.cluster.connect() + cls.session = cls.cluster.connect(wait_for_all_pools=True) cls.ks_name = cls.__name__.lower() if keyspace_creation: cls.create_keyspace(rf) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 5a4a4637ac..8b4b0430a6 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -162,7 +162,7 @@ def wait_for_connections(self, host, cluster): def wait_for_no_connections(self, host, cluster): retry = 0 - while(retry < 100): + while(retry < 200): retry += 1 connections = self.fetch_connections(host, cluster) if len(connections) is 0: diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 94b69d22c1..e75506e3ee 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -26,7 +26,7 @@ from cassandra.cluster import Cluster, NoHostAvailable from cassandra.policies import HostDistance, RoundRobinPolicy from tests.unit.cython.utils import notcython -from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, get_server_versions, greaterthanprotocolv3, MockLoggingHandler, get_supported_protocol_versions +from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, get_server_versions, greaterthanprotocolv3, MockLoggingHandler, get_supported_protocol_versions, notpy3 import time import re @@ -71,6 +71,7 @@ def test_trace_prints_okay(self): str(event) @notcython + @notpy3 def test_row_error_message(self): """ Test to validate, new column deserialization message @@ -483,7 +484,7 @@ def setUp(self): self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) if PROTOCOL_VERSION < 3: self.cluster.set_core_connections_per_host(HostDistance.LOCAL, 1) - self.session = self.cluster.connect() + self.session = self.cluster.connect(wait_for_all_pools=True) def tearDown(self): self.cluster.shutdown() From ba6f0e8ef423ac9affe75d54ec5d41dee519aa21 Mon Sep 17 00:00:00 2001 From: Stefania Alborghetti Date: Mon, 12 Sep 2016 14:48:39 +0800 Subject: [PATCH 0896/2005] added cql types to result set for CASSANDRA-11534 --- cassandra/cluster.py | 3 +++ cassandra/protocol.py | 10 ++++++---- cassandra/row_parser.pyx | 2 +- tests/unit/test_concurrent.py | 1 + tests/unit/test_response_future.py | 2 +- 5 files changed, 12 insertions(+), 6 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index d5fa3affbe..7dc05de630 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3264,6 +3264,7 @@ class ResponseFuture(object): _req_id = None _final_result = _NOT_SET _col_names = None + _col_types = None _final_exception = None _query_traces = None _callbacks = None @@ -3531,6 +3532,7 @@ def _set_result(self, host, connection, pool, response): results = getattr(response, 'results', None) if results is not None and response.kind == RESULT_KIND_ROWS: self._paging_state = response.paging_state + self._col_types = response.col_types self._col_names = results[0] results = self.row_factory(*results) self._set_final_result(results) @@ -3954,6 +3956,7 @@ class ResultSet(object): def __init__(self, response_future, initial_response): self.response_future = response_future self.column_names = response_future._col_names + self.column_types = response_future._col_types self._set_current_rows(initial_response) self._page_iter = None self._list_mode = False diff --git a/cassandra/protocol.py b/cassandra/protocol.py index e859421dc4..9f4613d046 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -601,19 +601,21 @@ class ResultMessage(_MessageType): _HAS_MORE_PAGES_FLAG = 0x0002 _NO_METADATA_FLAG = 0x0004 - def __init__(self, kind, results, paging_state=None): + def __init__(self, kind, results, paging_state=None, col_types=None): self.kind = kind self.results = results self.paging_state = paging_state + self.col_types = col_types @classmethod def recv_body(cls, f, protocol_version, user_type_map, result_metadata): kind = read_int(f) paging_state = None + col_types = None if kind == RESULT_KIND_VOID: results = None elif kind == RESULT_KIND_ROWS: - paging_state, results = cls.recv_results_rows( + paging_state, col_types, results = cls.recv_results_rows( f, protocol_version, user_type_map, result_metadata) elif kind == RESULT_KIND_SET_KEYSPACE: ksname = read_string(f) @@ -624,7 +626,7 @@ def recv_body(cls, f, protocol_version, user_type_map, result_metadata): results = cls.recv_results_schema_change(f, protocol_version) else: raise DriverException("Unknown RESULT kind: %d" % kind) - return cls(kind, results, paging_state) + return cls(kind, results, paging_state, col_types) @classmethod def recv_results_rows(cls, f, protocol_version, user_type_map, result_metadata): @@ -647,7 +649,7 @@ def recv_results_rows(cls, f, protocol_version, user_type_map, result_metadata): raise DriverException('Failed decoding result column "%s" of type %s: %s' % (colnames[i], coltypes[i].cql_parameterized_type(), e.message)) - return paging_state, (colnames, parsed_rows) + return paging_state, coltypes, (colnames, parsed_rows) @classmethod def recv_results_prepared(cls, f, protocol_version, user_type_map): diff --git a/cassandra/row_parser.pyx b/cassandra/row_parser.pyx index 8422d544d3..6e537affd5 100644 --- a/cassandra/row_parser.pyx +++ b/cassandra/row_parser.pyx @@ -35,6 +35,6 @@ def make_recv_results_rows(ColumnParser colparser): reader = BytesIOReader(f.read()) parsed_rows = colparser.parse_rows(reader, desc) - return (paging_state, (colnames, parsed_rows)) + return (paging_state, coltypes, (colnames, parsed_rows)) return recv_results_rows diff --git a/tests/unit/test_concurrent.py b/tests/unit/test_concurrent.py index 948f6f2502..f46761094d 100644 --- a/tests/unit/test_concurrent.py +++ b/tests/unit/test_concurrent.py @@ -37,6 +37,7 @@ class MockResponseResponseFuture(): _query_trace = None _col_names = None + _col_types = None # a list pending callbacks, these will be prioritized in reverse or normal orderd pending_callbacks = PriorityQueue() diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index a0f38c4a52..6628686b9a 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -50,7 +50,7 @@ def make_response_future(self, session): return ResponseFuture(session, message, query, 1) def make_mock_response(self, results): - return Mock(spec=ResultMessage, kind=RESULT_KIND_ROWS, results=results, paging_state=None) + return Mock(spec=ResultMessage, kind=RESULT_KIND_ROWS, results=results, paging_state=None, col_types=None) def test_result_message(self): session = self.make_basic_session() From 70feea2c903462f68782c6da2e69bf1c64b396d2 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 15 Sep 2016 18:27:10 -0500 Subject: [PATCH 0897/2005] Tweaking heartbeat timeout test to not fail with protocol v2 --- tests/integration/standard/test_connection.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 8b4b0430a6..4beededeb7 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -143,7 +143,7 @@ def fetch_connections(self, host, cluster): for conn in holders: if host == str(getattr(conn, 'host', '')): if isinstance(conn, HostConnectionPool): - if conn._connections is not None: + if conn._connections is not None and len(conn._connections) > 0: connections.append(conn._connections) else: if conn._connection is not None: @@ -162,7 +162,7 @@ def wait_for_connections(self, host, cluster): def wait_for_no_connections(self, host, cluster): retry = 0 - while(retry < 200): + while(retry < 100): retry += 1 connections = self.fetch_connections(host, cluster) if len(connections) is 0: From 8b3cc90487749794fb3c2e96924a2697a8a2ab51 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 16 Sep 2016 13:08:02 -0500 Subject: [PATCH 0898/2005] correct from_binary for (mostly unused) cqltypes runtime error if C* ever sends back a FrozenType or ReversedType --- cassandra/cqltypes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index b6a720e6c9..ac813a1388 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -1010,7 +1010,7 @@ class ReversedType(_ParameterizedType): @classmethod def deserialize_safe(cls, byts, protocol_version): subtype, = cls.subtypes - return subtype.from_binary(byts) + return subtype.from_binary(byts, protocol_version) @classmethod def serialize_safe(cls, val, protocol_version): @@ -1025,7 +1025,7 @@ class FrozenType(_ParameterizedType): @classmethod def deserialize_safe(cls, byts, protocol_version): subtype, = cls.subtypes - return subtype.from_binary(byts) + return subtype.from_binary(byts, protocol_version) @classmethod def serialize_safe(cls, val, protocol_version): From f0110ba4aa894a535c167d1a64eeda08bfd289a9 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 16 Sep 2016 13:10:36 -0500 Subject: [PATCH 0899/2005] remove todo from exception message not linking because there's no good way to link to latest --- cassandra/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index d5fa3affbe..a4cc7c6ac4 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -985,7 +985,7 @@ def add_execution_profile(self, name, profile, pool_wait_timeout=5): if not isinstance(profile, ExecutionProfile): raise TypeError("profile must be an instance of ExecutionProfile") if self._config_mode == _ConfigMode.LEGACY: - raise ValueError("Cannot add execution profiles when legacy parameters are set explicitly. TODO: link to doc") + raise ValueError("Cannot add execution profiles when legacy parameters are set explicitly.") if name in self.profile_manager.profiles: raise ValueError("Profile %s already exists") self.profile_manager.profiles[name] = profile From 47775aba781e289e700b36e222cbc088baf0aee8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 16 Sep 2016 15:50:12 -0500 Subject: [PATCH 0900/2005] typo --- cassandra/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index a4cc7c6ac4..17ff0f32c9 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2137,7 +2137,7 @@ def execution_profile_clone_update(self, ep, **kwargs): Returns a clone of the ``ep`` profile. ``kwargs`` can be specified to update attributes of the returned profile. - This is a shollow clone, so any objects referenced by the profile are shared. This means Load Balancing Policy + This is a shallow clone, so any objects referenced by the profile are shared. This means Load Balancing Policy is maintained by inclusion in the active profiles. It also means updating any other rich objects will be seen by the active profile. In cases where this is not desirable, be sure to replace the instance instead of manipulating the shared object. From ab37ae0f15d9689b516c9cd0a5ed637362209019 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 19 Sep 2016 16:16:13 -0500 Subject: [PATCH 0901/2005] Updating build.yaml for schedules --- build.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/build.yaml b/build.yaml index 42b88e8a79..dcf8639bc1 100644 --- a/build.yaml +++ b/build.yaml @@ -1,3 +1,9 @@ +schedule: + commit: + schedule: per_commit + branches: + include: [master, /python.*/] + python: - 2.7 - 3.4 From 9e7031162474f351dcbfaa7559227a29ca8305fc Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 19 Sep 2016 16:28:24 -0500 Subject: [PATCH 0902/2005] Fixing schedules typo --- build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.yaml b/build.yaml index dcf8639bc1..80a2aec928 100644 --- a/build.yaml +++ b/build.yaml @@ -1,4 +1,4 @@ -schedule: +schedules: commit: schedule: per_commit branches: From d0e7f4bbc51e2a5a2b9975ebbd1448bb5c954d37 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 20 Sep 2016 12:25:08 -0500 Subject: [PATCH 0903/2005] Various test fixes --- tests/integration/long/utils.py | 1 + tests/integration/standard/test_metadata.py | 8 +++++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/integration/long/utils.py b/tests/integration/long/utils.py index bd48e96f4e..e5909dbfb9 100644 --- a/tests/integration/long/utils.py +++ b/tests/integration/long/utils.py @@ -146,6 +146,7 @@ def wait_for_up(cluster, node): def wait_for_down(cluster, node): log.debug("Waiting for node %s to be down", node) tries = 0 + addr = IP_FORMAT % node while tries < 100: host = cluster.metadata.get_host(IP_FORMAT % node) if not host or not host.is_up: diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 88f209d76f..a817947ae4 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1189,7 +1189,7 @@ def test_legacy_tables(self): CREATE TABLE legacy.composite_comp_with_col ( key blob, - column1 'org.apache.cassandra.db.marshal.DynamicCompositeType(b=>org.apache.cassandra.db.marshal.BytesType, s=>org.apache.cassandra.db.marshal.UTF8Type, t=>org.apache.cassandra.db.marshal.TimeUUIDType)', + column1 'org.apache.cassandra.db.marshal.DynamicCompositeType(t=>org.apache.cassandra.db.marshal.TimeUUIDType, b=>org.apache.cassandra.db.marshal.BytesType, s=>org.apache.cassandra.db.marshal.UTF8Type)', "b@6869746d65776974686d75736963" blob, "b@6d616d6d616a616d6d61" blob, PRIMARY KEY (key, column1) @@ -1309,7 +1309,7 @@ def test_legacy_tables(self): CREATE TABLE legacy.composite_comp_no_col ( key blob, - column1 'org.apache.cassandra.db.marshal.DynamicCompositeType(b=>org.apache.cassandra.db.marshal.BytesType, s=>org.apache.cassandra.db.marshal.UTF8Type, t=>org.apache.cassandra.db.marshal.TimeUUIDType)', + column1 'org.apache.cassandra.db.marshal.DynamicCompositeType(t=>org.apache.cassandra.db.marshal.TimeUUIDType, b=>org.apache.cassandra.db.marshal.BytesType, s=>org.apache.cassandra.db.marshal.UTF8Type)', value blob, PRIMARY KEY (key, column1) ) WITH COMPACT STORAGE @@ -1328,12 +1328,14 @@ def test_legacy_tables(self): AND speculative_retry = 'NONE';""" ccm = get_cluster() - ccm.run_cli(cli_script) + livenodes = [node for node in list(ccm.nodelist()) if node.is_live()] + livenodes[0].run_cli(cli_script) cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect() legacy_meta = cluster.metadata.keyspaces['legacy'] + print(legacy_meta.export_as_string()) self.assert_equal_diff(legacy_meta.export_as_string(), expected_string) session.execute('DROP KEYSPACE legacy') From f19ae5a0af7dece1f19e18669d3a198fb61dcc66 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 21 Sep 2016 13:46:21 -0500 Subject: [PATCH 0904/2005] Various test timing fixes --- tests/integration/long/test_consistency.py | 10 +++++----- tests/integration/standard/test_metadata.py | 4 ++-- tests/integration/standard/test_query.py | 2 +- tests/integration/standard/test_query_paging.py | 8 ++++---- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/integration/long/test_consistency.py b/tests/integration/long/test_consistency.py index 9232bcda24..018cc45529 100644 --- a/tests/integration/long/test_consistency.py +++ b/tests/integration/long/test_consistency.py @@ -130,7 +130,7 @@ def _test_tokenaware_one_node_down(self, keyspace, rf, accepted): cluster = Cluster( load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()), protocol_version=PROTOCOL_VERSION) - session = cluster.connect() + session = cluster.connect(wait_for_all_pools=True) wait_for_up(cluster, 1) wait_for_up(cluster, 2) @@ -182,7 +182,7 @@ def test_rfthree_tokenaware_none_down(self): cluster = Cluster( load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()), protocol_version=PROTOCOL_VERSION) - session = cluster.connect() + session = cluster.connect(wait_for_all_pools=True) wait_for_up(cluster, 1) wait_for_up(cluster, 2) @@ -207,7 +207,7 @@ def _test_downgrading_cl(self, keyspace, rf, accepted): load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()), default_retry_policy=DowngradingConsistencyRetryPolicy(), protocol_version=PROTOCOL_VERSION) - session = cluster.connect() + session = cluster.connect(wait_for_all_pools=True) create_schema(cluster, session, keyspace, replication_factor=rf) self._insert(session, keyspace, 1) @@ -262,7 +262,7 @@ def test_rfthree_tokenaware_downgradingcl(self): self.rfthree_downgradingcl(cluster, keyspace, False) def rfthree_downgradingcl(self, cluster, keyspace, roundrobin): - session = cluster.connect() + session = cluster.connect(wait_for_all_pools=True) create_schema(cluster, session, keyspace, replication_factor=2) self._insert(session, keyspace, count=12) @@ -330,7 +330,7 @@ def test_pool_with_host_down(self): # find the first node, we will try create connections to, shut it down. cluster = Cluster(protocol_version=PROTOCOL_VERSION) - cluster.connect() + cluster.connect(wait_for_all_pools=True) hosts = cluster.metadata.all_hosts() address = hosts[0].address node_to_stop = int(address.split('.')[-1:][0]) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index a817947ae4..a3b6935621 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1189,7 +1189,7 @@ def test_legacy_tables(self): CREATE TABLE legacy.composite_comp_with_col ( key blob, - column1 'org.apache.cassandra.db.marshal.DynamicCompositeType(t=>org.apache.cassandra.db.marshal.TimeUUIDType, b=>org.apache.cassandra.db.marshal.BytesType, s=>org.apache.cassandra.db.marshal.UTF8Type)', + column1 'org.apache.cassandra.db.marshal.DynamicCompositeType(b=>org.apache.cassandra.db.marshal.BytesType, s=>org.apache.cassandra.db.marshal.UTF8Type, t=>org.apache.cassandra.db.marshal.TimeUUIDType)', "b@6869746d65776974686d75736963" blob, "b@6d616d6d616a616d6d61" blob, PRIMARY KEY (key, column1) @@ -1309,7 +1309,7 @@ def test_legacy_tables(self): CREATE TABLE legacy.composite_comp_no_col ( key blob, - column1 'org.apache.cassandra.db.marshal.DynamicCompositeType(t=>org.apache.cassandra.db.marshal.TimeUUIDType, b=>org.apache.cassandra.db.marshal.BytesType, s=>org.apache.cassandra.db.marshal.UTF8Type)', + column1 'org.apache.cassandra.db.marshal.DynamicCompositeType(b=>org.apache.cassandra.db.marshal.BytesType, s=>org.apache.cassandra.db.marshal.UTF8Type, t=>org.apache.cassandra.db.marshal.TimeUUIDType)', value blob, PRIMARY KEY (key, column1) ) WITH COMPACT STORAGE diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index e75506e3ee..bb63c9dec3 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -245,7 +245,7 @@ def _wait_for_trace_to_delete(self, trace_id): def _is_trace_present(self, trace_id): select_statement = SimpleStatement("SElECT duration FROM system_traces.sessions WHERE session_id = {0}".format(trace_id), consistency_level=ConsistencyLevel.ALL) ssrs = self.session.execute(select_statement) - if(ssrs[0].duration is None): + if not len(ssrs.current_rows) or ssrs[0].duration is None: return False return True diff --git a/tests/integration/standard/test_query_paging.py b/tests/integration/standard/test_query_paging.py index 48e257a90d..30f1316adf 100644 --- a/tests/integration/standard/test_query_paging.py +++ b/tests/integration/standard/test_query_paging.py @@ -47,7 +47,7 @@ def setUp(self): self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) if PROTOCOL_VERSION < 3: self.cluster.set_core_connections_per_host(HostDistance.LOCAL, 1) - self.session = self.cluster.connect() + self.session = self.cluster.connect(wait_for_all_pools=True) self.session.execute("TRUNCATE test3rf.test") def tearDown(self): @@ -262,7 +262,7 @@ def test_paging_callbacks(self): for fetch_size in (2, 3, 7, 10, 99, 100, 101, 10000): self.session.default_fetch_size = fetch_size - future = self.session.execute_async("SELECT * FROM test3rf.test") + future = self.session.execute_async("SELECT * FROM test3rf.test", timeout=20) event = Event() counter = count() @@ -285,7 +285,7 @@ def handle_error(err): self.assertEqual(next(counter), 100) # simple statement - future = self.session.execute_async(SimpleStatement("SELECT * FROM test3rf.test")) + future = self.session.execute_async(SimpleStatement("SELECT * FROM test3rf.test"), timeout=20) event.clear() counter = count() @@ -294,7 +294,7 @@ def handle_error(err): self.assertEqual(next(counter), 100) # prepared statement - future = self.session.execute_async(prepared) + future = self.session.execute_async(prepared, timeout=20) event.clear() counter = count() From a47a06895dea0d5b18796bed0a1aafec00ba5d84 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 22 Sep 2016 15:18:04 -0500 Subject: [PATCH 0905/2005] Various test fixes --- tests/integration/long/test_consistency.py | 3 +++ tests/integration/standard/test_cluster.py | 4 ++-- tests/integration/standard/test_metrics.py | 4 ++++ tests/integration/standard/test_query.py | 2 +- 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/tests/integration/long/test_consistency.py b/tests/integration/long/test_consistency.py index 018cc45529..f983a43dff 100644 --- a/tests/integration/long/test_consistency.py +++ b/tests/integration/long/test_consistency.py @@ -334,6 +334,9 @@ def test_pool_with_host_down(self): hosts = cluster.metadata.all_hosts() address = hosts[0].address node_to_stop = int(address.split('.')[-1:][0]) + cluster.shutdown() + cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster.connect(contact_points=["127.0.0.2"], wait_for_all_pools=True) try: force_stop(node_to_stop) wait_for_down(cluster, node_to_stop) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 39ecf1e5ae..e41eec7ea3 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -1035,12 +1035,12 @@ def test_down_event_with_active_connection(self): @test_category connection """ with Cluster(protocol_version=PROTOCOL_VERSION) as cluster: - session = cluster.connect() + session = cluster.connect(wait_for_all_pools=True) random_host = cluster.metadata.all_hosts()[0] cluster.on_down(random_host, False) for _ in range(10): new_host = cluster.metadata.all_hosts()[0] - self.assertTrue(new_host.is_up) + self.assertTrue(new_host.is_up, "Host was not up on iteration {0}".format(_)) time.sleep(.01) pool = session._pools.get(random_host) diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index efacc4c071..c59b58f989 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -311,6 +311,9 @@ def on_error(self, _, response_future): if self.throw_on_fail: raise AttributeError + def remove_ra(self, session): + session.remove_request_init_listener(self.on_request) + def __str__(self): # just extracting request count from the size stats (which are recorded on all requests) request_sizes = dict(self.requests) @@ -357,6 +360,7 @@ def test_metrics_per_cluster(self): self.assertTrue(self.wait_for_count(ra, 10)) self.assertTrue(self.wait_for_count(ra, 3, error=True)) + ra.remove_ra(self.session) # Make sure a poorly coded RA doesn't cause issues RequestAnalyzer(self.session, throw_on_success=False, throw_on_fail=True) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index bb63c9dec3..e8c9ead09b 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -146,7 +146,7 @@ def test_client_ip_in_trace(self): response_future.result() # Fetch the client_ip from the trace. - trace = response_future.get_query_trace(max_wait=2.0) + trace = response_future.get_query_trace(max_wait=5.0) client_ip = trace.client # Ip address should be in the local_host range From 9fd889501e022257b67cfa82ef5a32a5d673d94c Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 22 Sep 2016 16:35:03 -0500 Subject: [PATCH 0906/2005] Moving contact_points param to constructor for ConnectivityTest --- tests/integration/long/test_consistency.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/long/test_consistency.py b/tests/integration/long/test_consistency.py index f983a43dff..eaa8cb6af3 100644 --- a/tests/integration/long/test_consistency.py +++ b/tests/integration/long/test_consistency.py @@ -335,8 +335,8 @@ def test_pool_with_host_down(self): address = hosts[0].address node_to_stop = int(address.split('.')[-1:][0]) cluster.shutdown() - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - cluster.connect(contact_points=["127.0.0.2"], wait_for_all_pools=True) + cluster = Cluster(contact_points=["127.0.0.2"],protocol_version=PROTOCOL_VERSION) + cluster.connect(wait_for_all_pools=True) try: force_stop(node_to_stop) wait_for_down(cluster, node_to_stop) From 6a7eb853edcdd23ac9751e76f6e5972443817465 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 23 Sep 2016 16:10:55 -0500 Subject: [PATCH 0907/2005] Tweaking ConnectivityTest --- tests/integration/long/test_consistency.py | 25 ++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/tests/integration/long/test_consistency.py b/tests/integration/long/test_consistency.py index eaa8cb6af3..98463b6052 100644 --- a/tests/integration/long/test_consistency.py +++ b/tests/integration/long/test_consistency.py @@ -14,6 +14,7 @@ import struct, time, traceback, sys, logging +from random import randint from cassandra import ConsistencyLevel, OperationTimedOut, ReadTimeout, WriteTimeout, Unavailable from cassandra.cluster import Cluster from cassandra.policies import TokenAwarePolicy, RoundRobinPolicy, DowngradingConsistencyRetryPolicy @@ -310,15 +311,18 @@ def rfthree_downgradingcl(self, cluster, keyspace, roundrobin): class ConnectivityTest(unittest.TestCase): - def setUp(self): - self.coordinator_stats = CoordinatorStats() + def get_node_not_x(self, node_to_stop): + nodes = [1, 2, 3] + for num in nodes: + if num is not node_to_stop: + return num def test_pool_with_host_down(self): """ Test to ensure that cluster.connect() doesn't return prior to pools being initialized. This test will figure out which host our pool logic will connect to first. It then shuts that server down. - Previouly the cluster.connect() would return prior to the pools being initialized, and the first queries would + Previously the cluster.connect() would return prior to the pools being initialized, and the first queries would return a no host exception @since 3.7.0 @@ -329,23 +333,32 @@ def test_pool_with_host_down(self): """ # find the first node, we will try create connections to, shut it down. + + # We will be shuting down a random house, so we need a complete contact list + all_contact_points = ["127.0.0.1", "127.0.0.2", "127.0.0.3"] + + # Connect up and find out which host will bet queries routed to to first cluster = Cluster(protocol_version=PROTOCOL_VERSION) cluster.connect(wait_for_all_pools=True) hosts = cluster.metadata.all_hosts() address = hosts[0].address node_to_stop = int(address.split('.')[-1:][0]) cluster.shutdown() - cluster = Cluster(contact_points=["127.0.0.2"],protocol_version=PROTOCOL_VERSION) + + # We now register a cluster that has it's Control Connection NOT on the node that we are shutting down. + # We do this so we don't miss the event + contact_point = '127.0.0.{0}'.format(self.get_node_not_x(node_to_stop)) + cluster = Cluster(contact_points=[contact_point], protocol_version=PROTOCOL_VERSION) cluster.connect(wait_for_all_pools=True) try: force_stop(node_to_stop) wait_for_down(cluster, node_to_stop) # Attempt a query against that node. It should complete - cluster2 = Cluster(protocol_version=PROTOCOL_VERSION) + cluster2 = Cluster(contact_points=all_contact_points, protocol_version=PROTOCOL_VERSION) session2 = cluster2.connect() session2.execute("SELECT * FROM system.local") - cluster2.shutdown() finally: + cluster2.shutdown() start(node_to_stop) wait_for_up(cluster, node_to_stop) cluster.shutdown() From b862c2a4fc23aa50f746643521e46752cf81632f Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 26 Sep 2016 16:54:38 -0500 Subject: [PATCH 0908/2005] Tweaking ip trace timeout, and adding wiggle room to large row test --- tests/integration/long/test_large_data.py | 2 +- tests/integration/standard/test_query.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/long/test_large_data.py b/tests/integration/long/test_large_data.py index acac09893d..13542916aa 100644 --- a/tests/integration/long/test_large_data.py +++ b/tests/integration/long/test_large_data.py @@ -119,7 +119,7 @@ def test_wide_rows(self): # Verify for i, row in enumerate(results): - self.assertEqual(row['i'], i) + self.assertAlmostEqual(row['i'], i, delta=3) session.cluster.shutdown() diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index e8c9ead09b..6753a35add 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -146,7 +146,7 @@ def test_client_ip_in_trace(self): response_future.result() # Fetch the client_ip from the trace. - trace = response_future.get_query_trace(max_wait=5.0) + trace = response_future.get_query_trace(max_wait=10.0) client_ip = trace.client # Ip address should be in the local_host range From 8289de0b3858917e47e479dbc98dd9b4073f577e Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 29 Sep 2016 15:10:04 -0400 Subject: [PATCH 0909/2005] Ensure we can connect to a Cassandra node version > 3 --- cassandra/metadata.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 67b6b85090..eb8896d44c 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2487,7 +2487,8 @@ def export_as_string(self): def get_schema_parser(connection, server_version, timeout): - if server_version.startswith('3'): + server_major_version = int(server_version.split('.')[0]) + if server_major_version >= 3: return SchemaParserV3(connection, timeout) else: # we could further specialize by version. Right now just refactoring the From cf633e8f12123395fc3e772ad08838bd114f6d7e Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 6 Oct 2016 16:28:06 -0500 Subject: [PATCH 0910/2005] Don't replace custom query handler if graph is being used --- tests/integration/__init__.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 9dbfca3e17..8202b6fae5 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -342,8 +342,10 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[]): try: jvm_args = [] # This will enable the Mirroring query handler which will echo our custom payload k,v pairs back - if PROTOCOL_VERSION >= 4: - jvm_args = [" -Dcassandra.custom_query_handler_class=org.apache.cassandra.cql3.CustomPayloadMirroringQueryHandler"] + + if 'graph' not in workloads: + if PROTOCOL_VERSION >= 4: + jvm_args = [" -Dcassandra.custom_query_handler_class=org.apache.cassandra.cql3.CustomPayloadMirroringQueryHandler"] if(len(workloads) > 0): for node in CCM_CLUSTER.nodes.values(): node.set_workloads(workloads) From a6a9f79358d65464e473c964cb0eb4c47f3dcc73 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 7 Oct 2016 11:40:29 -0500 Subject: [PATCH 0911/2005] update custom payload integration tests for PR #655 --- tests/integration/standard/test_custom_protocol_handler.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index c6818f7f4b..89b1b2fd1e 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -130,8 +130,9 @@ def recv_results_rows(cls, f, protocol_version, user_type_map, result_metadata): paging_state, column_metadata = cls.recv_results_metadata(f, user_type_map) rowcount = read_int(f) rows = [cls.recv_row(f, len(column_metadata)) for _ in range(rowcount)] + colnames = [c[2] for c in column_metadata] coltypes = [c[3] for c in column_metadata] - return (paging_state, (coltypes, rows)) + return paging_state, coltypes, (colnames, rows) class CustomTestRawRowType(ProtocolHandler): @@ -166,7 +167,7 @@ def recv_results_rows(cls, f, protocol_version, user_type_map, result_metadata): tuple(ctype.from_binary(val, protocol_version) for ctype, val in zip(coltypes, row)) for row in rows] - return (paging_state, (colnames, parsed_rows)) + return paging_state, coltypes, (colnames, parsed_rows) class CustomProtocolHandlerResultMessageTracked(ProtocolHandler): From 32b51cef1d1c7c42fd572e261124a360a970c367 Mon Sep 17 00:00:00 2001 From: "James, Laurie" Date: Thu, 20 Oct 2016 11:53:36 +0100 Subject: [PATCH 0912/2005] Fixup attribute name on cluster reconnect --- cassandra/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 3cd4a8d0e6..92cc39377e 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2693,7 +2693,7 @@ def _reconnect(self): self._set_new_connection(self._reconnect_internal()) except NoHostAvailable: # make a retry schedule (which includes backoff) - schedule = self.cluster.reconnection_policy.new_schedule() + schedule = self._cluster.reconnection_policy.new_schedule() with self._reconnection_lock: From 4f00f5fada77ec2b33bdda6af01c6d4895555046 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 26 Oct 2016 10:01:34 -0400 Subject: [PATCH 0913/2005] Freeze Cython dep --- setup.py | 4 ++-- test-requirements.txt | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/setup.py b/setup.py index a6fcc7b4b2..6dba9cc7b1 100644 --- a/setup.py +++ b/setup.py @@ -212,7 +212,7 @@ def __init__(self, *args, **kwargs): base.__init__(self, *args, **kwargs) else: Extension.__init__(self, *args, **kwargs) - + class build_extensions(build_ext): @@ -388,7 +388,7 @@ def run_setup(extensions): # 1.) build_ext eats errors at compile time, letting the install complete while producing useful feedback # 2.) there could be a case where the python environment has cython installed but the system doesn't have build tools if pre_build_check(): - kw['setup_requires'] = ['Cython>=0.20'] + kw['setup_requires'] = ['Cython>=0.20,<0.25'] else: sys.stderr.write("Bypassing Cython setup requirement\n") diff --git a/test-requirements.txt b/test-requirements.txt index 5e2552631d..c84faa8021 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -11,5 +11,5 @@ pure-sasl twisted gevent>=1.0 eventlet -cython>=0.21 +cython>=0.20,<0.25 packaging From 9b78c6d86c994341ed409b931667f7c9dd36a2b9 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 26 Oct 2016 11:32:47 -0400 Subject: [PATCH 0914/2005] changelog update --- CHANGELOG.rst | 8 ++++++++ cassandra/__init__.py | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index e6e7d9981f..eff7b04e68 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,11 @@ +3.7.1 +===== +October 26, 2016 + +Bug Fixes +--------- +* Cython upgrade has broken stable version of cassandra-driver (PYTHON-656) + 3.7.0 ===== September 13, 2016 diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 9727e389e7..02f6e57ee0 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 7, 0, 'post0') +__version_info__ = (3, 7, 1, 'post0') __version__ = '.'.join(map(str, __version_info__)) From 7165b46bb292d272c429a8e873c3ddbba55dd1da Mon Sep 17 00:00:00 2001 From: Sandeep Tamhankar Date: Wed, 26 Oct 2016 17:24:44 -0700 Subject: [PATCH 0915/2005] Disable link check for vcpython link since it always erroneously fails --- docs.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs.yaml b/docs.yaml index 5cc5a81f28..a8fa9a4338 100644 --- a/docs.yaml +++ b/docs.yaml @@ -1,6 +1,10 @@ title: DataStax Python Driver for Apache Cassandra summary: DataStax Python Driver for Apache Cassandra Documentation output: docs/_build/ +checks: + external_links: + exclude: + - 'http://aka.ms/vcpython27' sections: - title: N/A prefix: / From cb532caed813827c42cc2adce7f73f53fa781341 Mon Sep 17 00:00:00 2001 From: Sam Tunnicliffe Date: Thu, 27 Oct 2016 08:32:20 +0100 Subject: [PATCH 0916/2005] Quote index name in cql string where necessary --- cassandra/metadata.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index eb8896d44c..ba9cbf031e 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1336,14 +1336,14 @@ def as_cql_query(self): index_target = options.pop("target") if self.kind != "CUSTOM": return "CREATE INDEX %s ON %s.%s (%s)" % ( - self.name, # Cassandra doesn't like quoted index names for some reason + protect_name(self.name), protect_name(self.keyspace_name), protect_name(self.table_name), index_target) else: class_name = options.pop("class_name") ret = "CREATE CUSTOM INDEX %s ON %s.%s (%s) USING '%s'" % ( - self.name, # Cassandra doesn't like quoted index names for some reason + protect_name(self.name), protect_name(self.keyspace_name), protect_name(self.table_name), index_target, From 5827fe7ce6b136c6f9675329a3f5c68617dcef84 Mon Sep 17 00:00:00 2001 From: Stefania Alborghetti Date: Thu, 27 Oct 2016 10:11:19 +0800 Subject: [PATCH 0917/2005] 12838: expand QUERY/EXECUTE/BATCH flags from [byte] to [int] --- cassandra/protocol.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 9f4613d046..753e705027 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -560,7 +560,10 @@ def send_body(self, f, protocol_version): if self.timestamp is not None: flags |= _PROTOCOL_TIMESTAMP - write_byte(f, flags) + if protocol_version >= 5: + write_int(f, flags) + else: + write_byte(f, flags) if self._query_params is not None: write_short(f, len(self._query_params)) @@ -823,7 +826,12 @@ def send_body(self, f, protocol_version): "3 or higher. Consider setting Cluster.protocol_version to 3.") if self.skip_meta: flags |= _SKIP_METADATA_FLAG - write_byte(f, flags) + + if protocol_version >= 5: + write_int(f, flags) + else: + write_byte(f, flags) + write_short(f, len(self.query_params)) for param in self.query_params: write_value(f, param) @@ -872,7 +880,11 @@ def send_body(self, f, protocol_version): flags |= _WITH_SERIAL_CONSISTENCY_FLAG if self.timestamp is not None: flags |= _PROTOCOL_TIMESTAMP - write_byte(f, flags) + + if protocol_version >= 5: + write_int(f, flags) + else: + write_byte(f, flags) if self.serial_consistency_level: write_consistency_level(f, self.serial_consistency_level) From 527ea3ed85c0e65a33d35a075cfb3ff0392dc093 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 28 Oct 2016 09:23:40 -0500 Subject: [PATCH 0918/2005] Test quoted index PYTHON-616 --- tests/integration/standard/test_metadata.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index a3b6935621..40db8ac80e 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1007,6 +1007,9 @@ def test_case_sensitivity(self): session.execute(""" CREATE INDEX myindex ON "%s"."%s" ("MyColumn") """ % (ksname, cfname)) + session.execute(""" + CREATE INDEX "AnotherIndex" ON "%s"."%s" ("B") + """ % (ksname, cfname)) ksmeta = cluster.metadata.keyspaces[ksname] schema = ksmeta.export_as_string() @@ -1018,6 +1021,7 @@ def test_case_sensitivity(self): self.assertIn('PRIMARY KEY (k, "A")', schema) self.assertIn('WITH CLUSTERING ORDER BY ("A" DESC)', schema) self.assertIn('CREATE INDEX myindex ON "AnInterestingKeyspace"."AnInterestingTable" ("MyColumn")', schema) + self.assertIn('CREATE INDEX "AnotherIndex" ON "AnInterestingKeyspace"."AnInterestingTable" ("B")', schema) cluster.shutdown() def test_already_exists_exceptions(self): From 5d7d880f7cc3b55a54afbd3e9c8d2afcbb8ac7a6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 2 Nov 2016 13:34:33 -0500 Subject: [PATCH 0919/2005] Add interfaces for defining and registering table extensions Cassandra 3.0+ --- cassandra/metadata.py | 49 +++++++++++++- tests/integration/standard/test_metadata.py | 71 ++++++++++++++++++++- 2 files changed, 117 insertions(+), 3 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index ba9cbf031e..ab9573dd1b 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1076,6 +1076,20 @@ def is_cql_compatible(self): return not incompatible return True + extensions = None + """ + Metadata describing configuration for table extensions + """ + + _extension_registry = {} + + class _RegisteredExtensionType(type): + def __new__(mcs, name, bases, dct): + cls = super(TableMetadata._RegisteredExtensionType, mcs).__new__(mcs, name, bases, dct) + if name != 'RegisteredTableExtension': + TableMetadata._extension_registry[cls.name] = cls + return cls + def __init__(self, keyspace_name, name, partition_key=None, clustering_key=None, columns=None, triggers=None, options=None): self.keyspace_name = keyspace_name self.name = name @@ -1124,6 +1138,13 @@ def _all_as_cql(self): for view_meta in self.views.values(): ret += "\n\n%s;" % (view_meta.as_cql_query(formatted=True),) + if self.extensions: # None + for k in six.viewkeys(self._extension_registry) & self.extensions: # no viewkeys on OrderedMapSerializeKey + ext = self._extension_registry[k] + cql = ext.after_table_cql(self, k, self.extensions[k]) + if cql: + ret += "\n\n%s" % (cql,) + return ret def as_cql_query(self, formatted=False): @@ -1205,7 +1226,6 @@ def _make_option_strings(cls, options_map): actual_options.setdefault("class", value) compaction_option_strings = ["'%s': '%s'" % (k, v) for k, v in actual_options.items()] - ret.append('compaction = {%s}' % ', '.join(compaction_option_strings)) for system_table_name in cls.compaction_options.keys(): options_copy.pop(system_table_name, None) # delete if present @@ -1225,6 +1245,31 @@ def _make_option_strings(cls, options_map): return list(sorted(ret)) +class TableExtensionInterface(object): + """ + Defines CQL/DDL for Cassandra table extensions. + """ + # limited API for now. Could be expanded as new extension types materialize -- "extend_option_strings", for example + @classmethod + def after_table_cql(cls, ext_key, ext_blob): + """ + Called to produce CQL/DDL to follow the table definition. + Should contain requisite terminating semicolon(s). + """ + pass + + +@six.add_metaclass(TableMetadata._RegisteredExtensionType) +class RegisteredTableExtension(TableExtensionInterface): + """ + Extending this class registers it by name (associated by key in the `system_schema.tables.extensions` map). + """ + name = None + """ + Name of the extension (key in the map) + """ + + def protect_name(name): return maybe_escape_name(name) @@ -2222,6 +2267,8 @@ def _build_table_metadata(self, row, col_rows=None, trigger_rows=None, index_row index_meta = self._build_index_metadata(table_meta, index_row) if index_meta: table_meta.indexes[index_meta.name] = index_meta + + table_meta.extensions = row.get('extensions', {}) except Exception: table_meta._exc_info = sys.exc_info() log.exception("Error while parsing metadata for table %s.%s row(%s) columns(%s)", keyspace_name, table_name, row, col_rows) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 40db8ac80e..f579859304 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -29,13 +29,13 @@ from cassandra.encoder import Encoder from cassandra.metadata import (Metadata, KeyspaceMetadata, IndexMetadata, Token, MD5Token, TokenMap, murmur3, Function, Aggregate, protect_name, protect_names, - get_schema_parser) + get_schema_parser, RegisteredTableExtension) from cassandra.policies import SimpleConvictionPolicy from cassandra.pool import Host from tests.integration import get_cluster, use_singledc, PROTOCOL_VERSION, get_server_versions, execute_until_pass, \ BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, BasicExistingKeyspaceUnitTestCase, drop_keyspace_shutdown_cluster, CASSANDRA_VERSION, \ - BasicExistingSegregatedKeyspaceUnitTestCase, dseonly, DSE_VERSION, get_supported_protocol_versions + BasicExistingSegregatedKeyspaceUnitTestCase, dseonly, DSE_VERSION, get_supported_protocol_versions, greaterthanorequalcass30 def setup_module(): @@ -858,6 +858,73 @@ def test_multiple_indices(self): self.assertEqual(index_2.index_options["target"], "keys(b)") self.assertEqual(index_2.keyspace_name, "schemametadatatests") + @greaterthanorequalcass30 + def test_table_extensions(self): + s = self.session + ks = self.keyspace_name + ks_meta = s.cluster.metadata.keyspaces[ks] + t = self.function_table_name + + s.execute("CREATE TABLE %s.%s (k text PRIMARY KEY, v int)" % (ks, t)) + + table_meta = ks_meta.tables[t] + + self.assertFalse(table_meta.extensions) + self.assertNotIn(t, table_meta._extension_registry) + + original_cql = table_meta.export_as_string() + + # extensions registered, not present + # -------------------------------------- + class Ext0(RegisteredTableExtension): + name = t + + @classmethod + def after_table_cql(cls, table_meta, ext_key, ext_blob): + return "%s %s %s %s" % (cls.name, table_meta.name, ext_key, ext_blob) + + class Ext1(Ext0): + name = t + '##' + + self.assertFalse(table_meta.extensions) + self.assertIn(Ext0.name, table_meta._extension_registry) + self.assertIn(Ext1.name, table_meta._extension_registry) + self.assertEqual(len(table_meta._extension_registry), 2) + + self.cluster.refresh_table_metadata(ks, t) + table_meta = ks_meta.tables[t] + + self.assertEqual(table_meta.export_as_string(), original_cql) + + p = s.prepare('UPDATE system_schema.tables SET extensions=? WHERE keyspace_name=? AND table_name=?') # for blob type coercing + # extensions registered, one present + # -------------------------------------- + ext_map = {Ext0.name: six.b("THA VALUE")} + s.execute(p, (ext_map, ks, t)) + self.cluster.refresh_table_metadata(ks, t) + table_meta = ks_meta.tables[t] + + self.assertIn(Ext0.name, table_meta.extensions) + new_cql = table_meta.export_as_string() + self.assertNotEqual(new_cql, original_cql) + self.assertIn(Ext0.after_table_cql(table_meta, Ext0.name, ext_map[Ext0.name]), new_cql) + self.assertNotIn(Ext1.name, new_cql) + + # extensions registered, one present + # -------------------------------------- + ext_map = {Ext0.name: six.b("THA VALUE"), + Ext1.name: six.b("OTHA VALUE")} + s.execute(p, (ext_map, ks, t)) + self.cluster.refresh_table_metadata(ks, t) + table_meta = ks_meta.tables[t] + + self.assertIn(Ext0.name, table_meta.extensions) + self.assertIn(Ext1.name, table_meta.extensions) + new_cql = table_meta.export_as_string() + self.assertNotEqual(new_cql, original_cql) + self.assertIn(Ext0.after_table_cql(table_meta, Ext0.name, ext_map[Ext0.name]), new_cql) + self.assertIn(Ext1.after_table_cql(table_meta, Ext1.name, ext_map[Ext1.name]), new_cql) + class TestCodeCoverage(unittest.TestCase): From c47a8f4ab2e7e55615b8042a135eec3ebbc189d5 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 8 Nov 2016 12:08:09 -0500 Subject: [PATCH 0920/2005] escape literal asterisks in docstring PYTHON-645 --- cassandra/cqlengine/query.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index f6b789a87b..fd001979f8 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -274,8 +274,8 @@ class ContextQuery(object): A Context manager to allow a Model to switch context easily. Presently, the context only specifies a keyspace for model IO. - :param *args: One or more models. A model should be a class type, not an instance. - :param **kwargs: (optional) Context parameters: can be *keyspace* or *connection* + :param \*args: One or more models. A model should be a class type, not an instance. + :param \*\*kwargs: (optional) Context parameters: can be *keyspace* or *connection* For example: From 6760623c1ce3c9b4619c25d03bd5e45b872d8d6c Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 9 Nov 2016 11:19:29 -0600 Subject: [PATCH 0921/2005] Use CCM fork that is compatible with py3 --- build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.yaml b/build.yaml index 80a2aec928..16a09212bb 100644 --- a/build.yaml +++ b/build.yaml @@ -26,7 +26,7 @@ build: export JAVA_HOME=$CCM_JAVA_HOME export PATH=$JAVA_HOME/bin:$PATH - sudo python /home/jenkins/ccm/setup.py install + pip install git+https://github.com/GregBestland/ccm.git # Install dependencies if [[ $EVENT_LOOP_MANAGER == 'libev' ]]; then sudo apt-get install -y libev4 libev-dev From 806e0b0021ca283842ea7ce48f27305725658e3b Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 10 Nov 2016 15:23:34 -0600 Subject: [PATCH 0922/2005] Fixing issue where compaction options were not honored on AlterTable with cqlengine --- cassandra/metadata.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index ab9573dd1b..6e902028d1 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1226,6 +1226,7 @@ def _make_option_strings(cls, options_map): actual_options.setdefault("class", value) compaction_option_strings = ["'%s': '%s'" % (k, v) for k, v in actual_options.items()] + ret.append('compaction = {%s}' % ', '.join(compaction_option_strings)) for system_table_name in cls.compaction_options.keys(): options_copy.pop(system_table_name, None) # delete if present From 74331d860adcac23f5845b96cbeaa4504f9ad851 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 14 Nov 2016 17:23:30 -0600 Subject: [PATCH 0923/2005] PYTHON-468, adding integration test --- tests/integration/standard/test_query.py | 32 ++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 6753a35add..c8bd048e2f 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -249,6 +249,34 @@ def _is_trace_present(self, trace_id): return False return True + def test_query_by_id(self): + """ + Test to ensure column_types are set as part of the result set + + @since 3.8 + @jira_ticket PYTHON-648 + @expected_result column_names should be preset. + + @test_category queries basic + """ + create_table = "CREATE TABLE {0}.{1} (id int primary key, m map)".format(self.keyspace_name, self.function_table_name) + self.session.execute(create_table) + + self.session.execute("insert into "+self.keyspace_name+"."+self.function_table_name+" (id, m) VALUES ( 1, {1: 'one', 2: 'two', 3:'three'})") + results1 = self.session.execute("select id, m from {0}.{1}".format(self.keyspace_name, self.function_table_name)) + + self.assertIsNotNone(results1.column_types) + self.assertEqual(results1.column_types[0].typename, 'int') + self.assertEqual(results1.column_types[1].typename, 'map') + self.assertEqual(results1.column_types[0].cassname, 'Int32Type') + self.assertEqual(results1.column_types[1].cassname, 'MapType') + self.assertEqual(len(results1.column_types[0].subtypes), 0) + self.assertEqual(len(results1.column_types[1].subtypes), 2) + self.assertEqual(results1.column_types[1].subtypes[0].typename, "int") + self.assertEqual(results1.column_types[1].subtypes[1].typename, "varchar") + self.assertEqual(results1.column_types[1].subtypes[0].cassname, "Int32Type") + self.assertEqual(results1.column_types[1].subtypes[1].cassname, "VarcharType") + def test_column_names(self): """ Test to validate the columns are present on the result set. @@ -269,8 +297,12 @@ def test_column_names(self): score INT, PRIMARY KEY (user, game, year, month, day) )""".format(self.keyspace_name, self.function_table_name) + + self.session.execute(create_table) result_set = self.session.execute("SELECT * FROM {0}.{1}".format(self.keyspace_name, self.function_table_name)) + self.assertIsNotNone(result_set.column_types) + self.assertEqual(result_set.column_names, [u'user', u'game', u'year', u'month', u'day', u'score']) From be63ec7152836c8fac55f421b1b7763ddeed9bfa Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 22 Nov 2016 15:27:15 -0500 Subject: [PATCH 0924/2005] consistent error message on deserialization failure --- cassandra/obj_parser.pyx | 9 +++++++-- cassandra/protocol.py | 2 +- cassandra/row_parser.pyx | 12 +++++++++++- 3 files changed, 19 insertions(+), 4 deletions(-) diff --git a/cassandra/obj_parser.pyx b/cassandra/obj_parser.pyx index 2ec889ebc6..08dea2224e 100644 --- a/cassandra/obj_parser.pyx +++ b/cassandra/obj_parser.pyx @@ -14,6 +14,7 @@ include "ioutils.pyx" +from cassandra import DriverException from cassandra.bytesio cimport BytesIOReader from cassandra.deserializers cimport Deserializer, from_binary from cassandra.parsing cimport ParseDesc, ColumnParser, RowParser @@ -67,8 +68,12 @@ cdef class TupleRowParser(RowParser): # Deserialize bytes to python object deserializer = desc.deserializers[i] - val = from_binary(deserializer, &buf, desc.protocol_version) - + try: + val = from_binary(deserializer, &buf, desc.protocol_version) + except Exception as e: + raise DriverException('Failed decoding result column "%s" of type %s: %s' % (desc.colnames[i], + desc.coltypes[i].cql_parameterized_type(), + str(e))) # Insert new object into tuple tuple_set(res, i, val) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 753e705027..7057656376 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -651,7 +651,7 @@ def recv_results_rows(cls, f, protocol_version, user_type_map, result_metadata): except Exception as e: raise DriverException('Failed decoding result column "%s" of type %s: %s' % (colnames[i], coltypes[i].cql_parameterized_type(), - e.message)) + str(e))) return paging_state, coltypes, (colnames, parsed_rows) @classmethod diff --git a/cassandra/row_parser.pyx b/cassandra/row_parser.pyx index 6e537affd5..0377e1224a 100644 --- a/cassandra/row_parser.pyx +++ b/cassandra/row_parser.pyx @@ -13,6 +13,7 @@ # limitations under the License. from cassandra.parsing cimport ParseDesc, ColumnParser +from cassandra.obj_parser import TupleRowParser from cassandra.deserializers import make_deserializers include "ioutils.pyx" @@ -33,7 +34,16 @@ def make_recv_results_rows(ColumnParser colparser): desc = ParseDesc(colnames, coltypes, make_deserializers(coltypes), protocol_version) reader = BytesIOReader(f.read()) - parsed_rows = colparser.parse_rows(reader, desc) + try: + parsed_rows = colparser.parse_rows(reader, desc) + except Exception as e: + # Use explicitly the TupleRowParser to display better error messages for column decoding failures + rowparser = TupleRowParser() + reader.buf_ptr = reader.buf + reader.pos = 0 + rowcount = read_int(reader) + for i in range(rowcount): + rowparser.unpack_row(reader, desc) return (paging_state, coltypes, (colnames, parsed_rows)) From fd046f69a0f038925e724a2efe20640bbd3fcb11 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 23 Nov 2016 16:31:58 -0600 Subject: [PATCH 0925/2005] Removing pypy3 from unit tests due to pip incompatibility --- .travis.yml | 1 - tox.ini | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 8e1e587229..07d0a35c8e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,7 +13,6 @@ env: - TOX_ENV=py34 CASS_VER=21 CASS_DRIVER_NO_CYTHON=1 - TOX_ENV=py34 CASS_VER=21 - TOX_ENV=pypy CASS_VER=21 CASS_DRIVER_NO_CYTHON=1 - - TOX_ENV=pypy3 CASS_VER=21 CASS_DRIVER_NO_CYTHON=1 addons: apt: diff --git a/tox.ini b/tox.ini index 5e2a02ff9f..9d45755981 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py{26,27,33,34},pypy,pypy3 +envlist = py{26,27,33,34},pypy [base] deps = nose From 68fe1c518bfc2e5bff8af86e0590c3a8d0a3a6e5 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 28 Nov 2016 15:39:16 -0500 Subject: [PATCH 0926/2005] Add a debug message when the connection heartbeat failed due to a closed connection --- cassandra/connection.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cassandra/connection.py b/cassandra/connection.py index 19b7964b2c..432de2f973 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -984,6 +984,8 @@ def run(self): else: connection.reset_idle() else: + log.debug("Cannot send heartbeat message on connection (%s) to %s", + id(connection), connection.host) # make sure the owner sees this defunt/closed connection owner.return_connection(connection) self._raise_if_stopped() From ef23ec6ec44439dbb3a35fb2a764e1a4c3032253 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 28 Nov 2016 16:38:48 -0600 Subject: [PATCH 0927/2005] Swapping ccm version back to pcmanus --- build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.yaml b/build.yaml index 16a09212bb..2bebeb6844 100644 --- a/build.yaml +++ b/build.yaml @@ -26,7 +26,7 @@ build: export JAVA_HOME=$CCM_JAVA_HOME export PATH=$JAVA_HOME/bin:$PATH - pip install git+https://github.com/GregBestland/ccm.git + pip install ccm # Install dependencies if [[ $EVENT_LOOP_MANAGER == 'libev' ]]; then sudo apt-get install -y libev4 libev-dev From a74d273d9dc3cdb60aea88b04614a0599fa5be91 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 29 Nov 2016 10:09:59 -0600 Subject: [PATCH 0928/2005] Swapping ccm source to master branch --- build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.yaml b/build.yaml index 2bebeb6844..0b387d5808 100644 --- a/build.yaml +++ b/build.yaml @@ -26,7 +26,7 @@ build: export JAVA_HOME=$CCM_JAVA_HOME export PATH=$JAVA_HOME/bin:$PATH - pip install ccm + pip install git+https://github.com/pcmanus/ccm.git # Install dependencies if [[ $EVENT_LOOP_MANAGER == 'libev' ]]; then sudo apt-get install -y libev4 libev-dev From 1de822975aa7858341e7b93c23f3c3f80a3cc1c5 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 29 Nov 2016 15:11:37 -0600 Subject: [PATCH 0929/2005] fix table extension test manipulating schema meta --- tests/integration/standard/test_metadata.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index f579859304..66bb3b48d6 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -900,7 +900,7 @@ class Ext1(Ext0): # extensions registered, one present # -------------------------------------- ext_map = {Ext0.name: six.b("THA VALUE")} - s.execute(p, (ext_map, ks, t)) + [s.execute(p, (ext_map, ks, t)) for _ in self.cluster.metadata.all_hosts()] # we're manipulating metadata - do it on all hosts self.cluster.refresh_table_metadata(ks, t) table_meta = ks_meta.tables[t] @@ -914,7 +914,7 @@ class Ext1(Ext0): # -------------------------------------- ext_map = {Ext0.name: six.b("THA VALUE"), Ext1.name: six.b("OTHA VALUE")} - s.execute(p, (ext_map, ks, t)) + [s.execute(p, (ext_map, ks, t)) for _ in self.cluster.metadata.all_hosts()] # we're manipulating metadata - do it on all hosts self.cluster.refresh_table_metadata(ks, t) table_meta = ks_meta.tables[t] From 7f7614dbbc7799e528ac67cb30e87e069d2ad384 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 30 Nov 2016 09:29:01 -0600 Subject: [PATCH 0930/2005] docstring tweak --- cassandra/cqltypes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index ac813a1388..e333c11755 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -176,7 +176,7 @@ def lookup_casstype(casstype): Example: >>> lookup_casstype('org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.Int32Type)') - + """ if isinstance(casstype, (CassandraType, CassandraTypeType)): @@ -296,7 +296,7 @@ def apply_parameters(cls, subtypes, names=None): using them as parameters. This is how composite types are constructed. >>> MapType.apply_parameters([DateType, BooleanType]) - + `subtypes` will be a sequence of CassandraTypes. If provided, `names` will be an equally long sequence of column names or Nones. From e989d398a97f2047e236152fcb072472c0818816 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 30 Nov 2016 15:09:10 -0500 Subject: [PATCH 0931/2005] Py3 fix related to variable access in a except block --- cassandra/protocol.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 7057656376..d50e4d8868 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -645,13 +645,14 @@ def recv_results_rows(cls, f, protocol_version, user_type_map, result_metadata): for ctype, val in zip(coltypes, row)) for row in rows] except Exception: - for i in range(len(row)): - try: - coltypes[i].from_binary(row[i], protocol_version) - except Exception as e: - raise DriverException('Failed decoding result column "%s" of type %s: %s' % (colnames[i], - coltypes[i].cql_parameterized_type(), - str(e))) + for row in rows: + for i in range(len(row)): + try: + coltypes[i].from_binary(row[i], protocol_version) + except Exception as e: + raise DriverException('Failed decoding result column "%s" of type %s: %s' % (colnames[i], + coltypes[i].cql_parameterized_type(), + str(e))) return paging_state, coltypes, (colnames, parsed_rows) @classmethod From 94b7820d4133d87cffd73eafbe6fbee97b79db74 Mon Sep 17 00:00:00 2001 From: Sandeep Tamhankar Date: Thu, 8 Dec 2016 11:13:22 -0800 Subject: [PATCH 0932/2005] Send a flags field (value 0) in PREPARE messages in v5. --- cassandra/protocol.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index d50e4d8868..ac6f74a9e1 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -775,6 +775,9 @@ def __init__(self, query): def send_body(self, f, protocol_version): write_longstring(f, self.query) + if protocol_version >= 5: + # Write the flags byte; with 0 value for now, but this should change in PYTHON-678 + write_int(f, 0) class ExecuteMessage(_MessageType): From f9e4e8e189e3ea27b6829b0572c66e81af65a43f Mon Sep 17 00:00:00 2001 From: Sandeep Tamhankar Date: Fri, 9 Dec 2016 09:10:25 -0800 Subject: [PATCH 0933/2005] Send a flags field (value 0) in PREPARE messages in v5. * Write flags as a 32-bit unsigned int, not int. --- cassandra/protocol.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index ac6f74a9e1..2a7f851a3e 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -561,7 +561,7 @@ def send_body(self, f, protocol_version): flags |= _PROTOCOL_TIMESTAMP if protocol_version >= 5: - write_int(f, flags) + write_uint(f, flags) else: write_byte(f, flags) @@ -777,7 +777,7 @@ def send_body(self, f, protocol_version): write_longstring(f, self.query) if protocol_version >= 5: # Write the flags byte; with 0 value for now, but this should change in PYTHON-678 - write_int(f, 0) + write_uint(f, 0) class ExecuteMessage(_MessageType): @@ -832,7 +832,7 @@ def send_body(self, f, protocol_version): flags |= _SKIP_METADATA_FLAG if protocol_version >= 5: - write_int(f, flags) + write_uint(f, flags) else: write_byte(f, flags) @@ -1167,6 +1167,10 @@ def write_int(f, i): f.write(int32_pack(i)) +def write_uint(f, i): + f.write(uint32_pack(i)) + + def write_long(f, i): f.write(uint64_pack(i)) From ae18522ef2dfcc3688253501d131dee85ccbadb2 Mon Sep 17 00:00:00 2001 From: Sandeep Tamhankar Date: Fri, 9 Dec 2016 10:54:37 -0800 Subject: [PATCH 0934/2005] Send a flags field (value 0) in PREPARE messages in v5. * Write flags as a 32-bit unsigned int, not int. * Forgot to import the uint32_pack function from the marshal package. --- cassandra/protocol.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 2a7f851a3e..32e192ee6d 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -30,7 +30,7 @@ UserAggregateDescriptor, SchemaTargetType) from cassandra.marshal import (int32_pack, int32_unpack, uint16_pack, uint16_unpack, int8_pack, int8_unpack, uint64_pack, header_pack, - v3_header_pack) + v3_header_pack, uint32_pack) from cassandra.cqltypes import (AsciiType, BytesType, BooleanType, CounterColumnType, DateType, DecimalType, DoubleType, FloatType, Int32Type, From 5a6af1d64b3afa882b7a9abfbbf0176f80c8b660 Mon Sep 17 00:00:00 2001 From: a-lst Date: Thu, 17 Nov 2016 20:02:58 +0000 Subject: [PATCH 0935/2005] Avoid using of nonexistent prepared statement in ResponseFuture when handle succesful response to prepare a statement --- cassandra/cluster.py | 8 +++++--- tests/unit/test_response_future.py | 19 ++++++++++++++++++- 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 92cc39377e..5d95095a0c 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3667,9 +3667,11 @@ def _execute_after_prepare(self, host, connection, pool, response): if isinstance(response, ResultMessage): if response.kind == RESULT_KIND_PREPARED: - # result metadata is the only thing that could have changed from an alter - _, _, _, result_metadata = response.results - self.prepared_statement.result_metadata = result_metadata + if self.prepared_statement: + # result metadata is the only thing that could have + # changed from an alter + _, _, _, result_metadata = response.results + self.prepared_statement.result_metadata = result_metadata # use self._query to re-use the same host and # at the same time properly borrow the connection diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index 6628686b9a..2bc1ae21de 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -27,7 +27,8 @@ OverloadedErrorMessage, IsBootstrappingErrorMessage, PreparedQueryNotFound, PrepareMessage, RESULT_KIND_ROWS, RESULT_KIND_SET_KEYSPACE, - RESULT_KIND_SCHEMA_CHANGE, ProtocolHandler) + RESULT_KIND_SCHEMA_CHANGE, RESULT_KIND_PREPARED, + ProtocolHandler) from cassandra.policies import RetryPolicy from cassandra.pool import NoConnectionsAvailable from cassandra.query import SimpleStatement @@ -508,3 +509,19 @@ def test_prepared_query_not_found_bad_keyspace(self): result = Mock(spec=PreparedQueryNotFound, info='a' * 16) rf._set_result(None, None, None, result) self.assertRaises(ValueError, rf.result) + + def test_repeat_orig_query_after_succesful_reprepare(self): + session = self.make_session() + rf = self.make_response_future(session) + + response = Mock(spec=ResultMessage, kind=RESULT_KIND_PREPARED) + response.results = (None, None, None, None) + + rf._query = Mock(return_value=True) + rf._execute_after_prepare('host', None, None, response) + rf._query.assert_called_once_with('host') + + rf.prepared_statement = Mock() + rf._query = Mock(return_value=True) + rf._execute_after_prepare('host', None, None, response) + rf._query.assert_called_once_with('host') From 2e28fc27c9c218d1e580ba9277f12f011b9d8bb7 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 19 Dec 2016 10:48:24 -0500 Subject: [PATCH 0936/2005] PYTHON-649 (#677) Adds a test to reproduce PYTHON-649 and fixes it. Also adds docs and tests for some existing connection-management code. --- cassandra/cqlengine/connection.py | 76 +++++++++++++++++-- docs/cqlengine/connections.rst | 13 +++- .../integration/cqlengine/test_connections.py | 7 ++ tests/unit/cqlengine/test_connection.py | 59 ++++++++++++++ tests/unit/cqlengine/test_udt.py | 41 ++++++++++ 5 files changed, 188 insertions(+), 8 deletions(-) create mode 100644 tests/unit/cqlengine/test_connection.py create mode 100644 tests/unit/cqlengine/test_udt.py diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 61baabed7c..f9ebebedb5 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -81,6 +81,13 @@ def __init__(self, name, hosts, consistency=None, self.cluster_options = cluster_options if cluster_options else {} self.lazy_connect_lock = threading.RLock() + @classmethod + def from_session(cls, name, session): + instance = cls(name=name, hosts=session.hosts) + instance.cluster, instance.session = session.cluster, session + instance.setup_session() + return instance + def setup(self): """Setup the connection""" global cluster, session @@ -132,21 +139,67 @@ def handle_lazy_connect(self): self.setup() -def register_connection(name, hosts, consistency=None, lazy_connect=False, - retry_connect=False, cluster_options=None, default=False): +def register_connection(name, hosts=None, consistency=None, lazy_connect=False, + retry_connect=False, cluster_options=None, default=False, + session=None): + """ + Add a connection to the connection registry. ``hosts`` and ``session`` are + mutually exclusive, and ``consistency``, ``lazy_connect``, + ``retry_connect``, and ``cluster_options`` only work with ``hosts``. Using + ``hosts`` will create a new :class:`cassandra.cluster.Cluster` and + :class:`cassandra.cluster.Session`. + + :param list hosts: list of hosts, (``contact_points`` for :class:`cassandra.cluster.Cluster`). + :param int consistency: The default :class:`~.ConsistencyLevel` for the + registered connection's new session. Default is the same as + :attr:`.Session.default_consistency_level`. For use with ``hosts`` only; + will fail when used with ``session``. + :param bool lazy_connect: True if should not connect until first use. For + use with ``hosts`` only; will fail when used with ``session``. + :param bool retry_connect: True if we should retry to connect even if there + was a connection failure initially. For use with ``hosts`` only; will + fail when used with ``session``. + :param dict cluster_options: A dict of options to be used as keyword + arguments to :class:`cassandra.cluster.Cluster`. For use with ``hosts`` + only; will fail when used with ``session``. + :param bool default: If True, set the new connection as the cqlengine + default + :param Session session: A :class:`cassandra.cluster.Session` to be used in + the created connection. + """ if name in _connections: log.warning("Registering connection '{0}' when it already exists.".format(name)) - conn = Connection(name, hosts, consistency=consistency,lazy_connect=lazy_connect, - retry_connect=retry_connect, cluster_options=cluster_options) + hosts_xor_session_passed = (hosts is None) ^ (session is None) + if not hosts_xor_session_passed: + raise CQLEngineException( + "Must pass exactly one of 'hosts' or 'session' arguments" + ) + elif session is not None: + invalid_config_args = (consistency is not None or + lazy_connect is not False or + retry_connect is not False or + cluster_options is not None) + if invalid_config_args: + raise CQLEngineException( + "Session configuration arguments and 'session' argument are mutually exclusive" + ) + conn = Connection.from_session(name, session=session) + conn.setup_session() + elif hosts is not None: + conn = Connection( + name, hosts=hosts, + consistency=consistency, lazy_connect=lazy_connect, + retry_connect=retry_connect, cluster_options=cluster_options + ) + conn.setup() _connections[name] = conn if default: set_default_connection(name) - conn.setup() return conn @@ -222,7 +275,12 @@ def set_session(s): This may be relaxed in the future """ - conn = get_connection() + try: + conn = get_connection() + except CQLEngineException: + # no default connection set; initalize one + register_connection('default', session=s, default=True) + conn = get_connection() if conn.session: log.warning("configuring new default connection for cqlengine when one was already set") @@ -304,7 +362,11 @@ def get_cluster(connection=None): def register_udt(keyspace, type_name, klass, connection=None): udt_by_keyspace[keyspace][type_name] = klass - cluster = get_cluster(connection) + try: + cluster = get_cluster(connection) + except CQLEngineException: + cluster = None + if cluster: try: cluster.register_user_type(keyspace, type_name, klass) diff --git a/docs/cqlengine/connections.rst b/docs/cqlengine/connections.rst index 922dbb5d29..6d25682d37 100644 --- a/docs/cqlengine/connections.rst +++ b/docs/cqlengine/connections.rst @@ -8,7 +8,7 @@ Connections are experimental and aimed to ease the use of multiple sessions with Register a new connection ========================= -To use cqlengine, you need at least a default connection. This is currently done automatically under the hood with :func:`connection.setup <.connection.setup>`. If you want to use another cluster/session, you need to register a new cqlengine connection. You register a connection with :func:`~.connection.register_connection` +To use cqlengine, you need at least a default connection. If you initialize cqlengine's connections with with :func:`connection.setup <.connection.setup>`, a connection will be created automatically. If you want to use another cluster/session, you need to register a new cqlengine connection. You register a connection with :func:`~.connection.register_connection`: .. code-block:: python @@ -17,6 +17,17 @@ To use cqlengine, you need at least a default connection. This is currently done connection.setup(['127.0.0.1') connection.register_connection('cluster2', ['127.0.0.2']) +:func:`~.connection.register_connection` can take a list of hosts, as shown above, in which case it will create a connection with a new session. It can also take a `session` argument if you've already created a session: + + .. code-block:: python + + from cassandra.cqlengine import connection + from cassandra.cluster import Cluster + + session = Cluster(['127.0.0.1']).connect() + connection.register_connection('cluster3', session=session) + + Change the default connection ============================= diff --git a/tests/integration/cqlengine/test_connections.py b/tests/integration/cqlengine/test_connections.py index d5f069b4e5..20753705a7 100644 --- a/tests/integration/cqlengine/test_connections.py +++ b/tests/integration/cqlengine/test_connections.py @@ -13,6 +13,7 @@ # limitations under the License. from cassandra import InvalidRequest +from cassandra.cluster import Cluster from cassandra.cluster import NoHostAvailable from cassandra.cqlengine import columns, CQLEngineException from cassandra.cqlengine import connection as conn @@ -217,6 +218,12 @@ def test_create_drop_table(self): for ks in self.keyspaces: drop_keyspace(ks, connections=self.conns) + def test_connection_creation_from_session(self): + session = Cluster(['127.0.0.1']).connect() + connection_name = 'from_session' + conn.register_connection(connection_name, session=session) + self.addCleanup(conn.unregister_connection, connection_name) + class BatchQueryConnectionTests(BaseCassEngTestCase): diff --git a/tests/unit/cqlengine/test_connection.py b/tests/unit/cqlengine/test_connection.py new file mode 100644 index 0000000000..58728987f0 --- /dev/null +++ b/tests/unit/cqlengine/test_connection.py @@ -0,0 +1,59 @@ +# Copyright 2013-2016 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +from cassandra.cqlengine import connection +from cassandra.query import dict_factory + +from mock import Mock + + +class ConnectionTest(unittest.TestCase): + + no_registered_connection_msg = "doesn't exist in the registry" + + def setUp(self): + super(ConnectionTest, self).setUp() + self.assertFalse( + connection._connections, + 'Test precondition not met: connections are registered: {cs}'.format(cs=connection._connections) + ) + + def test_set_session_without_existing_connection(self): + """ + Users can set the default session without having a default connection set. + """ + mock_session = Mock( + row_factory=dict_factory, + encoder=Mock(mapping={}) + ) + connection.set_session(mock_session) + + def test_get_session_fails_without_existing_connection(self): + """ + Users can't get the default session without having a default connection set. + """ + with self.assertRaisesRegexp(connection.CQLEngineException, self.no_registered_connection_msg): + connection.get_session(connection=None) + + def test_get_cluster_fails_without_existing_connection(self): + """ + Users can't get the default cluster without having a default connection set. + """ + with self.assertRaisesRegexp(connection.CQLEngineException, self.no_registered_connection_msg): + connection.get_cluster(connection=None) diff --git a/tests/unit/cqlengine/test_udt.py b/tests/unit/cqlengine/test_udt.py new file mode 100644 index 0000000000..950429a800 --- /dev/null +++ b/tests/unit/cqlengine/test_udt.py @@ -0,0 +1,41 @@ +# Copyright 2013-2016 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +from cassandra.cqlengine import columns +from cassandra.cqlengine.models import Model +from cassandra.cqlengine.usertype import UserType + + +class UDTTest(unittest.TestCase): + + def test_initialization_without_existing_connection(self): + """ + Test that users can define models with UDTs without initializing + connections. + + Written to reproduce PYTHON-649. + """ + + class Value(UserType): + t = columns.Text() + + class DummyUDT(Model): + __keyspace__ = 'ks' + primary_key = columns.Integer(primary_key=True) + value = columns.UserDefinedType(Value) From 47863c73886ba2dce2fc4a92bdc90159ab053d11 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 19 Dec 2016 10:10:53 -0500 Subject: [PATCH 0937/2005] remove whitespace from end of test_cluster --- tests/integration/standard/test_cluster.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index e41eec7ea3..71bfe1dda7 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -1130,8 +1130,3 @@ def test_valid_protocol_version_beta_options_connect(self): session = cluster.connect() self.assertEqual(cluster.protocol_version, MAX_SUPPORTED_VERSION) self.assertTrue(session.execute("select release_version from system.local")[0]) - - - - - From a2d082986d2d14166a7cbcd7837f6678a1c561d0 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 14 Dec 2016 09:42:38 -0500 Subject: [PATCH 0938/2005] test for and fix PYTHON-669 --- cassandra/cluster.py | 5 +-- tests/integration/standard/test_cluster.py | 39 +++++++++++++++++++++- 2 files changed, 41 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 92cc39377e..88535e612e 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1330,8 +1330,9 @@ def on_up(self, host): log.debug("Now that host %s is up, cancelling the reconnection handler", host) reconnector.cancel() - self._prepare_all_queries(host) - log.debug("Done preparing all queries for host %s, ", host) + if self.profile_manager.distance(host) != HostDistance.IGNORED: + self._prepare_all_queries(host) + log.debug("Done preparing all queries for host %s, ", host) for session in self.sessions: session.remove_pool(host) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 71bfe1dda7..440408bbfa 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -19,7 +19,7 @@ from collections import deque from copy import copy -from mock import patch +from mock import Mock, call, patch import time from uuid import uuid4 import logging @@ -1056,6 +1056,43 @@ def test_down_event_with_active_connection(self): self.assertTrue(was_marked_down) +class DontPrepareOnIgnoredHostsTest(unittest.TestCase): + + ignored_addresses = ['127.0.0.3'] + ignore_node_3_policy = IgnoredHostPolicy(ignored_addresses) + + def test_prepare_on_ignored_hosts(self): + + cluster = Cluster(protocol_version=PROTOCOL_VERSION, + load_balancing_policy=self.ignore_node_3_policy) + session = cluster.connect() + cluster.reprepare_on_up, cluster.prepare_on_all_hosts = True, False + + hosts = cluster.metadata.all_hosts() + session.execute("CREATE KEYSPACE clustertests " + "WITH replication = " + "{'class': 'SimpleStrategy', 'replication_factor': '1'}") + session.execute("CREATE TABLE clustertests.tab (a text, PRIMARY KEY (a))") + # assign to an unused variable so cluster._prepared_statements retains + # reference + _ = session.prepare("INSERT INTO clustertests.tab (a) VALUES ('a')") # noqa + + cluster.connection_factory = Mock(wraps=cluster.connection_factory) + + unignored_address = '127.0.0.1' + unignored_host = next(h for h in hosts if h.address == unignored_address) + ignored_host = next(h for h in hosts if h.address in self.ignored_addresses) + unignored_host.is_up = ignored_host.is_up = False + + cluster.on_up(unignored_host) + cluster.on_up(ignored_host) + + # the length of mock_calls will vary, but all should use the unignored + # address + for c in cluster.connection_factory.mock_calls: + self.assertEqual(call(unignored_address), c) + + class DuplicateRpcTest(unittest.TestCase): load_balancing_policy = WhiteListRoundRobinPolicy(['127.0.0.1']) From 96e7767af4f5aea2aa3be47191831c0a2e542947 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 20 Dec 2016 14:00:55 -0600 Subject: [PATCH 0939/2005] Adding test for PYTHON-631 --- tests/integration/standard/test_query.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index c8bd048e2f..4d80ebae77 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -25,8 +25,7 @@ BatchStatement, BatchType, dict_factory, TraceUnavailable) from cassandra.cluster import Cluster, NoHostAvailable from cassandra.policies import HostDistance, RoundRobinPolicy -from tests.unit.cython.utils import notcython -from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, get_server_versions, greaterthanprotocolv3, MockLoggingHandler, get_supported_protocol_versions, notpy3 +from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, get_server_versions, greaterthanprotocolv3, MockLoggingHandler, get_supported_protocol_versions import time import re @@ -70,8 +69,6 @@ def test_trace_prints_okay(self): for event in trace.events: str(event) - @notcython - @notpy3 def test_row_error_message(self): """ Test to validate, new column deserialization message From f06267a09a40f1cdd28ea3bd63050bac5e90d9d8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 21 Dec 2016 11:42:30 -0600 Subject: [PATCH 0940/2005] ProtocolVersion 'enum' --- cassandra/__init__.py | 50 ++++++++++++++++++ cassandra/cluster.py | 55 +++++--------------- cassandra/connection.py | 10 ++-- cassandra/protocol.py | 3 -- docs/api/cassandra.rst | 3 ++ tests/integration/long/test_failure_types.py | 7 ++- tests/integration/standard/test_cluster.py | 9 ++-- tests/unit/test_marshalling.py | 6 +-- 8 files changed, 81 insertions(+), 62 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 02f6e57ee0..95e70b4577 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -125,6 +125,56 @@ def consistency_value_to_name(value): return ConsistencyLevel.value_to_name[value] if value is not None else "Not Set" +class ProtocolVersion(object): + """ + Defines native protocol versions supported by this driver. + """ + V1 = 1 + """ + v1, supported in Cassandra 1.2-->2.2 + """ + + V2 = 2 + """ + v2, supported in Cassandra 2.0-->2.2; + added support for lightweight transactions, batch operations, and automatic query paging. + """ + + V3 = 3 + """ + v3, supported in Cassandra 2.1-->3.x+; + added support for protocol-level client-side timestamps (see :attr:`.Session.use_client_timestamp`), + serial consistency levels for :class:`~.BatchStatement`, and an improved connection pool. + """ + + V4 = 4 + """ + v4, supported in Cassandra 2.2-->3.x+; + added a number of new types, server warnings, new failure messages, and custom payloads. Details in the + `project docs `_ + """ + + V5 = 5 + """ + v5, in beta from 3.x+ + """ + + SUPPORTED_VERSIONS = (V5, V4, V3, V2, V1) + """ + A tuple of all supported protocol versions + """ + + MIN_SUPPORTED = min(SUPPORTED_VERSIONS) + """ + Minimum protocol version supported by this driver. + """ + + MAX_SUPPORTED = max(SUPPORTED_VERSIONS) + """ + Maximum protocol versioni supported by this driver. + """ + + class SchemaChangeType(object): DROPPED = 'DROPPED' CREATED = 'CREATED' diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 92cc39377e..ed38d7db20 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -42,7 +42,7 @@ from cassandra import (ConsistencyLevel, AuthenticationFailed, OperationTimedOut, UnsupportedOperation, - SchemaTargetType, DriverException) + SchemaTargetType, DriverException, ProtocolVersion) from cassandra.connection import (ConnectionException, ConnectionShutdown, ConnectionHeartbeat, ProtocolVersionUnsupported) from cassandra.cqltypes import UserType @@ -57,8 +57,7 @@ IsBootstrappingErrorMessage, BatchMessage, RESULT_KIND_PREPARED, RESULT_KIND_SET_KEYSPACE, RESULT_KIND_ROWS, - RESULT_KIND_SCHEMA_CHANGE, MIN_SUPPORTED_VERSION, - ProtocolHandler) + RESULT_KIND_SCHEMA_CHANGE, ProtocolHandler) from cassandra.metadata import Metadata, protect_name, murmur3 from cassandra.policies import (TokenAwarePolicy, DCAwareRoundRobinPolicy, SimpleConvictionPolicy, ExponentialReconnectionPolicy, HostDistance, @@ -355,45 +354,18 @@ class Cluster(object): server will be automatically used. """ - protocol_version = 4 + protocol_version = ProtocolVersion.V4 """ The maximum version of the native protocol to use. + See :class:`.ProtocolVersion` for more information about versions. + If not set in the constructor, the driver will automatically downgrade version based on a negotiation with the server, but it is most efficient to set this to the maximum supported by your version of Cassandra. Setting this will also prevent conflicting versions negotiated if your cluster is upgraded. - Version 2 of the native protocol adds support for lightweight transactions, - batch operations, and automatic query paging. The v2 protocol is - supported by Cassandra 2.0+. - - Version 3 of the native protocol adds support for protocol-level - client-side timestamps (see :attr:`.Session.use_client_timestamp`), - serial consistency levels for :class:`~.BatchStatement`, and an - improved connection pool. - - Version 4 of the native protocol adds a number of new types, server warnings, - new failure messages, and custom payloads. Details in the - `project docs `_ - - The following table describes the native protocol versions that - are supported by each version of Cassandra: - - +-------------------+-------------------+ - | Cassandra Version | Protocol Versions | - +===================+===================+ - | 1.2 | 1 | - +-------------------+-------------------+ - | 2.0 | 1, 2 | - +-------------------+-------------------+ - | 2.1 | 1, 2, 3 | - +-------------------+-------------------+ - | 2.2 | 1, 2, 3, 4 | - +-------------------+-------------------+ - | 3.x | 3, 4 | - +-------------------+-------------------+ """ allow_beta_protocol_version = False @@ -1141,15 +1113,14 @@ def protocol_downgrade(self, host_addr, previous_version): if self._protocol_version_explicit: raise DriverException("ProtocolError returned from server while using explicitly set client protocol_version %d" % (previous_version,)) - new_version = previous_version - 1 - if new_version < self.protocol_version: - if new_version >= MIN_SUPPORTED_VERSION: - log.warning("Downgrading core protocol version from %d to %d for %s. " - "To avoid this, it is best practice to explicitly set Cluster(protocol_version) to the version supported by your cluster. " - "http://datastax.github.io/python-driver/api/cassandra/cluster.html#cassandra.cluster.Cluster.protocol_version", self.protocol_version, new_version, host_addr) - self.protocol_version = new_version - else: - raise DriverException("Cannot downgrade protocol version (%d) below minimum supported version: %d" % (new_version, MIN_SUPPORTED_VERSION)) + try: + new_version = next(v for v in sorted(ProtocolVersion.SUPPORTED_VERSIONS, reversed=True) if v < previous_version) + log.warning("Downgrading core protocol version from %d to %d for %s. " + "To avoid this, it is best practice to explicitly set Cluster(protocol_version) to the version supported by your cluster. " + "http://datastax.github.io/python-driver/api/cassandra/cluster.html#cassandra.cluster.Cluster.protocol_version", self.protocol_version, new_version, host_addr) + self.protocol_version = new_version + except StopIteration: + raise DriverException("Cannot downgrade protocol version below minimum supported version: %d" % (ProtocolVersion.MIN_SUPPORTED,)) def connect(self, keyspace=None, wait_for_all_pools=False): """ diff --git a/cassandra/connection.py b/cassandra/connection.py index 432de2f973..399321fab1 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -37,7 +37,7 @@ else: from six.moves.queue import Queue, Empty # noqa -from cassandra import ConsistencyLevel, AuthenticationFailed, OperationTimedOut +from cassandra import ConsistencyLevel, AuthenticationFailed, OperationTimedOut, ProtocolVersion from cassandra.marshal import int32_pack from cassandra.protocol import (ReadyMessage, AuthenticateMessage, OptionsMessage, StartupMessage, ErrorMessage, CredentialsMessage, @@ -45,7 +45,7 @@ InvalidRequestException, SupportedMessage, AuthResponseMessage, AuthChallengeMessage, AuthSuccessMessage, ProtocolException, - MAX_SUPPORTED_VERSION, RegisterMessage) + RegisterMessage) from cassandra.util import OrderedDict @@ -197,7 +197,7 @@ class Connection(object): out_buffer_size = 4096 cql_version = None - protocol_version = MAX_SUPPORTED_VERSION + protocol_version = ProtocolVersion.MAX_SUPPORTED keyspace = None compression = True @@ -252,7 +252,7 @@ class Connection(object): def __init__(self, host='127.0.0.1', port=9042, authenticator=None, ssl_options=None, sockopts=None, compression=True, - cql_version=None, protocol_version=MAX_SUPPORTED_VERSION, is_control_connection=False, + cql_version=None, protocol_version=ProtocolVersion.MAX_SUPPORTED, is_control_connection=False, user_type_map=None, connect_timeout=None, allow_beta_protocol_version=False): self.host = host self.port = port @@ -541,7 +541,7 @@ def _read_frame_header(self): pos = len(buf) if pos: version = int_from_buf_item(buf[0]) & PROTOCOL_VERSION_MASK - if version > MAX_SUPPORTED_VERSION: + if version > ProtocolVersion.MAX_SUPPORTED: raise ProtocolError("This version of the driver does not support protocol version %d" % version) frame_header = frame_header_v3 if version >= 3 else frame_header_v1_v2 # this frame header struct is everything after the version byte diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 32e192ee6d..833283640f 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -55,9 +55,6 @@ class InternalError(Exception): ColumnMetadata = namedtuple("ColumnMetadata", ['keyspace_name', 'table_name', 'name', 'type']) -MIN_SUPPORTED_VERSION = 1 -MAX_SUPPORTED_VERSION = 5 - HEADER_DIRECTION_TO_CLIENT = 0x80 HEADER_DIRECTION_MASK = 0x80 diff --git a/docs/api/cassandra.rst b/docs/api/cassandra.rst index fd4273f470..d46aae56cb 100644 --- a/docs/api/cassandra.rst +++ b/docs/api/cassandra.rst @@ -14,6 +14,9 @@ .. autoclass:: ConsistencyLevel :members: +.. autoclass:: ProtocolVersion + :members: + .. autoclass:: UserFunctionDescriptor :members: :inherited-members: diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py index e2891b358f..1a81aa0ce1 100644 --- a/tests/integration/long/test_failure_types.py +++ b/tests/integration/long/test_failure_types.py @@ -14,9 +14,8 @@ import sys,logging, traceback, time -from cassandra import ConsistencyLevel, OperationTimedOut, ReadTimeout, WriteTimeout, ReadFailure, WriteFailure,\ - FunctionFailure -from cassandra.protocol import MAX_SUPPORTED_VERSION +from cassandra import (ConsistencyLevel, OperationTimedOut, ReadTimeout, WriteTimeout, ReadFailure, WriteFailure, + FunctionFailure, ProtocolVersion) from cassandra.cluster import Cluster, NoHostAvailable from cassandra.concurrent import execute_concurrent_with_args from cassandra.query import SimpleStatement @@ -70,7 +69,7 @@ def setUp(self): "Native protocol 4,0+ is required for custom payloads, currently using %r" % (PROTOCOL_VERSION,)) try: - self.cluster = Cluster(protocol_version=MAX_SUPPORTED_VERSION, allow_beta_protocol_version=True) + self.cluster = Cluster(protocol_version=ProtocolVersion.MAX_SUPPORTED, allow_beta_protocol_version=True) self.session = self.cluster.connect() except NoHostAvailable: log.info("Protocol Version 5 not supported,") diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index e41eec7ea3..20fcde2154 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -30,7 +30,6 @@ from cassandra.policies import (RoundRobinPolicy, ExponentialReconnectionPolicy, RetryPolicy, SimpleConvictionPolicy, HostDistance, WhiteListRoundRobinPolicy, AddressTranslator) -from cassandra.protocol import MAX_SUPPORTED_VERSION from cassandra.query import SimpleStatement, TraceUnavailable, tuple_factory from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, CASSANDRA_VERSION, execute_until_pass, execute_with_long_wait_retry, get_node,\ @@ -188,7 +187,7 @@ def test_protocol_negotiation(self): """ cluster = Cluster() - self.assertLessEqual(cluster.protocol_version, MAX_SUPPORTED_VERSION) + self.assertLessEqual(cluster.protocol_version, cassandra.ProtocolVersion.MAX_SUPPORTED) session = cluster.connect() updated_protocol_version = session._protocol_version updated_cluster_version = cluster.protocol_version @@ -1107,7 +1106,7 @@ def test_invalid_protocol_version_beta_option(self): @test_category connection """ - cluster = Cluster(protocol_version=MAX_SUPPORTED_VERSION, allow_beta_protocol_version=False) + cluster = Cluster(protocol_version=cassandra.ProtocolVersion.MAX_SUPPORTED, allow_beta_protocol_version=False) try: with self.assertRaises(NoHostAvailable): cluster.connect() @@ -1126,9 +1125,9 @@ def test_valid_protocol_version_beta_options_connect(self): @test_category connection """ - cluster = Cluster(protocol_version=MAX_SUPPORTED_VERSION, allow_beta_protocol_version=True) + cluster = Cluster(protocol_version=cassandra.ProtocolVersion.MAX_SUPPORTED, allow_beta_protocol_version=True) session = cluster.connect() - self.assertEqual(cluster.protocol_version, MAX_SUPPORTED_VERSION) + self.assertEqual(cluster.protocol_version, cassandra.ProtocolVersion.MAX_SUPPORTED) self.assertTrue(session.execute("select release_version from system.local")[0]) diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index 626c38b22f..a2bc2d8b1c 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -13,8 +13,8 @@ # limitations under the License. import sys +from cassandra import ProtocolVersion from cassandra.marshal import bitlength -from cassandra.protocol import MAX_SUPPORTED_VERSION try: import unittest2 as unittest @@ -153,10 +153,10 @@ def test_decimal(self): # Just verifying expected exception here f = converted_types[-1] self.assertIsInstance(f, float) - self.assertRaises(TypeError, DecimalType.to_binary, f, MAX_SUPPORTED_VERSION) + self.assertRaises(TypeError, DecimalType.to_binary, f, ProtocolVersion.MAX_SUPPORTED) converted_types = converted_types[:-1] - for proto_ver in range(1, MAX_SUPPORTED_VERSION + 1): + for proto_ver in range(1, ProtocolVersion.MAX_SUPPORTED + 1): for n in converted_types: expected = Decimal(n) self.assertEqual(DecimalType.from_binary(DecimalType.to_binary(n, proto_ver), proto_ver), expected) From 7a0b846a112433259fb83c1be65e477f8cf4d074 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 21 Dec 2016 17:12:51 -0600 Subject: [PATCH 0941/2005] Updating tests for PYTHON-649 --- .../integration/cqlengine/test_connections.py | 31 ++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/tests/integration/cqlengine/test_connections.py b/tests/integration/cqlengine/test_connections.py index 20753705a7..29fec59568 100644 --- a/tests/integration/cqlengine/test_connections.py +++ b/tests/integration/cqlengine/test_connections.py @@ -219,11 +219,40 @@ def test_create_drop_table(self): drop_keyspace(ks, connections=self.conns) def test_connection_creation_from_session(self): + """ + Test to ensure that you can register a connection from a session + @since 3.8 + @jira_ticket PYTHON-649 + @expected_result queries should execute appropriately + + @test_category object_mapper + """ session = Cluster(['127.0.0.1']).connect() connection_name = 'from_session' conn.register_connection(connection_name, session=session) self.addCleanup(conn.unregister_connection, connection_name) + def test_connection_param_validation(self): + """ + Test to validate that invalid parameter combinations for registering connections via session are not tolerated + @since 3.8 + @jira_ticket PYTHON-649 + @expected_result queries should execute appropriately + + @test_category object_mapper + """ + session = Cluster(['127.0.0.1']).connect() + with self.assertRaises(CQLEngineException): + conn.register_connection("bad_coonection1", session=session, consistency="not_null") + with self.assertRaises(CQLEngineException): + conn.register_connection("bad_coonection2", session=session, lazy_connect="not_null") + with self.assertRaises(CQLEngineException): + conn.register_connection("bad_coonection3", session=session, retry_connect="not_null") + with self.assertRaises(CQLEngineException): + conn.register_connection("bad_coonection4", session=session, cluster_options="not_null") + with self.assertRaises(CQLEngineException): + conn.register_connection("bad_coonection5", hosts="not_null", session=session) + class BatchQueryConnectionTests(BaseCassEngTestCase): @@ -398,7 +427,7 @@ def _reset_data(self): for ks in self.keyspaces: drop_keyspace(ks, connections=self.conns) - + import pdb; pdb.set_trace() for ks in self.keyspaces: create_keyspace_simple(ks, 1, connections=self.conns) sync_table(TestModel, keyspaces=self.keyspaces, connections=self.conns) From 0deabed01ff8841f369947d4a3bb96bf41ef802b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 22 Dec 2016 13:27:13 -0600 Subject: [PATCH 0942/2005] fix sorted reverse keyword --- cassandra/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index ed38d7db20..5228a89230 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1114,7 +1114,7 @@ def protocol_downgrade(self, host_addr, previous_version): raise DriverException("ProtocolError returned from server while using explicitly set client protocol_version %d" % (previous_version,)) try: - new_version = next(v for v in sorted(ProtocolVersion.SUPPORTED_VERSIONS, reversed=True) if v < previous_version) + new_version = next(v for v in sorted(ProtocolVersion.SUPPORTED_VERSIONS, reverse=True) if v < previous_version) log.warning("Downgrading core protocol version from %d to %d for %s. " "To avoid this, it is best practice to explicitly set Cluster(protocol_version) to the version supported by your cluster. " "http://datastax.github.io/python-driver/api/cassandra/cluster.html#cassandra.cluster.Cluster.protocol_version", self.protocol_version, new_version, host_addr) From b30f7c95d4863f422b1d7234fc7c0671491d12df Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 22 Dec 2016 14:20:38 -0600 Subject: [PATCH 0943/2005] Removing debug statement from test. --- tests/integration/cqlengine/test_connections.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/integration/cqlengine/test_connections.py b/tests/integration/cqlengine/test_connections.py index 29fec59568..1f30b0a972 100644 --- a/tests/integration/cqlengine/test_connections.py +++ b/tests/integration/cqlengine/test_connections.py @@ -427,7 +427,6 @@ def _reset_data(self): for ks in self.keyspaces: drop_keyspace(ks, connections=self.conns) - import pdb; pdb.set_trace() for ks in self.keyspaces: create_keyspace_simple(ks, 1, connections=self.conns) sync_table(TestModel, keyspaces=self.keyspaces, connections=self.conns) From 1f1b30fbc5f72c539413cc78799ac06e1b11f2f6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 11 Jan 2017 11:15:59 -0600 Subject: [PATCH 0944/2005] fix link markup --- CONTRIBUTING.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 1c8ca7001c..a55820dcff 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -17,7 +17,7 @@ good bug reports. They will not be repeated in detail here, but in general, the Pull Requests ------------- -If you're able to fix a bug yourself, you can [fork the repository](https://help.github.com/articles/fork-a-repo/) and submit a [Pull Request](https://help.github.com/articles/using-pull-requests/) with the fix. +If you're able to fix a bug yourself, you can `fork the repository `_ and submit a `Pull Request `_ with the fix. Please include tests demonstrating the issue and fix. For examples of how to run the tests, consult the `dev README `_. Contribution License Agreement From 8288d667c6b324b4ed210df8bb463e58c89cf2e4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 11 Jan 2017 11:17:25 -0600 Subject: [PATCH 0945/2005] another link fixup --- CONTRIBUTING.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index a55820dcff..f16477c355 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -22,7 +22,7 @@ Please include tests demonstrating the issue and fix. For examples of how to run Contribution License Agreement ------------------------------ -To protect the community, all contributors are required to [sign the DataStax Contribution License Agreement](http://cla.datastax.com/). The process is completely electronic and should only take a few minutes. +To protect the community, all contributors are required to `sign the DataStax Contribution License Agreement `_. The process is completely electronic and should only take a few minutes. Design and Implementation Guidelines ------------------------------------ From 08ba4a5d542c2dbb57380790349b18407fec24cd Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 12 Jan 2017 11:10:38 -0500 Subject: [PATCH 0946/2005] Added some minor comments to README-dev.srt --- README-dev.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/README-dev.rst b/README-dev.rst index e7c056c19c..0b140456a4 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -102,6 +102,12 @@ You can also specify a cassandra directory (to test unreleased versions):: CASSANDRA_DIR=/home/thobbs/cassandra python setup.py nosetests -w tests/integration/standard +Specifying the usage of an already running Cassandra cluster +---------------------------------------------------- +The test will start the appropriate Cassandra clusters when necessary but if you don't want this to happen because a Cassandra cluster is already running the flag ``USE_CASS_EXTERNAL`` can be used, for example: + + USE_CASS_EXTERNAL=1 python setup.py nosetests -w tests/integration/standard + Specify a Protocol Version for Tests ------------------------------------ The protocol version defaults to 1 for cassandra 1.2 and 2 otherwise. You can explicitly set @@ -126,6 +132,10 @@ and change ``tests/unit/`` to ``tests/``. Running the Benchmarks ====================== +There needs to be a version of cassandra running locally so before running the benchmarks, if ccm is installed: + + ccm create benchmark_cluster -v 3.0.1 && ccm populate -n 1 && ccm start + To run the benchmarks, pick one of the files under the ``benchmarks/`` dir and run it:: python benchmarks/future_batches.py From 7aeae52783af0d41ec04681e95da2a8fcac1c750 Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 12 Jan 2017 15:02:44 -0500 Subject: [PATCH 0947/2005] Acknowledging comment on the ccm command --- README-dev.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README-dev.rst b/README-dev.rst index 0b140456a4..a549bf8a35 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -134,7 +134,7 @@ Running the Benchmarks ====================== There needs to be a version of cassandra running locally so before running the benchmarks, if ccm is installed: - ccm create benchmark_cluster -v 3.0.1 && ccm populate -n 1 && ccm start + ccm create benchmark_cluster -v 3.0.1 -n 1 -s To run the benchmarks, pick one of the files under the ``benchmarks/`` dir and run it:: From cfed17f3574b77c1e5315b8f1034e1f61ad961c6 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 13 Jan 2017 13:10:34 -0500 Subject: [PATCH 0948/2005] initial vints_unpack --- cassandra/cqltypes.py | 16 +++++++++++++++- cassandra/marshal.py | 15 +++++++++++++++ 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index ac813a1388..5e3aa706ac 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -48,7 +48,7 @@ uint16_pack, uint16_unpack, uint32_pack, uint32_unpack, int32_pack, int32_unpack, int64_pack, int64_unpack, float_pack, float_unpack, double_pack, double_unpack, - varint_pack, varint_unpack) + varint_pack, varint_unpack, vints_unpack) from cassandra import util apache_cassandra_type_prefix = 'org.apache.cassandra.db.marshal.' @@ -660,6 +660,20 @@ def serialize(val, protocol_version): return int64_pack(nano) +class DurationType(_CassandraType): + typename = 'duration' + + @staticmethod + def deserialize(byts, protocol_version): + print vints_unpack(byts) + varint_unpack(byts) + return varint_unpack(byts) + + @staticmethod + def serialize(byts, protocol_version): + return # ... + + class UTF8Type(_CassandraType): typename = 'text' empty_binary_ok = True diff --git a/cassandra/marshal.py b/cassandra/marshal.py index 5a523d6381..cd81eb3d5b 100644 --- a/cassandra/marshal.py +++ b/cassandra/marshal.py @@ -84,3 +84,18 @@ def varint_pack(big): revbytes.append(0) revbytes.reverse() return six.binary_type(revbytes) + + +def decode_zig_zag(n): + return (n >> 1) ^ -(n & 1) + + +def vints_unpack(term): # noqa + first_byte = ord(term[0]) + if (first_byte & 128) == 0: + val = first_byte + else: + extra_bytes = 8 - (~first_byte & 0xff).bit_length() + # takes (8-extra_bytes) bits + extra_bytes + + return decode_zig_zag(val) From aa11b5720b6f54fbde4a2c66ac621485f7ceb404 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 13 Jan 2017 14:23:55 -0500 Subject: [PATCH 0949/2005] fixed vints_unpack --- cassandra/marshal.py | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/cassandra/marshal.py b/cassandra/marshal.py index cd81eb3d5b..2cb34cf4f3 100644 --- a/cassandra/marshal.py +++ b/cassandra/marshal.py @@ -91,11 +91,23 @@ def decode_zig_zag(n): def vints_unpack(term): # noqa - first_byte = ord(term[0]) - if (first_byte & 128) == 0: - val = first_byte - else: - extra_bytes = 8 - (~first_byte & 0xff).bit_length() - # takes (8-extra_bytes) bits + extra_bytes - - return decode_zig_zag(val) + values = [] + n = 0 + while n < len(term): + first_byte = ord(term[n]) + + if (first_byte & 128) == 0: + val = first_byte + else: + num_extra_bytes = 8 - (~first_byte & 0xff).bit_length() + val = first_byte & (0xff >> num_extra_bytes) + end = n + num_extra_bytes + while n < end: + n += 1 + val <<= 8 + val |= ord(term[n]) & 0xff + + n += 1 + values.append(decode_zig_zag(val)) + + return tuple(values) From f3612d41aab22f3ee4580eb8eeca1940d8972709 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 13 Jan 2017 16:44:34 -0600 Subject: [PATCH 0950/2005] make table extensions for for views, too PYTHON-682 --- cassandra/metadata.py | 46 +++++++++++----- tests/integration/standard/test_metadata.py | 59 +++++++++++++++------ 2 files changed, 76 insertions(+), 29 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 6e902028d1..e410631ae7 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1081,15 +1081,6 @@ def is_cql_compatible(self): Metadata describing configuration for table extensions """ - _extension_registry = {} - - class _RegisteredExtensionType(type): - def __new__(mcs, name, bases, dct): - cls = super(TableMetadata._RegisteredExtensionType, mcs).__new__(mcs, name, bases, dct) - if name != 'RegisteredTableExtension': - TableMetadata._extension_registry[cls.name] = cls - return cls - def __init__(self, keyspace_name, name, partition_key=None, clustering_key=None, columns=None, triggers=None, options=None): self.keyspace_name = keyspace_name self.name = name @@ -1138,9 +1129,10 @@ def _all_as_cql(self): for view_meta in self.views.values(): ret += "\n\n%s;" % (view_meta.as_cql_query(formatted=True),) - if self.extensions: # None - for k in six.viewkeys(self._extension_registry) & self.extensions: # no viewkeys on OrderedMapSerializeKey - ext = self._extension_registry[k] + if self.extensions: + registry = _RegisteredExtensionType._extension_registry + for k in six.viewkeys(registry) & self.extensions: # no viewkeys on OrderedMapSerializeKey + ext = registry[k] cql = ext.after_table_cql(self, k, self.extensions[k]) if cql: ret += "\n\n%s" % (cql,) @@ -1260,7 +1252,18 @@ def after_table_cql(cls, ext_key, ext_blob): pass -@six.add_metaclass(TableMetadata._RegisteredExtensionType) +class _RegisteredExtensionType(type): + + _extension_registry = {} + + def __new__(mcs, name, bases, dct): + cls = super(_RegisteredExtensionType, mcs).__new__(mcs, name, bases, dct) + if name != 'RegisteredTableExtension': + mcs._extension_registry[cls.name] = cls + return cls + + +@six.add_metaclass(_RegisteredExtensionType) class RegisteredTableExtension(TableExtensionInterface): """ Extending this class registers it by name (associated by key in the `system_schema.tables.extensions` map). @@ -2327,6 +2330,7 @@ def _build_view_metadata(self, row, col_rows=None): view_meta = MaterializedViewMetadata(keyspace_name, view_name, base_table_name, include_all_columns, where_clause, self._build_table_options(row)) self._build_table_columns(view_meta, col_rows) + view_meta.extensions = row.get('extensions', {}) return view_meta @@ -2487,6 +2491,11 @@ class MaterializedViewMetadata(object): view. """ + extensions = None + """ + Metadata describing configuration for table extensions + """ + def __init__(self, keyspace_name, view_name, base_table_name, include_all_columns, where_clause, options): self.keyspace_name = keyspace_name self.name = view_name @@ -2523,13 +2532,22 @@ def as_cql_query(self, formatted=False): properties = TableMetadataV3._property_string(formatted, self.clustering_key, self.options) - return "CREATE MATERIALIZED VIEW %(keyspace)s.%(name)s AS%(sep)s" \ + ret = "CREATE MATERIALIZED VIEW %(keyspace)s.%(name)s AS%(sep)s" \ "SELECT %(selected_cols)s%(sep)s" \ "FROM %(keyspace)s.%(base_table)s%(sep)s" \ "WHERE %(where_clause)s%(sep)s" \ "PRIMARY KEY %(pk)s%(sep)s" \ "WITH %(properties)s" % locals() + if self.extensions: + registry = _RegisteredExtensionType._extension_registry + for k in six.viewkeys(registry) & self.extensions: # no viewkeys on OrderedMapSerializeKey + ext = registry[k] + cql = ext.after_table_cql(self, k, self.extensions[k]) + if cql: + ret += "\n\n%s" % (cql,) + return ret + def export_as_string(self): return self.as_cql_query(formatted=True) + ";" diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 66bb3b48d6..925029a8d5 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -29,13 +29,15 @@ from cassandra.encoder import Encoder from cassandra.metadata import (Metadata, KeyspaceMetadata, IndexMetadata, Token, MD5Token, TokenMap, murmur3, Function, Aggregate, protect_name, protect_names, - get_schema_parser, RegisteredTableExtension) + get_schema_parser, RegisteredTableExtension, _RegisteredExtensionType) from cassandra.policies import SimpleConvictionPolicy from cassandra.pool import Host -from tests.integration import get_cluster, use_singledc, PROTOCOL_VERSION, get_server_versions, execute_until_pass, \ - BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, BasicExistingKeyspaceUnitTestCase, drop_keyspace_shutdown_cluster, CASSANDRA_VERSION, \ - BasicExistingSegregatedKeyspaceUnitTestCase, dseonly, DSE_VERSION, get_supported_protocol_versions, greaterthanorequalcass30 +from tests.integration import (get_cluster, use_singledc, PROTOCOL_VERSION, get_server_versions, execute_until_pass, + BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, + BasicExistingKeyspaceUnitTestCase, drop_keyspace_shutdown_cluster, CASSANDRA_VERSION, + BasicExistingSegregatedKeyspaceUnitTestCase, dseonly, DSE_VERSION, + get_supported_protocol_versions, greaterthanorequalcass30) def setup_module(): @@ -864,15 +866,19 @@ def test_table_extensions(self): ks = self.keyspace_name ks_meta = s.cluster.metadata.keyspaces[ks] t = self.function_table_name + v = t + 'view' s.execute("CREATE TABLE %s.%s (k text PRIMARY KEY, v int)" % (ks, t)) + s.execute("CREATE MATERIALIZED VIEW %s.%s AS SELECT * FROM %s.%s WHERE v IS NOT NULL PRIMARY KEY (v, k)" % (ks, v, ks, t)) table_meta = ks_meta.tables[t] + view_meta = table_meta.views[v] self.assertFalse(table_meta.extensions) - self.assertNotIn(t, table_meta._extension_registry) + self.assertFalse(view_meta.extensions) - original_cql = table_meta.export_as_string() + original_table_cql = table_meta.export_as_string() + original_view_cql = view_meta.export_as_string() # extensions registered, not present # -------------------------------------- @@ -887,44 +893,67 @@ class Ext1(Ext0): name = t + '##' self.assertFalse(table_meta.extensions) - self.assertIn(Ext0.name, table_meta._extension_registry) - self.assertIn(Ext1.name, table_meta._extension_registry) - self.assertEqual(len(table_meta._extension_registry), 2) + self.assertFalse(view_meta.extensions) + self.assertIn(Ext0.name, _RegisteredExtensionType._extension_registry) + self.assertIn(Ext1.name, _RegisteredExtensionType._extension_registry) + self.assertEqual(len(_RegisteredExtensionType._extension_registry), 2) self.cluster.refresh_table_metadata(ks, t) table_meta = ks_meta.tables[t] + view_meta = table_meta.views[v] - self.assertEqual(table_meta.export_as_string(), original_cql) + self.assertEqual(table_meta.export_as_string(), original_table_cql) + self.assertEqual(view_meta.export_as_string(), original_view_cql) - p = s.prepare('UPDATE system_schema.tables SET extensions=? WHERE keyspace_name=? AND table_name=?') # for blob type coercing + update_t = s.prepare('UPDATE system_schema.tables SET extensions=? WHERE keyspace_name=? AND table_name=?') # for blob type coercing + update_v = s.prepare('UPDATE system_schema.views SET extensions=? WHERE keyspace_name=? AND view_name=?') # extensions registered, one present # -------------------------------------- ext_map = {Ext0.name: six.b("THA VALUE")} - [s.execute(p, (ext_map, ks, t)) for _ in self.cluster.metadata.all_hosts()] # we're manipulating metadata - do it on all hosts + [(s.execute(update_t, (ext_map, ks, t)), s.execute(update_v, (ext_map, ks, v))) + for _ in self.cluster.metadata.all_hosts()] # we're manipulating metadata - do it on all hosts self.cluster.refresh_table_metadata(ks, t) + self.cluster.refresh_materialized_view_metadata(ks, v) table_meta = ks_meta.tables[t] + view_meta = table_meta.views[v] self.assertIn(Ext0.name, table_meta.extensions) new_cql = table_meta.export_as_string() - self.assertNotEqual(new_cql, original_cql) + self.assertNotEqual(new_cql, original_table_cql) self.assertIn(Ext0.after_table_cql(table_meta, Ext0.name, ext_map[Ext0.name]), new_cql) self.assertNotIn(Ext1.name, new_cql) + self.assertIn(Ext0.name, view_meta.extensions) + new_cql = view_meta.export_as_string() + self.assertNotEqual(new_cql, original_view_cql) + self.assertIn(Ext0.after_table_cql(view_meta, Ext0.name, ext_map[Ext0.name]), new_cql) + self.assertNotIn(Ext1.name, new_cql) + # extensions registered, one present # -------------------------------------- ext_map = {Ext0.name: six.b("THA VALUE"), Ext1.name: six.b("OTHA VALUE")} - [s.execute(p, (ext_map, ks, t)) for _ in self.cluster.metadata.all_hosts()] # we're manipulating metadata - do it on all hosts + [(s.execute(update_t, (ext_map, ks, t)), s.execute(update_v, (ext_map, ks, v))) + for _ in self.cluster.metadata.all_hosts()] # we're manipulating metadata - do it on all hosts self.cluster.refresh_table_metadata(ks, t) + self.cluster.refresh_materialized_view_metadata(ks, v) table_meta = ks_meta.tables[t] + view_meta = table_meta.views[v] self.assertIn(Ext0.name, table_meta.extensions) self.assertIn(Ext1.name, table_meta.extensions) new_cql = table_meta.export_as_string() - self.assertNotEqual(new_cql, original_cql) + self.assertNotEqual(new_cql, original_table_cql) self.assertIn(Ext0.after_table_cql(table_meta, Ext0.name, ext_map[Ext0.name]), new_cql) self.assertIn(Ext1.after_table_cql(table_meta, Ext1.name, ext_map[Ext1.name]), new_cql) + self.assertIn(Ext0.name, view_meta.extensions) + self.assertIn(Ext1.name, view_meta.extensions) + new_cql = view_meta.export_as_string() + self.assertNotEqual(new_cql, original_view_cql) + self.assertIn(Ext0.after_table_cql(view_meta, Ext0.name, ext_map[Ext0.name]), new_cql) + self.assertIn(Ext1.after_table_cql(view_meta, Ext1.name, ext_map[Ext1.name]), new_cql) + class TestCodeCoverage(unittest.TestCase): From 6f30ba392b8b2e995b24716c30be11e46d207787 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Sun, 15 Jan 2017 17:03:39 -0500 Subject: [PATCH 0951/2005] Add vints_pack --- cassandra/marshal.py | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/cassandra/marshal.py b/cassandra/marshal.py index 2cb34cf4f3..291f514823 100644 --- a/cassandra/marshal.py +++ b/cassandra/marshal.py @@ -86,6 +86,10 @@ def varint_pack(big): return six.binary_type(revbytes) +def encode_zig_zag(n): + return (n << 1) ^ (n >> 63) + + def decode_zig_zag(n): return (n >> 1) ^ -(n & 1) @@ -111,3 +115,30 @@ def vints_unpack(term): # noqa values.append(decode_zig_zag(val)) return tuple(values) + + +def vints_pack(values): + revbytes = bytearray() + values.reverse() + for v in values: + v = encode_zig_zag(v) + if v < 128: + revbytes.append(v) + else: + num_extra_bytes = 0 + num_bits = v.bit_length() + # We need to reserve (num_extra_bytes+1) bits in the first byte + # ie. with 1 extra byte, the first byte needs to be something like '10XXXXXX' + while num_bits > (8-(num_extra_bytes+1)): + num_extra_bytes += 1 + num_bits -= 8 + revbytes.append(v & 0xff) + v >>= 8 + + # We can now store the last bits in the first byte + n = 8 - num_extra_bytes + v |= (0xff >> n << n) + revbytes.append(abs(v)) + + revbytes.reverse() + return six.binary_type(revbytes) \ No newline at end of file From f2039b751a3558aa88941268392bf139229e0f46 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Sun, 15 Jan 2017 17:56:29 -0500 Subject: [PATCH 0952/2005] py3 fix --- cassandra/marshal.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/cassandra/marshal.py b/cassandra/marshal.py index 291f514823..035ad71f90 100644 --- a/cassandra/marshal.py +++ b/cassandra/marshal.py @@ -45,6 +45,10 @@ def _make_packer(format_string): if six.PY3: + def byte2int(b): + return b + + def varint_unpack(term): val = int(''.join("%02x" % i for i in term), 16) if (term[0] & 128) != 0: @@ -52,6 +56,10 @@ def varint_unpack(term): val -= 1 << (len_term * 8) return val else: + def byte2int(b): + return ord(b) + + def varint_unpack(term): # noqa val = int(term.encode('hex'), 16) if (ord(term[0]) & 128) != 0: @@ -98,7 +106,7 @@ def vints_unpack(term): # noqa values = [] n = 0 while n < len(term): - first_byte = ord(term[n]) + first_byte = byte2int(term[n]) if (first_byte & 128) == 0: val = first_byte @@ -109,7 +117,7 @@ def vints_unpack(term): # noqa while n < end: n += 1 val <<= 8 - val |= ord(term[n]) & 0xff + val |= byte2int(term[n]) & 0xff n += 1 values.append(decode_zig_zag(val)) From 8b106c31fd4f214dd15018adc7b24648cfd6b104 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 16 Jan 2017 12:17:05 -0500 Subject: [PATCH 0953/2005] Revert "Only start timer if request is actually issued." This reverts commit 1a01f118240e5d9af009653cfff80cdb67ee099 for PYTHON-644. The "create timer on request" strategy can let timers leak. --- cassandra/cluster.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 18c1734474..e225ef24a1 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3277,6 +3277,7 @@ def __init__(self, session, message, query, timeout, metrics=None, prepared_stat self._errbacks = [] self._spec_execution_plan = speculative_execution_plan or self._spec_execution_plan self.attempted_hosts = [] + self._start_timer() def _start_timer(self): if self._timer is None: @@ -3331,11 +3332,6 @@ def send_request(self, error_no_hosts=True): req_id = self._query(host) if req_id is not None: self._req_id = req_id - - # timer is only started here, after we have at least one message queued - # this is done to avoid overrun of timers with unfettered client requests - # in the case of full disconnect, where no hosts will be available - self._start_timer() return True if self.timeout is not None and time.time() - self._start_time > self.timeout: self._on_timeout() @@ -3454,7 +3450,7 @@ def start_fetching_next_page(self): self._event.clear() self._final_result = _NOT_SET self._final_exception = None - self._timer = None # clear cancelled timer; new one will be set when request is queued + self._start_timer() self.send_request() def _reprepare(self, prepare_message, host, connection, pool): From 18aa88112eb7ee9d9b711c391c2361b35357e7fb Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 16 Jan 2017 14:53:28 -0500 Subject: [PATCH 0954/2005] Add util.Duration and its string encoding --- cassandra/cqltypes.py | 16 +++++++++------ cassandra/encoder.py | 11 +++++++++-- cassandra/marshal.py | 2 +- cassandra/util.py | 45 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 65 insertions(+), 9 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 5e3aa706ac..4eed45bf21 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -48,7 +48,7 @@ uint16_pack, uint16_unpack, uint32_pack, uint32_unpack, int32_pack, int32_unpack, int64_pack, int64_unpack, float_pack, float_unpack, double_pack, double_unpack, - varint_pack, varint_unpack, vints_unpack) + varint_pack, varint_unpack, vints_pack, vints_unpack) from cassandra import util apache_cassandra_type_prefix = 'org.apache.cassandra.db.marshal.' @@ -665,13 +665,17 @@ class DurationType(_CassandraType): @staticmethod def deserialize(byts, protocol_version): - print vints_unpack(byts) - varint_unpack(byts) - return varint_unpack(byts) + months, days, nanoseconds = vints_unpack(byts) + return util.Duration(months, days, nanoseconds) @staticmethod - def serialize(byts, protocol_version): - return # ... + def serialize(duration, protocol_version): + try: + duration.validate() + m, d, n = duration.months, duration.days, duration.nanoseconds + except AttributeError: + raise TypeError('DurationType arguments must be a Duration.') + return vints_pack([m, d, n]) class UTF8Type(_CassandraType): diff --git a/cassandra/encoder.py b/cassandra/encoder.py index 98d562d1bc..5e45ac7c77 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -30,7 +30,7 @@ import six from cassandra.util import (OrderedDict, OrderedMap, OrderedMapSerializedKey, - sortedset, Time, Date) + sortedset, Time, Date, Duration) if six.PY3: long = int @@ -88,7 +88,8 @@ def __init__(self): sortedset: self.cql_encode_set_collection, frozenset: self.cql_encode_set_collection, types.GeneratorType: self.cql_encode_list_collection, - ValueSequence: self.cql_encode_sequence + ValueSequence: self.cql_encode_sequence, + Duration: self.cql_encode_duration } if six.PY2: @@ -225,3 +226,9 @@ def cql_encode_all_types(self, val): if :attr:`~Encoder.mapping` does not contain an entry for the type. """ return self.mapping.get(type(val), self.cql_encode_object)(val) + + def cql_encode_duration(self, val): + """ + Encodes a :class:`cassandra.util.Duration` object as a string. + """ + return str(val) diff --git a/cassandra/marshal.py b/cassandra/marshal.py index 035ad71f90..9647e22bce 100644 --- a/cassandra/marshal.py +++ b/cassandra/marshal.py @@ -127,7 +127,7 @@ def vints_unpack(term): # noqa def vints_pack(values): revbytes = bytearray() - values.reverse() + values = [int(v) for v in values[::-1]] for v in values: v = encode_zig_zag(v) if v < 128: diff --git a/cassandra/util.py b/cassandra/util.py index 7f17e85d18..f606f248a5 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -1193,3 +1193,48 @@ def _sanitize_identifiers(field_names): names_out[index] = "%s_" % (names_out[index],) observed_names.add(names_out[index]) return names_out + + +class Duration(object): + """ + Cassandra Duration Type + """ + + months = 0 + days = 0 + nanoseconds = 0 + + def __init__(self, months=0, days=0, nanoseconds=0): + self.months = months + self.days = days + self.nanoseconds = nanoseconds + self.validate() + + def validate(self): + """ + A Duration is valid if its values are all positive or all negative. It cannot has mixed signs. + """ + if self._has_negative_values and self._has_positive_values: + raise ValueError('Duration values cannot have mixed signs.') + + @property + def _has_negative_values(self): + return self.months < 0 or self.days < 0 or self.nanoseconds < 0 + + @property + def _has_positive_values(self): + return self.months > 0 or self.days > 0 or self.nanoseconds > 0 + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.months == other.months and self.days == other.days and self.nanoseconds == other.nanoseconds + + def __repr__(self): + return "Duration({0}, {1}, {2})".format(self.months, self.days, self.nanoseconds) + + def __str__(self): + return '{0}{1}mo{2}d{3}ns'.format( + '-' if self._has_negative_values else '', + abs(self.months), + abs(self.days), + abs(self.nanoseconds) + ) From 52120b83efe049c6eb76d6fd90b3334a0fd4ce8d Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 16 Jan 2017 15:00:20 -0500 Subject: [PATCH 0955/2005] Add DurationType in type_codes --- cassandra/protocol.py | 2 +- cassandra/type_codes.py | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 7057656376..5cae104fef 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -38,7 +38,7 @@ LongType, MapType, SetType, TimeUUIDType, UTF8Type, VarcharType, UUIDType, UserType, TupleType, lookup_casstype, SimpleDateType, - TimeType, ByteType, ShortType) + TimeType, ByteType, ShortType, DurationType) from cassandra.policies import WriteType from cassandra.cython_deps import HAVE_CYTHON, HAVE_NUMPY from cassandra import util diff --git a/cassandra/type_codes.py b/cassandra/type_codes.py index daf882e46c..eab9a3344a 100644 --- a/cassandra/type_codes.py +++ b/cassandra/type_codes.py @@ -25,6 +25,11 @@ 0x000E Varint 0x000F Timeuuid 0x0010 Inet + 0x0011 SimpleDateType + 0x0012 TimeType + 0x0013 ShortType + 0x0014 ByteType + 0x0015 DurationType 0x0020 List: the value is an [option], representing the type of the elements of the list. 0x0021 Map: the value is two [option], representing the types of the @@ -54,6 +59,7 @@ TimeType = 0x0012 ShortType = 0x0013 ByteType = 0x0014 +DurationType = 0x0015 ListType = 0x0020 MapType = 0x0021 SetType = 0x0022 From c49421b077dac5a5dce85c1f864dd6f25a0512f6 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 17 Jan 2017 08:22:44 -0500 Subject: [PATCH 0956/2005] review fixes --- cassandra/encoder.py | 11 ++--------- cassandra/util.py | 2 +- 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/cassandra/encoder.py b/cassandra/encoder.py index 5e45ac7c77..98d562d1bc 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -30,7 +30,7 @@ import six from cassandra.util import (OrderedDict, OrderedMap, OrderedMapSerializedKey, - sortedset, Time, Date, Duration) + sortedset, Time, Date) if six.PY3: long = int @@ -88,8 +88,7 @@ def __init__(self): sortedset: self.cql_encode_set_collection, frozenset: self.cql_encode_set_collection, types.GeneratorType: self.cql_encode_list_collection, - ValueSequence: self.cql_encode_sequence, - Duration: self.cql_encode_duration + ValueSequence: self.cql_encode_sequence } if six.PY2: @@ -226,9 +225,3 @@ def cql_encode_all_types(self, val): if :attr:`~Encoder.mapping` does not contain an entry for the type. """ return self.mapping.get(type(val), self.cql_encode_object)(val) - - def cql_encode_duration(self, val): - """ - Encodes a :class:`cassandra.util.Duration` object as a string. - """ - return str(val) diff --git a/cassandra/util.py b/cassandra/util.py index f606f248a5..f0fdca9fd3 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -1232,7 +1232,7 @@ def __repr__(self): return "Duration({0}, {1}, {2})".format(self.months, self.days, self.nanoseconds) def __str__(self): - return '{0}{1}mo{2}d{3}ns'.format( + return '%s%dmo%dd%dns' % ( '-' if self._has_negative_values else '', abs(self.months), abs(self.days), From 8aeb926644bcc368d446feee4b3a4e55dd22d673 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 16 Jan 2017 16:05:01 -0500 Subject: [PATCH 0957/2005] cancel timers before defuncting connection This allows us to revert some of the fix to PYTHON-367 without the timer heap growing out of control as described on that ticket. We cancel before defuncting to ensure that the timer doesn't hang around while running callbacks. Between this change and the delegation of callbacks to a secondary thread (see commit 51923029, part of the original PYTHON-367 fix), this commit should not cause a regression on PYTHON-367. Note that we don't cancel timers before defuncting and retrying -- we're not done with the logical request and still want that timer to fire. Now that timers are started on ResponseFuture initialization, we actually use the value of timeout in the tests, so we no longer pass that argument, allowing the test to use the default value. --- cassandra/cluster.py | 1 + tests/unit/test_cluster.py | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index e225ef24a1..cc5cd78a1c 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3608,6 +3608,7 @@ def _set_result(self, host, connection, pool, response): # we got some other kind of response message msg = "Got unexpected message: %r" % (response,) exc = ConnectionException(msg, host) + self._cancel_timer() self._connection.defunct(exc) self._set_final_exception(exc) except Exception as exc: diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 279b0f94a1..b3337f6c4d 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -184,7 +184,7 @@ def test_default_legacy(self): self._verify_response_future_profile(rf, expected_profile) def test_default_profile(self): - non_default_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(5)]) + non_default_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(3)]) cluster = Cluster(execution_profiles={'non-default': non_default_profile}) session = Session(cluster, hosts=[]) @@ -218,7 +218,7 @@ def test_statement_params_override_legacy(self): self._verify_response_future_profile(rf, expected_profile) def test_statement_params_override_profile(self): - non_default_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(5)]) + non_default_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(3)]) cluster = Cluster(execution_profiles={'non-default': non_default_profile}) session = Session(cluster, hosts=[]) @@ -284,7 +284,7 @@ def test_no_legacy_with_profile(self): def test_profile_name_value(self): - internalized_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(5)]) + internalized_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(3)]) cluster = Cluster(execution_profiles={'by-name': internalized_profile}) session = Session(cluster, hosts=[]) self.assertEqual(cluster._config_mode, _ConfigMode.PROFILES) @@ -292,7 +292,7 @@ def test_profile_name_value(self): rf = session.execute_async("query", execution_profile='by-name') self._verify_response_future_profile(rf, internalized_profile) - by_value = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(5)]) + by_value = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(3)]) rf = session.execute_async("query", execution_profile=by_value) self._verify_response_future_profile(rf, by_value) From a471c8b6939aeb5b7aa5d4aecab1c1db32989ea5 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 17 Jan 2017 16:24:33 -0500 Subject: [PATCH 0958/2005] no more validation required --- cassandra/cqltypes.py | 1 - cassandra/util.py | 19 ++----------------- 2 files changed, 2 insertions(+), 18 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 4eed45bf21..6c1552099c 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -671,7 +671,6 @@ def deserialize(byts, protocol_version): @staticmethod def serialize(duration, protocol_version): try: - duration.validate() m, d, n = duration.months, duration.days, duration.nanoseconds except AttributeError: raise TypeError('DurationType arguments must be a Duration.') diff --git a/cassandra/util.py b/cassandra/util.py index f0fdca9fd3..924b5c7905 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -1208,22 +1208,6 @@ def __init__(self, months=0, days=0, nanoseconds=0): self.months = months self.days = days self.nanoseconds = nanoseconds - self.validate() - - def validate(self): - """ - A Duration is valid if its values are all positive or all negative. It cannot has mixed signs. - """ - if self._has_negative_values and self._has_positive_values: - raise ValueError('Duration values cannot have mixed signs.') - - @property - def _has_negative_values(self): - return self.months < 0 or self.days < 0 or self.nanoseconds < 0 - - @property - def _has_positive_values(self): - return self.months > 0 or self.days > 0 or self.nanoseconds > 0 def __eq__(self, other): return isinstance(other, self.__class__) and self.months == other.months and self.days == other.days and self.nanoseconds == other.nanoseconds @@ -1232,8 +1216,9 @@ def __repr__(self): return "Duration({0}, {1}, {2})".format(self.months, self.days, self.nanoseconds) def __str__(self): + has_negative_values = self.months < 0 or self.days < 0 or self.nanoseconds < 0 return '%s%dmo%dd%dns' % ( - '-' if self._has_negative_values else '', + '-' if has_negative_values else '', abs(self.months), abs(self.days), abs(self.nanoseconds) From eef33b0a20f9c48c75766b3b8432d9be3653d659 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 18 Jan 2017 11:28:00 -0600 Subject: [PATCH 0959/2005] Test additions for PYTHON-655 --- tests/integration/__init__.py | 2 + tests/integration/datatype_utils.py | 10 +++- tests/integration/standard/test_types.py | 20 ++++---- tests/integration/standard/test_udts.py | 8 ++-- tests/unit/test_types.py | 2 + tests/unit/test_util_types.py | 59 +++++++++++++++++++++++- 6 files changed, 85 insertions(+), 16 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 8202b6fae5..2b720f786c 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -107,6 +107,8 @@ def _get_cass_version_from_dse(dse_version): cass_ver = "2.1" elif dse_version.startswith('5.0'): cass_ver = "3.0" + elif dse_version.startswith("5.1"): + cass_ver = "3.10" else: log.error("Uknown dse version found {0}, defaulting to 2.1".format(dse_version)) cass_ver = "2.1" diff --git a/tests/integration/datatype_utils.py b/tests/integration/datatype_utils.py index ee9695c289..8824053d2c 100644 --- a/tests/integration/datatype_utils.py +++ b/tests/integration/datatype_utils.py @@ -16,7 +16,7 @@ from datetime import datetime, date, time from uuid import uuid1, uuid4 -from cassandra.util import OrderedMap, Date, Time, sortedset +from cassandra.util import OrderedMap, Date, Time, sortedset, Duration from tests.integration import get_server_versions @@ -39,6 +39,8 @@ 'varint', ]) +PRIMITIVE_DATATYPES_KEYS = PRIMITIVE_DATATYPES.copy() + COLLECTION_TYPES = sortedset([ 'list', 'set', @@ -54,6 +56,9 @@ def update_datatypes(): if _cass_version >= (2, 2, 0): PRIMITIVE_DATATYPES.update(['date', 'time', 'smallint', 'tinyint']) + PRIMITIVE_DATATYPES_KEYS.update(['date', 'time', 'smallint', 'tinyint']) + if _cass_version >= (3, 10): + PRIMITIVE_DATATYPES.add('duration') global SAMPLE_DATA SAMPLE_DATA = get_sample_data() @@ -120,6 +125,9 @@ def get_sample_data(): elif datatype == 'smallint': sample_data[datatype] = 32523 + elif datatype == 'duration': + sample_data[datatype] = Duration(months=2, days=12, nanoseconds=21231) + else: raise Exception("Missing handling of {0}".format(datatype)) diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index f959d4d9f9..40fb1c4c35 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -32,7 +32,7 @@ from tests.integration import use_singledc, PROTOCOL_VERSION, execute_until_pass, notprotocolv1, \ BasicSharedKeyspaceUnitTestCase, greaterthancass21, lessthancass30 -from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, COLLECTION_TYPES, \ +from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, COLLECTION_TYPES, PRIMITIVE_DATATYPES_KEYS, \ get_sample, get_collection_sample @@ -202,7 +202,7 @@ def test_can_insert_collection_datatypes(self): col_names = ["zz"] start_index = ord('a') for i, collection_type in enumerate(COLLECTION_TYPES): - for j, datatype in enumerate(PRIMITIVE_DATATYPES): + for j, datatype in enumerate(PRIMITIVE_DATATYPES_KEYS): if collection_type == "map": type_string = "{0}_{1} {2}<{3}, {3}>".format(chr(start_index + i), chr(start_index + j), collection_type, datatype) @@ -221,7 +221,7 @@ def test_can_insert_collection_datatypes(self): # create the input for simple statement params = [0] for collection_type in COLLECTION_TYPES: - for datatype in PRIMITIVE_DATATYPES: + for datatype in PRIMITIVE_DATATYPES_KEYS: params.append((get_collection_sample(collection_type, datatype))) # insert into table as a simple statement @@ -236,7 +236,7 @@ def test_can_insert_collection_datatypes(self): # create the input for prepared statement params = [0] for collection_type in COLLECTION_TYPES: - for datatype in PRIMITIVE_DATATYPES: + for datatype in PRIMITIVE_DATATYPES_KEYS: params.append((get_collection_sample(collection_type, datatype))) # try the same thing with a prepared statement @@ -553,15 +553,15 @@ def test_can_insert_tuples_all_collection_datatypes(self): values = [] # create list values - for datatype in PRIMITIVE_DATATYPES: + for datatype in PRIMITIVE_DATATYPES_KEYS: values.append('v_{0} frozen>>'.format(len(values), datatype)) # create set values - for datatype in PRIMITIVE_DATATYPES: + for datatype in PRIMITIVE_DATATYPES_KEYS: values.append('v_{0} frozen>>'.format(len(values), datatype)) # create map values - for datatype in PRIMITIVE_DATATYPES: + for datatype in PRIMITIVE_DATATYPES_KEYS: datatype_1 = datatype_2 = datatype if datatype == 'blob': # unhashable type: 'bytearray' @@ -581,7 +581,7 @@ def test_can_insert_tuples_all_collection_datatypes(self): i = 0 # test tuple> - for datatype in PRIMITIVE_DATATYPES: + for datatype in PRIMITIVE_DATATYPES_KEYS: created_tuple = tuple([[get_sample(datatype)]]) s.execute("INSERT INTO tuple_non_primative (k, v_%s) VALUES (0, %s)", (i, created_tuple)) @@ -590,7 +590,7 @@ def test_can_insert_tuples_all_collection_datatypes(self): i += 1 # test tuple> - for datatype in PRIMITIVE_DATATYPES: + for datatype in PRIMITIVE_DATATYPES_KEYS: created_tuple = tuple([sortedset([get_sample(datatype)])]) s.execute("INSERT INTO tuple_non_primative (k, v_%s) VALUES (0, %s)", (i, created_tuple)) @@ -599,7 +599,7 @@ def test_can_insert_tuples_all_collection_datatypes(self): i += 1 # test tuple> - for datatype in PRIMITIVE_DATATYPES: + for datatype in PRIMITIVE_DATATYPES_KEYS: if datatype == 'blob': # unhashable type: 'bytearray' created_tuple = tuple([{get_sample('ascii'): get_sample(datatype)}]) diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index 0e2f46ee37..41957f1227 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -27,7 +27,7 @@ from cassandra.util import OrderedMap from tests.integration import use_singledc, PROTOCOL_VERSION, execute_until_pass, BasicSegregatedKeyspaceUnitTestCase, greaterthancass20, greaterthanorequalcass36 -from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, COLLECTION_TYPES, \ +from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, PRIMITIVE_DATATYPES_KEYS, COLLECTION_TYPES, \ get_sample, get_collection_sample nested_collection_udt = namedtuple('nested_collection_udt', ['m', 't', 'l', 's']) @@ -562,7 +562,7 @@ def test_can_insert_udt_all_collection_datatypes(self): alpha_type_list = [] start_index = ord('a') for i, collection_type in enumerate(COLLECTION_TYPES): - for j, datatype in enumerate(PRIMITIVE_DATATYPES): + for j, datatype in enumerate(PRIMITIVE_DATATYPES_KEYS): if collection_type == "map": type_string = "{0}_{1} {2}<{3}, {3}>".format(chr(start_index + i), chr(start_index + j), collection_type, datatype) @@ -584,7 +584,7 @@ def test_can_insert_udt_all_collection_datatypes(self): # register UDT alphabet_list = [] for i in range(ord('a'), ord('a') + len(COLLECTION_TYPES)): - for j in range(ord('a'), ord('a') + len(PRIMITIVE_DATATYPES)): + for j in range(ord('a'), ord('a') + len(PRIMITIVE_DATATYPES_KEYS)): alphabet_list.append('{0}_{1}'.format(chr(i), chr(j))) Alldatatypes = namedtuple("alldatatypes", alphabet_list) @@ -593,7 +593,7 @@ def test_can_insert_udt_all_collection_datatypes(self): # insert UDT data params = [] for collection_type in COLLECTION_TYPES: - for datatype in PRIMITIVE_DATATYPES: + for datatype in PRIMITIVE_DATATYPES_KEYS: params.append((get_collection_sample(collection_type, datatype))) insert = s.prepare("INSERT INTO mytable (a, b) VALUES (?, ?)") diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index d8774b0299..0b0f5efda5 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -67,6 +67,7 @@ def test_lookup_casstype_simple(self): self.assertEqual(lookup_casstype_simple('CompositeType'), cassandra.cqltypes.CompositeType) self.assertEqual(lookup_casstype_simple('ColumnToCollectionType'), cassandra.cqltypes.ColumnToCollectionType) self.assertEqual(lookup_casstype_simple('ReversedType'), cassandra.cqltypes.ReversedType) + self.assertEqual(lookup_casstype_simple('DurationType'), cassandra.cqltypes.DurationType) self.assertEqual(str(lookup_casstype_simple('unknown')), str(cassandra.cqltypes.mkUnrecognizedType('unknown'))) @@ -100,6 +101,7 @@ def test_lookup_casstype(self): self.assertEqual(lookup_casstype('CompositeType'), cassandra.cqltypes.CompositeType) self.assertEqual(lookup_casstype('ColumnToCollectionType'), cassandra.cqltypes.ColumnToCollectionType) self.assertEqual(lookup_casstype('ReversedType'), cassandra.cqltypes.ReversedType) + self.assertEqual(lookup_casstype('DurationType'), cassandra.cqltypes.DurationType) self.assertEqual(str(lookup_casstype('unknown')), str(cassandra.cqltypes.mkUnrecognizedType('unknown'))) diff --git a/tests/unit/test_util_types.py b/tests/unit/test_util_types.py index 5bcaa6e724..2b9bc14a90 100644 --- a/tests/unit/test_util_types.py +++ b/tests/unit/test_util_types.py @@ -18,7 +18,7 @@ import datetime -from cassandra.util import Date, Time +from cassandra.util import Date, Time, Duration class DateTests(unittest.TestCase): @@ -150,3 +150,60 @@ def test_invalid_init(self): self.assertRaises(TypeError, Time, 1.234) self.assertRaises(ValueError, Time, 123456789000000) self.assertRaises(TypeError, Time, datetime.datetime(2004, 12, 23, 11, 11, 1)) + + +class DurationTests(unittest.TestCase): + + def test_valid_format(self): + + valid = Duration(1, 1, 1) + self.assertEqual(valid.months, 1) + self.assertEqual(valid.days, 1) + self.assertEqual(valid.nanoseconds, 1) + + valid = Duration(nanoseconds=100000) + self.assertEqual(valid.months, 0) + self.assertEqual(valid.days, 0) + self.assertEqual(valid.nanoseconds, 100000) + + valid = Duration() + self.assertEqual(valid.months, 0) + self.assertEqual(valid.days, 0) + self.assertEqual(valid.nanoseconds, 0) + + valid = Duration(-10, -21, -1000) + self.assertEqual(valid.months, -10) + self.assertEqual(valid.days, -21) + self.assertEqual(valid.nanoseconds, -1000) + + def test_equality(self): + + first = Duration(1, 1, 1) + second = Duration(-1, 1, 1) + self.assertNotEqual(first, second) + + first = Duration(1, 1, 1) + second = Duration(1, 1, 1) + self.assertEqual(first, second) + + first = Duration() + second = Duration(0, 0, 0) + self.assertEqual(first, second) + + first = Duration(1000, 10000, 2345345) + second = Duration(1000, 10000, 2345345) + self.assertEqual(first, second) + + first = Duration(12, 0 , 100) + second = Duration(nanoseconds=100, months=12) + self.assertEqual(first, second) + + def test_str(self): + + self.assertEqual(str(Duration(1, 1, 1)), "1mo1d1ns") + self.assertEqual(str(Duration(1, 1, -1)), "-1mo1d1ns") + self.assertEqual(str(Duration(1, 1, 1000000000000000)), "1mo1d1000000000000000ns") + self.assertEqual(str(Duration(52, 23, 564564)), "52mo23d564564ns") + + + From 65de40859aafc2b7c24f56aaf30513af67a7941b Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 18 Jan 2017 17:27:44 -0500 Subject: [PATCH 0960/2005] PYTHON-673: prevent socket-leaking race... ... between init and shutdown --- cassandra/cluster.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 18c1734474..1996351fda 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2249,6 +2249,13 @@ def shutdown(self): else: self.is_shutdown = True + # PYTHON-673. If shutdown was called shortly after session init, avoid + # a race by cancelling any initial connection attempts haven't started, + # then blocking on any that have. + for future in self._initial_connect_futures: + future.cancel() + wait_futures(self._initial_connect_futures) + for pool in list(self._pools.values()): pool.shutdown() From d9667444520c2f3a2d61ccab7d93d7d70eece577 Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 18 Jan 2017 18:02:19 -0500 Subject: [PATCH 0961/2005] Removed support for python 2.6 --- .travis.yml | 2 -- README-dev.rst | 2 +- README.rst | 2 +- cassandra/marshal.py | 10 +--------- setup.py | 1 - tests/unit/test_marshalling.py | 6 ------ tox.ini | 5 ++--- 7 files changed, 5 insertions(+), 23 deletions(-) diff --git a/.travis.yml b/.travis.yml index 07d0a35c8e..190dde47ba 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,8 +2,6 @@ language: python python: 2.7 sudo: false env: - - TOX_ENV=py26 CASS_VER=21 CASS_DRIVER_NO_CYTHON=1 - - TOX_ENV=py26 CASS_VER=21 - TOX_ENV=py27 CASS_VER=12 CASS_DRIVER_NO_CYTHON=1 - TOX_ENV=py27 CASS_VER=20 CASS_DRIVER_NO_CYTHON=1 - TOX_ENV=py27 CASS_VER=21 CASS_DRIVER_NO_CYTHON=1 diff --git a/README-dev.rst b/README-dev.rst index a549bf8a35..d14a9e45a8 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -117,7 +117,7 @@ it with the ``PROTOCOL_VERSION`` environment variable:: Testing Multiple Python Versions -------------------------------- -If you want to test all of python 2.6, 2.7, and pypy, use tox (this is what +If you want to test all of python 2.7, 3.3, 3.4 and pypy, use tox (this is what TravisCI runs):: tox diff --git a/README.rst b/README.rst index b0726a5a75..01fd6d9fe3 100644 --- a/README.rst +++ b/README.rst @@ -6,7 +6,7 @@ DataStax Python Driver for Apache Cassandra A modern, `feature-rich `_ and highly-tunable Python client library for Apache Cassandra (1.2+) and DataStax Enterprise (3.1+) using exclusively Cassandra's binary protocol and Cassandra Query Language v3. -The driver supports Python 2.6, 2.7, 3.3, and 3.4. +The driver supports Python 2.7, 3.3, and 3.4. Feedback Requested ------------------ diff --git a/cassandra/marshal.py b/cassandra/marshal.py index 9647e22bce..5ce40a5313 100644 --- a/cassandra/marshal.py +++ b/cassandra/marshal.py @@ -68,20 +68,12 @@ def varint_unpack(term): # noqa return val -def bitlength(n): - bitlen = 0 - while n > 0: - n >>= 1 - bitlen += 1 - return bitlen - - def varint_pack(big): pos = True if big == 0: return b'\x00' if big < 0: - bytelength = bitlength(abs(big) - 1) // 8 + 1 + bytelength = int.bit_length(abs(big) - 1) // 8 + 1 big = (1 << bytelength * 8) + big pos = False revbytes = bytearray() diff --git a/setup.py b/setup.py index 6dba9cc7b1..95f046ad30 100644 --- a/setup.py +++ b/setup.py @@ -417,7 +417,6 @@ def run_setup(extensions): 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python', - 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index a2bc2d8b1c..1f0dde4b6d 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -14,7 +14,6 @@ import sys from cassandra import ProtocolVersion -from cassandra.marshal import bitlength try: import unittest2 as unittest @@ -134,11 +133,6 @@ def test_marshalling(self): msg='Marshaller for %s (%s) gave wrong type (%s instead of %s)' % (valtype, marshaller, type(whatwegot), type(serializedval))) - def test_bitlength(self): - self.assertEqual(bitlength(9), 4) - self.assertEqual(bitlength(-10), 0) - self.assertEqual(bitlength(0), 0) - def test_date(self): # separate test because it will deserialize as datetime self.assertEqual(DateType.from_binary(DateType.to_binary(date(2015, 11, 2), 1), 1), datetime(2015, 11, 2)) diff --git a/tox.ini b/tox.ini index 9d45755981..3b68b73c8a 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py{26,27,33,34},pypy +envlist = py{27,33,34},pypy [base] deps = nose @@ -11,8 +11,7 @@ deps = nose [testenv] deps = {[base]deps} cython - py26: unittest2 - py{26,27}: gevent + py{27}: gevent twisted <15.5.0 setenv = LIBEV_EMBED=0 CARES_EMBED=0 From 05dbebc1a44e43d47ba939ae78926547e502b814 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 19 Jan 2017 16:13:36 -0500 Subject: [PATCH 0962/2005] add shuffle_replicas config option PYTHON-643 --- cassandra/policies.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/cassandra/policies.py b/cassandra/policies.py index 347907eca3..5b325bfd1a 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -14,7 +14,7 @@ from itertools import islice, cycle, groupby, repeat import logging -from random import randint +from random import randint, shuffle from threading import Lock import socket @@ -320,13 +320,18 @@ class TokenAwarePolicy(LoadBalancingPolicy): If no :attr:`~.Statement.routing_key` is set on the query, the child policy's query plan will be used as is. + + If :attr:`.shuffle_replicas` is truthy, :attr:`~.HostDistance.LOCAL` + replicas will be yielded in a random order, followed by the remaining + hosts in the order provided child policy's query plan. """ _child_policy = None _cluster_metadata = None - def __init__(self, child_policy): + def __init__(self, child_policy, shuffle_replicas=False): self._child_policy = child_policy + self.shuffle_replicas = shuffle_replicas def populate(self, cluster, hosts): self._cluster_metadata = cluster.metadata @@ -361,6 +366,8 @@ def make_query_plan(self, working_keyspace=None, query=None): yield host else: replicas = self._cluster_metadata.get_replicas(keyspace, routing_key) + if self.shuffle_replicas: + shuffle(replicas) for replica in replicas: if replica.is_up and \ child.distance(replica) == HostDistance.LOCAL: From 8ec890be856be74ba4a09e329de6f3800cdb4e19 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 19 Jan 2017 16:08:02 -0600 Subject: [PATCH 0963/2005] Fixing configuration issue with ssl test --- tests/integration/long/test_ssl.py | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index 99875afe48..7f90665745 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -21,7 +21,7 @@ from cassandra.cluster import Cluster, NoHostAvailable from cassandra import ConsistencyLevel from cassandra.query import SimpleStatement -from tests.integration import use_singledc, PROTOCOL_VERSION, get_cluster, remove_cluster +from tests.integration import PROTOCOL_VERSION, get_cluster, remove_cluster, use_single_node log = logging.getLogger(__name__) @@ -44,7 +44,7 @@ def setup_cluster_ssl(client_auth=False): ssl connectivity, and client authenticiation if needed. """ - use_singledc(start=False) + use_single_node(start=False) ccm_cluster = get_cluster() ccm_cluster.stop() @@ -68,16 +68,6 @@ def setup_cluster_ssl(client_auth=False): ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True) -def teardown_module(): - """ - The rest of the tests don't need ssl enabled, remove the cluster so as to not interfere with other tests. - """ - - ccm_cluster = get_cluster() - ccm_cluster.stop() - remove_cluster() - - def validate_ssl_options(ssl_options): # find absolute path to client CA_CERTS tries = 0 @@ -116,6 +106,12 @@ class SSLConnectionTests(unittest.TestCase): def setUpClass(cls): setup_cluster_ssl() + @classmethod + def tearDownClass(cls): + ccm_cluster = get_cluster() + ccm_cluster.stop() + remove_cluster() + def test_can_connect_with_ssl_ca(self): """ Test to validate that we are able to connect to a cluster using ssl. @@ -204,6 +200,12 @@ class SSLConnectionAuthTests(unittest.TestCase): def setUpClass(cls): setup_cluster_ssl(client_auth=True) + @classmethod + def tearDownClass(cls): + ccm_cluster = get_cluster() + ccm_cluster.stop() + remove_cluster() + def test_can_connect_with_ssl_client_auth(self): """ Test to validate that we can connect to a C* cluster that has client_auth enabled. From 7616fbb9f66a7ff937e523151601e65c1eddb357 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 19 Jan 2017 16:59:46 -0500 Subject: [PATCH 0964/2005] small documentation improvement --- cassandra/policies.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/cassandra/policies.py b/cassandra/policies.py index 5b325bfd1a..9be0125f8f 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -315,19 +315,21 @@ class TokenAwarePolicy(LoadBalancingPolicy): This alters the child policy's behavior so that it first attempts to send queries to :attr:`~.HostDistance.LOCAL` replicas (as determined by the child policy) based on the :class:`.Statement`'s - :attr:`~.Statement.routing_key`. Once those hosts are exhausted, the - remaining hosts in the child policy's query plan will be used. + :attr:`~.Statement.routing_key`. If :attr:`.shuffle_replicas` is + truthy, these replicas will be yielded in a random order. Once those + hosts are exhausted, the remaining hosts in the child policy's query + plan will be used in the order provided by the child policy. If no :attr:`~.Statement.routing_key` is set on the query, the child policy's query plan will be used as is. - - If :attr:`.shuffle_replicas` is truthy, :attr:`~.HostDistance.LOCAL` - replicas will be yielded in a random order, followed by the remaining - hosts in the order provided child policy's query plan. """ _child_policy = None _cluster_metadata = None + shuffle_replicas = False + """ + Yield local replicas in a random order. + """ def __init__(self, child_policy, shuffle_replicas=False): self._child_policy = child_policy From 6598eabe722e3d3dd0eb02c28c52405f3c8d78d4 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 20 Jan 2017 10:32:55 -0500 Subject: [PATCH 0965/2005] add smoke test for BatchStatement.__len__ --- tests/unit/test_query.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/unit/test_query.py b/tests/unit/test_query.py index 687bca7c50..037710f650 100644 --- a/tests/unit/test_query.py +++ b/tests/unit/test_query.py @@ -66,3 +66,10 @@ def test_add_all(self): bound_statements = [t[1] for t in batch._statements_and_parameters] str_parameters = [str(i) for i in range(10)] self.assertEqual(bound_statements, str_parameters) + + def test_len(self): + for n in 0, 10, 100: + batch = BatchStatement() + batch.add_all(statements=['%s'] * n, + parameters=[(i,) for i in range(n)]) + self.assertEqual(len(batch), n) From 3298ab3f492650968fdde2ae558326eef9691560 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 20 Jan 2017 16:44:47 -0500 Subject: [PATCH 0966/2005] add regression tests for PYTHON-692 --- tests/integration/cqlengine/connections/test_connection.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/integration/cqlengine/connections/test_connection.py b/tests/integration/cqlengine/connections/test_connection.py index 659893bd0f..e3356771d3 100644 --- a/tests/integration/cqlengine/connections/test_connection.py +++ b/tests/integration/cqlengine/connections/test_connection.py @@ -95,3 +95,9 @@ def test_connection_session_switch(self): connection.set_session(self.session2) self.assertEqual(1, TestConnectModel.objects.count()) self.assertEqual(TestConnectModel.objects.first(), TCM2) + + def test_connection_setup_with_setup(self): + connection.setup(hosts=None, default_keyspace=None) + + def test_connection_setup_with_default(self): + connection.default() From f9ef412e120dc82569232b938ff666b0118ce245 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 20 Jan 2017 17:01:19 -0500 Subject: [PATCH 0967/2005] remove not-None restriction on hosts PYTHON-692 --- cassandra/cqlengine/connection.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index f9ebebedb5..ea5a4b0702 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -171,12 +171,7 @@ def register_connection(name, hosts=None, consistency=None, lazy_connect=False, if name in _connections: log.warning("Registering connection '{0}' when it already exists.".format(name)) - hosts_xor_session_passed = (hosts is None) ^ (session is None) - if not hosts_xor_session_passed: - raise CQLEngineException( - "Must pass exactly one of 'hosts' or 'session' arguments" - ) - elif session is not None: + if session is not None: invalid_config_args = (consistency is not None or lazy_connect is not False or retry_connect is not False or @@ -187,7 +182,7 @@ def register_connection(name, hosts=None, consistency=None, lazy_connect=False, ) conn = Connection.from_session(name, session=session) conn.setup_session() - elif hosts is not None: + else: # use hosts argument conn = Connection( name, hosts=hosts, consistency=consistency, lazy_connect=lazy_connect, From 6957386f11d03bdf5037bc7b3e31246b0b72a440 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 23 Jan 2017 10:26:43 -0500 Subject: [PATCH 0968/2005] add None example to mapper example --- example_mapper.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/example_mapper.py b/example_mapper.py index 4d5ebca361..7cb580e890 100755 --- a/example_mapper.py +++ b/example_mapper.py @@ -78,6 +78,9 @@ def main(): except LWTException: print "precondition not met" + log.info("### setting individual column to NULL by updating it to None") + nick.update(birth_year=None) + # showing validation try: FamilyMembers.create(id=simmons.id, surname='Tweed', name='Shannon', birth_year=1957, sex='f') From d14a270446794ecfb94b24b61203e712706e8201 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 23 Jan 2017 10:58:17 -0500 Subject: [PATCH 0969/2005] document None behavior on CQL models --- cassandra/cqlengine/models.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 1b1987396f..88bd535595 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -655,7 +655,8 @@ def create(cls, **kwargs): """ Create an instance of this model in the database. - Takes the model column values as keyword arguments. + Takes the model column values as keyword arguments. Setting a value to + `None` is equivalent to running a CQL `DELETE` on that column. Returns the instance. """ @@ -741,7 +742,8 @@ def update(self, **values): Performs an update on the model instance. You can pass in values to set on the model for updating, or you can call without values to execute an update against any modified fields. If no fields on the model have been modified since loading, no query will be - performed. Model validation is performed normally. + performed. Model validation is performed normally. Setting a value to `None` is + equivalent to running a CQL `DELETE` on that column. It is possible to do a blind update, that is, to update a field without having first selected the object out of the database. See :ref:`Blind Updates ` From 5c1e1a6de7130b9386c27f16773504a4b720d687 Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 24 Jan 2017 15:57:47 -0500 Subject: [PATCH 0970/2005] Added some test for shuffle_replicas config option --- .../long/test_loadbalancingpolicies.py | 83 +++++++++++++++++++ tests/unit/test_policies.py | 63 ++++++++++++++ 2 files changed, 146 insertions(+) diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index 9687b0ce69..a2e75ad950 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -520,6 +520,89 @@ def test_token_aware_with_local_table(self): cluster.shutdown() + def test_token_aware_with_shuffle_rf2(self): + """ + Test to validate the hosts are shuffled when the `shuffle_replicas` is truthy + @since 3.8 + @jira_ticket PYTHON-676 + @expected_result the request are spread across the replicas, + when one of them is down, the requests target the avaiable one + + @test_category policy + """ + use_singledc() + keyspace = 'test_token_aware_with_rf_2' + cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy(), shuffle_replicas=True)) + self._wait_for_nodes_up(range(1, 4), cluster) + + create_schema(cluster, session, keyspace, replication_factor=2) + self._insert(session, keyspace) + self._query(session, keyspace) + + self.coordinator_stats.assert_query_count_equals(self, 1, 0) + query_count_two = self.coordinator_stats.get_query_count(2) + query_count_three = self.coordinator_stats.get_query_count(3) + self.assertEqual(query_count_two + query_count_three, 12) + + self.coordinator_stats.reset_counts() + stop(2) + self._wait_for_nodes_down([2], cluster) + + self._query(session, keyspace) + + self.coordinator_stats.assert_query_count_equals(self, 1, 0) + self.coordinator_stats.assert_query_count_equals(self, 2, 0) + self.coordinator_stats.assert_query_count_equals(self, 3, 12) + + cluster.shutdown() + + def test_token_aware_with_shuffle_rf3(self): + """ + Test to validate the hosts are shuffled when the `shuffle_replicas` is truthy + @since 3.8 + @jira_ticket PYTHON-676 + @expected_result the request are spread across the replicas, + when one of them is down, the requests target the other avaiable ones + + @test_category policy + """ + use_singledc() + keyspace = 'test_token_aware_with_rf_3' + cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy(), shuffle_replicas=True)) + self._wait_for_nodes_up(range(1, 4), cluster) + + create_schema(cluster, session, keyspace, replication_factor=3) + self._insert(session, keyspace) + self._query(session, keyspace) + + query_count_one = self.coordinator_stats.get_query_count(1) + query_count_two = self.coordinator_stats.get_query_count(2) + query_count_three = self.coordinator_stats.get_query_count(3) + self.assertEqual(query_count_one + query_count_two + query_count_three, 12) + + self.coordinator_stats.reset_counts() + stop(1) + self._wait_for_nodes_down([1], cluster) + + self._query(session, keyspace) + + self.coordinator_stats.assert_query_count_equals(self, 1, 0) + query_count_two = self.coordinator_stats.get_query_count(2) + query_count_three = self.coordinator_stats.get_query_count(3) + self.assertEqual(query_count_two + query_count_three, 12) + + self.coordinator_stats.reset_counts() + stop(2) + self._wait_for_nodes_down([2], cluster) + + self._query(session, keyspace) + + self.coordinator_stats.assert_query_count_equals(self, 1, 0) + self.coordinator_stats.assert_query_count_equals(self, 2, 0) + self.coordinator_stats.assert_query_count_equals(self, 3, 12) + + cluster.shutdown() + def test_white_list(self): use_singledc() keyspace = 'test_white_list' diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 61f6bbb49e..e7235fda0f 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -731,6 +731,69 @@ def test_statement_keyspace(self): self.assertEqual(replicas + hosts[:2], qplan) cluster.metadata.get_replicas.assert_called_with(statement_keyspace, routing_key) + @patch('cassandra.policies.shuffle') + def test_shuffle(self, patched_shuffle): + """ + Test to validate the hosts are shuffled when the `shuffle_replicas` is truthy + @since 3.8 + @jira_ticket PYTHON-676 + @expected_result shuffle should be called when the `shuffle_replicas` is truthy + if the keyspace and the routing key are set + + @test_category policy + """ + hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)] + for host in hosts: + host.set_up() + + cluster = Mock(spec=Cluster) + cluster.metadata = Mock(spec=Metadata) + replicas = hosts[2:] + cluster.metadata.get_replicas.return_value = replicas + + child_policy = Mock() + child_policy.make_query_plan.return_value = hosts + child_policy.distance.return_value = HostDistance.LOCAL + + policy = TokenAwarePolicy(child_policy, shuffle_replicas=True) + policy.populate(cluster, hosts) + + # no keyspace, no shuffle happens + cluster.metadata.get_replicas.reset_mock() + child_policy.make_query_plan.reset_mock() + keyspace = None + routing_key = 'routing_key' + query = Statement(routing_key=routing_key) + qplan = list(policy.make_query_plan(keyspace, query)) + self.assertEqual(hosts, qplan) + self.assertEqual(cluster.metadata.get_replicas.call_count, 0) + child_policy.make_query_plan.assert_called_once_with(keyspace, query) + self.assertEqual(patched_shuffle.call_count, 0) + + # no routing_key, no shuffle happens + cluster.metadata.get_replicas.reset_mock() + child_policy.make_query_plan.reset_mock() + keyspace = "shuffle_keyspace" + routing_key = None + query = Statement(routing_key=routing_key) + qplan = list(policy.make_query_plan(keyspace, query)) + self.assertEqual(hosts, qplan) + self.assertEqual(cluster.metadata.get_replicas.call_count, 0) + child_policy.make_query_plan.assert_called_once_with(keyspace, query) + self.assertEqual(patched_shuffle.call_count, 0) + + #routing_key and keyspace set, shuffle should happen + cluster.metadata.get_replicas.reset_mock() + child_policy.make_query_plan.reset_mock() + keyspace = "shuffle_keyspace" + routing_key = 'routing_key' + query = Statement(routing_key=routing_key) + qplan = list(policy.make_query_plan(keyspace, query)) + self.assertEqual(set(replicas), set(qplan[:2])) + self.assertEqual(hosts[:2], qplan[2:]) + child_policy.make_query_plan.assert_called_once_with(keyspace, query) + self.assertEqual(patched_shuffle.call_count, 1) + class ConvictionPolicyTest(unittest.TestCase): def test_not_implemented(self): From bca421dca6c2f888489fcada2a573a63441598ff Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 25 Jan 2017 10:50:43 -0500 Subject: [PATCH 0971/2005] Added some check to the existing tests for the cqlengine connection --- tests/integration/cqlengine/connections/test_connection.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/cqlengine/connections/test_connection.py b/tests/integration/cqlengine/connections/test_connection.py index e3356771d3..bbaa31d7e3 100644 --- a/tests/integration/cqlengine/connections/test_connection.py +++ b/tests/integration/cqlengine/connections/test_connection.py @@ -98,6 +98,8 @@ def test_connection_session_switch(self): def test_connection_setup_with_setup(self): connection.setup(hosts=None, default_keyspace=None) + self.assertIsNotNone(connection.get_connection("default").cluster.metadata.get_host("127.0.0.1")) def test_connection_setup_with_default(self): connection.default() + self.assertIsNotNone(connection.get_connection("default").cluster.metadata.get_host("127.0.0.1")) From 2ab7e14885f2a1d21de9929f52a313d3d6426ad2 Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 25 Jan 2017 18:02:42 -0500 Subject: [PATCH 0972/2005] Fixed some tests in jenkins --- tests/integration/standard/test_cluster.py | 2 +- tests/integration/standard/test_metadata.py | 2 ++ tests/integration/standard/test_metrics.py | 6 +++++- tests/integration/standard/test_query.py | 2 +- 4 files changed, 9 insertions(+), 3 deletions(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 0e7e2689cb..2817bebec7 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -879,7 +879,7 @@ def test_add_profile_timeout(self): self.assertEqual(set(h.address for h in pools), set(('127.0.0.1',))) node2 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.2'])) - self.assertRaises(cassandra.OperationTimedOut, cluster.add_execution_profile, 'node2', node2, pool_wait_timeout=0.0000001) + self.assertRaises(cassandra.OperationTimedOut, cluster.add_execution_profile, 'node2', node2, pool_wait_timeout=0.000000001) class LocalHostAdressTranslator(AddressTranslator): diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 66bb3b48d6..d392636d70 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -37,6 +37,7 @@ BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, BasicExistingKeyspaceUnitTestCase, drop_keyspace_shutdown_cluster, CASSANDRA_VERSION, \ BasicExistingSegregatedKeyspaceUnitTestCase, dseonly, DSE_VERSION, get_supported_protocol_versions, greaterthanorequalcass30 +from tests.integration import greaterthancass21 def setup_module(): use_singledc() @@ -1047,6 +1048,7 @@ def test_export_keyspace_schema_udts(self): cluster.shutdown() + @greaterthancass21 def test_case_sensitivity(self): """ Test that names that need to be escaped in CREATE statements are diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index c59b58f989..45e5256fb3 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -363,8 +363,12 @@ def test_metrics_per_cluster(self): ra.remove_ra(self.session) # Make sure a poorly coded RA doesn't cause issues - RequestAnalyzer(self.session, throw_on_success=False, throw_on_fail=True) + ra = RequestAnalyzer(self.session, throw_on_success=False, throw_on_fail=True) self.session.execute("SELECT release_version FROM system.local") + + ra.remove_ra(self.session) + + RequestAnalyzer(self.session, throw_on_success=True) try: self.session.execute("nonesense") except SyntaxException: diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 4d80ebae77..e655312fbd 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -83,7 +83,7 @@ def test_row_error_message(self): self.session.execute(ss) with self.assertRaises(DriverException) as context: self.session.execute("SELECT * FROM {0}.{1}".format(self.keyspace_name, self.function_table_name)) - self.assertIn("Failed decoding result column", context.exception.message) + self.assertIn("Failed decoding result column", str(context.exception)) def test_trace_id_to_resultset(self): From 802663acb42526a2cb3769478e832704574fbfea Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 27 Jan 2017 10:27:58 -0500 Subject: [PATCH 0973/2005] Fixed function name change in ccmlib --- tests/integration/long/test_ipv6.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/long/test_ipv6.py b/tests/integration/long/test_ipv6.py index 618050fe42..8808a90d9e 100644 --- a/tests/integration/long/test_ipv6.py +++ b/tests/integration/long/test_ipv6.py @@ -65,7 +65,7 @@ def validate_host_viable(): # this is something ccm does when starting, but preemptively check to avoid # spinning up the cluster if it's not going to work try: - common.check_socket_available(('::1', 9042)) + common.assert_socket_available(('::1', 9042)) except: raise unittest.SkipTest('failed binding ipv6 loopback ::1 on 9042') From bc213bc07ff95a201d5c02685f2c12a7b4b7f2cd Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 27 Jan 2017 17:43:33 -0500 Subject: [PATCH 0974/2005] Fixed some tests to make sure there is a shuffle going on --- .../long/test_loadbalancingpolicies.py | 54 ++++++++++++++----- 1 file changed, 42 insertions(+), 12 deletions(-) diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index a2e75ad950..17bcd91f11 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -536,14 +536,28 @@ def test_token_aware_with_shuffle_rf2(self): self._wait_for_nodes_up(range(1, 4), cluster) create_schema(cluster, session, keyspace, replication_factor=2) - self._insert(session, keyspace) - self._query(session, keyspace) - self.coordinator_stats.assert_query_count_equals(self, 1, 0) - query_count_two = self.coordinator_stats.get_query_count(2) - query_count_three = self.coordinator_stats.get_query_count(3) - self.assertEqual(query_count_two + query_count_three, 12) + LIMIT_TRIES = 20 + previous_query_count_two, previous_query_count_three = None, None + for _ in range(LIMIT_TRIES): + self._insert(session, keyspace) + self._query(session, keyspace) + + self.coordinator_stats.assert_query_count_equals(self, 1, 0) + query_count_two = self.coordinator_stats.get_query_count(2) + query_count_three = self.coordinator_stats.get_query_count(3) + self.assertEqual(query_count_two + query_count_three, 12) + self.coordinator_stats.reset_counts() + + if previous_query_count_two is not None: + if query_count_two != previous_query_count_two or query_count_three != previous_query_count_three: + break + previous_query_count_two, previous_query_count_three = query_count_two, query_count_three + self.coordinator_stats.reset_counts() + else: + raise Exception("After {0} tries shuffle returned the same output".format(LIMIT_TRIES)) + #check TokenAwarePolicy still return the remaining replicas when one goes down self.coordinator_stats.reset_counts() stop(2) self._wait_for_nodes_down([2], cluster) @@ -572,14 +586,30 @@ def test_token_aware_with_shuffle_rf3(self): self._wait_for_nodes_up(range(1, 4), cluster) create_schema(cluster, session, keyspace, replication_factor=3) - self._insert(session, keyspace) - self._query(session, keyspace) - query_count_one = self.coordinator_stats.get_query_count(1) - query_count_two = self.coordinator_stats.get_query_count(2) - query_count_three = self.coordinator_stats.get_query_count(3) - self.assertEqual(query_count_one + query_count_two + query_count_three, 12) + LIMIT_TRIES = 20 + previous_query_count_one, previous_query_count_two, previous_query_count_three = None, None, None + for _ in range(LIMIT_TRIES): + self._insert(session, keyspace) + self._query(session, keyspace) + + query_count_one = self.coordinator_stats.get_query_count(1) + query_count_two = self.coordinator_stats.get_query_count(2) + query_count_three = self.coordinator_stats.get_query_count(3) + self.assertEqual(query_count_one + query_count_two + query_count_three, 12) + + if previous_query_count_two is not None: + if query_count_one != previous_query_count_one \ + or query_count_two != previous_query_count_two \ + or query_count_three != previous_query_count_three: + break + previous_query_count_one, previous_query_count_two, previous_query_count_three = \ + query_count_one, query_count_two, query_count_three + self.coordinator_stats.reset_counts() + else: + raise Exception("After {0} tries shuffle returned the same output".format(LIMIT_TRIES)) + # check TokenAwarePolicy still return the remaining replicas when one goes down self.coordinator_stats.reset_counts() stop(1) self._wait_for_nodes_down([1], cluster) From db64bbabec234707a2ceeca9017254a3997ea409 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 2 Feb 2017 10:16:32 -0500 Subject: [PATCH 0975/2005] avoid beta protocols when downgrading and added some protocol checks --- cassandra/__init__.py | 30 ++++++++++++++++++++++++++++++ cassandra/cluster.py | 17 +++++++++-------- cassandra/protocol.py | 14 +++++++------- 3 files changed, 46 insertions(+), 15 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 95e70b4577..4771118b86 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -164,6 +164,11 @@ class ProtocolVersion(object): A tuple of all supported protocol versions """ + BETA_VERSIONS = (V5,) + """ + A tuple of all beta protocol versions + """ + MIN_SUPPORTED = min(SUPPORTED_VERSIONS) """ Minimum protocol version supported by this driver. @@ -174,6 +179,31 @@ class ProtocolVersion(object): Maximum protocol versioni supported by this driver. """ + @classmethod + def get_lower_supported(cls, previous_version): + """ + Return the lower supported protocol version. Beta versions are omitted. + """ + try: + version = next(v for v in sorted(ProtocolVersion.SUPPORTED_VERSIONS, reverse=True) if + v not in ProtocolVersion.BETA_VERSIONS and v < previous_version) + except StopIteration: + version = 0 + + return version + + @classmethod + def uses_int_query_flags(cls, version): + return version >= cls.V5 + + @classmethod + def uses_prepare_flags(cls, version): + return version >= cls.V5 + + @classmethod + def uses_error_code_map(cls, version): + return version >= cls.V5 + class SchemaChangeType(object): DROPPED = 'DROPPED' diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 18c1734474..abbd5c82a8 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1113,14 +1113,15 @@ def protocol_downgrade(self, host_addr, previous_version): if self._protocol_version_explicit: raise DriverException("ProtocolError returned from server while using explicitly set client protocol_version %d" % (previous_version,)) - try: - new_version = next(v for v in sorted(ProtocolVersion.SUPPORTED_VERSIONS, reverse=True) if v < previous_version) - log.warning("Downgrading core protocol version from %d to %d for %s. " - "To avoid this, it is best practice to explicitly set Cluster(protocol_version) to the version supported by your cluster. " - "http://datastax.github.io/python-driver/api/cassandra/cluster.html#cassandra.cluster.Cluster.protocol_version", self.protocol_version, new_version, host_addr) - self.protocol_version = new_version - except StopIteration: - raise DriverException("Cannot downgrade protocol version below minimum supported version: %d" % (ProtocolVersion.MIN_SUPPORTED,)) + new_version = ProtocolVersion.get_lower_supported(previous_version) + if new_version < ProtocolVersion.MIN_SUPPORTED: + raise DriverException( + "Cannot downgrade protocol version below minimum supported version: %d" % (ProtocolVersion.MIN_SUPPORTED,)) + + log.warning("Downgrading core protocol version from %d to %d for %s. " + "To avoid this, it is best practice to explicitly set Cluster(protocol_version) to the version supported by your cluster. " + "http://datastax.github.io/python-driver/api/cassandra/cluster.html#cassandra.cluster.Cluster.protocol_version", self.protocol_version, new_version, host_addr) + self.protocol_version = new_version def connect(self, keyspace=None, wait_for_all_pools=False): """ diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 313c1aa9bd..2be5c5654f 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -22,6 +22,7 @@ from six.moves import range import io +from cassandra import ProtocolVersion from cassandra import type_codes, DriverException from cassandra import (Unavailable, WriteTimeout, ReadTimeout, WriteFailure, ReadFailure, FunctionFailure, @@ -260,7 +261,7 @@ def recv_error_info(f, protocol_version): received_responses = read_int(f) required_responses = read_int(f) - if protocol_version >= 5: + if ProtocolVersion.uses_error_code_map(protocol_version): error_code_map = read_error_code_map(f) failures = len(error_code_map) else: @@ -308,7 +309,7 @@ def recv_error_info(f, protocol_version): received_responses = read_int(f) required_responses = read_int(f) - if protocol_version >= 5: + if ProtocolVersion.uses_error_code_map(protocol_version): error_code_map = read_error_code_map(f) failures = len(error_code_map) else: @@ -557,7 +558,7 @@ def send_body(self, f, protocol_version): if self.timestamp is not None: flags |= _PROTOCOL_TIMESTAMP - if protocol_version >= 5: + if ProtocolVersion.uses_int_query_flags(protocol_version): write_uint(f, flags) else: write_byte(f, flags) @@ -772,7 +773,7 @@ def __init__(self, query): def send_body(self, f, protocol_version): write_longstring(f, self.query) - if protocol_version >= 5: + if ProtocolVersion.uses_prepare_flags(protocol_version): # Write the flags byte; with 0 value for now, but this should change in PYTHON-678 write_uint(f, 0) @@ -780,7 +781,6 @@ def send_body(self, f, protocol_version): class ExecuteMessage(_MessageType): opcode = 0x0A name = 'EXECUTE' - def __init__(self, query_id, query_params, consistency_level, serial_consistency_level=None, fetch_size=None, paging_state=None, timestamp=None, skip_meta=False): @@ -828,7 +828,7 @@ def send_body(self, f, protocol_version): if self.skip_meta: flags |= _SKIP_METADATA_FLAG - if protocol_version >= 5: + if ProtocolVersion.uses_int_query_flags(protocol_version): write_uint(f, flags) else: write_byte(f, flags) @@ -882,7 +882,7 @@ def send_body(self, f, protocol_version): if self.timestamp is not None: flags |= _PROTOCOL_TIMESTAMP - if protocol_version >= 5: + if ProtocolVersion.uses_int_query_flags(protocol_version): write_int(f, flags) else: write_byte(f, flags) From 530173937beccdc85667e3fa2d39da44b55c9217 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 3 Feb 2017 12:05:34 -0600 Subject: [PATCH 0976/2005] fixing node startup timing issue in consistency tests --- tests/integration/long/utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/integration/long/utils.py b/tests/integration/long/utils.py index e5909dbfb9..ab87172a3f 100644 --- a/tests/integration/long/utils.py +++ b/tests/integration/long/utils.py @@ -21,7 +21,7 @@ from cassandra.query import named_tuple_factory -from tests.integration import get_node, get_cluster +from tests.integration import get_node, get_cluster, wait_for_node_socket IP_FORMAT = '127.0.0.%s' @@ -132,6 +132,7 @@ def wait_for_up(cluster, node): while tries < 100: host = cluster.metadata.get_host(addr) if host and host.is_up: + wait_for_node_socket(get_node(node), 60) log.debug("Done waiting for node %s to be up", node) return else: From ec45b8c827db7440442a7c1a53d5e988fab47ff7 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 3 Feb 2017 17:16:26 -0600 Subject: [PATCH 0977/2005] Adding binary port to bootstrap method --- tests/integration/long/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/long/utils.py b/tests/integration/long/utils.py index ab87172a3f..b7120ff46d 100644 --- a/tests/integration/long/utils.py +++ b/tests/integration/long/utils.py @@ -107,6 +107,7 @@ def bootstrap(node, data_center=None, token=None): auto_bootstrap=False, thrift_interface=(IP_FORMAT % node, 9160), storage_interface=(IP_FORMAT % node, 7000), + binary_interface=(IP_FORMAT % node, 9042), jmx_port=str(7000 + 100 * node), remote_debug_port=0, initial_token=token if token else node * 10) From e821c5e7a6ed0bc9e8cec1f248401b4727bc809e Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 23 Jan 2017 12:27:58 -0500 Subject: [PATCH 0978/2005] let users specify Cython version --- setup.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 95f046ad30..0540478b5b 100644 --- a/setup.py +++ b/setup.py @@ -388,7 +388,11 @@ def run_setup(extensions): # 1.) build_ext eats errors at compile time, letting the install complete while producing useful feedback # 2.) there could be a case where the python environment has cython installed but the system doesn't have build tools if pre_build_check(): - kw['setup_requires'] = ['Cython>=0.20,<0.25'] + cython_dep = 'Cython>=0.20,<0.25' + user_specified_cython_version = os.environ.get('CASS_DRIVER_ALLOWED_CYTHON_VERSION') + if user_specified_cython_version is not None: + cython_dep = 'Cython==%s' % (user_specified_cython_version,) + kw['setup_requires'] = [cython_dep] else: sys.stderr.write("Bypassing Cython setup requirement\n") From 105b750eb1145de24e6742ea5bdba608f482ce3a Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 9 Feb 2017 12:21:28 -0500 Subject: [PATCH 0979/2005] add general semantics section to FAQ --- docs/cqlengine/faq.rst | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/docs/cqlengine/faq.rst b/docs/cqlengine/faq.rst index dcaefae22c..fe269e3b03 100644 --- a/docs/cqlengine/faq.rst +++ b/docs/cqlengine/faq.rst @@ -48,3 +48,20 @@ resolve to the statement with the lastest timestamp. assert MyModel.objects(id=1).first().count == 3 assert MyModel.objects(id=1).first().text == '111' +How can I delete individual values from a row? +------------------------------------------------- + +When inserting with CQLEngine, ``None`` is equivalent to CQL ``NULL`` or to +issuing a ``DELETE`` on that column. For example: + +.. code-block:: python + + class MyModel(Model): + id = columns.Integer(primary_key=True) + text = columns.Text() + + m = MyModel.create(id=1, text='We can delete this with None') + assert MyModel.objects(id=1).first().text is not None + + m.update(text=None) + assert MyModel.objects(id=1).first().text is None From e9c56dbfab1e65c5b2c5c3922870c2c50e7130b4 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 9 Feb 2017 13:43:17 -0500 Subject: [PATCH 0980/2005] factor out setup code --- .../long/test_loadbalancingpolicies.py | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index 17bcd91f11..0f9e9bd44c 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -530,12 +530,8 @@ def test_token_aware_with_shuffle_rf2(self): @test_category policy """ - use_singledc() keyspace = 'test_token_aware_with_rf_2' - cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy(), shuffle_replicas=True)) - self._wait_for_nodes_up(range(1, 4), cluster) - - create_schema(cluster, session, keyspace, replication_factor=2) + cluster, session = self._set_up_shuffle_test(keyspace, replication_factor=2) LIMIT_TRIES = 20 previous_query_count_two, previous_query_count_three = None, None @@ -570,6 +566,14 @@ def test_token_aware_with_shuffle_rf2(self): cluster.shutdown() + def _set_up_shuffle_test(self, keyspace, replication_factor): + use_singledc() + cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy(), shuffle_replicas=True)) + self._wait_for_nodes_up(range(1, 4), cluster) + + create_schema(cluster, session, keyspace, replication_factor=replication_factor) + return cluster, session + def test_token_aware_with_shuffle_rf3(self): """ Test to validate the hosts are shuffled when the `shuffle_replicas` is truthy @@ -580,12 +584,8 @@ def test_token_aware_with_shuffle_rf3(self): @test_category policy """ - use_singledc() keyspace = 'test_token_aware_with_rf_3' - cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy(), shuffle_replicas=True)) - self._wait_for_nodes_up(range(1, 4), cluster) - - create_schema(cluster, session, keyspace, replication_factor=3) + cluster, session = self._set_up_shuffle_test(keyspace, replication_factor=3) LIMIT_TRIES = 20 previous_query_count_one, previous_query_count_two, previous_query_count_three = None, None, None From 2d761f927828214d1ee7827037b689bbe869a48a Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 9 Feb 2017 14:02:17 -0500 Subject: [PATCH 0981/2005] pull out query loop logic --- .../long/test_loadbalancingpolicies.py | 49 +++++++------------ 1 file changed, 17 insertions(+), 32 deletions(-) diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index 0f9e9bd44c..6739cf203c 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -533,25 +533,7 @@ def test_token_aware_with_shuffle_rf2(self): keyspace = 'test_token_aware_with_rf_2' cluster, session = self._set_up_shuffle_test(keyspace, replication_factor=2) - LIMIT_TRIES = 20 - previous_query_count_two, previous_query_count_three = None, None - for _ in range(LIMIT_TRIES): - self._insert(session, keyspace) - self._query(session, keyspace) - - self.coordinator_stats.assert_query_count_equals(self, 1, 0) - query_count_two = self.coordinator_stats.get_query_count(2) - query_count_three = self.coordinator_stats.get_query_count(3) - self.assertEqual(query_count_two + query_count_three, 12) - self.coordinator_stats.reset_counts() - - if previous_query_count_two is not None: - if query_count_two != previous_query_count_two or query_count_three != previous_query_count_three: - break - previous_query_count_two, previous_query_count_three = query_count_two, query_count_three - self.coordinator_stats.reset_counts() - else: - raise Exception("After {0} tries shuffle returned the same output".format(LIMIT_TRIES)) + self._check_different_query_order(session=session, keyspace=keyspace) #check TokenAwarePolicy still return the remaining replicas when one goes down self.coordinator_stats.reset_counts() @@ -574,19 +556,7 @@ def _set_up_shuffle_test(self, keyspace, replication_factor): create_schema(cluster, session, keyspace, replication_factor=replication_factor) return cluster, session - def test_token_aware_with_shuffle_rf3(self): - """ - Test to validate the hosts are shuffled when the `shuffle_replicas` is truthy - @since 3.8 - @jira_ticket PYTHON-676 - @expected_result the request are spread across the replicas, - when one of them is down, the requests target the other avaiable ones - - @test_category policy - """ - keyspace = 'test_token_aware_with_rf_3' - cluster, session = self._set_up_shuffle_test(keyspace, replication_factor=3) - + def _check_different_query_order(self, session, keyspace): LIMIT_TRIES = 20 previous_query_count_one, previous_query_count_two, previous_query_count_three = None, None, None for _ in range(LIMIT_TRIES): @@ -609,6 +579,21 @@ def test_token_aware_with_shuffle_rf3(self): else: raise Exception("After {0} tries shuffle returned the same output".format(LIMIT_TRIES)) + def test_token_aware_with_shuffle_rf3(self): + """ + Test to validate the hosts are shuffled when the `shuffle_replicas` is truthy + @since 3.8 + @jira_ticket PYTHON-676 + @expected_result the request are spread across the replicas, + when one of them is down, the requests target the other avaiable ones + + @test_category policy + """ + keyspace = 'test_token_aware_with_rf_3' + cluster, session = self._set_up_shuffle_test(keyspace, replication_factor=3) + + self._check_different_query_order(session=session, keyspace=keyspace) + # check TokenAwarePolicy still return the remaining replicas when one goes down self.coordinator_stats.reset_counts() stop(1) From 785b46a8ffb1c15050d4603622db3627de14e329 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 9 Feb 2017 14:13:00 -0500 Subject: [PATCH 0982/2005] simplify different-order checking --- .../long/test_loadbalancingpolicies.py | 25 +++++++++---------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index 6739cf203c..f9a9d77497 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -558,23 +558,22 @@ def _set_up_shuffle_test(self, keyspace, replication_factor): def _check_different_query_order(self, session, keyspace): LIMIT_TRIES = 20 - previous_query_count_one, previous_query_count_two, previous_query_count_three = None, None, None + + query_counts = set() for _ in range(LIMIT_TRIES): self._insert(session, keyspace) self._query(session, keyspace) - query_count_one = self.coordinator_stats.get_query_count(1) - query_count_two = self.coordinator_stats.get_query_count(2) - query_count_three = self.coordinator_stats.get_query_count(3) - self.assertEqual(query_count_one + query_count_two + query_count_three, 12) - - if previous_query_count_two is not None: - if query_count_one != previous_query_count_one \ - or query_count_two != previous_query_count_two \ - or query_count_three != previous_query_count_three: - break - previous_query_count_one, previous_query_count_two, previous_query_count_three = \ - query_count_one, query_count_two, query_count_three + loop_qcs = (self.coordinator_stats.get_query_count(1), + self.coordinator_stats.get_query_count(2), + self.coordinator_stats.get_query_count(3)) + + query_counts.add(loop_qcs) + self.assertEqual(sum(loop_qcs), 12) + + # end the loop if we get more than one query ordering + if len(loop_qcs) > 1: + break self.coordinator_stats.reset_counts() else: raise Exception("After {0} tries shuffle returned the same output".format(LIMIT_TRIES)) From 64af6acefb09926e60bc3289a45ecb8e336678a8 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 9 Feb 2017 14:17:53 -0500 Subject: [PATCH 0983/2005] rename order-change-checking method --- tests/integration/long/test_loadbalancingpolicies.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index f9a9d77497..bf7ed37ff1 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -533,7 +533,7 @@ def test_token_aware_with_shuffle_rf2(self): keyspace = 'test_token_aware_with_rf_2' cluster, session = self._set_up_shuffle_test(keyspace, replication_factor=2) - self._check_different_query_order(session=session, keyspace=keyspace) + self._check_query_order_changes(session=session, keyspace=keyspace) #check TokenAwarePolicy still return the remaining replicas when one goes down self.coordinator_stats.reset_counts() @@ -556,7 +556,7 @@ def _set_up_shuffle_test(self, keyspace, replication_factor): create_schema(cluster, session, keyspace, replication_factor=replication_factor) return cluster, session - def _check_different_query_order(self, session, keyspace): + def _check_query_order_changes(self, session, keyspace): LIMIT_TRIES = 20 query_counts = set() @@ -591,7 +591,7 @@ def test_token_aware_with_shuffle_rf3(self): keyspace = 'test_token_aware_with_rf_3' cluster, session = self._set_up_shuffle_test(keyspace, replication_factor=3) - self._check_different_query_order(session=session, keyspace=keyspace) + self._check_query_order_changes(session=session, keyspace=keyspace) # check TokenAwarePolicy still return the remaining replicas when one goes down self.coordinator_stats.reset_counts() From 763c1a3c9f2211860b247e4b39e2d61e02dee5c6 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 9 Feb 2017 14:28:40 -0500 Subject: [PATCH 0984/2005] simplify loop logic --- .../integration/long/test_loadbalancingpolicies.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index bf7ed37ff1..10db3037e2 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -557,10 +557,13 @@ def _set_up_shuffle_test(self, keyspace, replication_factor): return cluster, session def _check_query_order_changes(self, session, keyspace): - LIMIT_TRIES = 20 + LIMIT_TRIES, tried, query_counts = 20, 0, set() + + while len(query_counts) <= 1: + tried += 1 + if tried >= LIMIT_TRIES: + raise Exception("After {0} tries shuffle returned the same output".format(LIMIT_TRIES)) - query_counts = set() - for _ in range(LIMIT_TRIES): self._insert(session, keyspace) self._query(session, keyspace) @@ -572,11 +575,7 @@ def _check_query_order_changes(self, session, keyspace): self.assertEqual(sum(loop_qcs), 12) # end the loop if we get more than one query ordering - if len(loop_qcs) > 1: - break self.coordinator_stats.reset_counts() - else: - raise Exception("After {0} tries shuffle returned the same output".format(LIMIT_TRIES)) def test_token_aware_with_shuffle_rf3(self): """ From fb413ca08eb56290abe804036bbb65d38b30a904 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 9 Feb 2017 14:39:34 -0500 Subject: [PATCH 0985/2005] shorten line --- tests/integration/long/test_loadbalancingpolicies.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index 10db3037e2..ab734a5818 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -550,7 +550,9 @@ def test_token_aware_with_shuffle_rf2(self): def _set_up_shuffle_test(self, keyspace, replication_factor): use_singledc() - cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy(), shuffle_replicas=True)) + cluster, session = self._cluster_session_with_lbp( + TokenAwarePolicy(RoundRobinPolicy(), shuffle_replicas=True) + ) self._wait_for_nodes_up(range(1, 4), cluster) create_schema(cluster, session, keyspace, replication_factor=replication_factor) From b934c303e54b640798754d1cc4d1bfbd057dd321 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 9 Feb 2017 16:05:33 -0500 Subject: [PATCH 0986/2005] move utils so tests are together --- .../long/test_loadbalancingpolicies.py | 62 +++++++++---------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index ab734a5818..62598f051b 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -548,37 +548,6 @@ def test_token_aware_with_shuffle_rf2(self): cluster.shutdown() - def _set_up_shuffle_test(self, keyspace, replication_factor): - use_singledc() - cluster, session = self._cluster_session_with_lbp( - TokenAwarePolicy(RoundRobinPolicy(), shuffle_replicas=True) - ) - self._wait_for_nodes_up(range(1, 4), cluster) - - create_schema(cluster, session, keyspace, replication_factor=replication_factor) - return cluster, session - - def _check_query_order_changes(self, session, keyspace): - LIMIT_TRIES, tried, query_counts = 20, 0, set() - - while len(query_counts) <= 1: - tried += 1 - if tried >= LIMIT_TRIES: - raise Exception("After {0} tries shuffle returned the same output".format(LIMIT_TRIES)) - - self._insert(session, keyspace) - self._query(session, keyspace) - - loop_qcs = (self.coordinator_stats.get_query_count(1), - self.coordinator_stats.get_query_count(2), - self.coordinator_stats.get_query_count(3)) - - query_counts.add(loop_qcs) - self.assertEqual(sum(loop_qcs), 12) - - # end the loop if we get more than one query ordering - self.coordinator_stats.reset_counts() - def test_token_aware_with_shuffle_rf3(self): """ Test to validate the hosts are shuffled when the `shuffle_replicas` is truthy @@ -618,6 +587,37 @@ def test_token_aware_with_shuffle_rf3(self): cluster.shutdown() + def _set_up_shuffle_test(self, keyspace, replication_factor): + use_singledc() + cluster, session = self._cluster_session_with_lbp( + TokenAwarePolicy(RoundRobinPolicy(), shuffle_replicas=True) + ) + self._wait_for_nodes_up(range(1, 4), cluster) + + create_schema(cluster, session, keyspace, replication_factor=replication_factor) + return cluster, session + + def _check_query_order_changes(self, session, keyspace): + LIMIT_TRIES, tried, query_counts = 20, 0, set() + + while len(query_counts) <= 1: + tried += 1 + if tried >= LIMIT_TRIES: + raise Exception("After {0} tries shuffle returned the same output".format(LIMIT_TRIES)) + + self._insert(session, keyspace) + self._query(session, keyspace) + + loop_qcs = (self.coordinator_stats.get_query_count(1), + self.coordinator_stats.get_query_count(2), + self.coordinator_stats.get_query_count(3)) + + query_counts.add(loop_qcs) + self.assertEqual(sum(loop_qcs), 12) + + # end the loop if we get more than one query ordering + self.coordinator_stats.reset_counts() + def test_white_list(self): use_singledc() keyspace = 'test_white_list' From 8aa1f24ee5edf410141db36c122ef7d4d8adaf3d Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 6 Feb 2017 12:09:44 -0500 Subject: [PATCH 0987/2005] Fixed the profile timeout --- tests/integration/__init__.py | 4 ++-- tests/integration/cqlengine/__init__.py | 4 ++++ .../cqlengine/connections/test_connection.py | 3 +-- .../integration/cqlengine/query/test_named.py | 1 - .../integration/cqlengine/test_connections.py | 4 +++- tests/integration/long/test_schema.py | 2 ++ tests/integration/standard/test_cluster.py | 24 ++++++++++++++++--- tests/integration/standard/test_connection.py | 3 ++- .../standard/test_custom_protocol_handler.py | 10 ++++---- .../standard/test_cython_protocol_handlers.py | 4 ++-- tests/integration/standard/test_metadata.py | 4 ++++ tests/integration/standard/test_metrics.py | 3 +++ 12 files changed, 50 insertions(+), 16 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 2b720f786c..4996af2356 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -396,7 +396,7 @@ def execute_until_pass(session, query): try: return session.execute(query) except (ConfigurationException, AlreadyExists): - log.warn("Recieved already exists from query {0} not exiting".format(query)) + log.warn("Received already exists from query {0} not exiting".format(query)) # keyspace/table was already created/dropped return except (OperationTimedOut, ReadTimeout, ReadFailure, WriteTimeout, WriteFailure): @@ -414,7 +414,7 @@ def execute_with_long_wait_retry(session, query, timeout=30): try: return session.execute(query, timeout=timeout) except (ConfigurationException, AlreadyExists): - log.warn("Recieved already exists from query {0} not exiting".format(query)) + log.warn("Received already exists from query {0} not exiting".format(query)) # keyspace/table was already created/dropped return except (OperationTimedOut, ReadTimeout, ReadFailure, WriteTimeout, WriteFailure): diff --git a/tests/integration/cqlengine/__init__.py b/tests/integration/cqlengine/__init__.py index 3f163ded64..ef61eed114 100644 --- a/tests/integration/cqlengine/__init__.py +++ b/tests/integration/cqlengine/__init__.py @@ -41,6 +41,10 @@ def setup_package(): create_keyspace_simple(DEFAULT_KEYSPACE, 1) +def teardown_package(): + connection.unregister_connection("default") + + def is_prepend_reversed(): # do we have https://issues.apache.org/jira/browse/CASSANDRA-8733 ? ver, _ = get_server_versions() diff --git a/tests/integration/cqlengine/connections/test_connection.py b/tests/integration/cqlengine/connections/test_connection.py index 659893bd0f..0352e726fb 100644 --- a/tests/integration/cqlengine/connections/test_connection.py +++ b/tests/integration/cqlengine/connections/test_connection.py @@ -40,7 +40,7 @@ class ConnectionTest(BaseCassEngTestCase): @classmethod def setUpClass(cls): - cls.original_cluster = connection.get_cluster() + connection.unregister_connection('default') cls.keyspace1 = 'ctest1' cls.keyspace2 = 'ctest2' super(ConnectionTest, cls).setUpClass() @@ -56,7 +56,6 @@ def tearDownClass(cls): execute_with_long_wait_retry(cls.setup_session, "DROP KEYSPACE {0}".format(cls.keyspace1)) execute_with_long_wait_retry(cls.setup_session, "DROP KEYSPACE {0}".format(cls.keyspace2)) models.DEFAULT_KEYSPACE = DEFAULT_KEYSPACE - cls.original_cluster.shutdown() cls.setup_cluster.shutdown() setup_connection(DEFAULT_KEYSPACE) models.DEFAULT_KEYSPACE diff --git a/tests/integration/cqlengine/query/test_named.py b/tests/integration/cqlengine/query/test_named.py index 55129cb985..a02e0a4bf4 100644 --- a/tests/integration/cqlengine/query/test_named.py +++ b/tests/integration/cqlengine/query/test_named.py @@ -291,7 +291,6 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): models.DEFAULT_KEYSPACE = cls.default_keyspace - setup_connection(models.DEFAULT_KEYSPACE) super(TestNamedWithMV, cls).tearDownClass() @greaterthanorequalcass30 diff --git a/tests/integration/cqlengine/test_connections.py b/tests/integration/cqlengine/test_connections.py index 1f30b0a972..ec3babcf14 100644 --- a/tests/integration/cqlengine/test_connections.py +++ b/tests/integration/cqlengine/test_connections.py @@ -241,7 +241,8 @@ def test_connection_param_validation(self): @test_category object_mapper """ - session = Cluster(['127.0.0.1']).connect() + cluster = Cluster(['127.0.0.1']) + session = cluster.connect() with self.assertRaises(CQLEngineException): conn.register_connection("bad_coonection1", session=session, consistency="not_null") with self.assertRaises(CQLEngineException): @@ -252,6 +253,7 @@ def test_connection_param_validation(self): conn.register_connection("bad_coonection4", session=session, cluster_options="not_null") with self.assertRaises(CQLEngineException): conn.register_connection("bad_coonection5", hosts="not_null", session=session) + cluster.shutdown() class BatchQueryConnectionTests(BaseCassEngTestCase): diff --git a/tests/integration/long/test_schema.py b/tests/integration/long/test_schema.py index 349a158af8..d2131ed8cf 100644 --- a/tests/integration/long/test_schema.py +++ b/tests/integration/long/test_schema.py @@ -113,6 +113,7 @@ def test_for_schema_disagreements_same_keyspace(self): execute_until_pass(session, "INSERT INTO test.cf (key, value) VALUES ({0}, {0})".format(j)) execute_until_pass(session, "DROP KEYSPACE test") + cluster.shutdown() def test_for_schema_disagreement_attribute(self): """ @@ -149,6 +150,7 @@ def test_for_schema_disagreement_attribute(self): self.check_and_wait_for_agreement(session, rs, True) rs = session.execute("DROP KEYSPACE test_schema_disagreement") self.check_and_wait_for_agreement(session, rs, True) + cluster.shutdown() def check_and_wait_for_agreement(self, session, rs, exepected): self.assertEqual(rs.response_future.is_schema_agreed, exepected) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 2817bebec7..47e76b78fd 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -35,6 +35,7 @@ from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, CASSANDRA_VERSION, execute_until_pass, execute_with_long_wait_retry, get_node,\ MockLoggingHandler, get_unsupported_lower_protocol, get_unsupported_upper_protocol, protocolv5 from tests.integration.util import assert_quiescent_pool_state +import sys def setup_module(): @@ -74,6 +75,7 @@ def test_ignored_host_up(self): self.assertTrue(host.is_up) else: self.assertIsNone(host.is_up) + cluster.shutdown() def test_host_resolution(self): """ @@ -129,6 +131,7 @@ def test_raise_error_on_control_connection_timeout(self): with self.assertRaisesRegexp(NoHostAvailable, "OperationTimedOut\('errors=Timed out creating connection \(1 seconds\)"): cluster.connect() + cluster.shutdown() get_node(1).resume() @@ -431,7 +434,7 @@ def test_refresh_schema_type(self): self.assertEqual(original_test1rf_meta.export_as_string(), current_test1rf_meta.export_as_string()) self.assertIsNot(original_type_meta, current_type_meta) self.assertEqual(original_type_meta.as_cql_query(), current_type_meta.as_cql_query()) - session.shutdown() + cluster.shutdown() def test_refresh_schema_no_wait(self): @@ -879,7 +882,17 @@ def test_add_profile_timeout(self): self.assertEqual(set(h.address for h in pools), set(('127.0.0.1',))) node2 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.2'])) - self.assertRaises(cassandra.OperationTimedOut, cluster.add_execution_profile, 'node2', node2, pool_wait_timeout=0.000000001) + + max_retry_count = 10 + for i in range(max_retry_count): + start = time.time() + try: + self.assertRaises(cassandra.OperationTimedOut, cluster.add_execution_profile, 'node2', + node2, pool_wait_timeout=sys.float_info.min) + except Exception: + end = time.time() + self.assertAlmostEqual(start, end, 1) + break class LocalHostAdressTranslator(AddressTranslator): @@ -933,6 +946,7 @@ def test_address_translator_with_mixed_nodes(self): c.connect() for host in c.metadata.all_hosts(): self.assertEqual(adder_map.get(str(host)), host.broadcast_address) + c.shutdown() class ContextManagementTest(unittest.TestCase): @@ -1090,6 +1104,7 @@ def test_prepare_on_ignored_hosts(self): # address for c in cluster.connection_factory.mock_calls: self.assertEqual(call(unignored_address), c) + cluster.shutdown() class DuplicateRpcTest(unittest.TestCase): @@ -1120,12 +1135,14 @@ def test_duplicate(self): mock_handler = MockLoggingHandler() logger = logging.getLogger(cassandra.cluster.__name__) logger.addHandler(mock_handler) - test_cluster = self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, load_balancing_policy=self.load_balancing_policy) + test_cluster = Cluster(protocol_version=PROTOCOL_VERSION, load_balancing_policy=self.load_balancing_policy) test_cluster.connect() warnings = mock_handler.messages.get("warning") self.assertEqual(len(warnings), 1) self.assertTrue('multiple' in warnings[0]) logger.removeHandler(mock_handler) + test_cluster.shutdown() + @protocolv5 @@ -1166,3 +1183,4 @@ def test_valid_protocol_version_beta_options_connect(self): session = cluster.connect() self.assertEqual(cluster.protocol_version, cassandra.ProtocolVersion.MAX_SUPPORTED) self.assertTrue(session.execute("select release_version from system.local")[0]) + cluster.shutdown() diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 4beededeb7..538cf406f0 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -364,7 +364,8 @@ def test_connect_timeout(self): for i in range(max_retry_count): start = time.time() try: - self.get_connection(timeout=sys.float_info.min) + conn = self.get_connection(timeout=sys.float_info.min) + conn.close() except Exception as e: end = time.time() self.assertAlmostEqual(start, end, 1) diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index 89b1b2fd1e..79b9d84636 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -62,7 +62,8 @@ def test_custom_raw_uuid_row_results(self): """ # Ensure that we get normal uuid back first - session = Cluster(protocol_version=PROTOCOL_VERSION).connect(keyspace="custserdes") + cluster = Cluster(protocol_version=PROTOCOL_VERSION) + session = cluster.connect(keyspace="custserdes") session.row_factory = tuple_factory result = session.execute("SELECT schema_version FROM system.local") uuid_type = result[0][0] @@ -82,7 +83,7 @@ def test_custom_raw_uuid_row_results(self): result_set = session.execute("SELECT schema_version FROM system.local") uuid_type = result_set[0][0] self.assertEqual(type(uuid_type), uuid.UUID) - session.shutdown() + cluster.shutdown() def test_custom_raw_row_results_all_types(self): """ @@ -99,7 +100,8 @@ def test_custom_raw_row_results_all_types(self): @test_category data_types:serialization """ # Connect using a custom protocol handler that tracks the various types the result message is used with. - session = Cluster(protocol_version=PROTOCOL_VERSION).connect(keyspace="custserdes") + cluster = Cluster(protocol_version=PROTOCOL_VERSION) + session = cluster.connect(keyspace="custserdes") session.client_protocol_handler = CustomProtocolHandlerResultMessageTracked session.row_factory = tuple_factory @@ -113,7 +115,7 @@ def test_custom_raw_row_results_all_types(self): self.assertEqual(actual, expected) # Ensure we have covered the various primitive types self.assertEqual(len(CustomResultMessageTracked.checked_rev_row_set), len(PRIMITIVE_DATATYPES)-1) - session.shutdown() + cluster.shutdown() class CustomResultMessageRaw(ResultMessage): diff --git a/tests/integration/standard/test_cython_protocol_handlers.py b/tests/integration/standard/test_cython_protocol_handlers.py index 7dc3db300e..28c3b0f2d8 100644 --- a/tests/integration/standard/test_cython_protocol_handlers.py +++ b/tests/integration/standard/test_cython_protocol_handlers.py @@ -42,7 +42,7 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - drop_keyspace_shutdown_cluster("testspace", cls.session, cls.session) + drop_keyspace_shutdown_cluster("testspace", cls.session, cls.cluster) @cythontest def test_cython_parser(self): @@ -188,7 +188,7 @@ def get_data(protocol_handler): session.row_factory = tuple_factory results = session.execute("SELECT * FROM test_table") - session.shutdown() + cluster.shutdown() return results diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index cebe8a3ad0..c54fc1ff22 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -967,6 +967,7 @@ def test_export_schema(self): cluster.connect() self.assertIsInstance(cluster.metadata.export_schema_as_string(), six.string_types) + cluster.shutdown() def test_export_keyspace_schema(self): """ @@ -2114,6 +2115,9 @@ def setup_class(cls): cls.session.set_keyspace(cls.keyspace_name) connection = cls.cluster.control_connection._connection cls.parser_class = get_schema_parser(connection, str(CASS_SERVER_VERSION[0]), timeout=20).__class__ + cls.cluster.control_connection.reconnect = Mock() + + @classmethod def teardown_class(cls): diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 45e5256fb3..1f133a431d 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -271,6 +271,9 @@ def test_duplicate_metrics_per_cluster(self): self.assertTrue("appcluster" in scales._Stats.stats.keys()) self.assertTrue("devops" in scales._Stats.stats.keys()) + cluster2.shutdown() + cluster3.shutdown() + class RequestAnalyzer(object): """ From 00ca39714b4543a8a50850e70e016d1d8b70cff4 Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 9 Feb 2017 17:48:27 -0500 Subject: [PATCH 0988/2005] Fixed timer logic to behave properly with ConstantSpeculativeExecutionPolicy --- cassandra/cluster.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 5d037a1eac..6088c17831 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3322,8 +3322,8 @@ def _on_speculative_execute(self): if self._time_remaining <= 0: self._on_timeout() return - if not self.send_request(error_no_hosts=False): - self._start_timer() + self.send_request(error_no_hosts=False) + self._start_timer() def _make_query_plan(self): From 0c9cdb98f0841cceaa2cc5a135ad41b5a5f890ce Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 9 Feb 2017 16:23:53 -0500 Subject: [PATCH 0989/2005] factor out common shuffle unit test logic --- tests/unit/test_policies.py | 77 ++++++++++++++++++++++--------------- 1 file changed, 45 insertions(+), 32 deletions(-) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index e7235fda0f..91fe2a1602 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -731,8 +731,42 @@ def test_statement_keyspace(self): self.assertEqual(replicas + hosts[:2], qplan) cluster.metadata.get_replicas.assert_called_with(statement_keyspace, routing_key) + def test_shuffles_if_given_keyspace_and_routing_key(self): + """ + Test to validate the hosts are shuffled when `shuffle_replicas` is truthy + @since 3.8 + @jira_ticket PYTHON-676 + @expected_result shuffle should be called, because the keyspace and the + routing key are set + + @test_category policy + """ + self._assert_shuffle(keyspace='keyspace', routing_key='routing_key') + + def test_no_shuffle_if_given_no_keyspace(self): + """ + Test to validate the hosts are not shuffled when no keyspace is provided + @since 3.8 + @jira_ticket PYTHON-676 + @expected_result shuffle should be called, because keyspace is None + + @test_category policy + """ + self._assert_shuffle(keyspace=None, routing_key='routing_key') + + def test_no_shuffle_if_given_no_routing_key(self): + """ + Test to validate the hosts are not shuffled when no routing_key is provided + @since 3.8 + @jira_ticket PYTHON-676 + @expected_result shuffle should be called, because routing_key is None + + @test_category policy + """ + self._assert_shuffle(keyspace='keyspace', routing_key=None) + @patch('cassandra.policies.shuffle') - def test_shuffle(self, patched_shuffle): + def _assert_shuffle(self, patched_shuffle, keyspace, routing_key): """ Test to validate the hosts are shuffled when the `shuffle_replicas` is truthy @since 3.8 @@ -758,41 +792,20 @@ def test_shuffle(self, patched_shuffle): policy = TokenAwarePolicy(child_policy, shuffle_replicas=True) policy.populate(cluster, hosts) - # no keyspace, no shuffle happens - cluster.metadata.get_replicas.reset_mock() - child_policy.make_query_plan.reset_mock() - keyspace = None - routing_key = 'routing_key' - query = Statement(routing_key=routing_key) - qplan = list(policy.make_query_plan(keyspace, query)) - self.assertEqual(hosts, qplan) - self.assertEqual(cluster.metadata.get_replicas.call_count, 0) - child_policy.make_query_plan.assert_called_once_with(keyspace, query) - self.assertEqual(patched_shuffle.call_count, 0) - - # no routing_key, no shuffle happens - cluster.metadata.get_replicas.reset_mock() - child_policy.make_query_plan.reset_mock() - keyspace = "shuffle_keyspace" - routing_key = None - query = Statement(routing_key=routing_key) - qplan = list(policy.make_query_plan(keyspace, query)) - self.assertEqual(hosts, qplan) - self.assertEqual(cluster.metadata.get_replicas.call_count, 0) - child_policy.make_query_plan.assert_called_once_with(keyspace, query) - self.assertEqual(patched_shuffle.call_count, 0) - - #routing_key and keyspace set, shuffle should happen cluster.metadata.get_replicas.reset_mock() child_policy.make_query_plan.reset_mock() - keyspace = "shuffle_keyspace" - routing_key = 'routing_key' query = Statement(routing_key=routing_key) qplan = list(policy.make_query_plan(keyspace, query)) - self.assertEqual(set(replicas), set(qplan[:2])) - self.assertEqual(hosts[:2], qplan[2:]) - child_policy.make_query_plan.assert_called_once_with(keyspace, query) - self.assertEqual(patched_shuffle.call_count, 1) + if keyspace is None or routing_key is None: + self.assertEqual(hosts, qplan) + self.assertEqual(cluster.metadata.get_replicas.call_count, 0) + child_policy.make_query_plan.assert_called_once_with(keyspace, query) + self.assertEqual(patched_shuffle.call_count, 0) + else: + self.assertEqual(set(replicas), set(qplan[:2])) + self.assertEqual(hosts[:2], qplan[2:]) + child_policy.make_query_plan.assert_called_once_with(keyspace, query) + self.assertEqual(patched_shuffle.call_count, 1) class ConvictionPolicyTest(unittest.TestCase): From 1e3c90a867d590805178aa42c3a1672da95830c0 Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 13 Feb 2017 12:53:47 -0500 Subject: [PATCH 0990/2005] Fixed some docs --- tests/integration/long/test_loadbalancingpolicies.py | 4 ++-- tests/unit/test_policies.py | 9 --------- 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index 62598f051b..a93419ea85 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -526,7 +526,7 @@ def test_token_aware_with_shuffle_rf2(self): @since 3.8 @jira_ticket PYTHON-676 @expected_result the request are spread across the replicas, - when one of them is down, the requests target the avaiable one + when one of them is down, the requests target the available one @test_category policy """ @@ -554,7 +554,7 @@ def test_token_aware_with_shuffle_rf3(self): @since 3.8 @jira_ticket PYTHON-676 @expected_result the request are spread across the replicas, - when one of them is down, the requests target the other avaiable ones + when one of them is down, the requests target the other available ones @test_category policy """ diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 91fe2a1602..494b2e05b3 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -767,15 +767,6 @@ def test_no_shuffle_if_given_no_routing_key(self): @patch('cassandra.policies.shuffle') def _assert_shuffle(self, patched_shuffle, keyspace, routing_key): - """ - Test to validate the hosts are shuffled when the `shuffle_replicas` is truthy - @since 3.8 - @jira_ticket PYTHON-676 - @expected_result shuffle should be called when the `shuffle_replicas` is truthy - if the keyspace and the routing key are set - - @test_category policy - """ hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)] for host in hosts: host.set_up() From 7d67633169dd4e71081627c4f209bf45acf41498 Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 13 Feb 2017 16:41:29 -0500 Subject: [PATCH 0991/2005] Added check so session and host are not both arguments in register_connection --- cassandra/cqlengine/connection.py | 3 ++- .../integration/cqlengine/test_connections.py | 24 +++++++++++++++++-- 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index ea5a4b0702..100e8d5a59 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -172,7 +172,8 @@ def register_connection(name, hosts=None, consistency=None, lazy_connect=False, log.warning("Registering connection '{0}' when it already exists.".format(name)) if session is not None: - invalid_config_args = (consistency is not None or + invalid_config_args = (hosts is not None or + consistency is not None or lazy_connect is not False or retry_connect is not False or cluster_options is not None) diff --git a/tests/integration/cqlengine/test_connections.py b/tests/integration/cqlengine/test_connections.py index 1f30b0a972..c7d2ad4a8c 100644 --- a/tests/integration/cqlengine/test_connections.py +++ b/tests/integration/cqlengine/test_connections.py @@ -227,9 +227,26 @@ def test_connection_creation_from_session(self): @test_category object_mapper """ - session = Cluster(['127.0.0.1']).connect() + cluster = Cluster(['127.0.0.1']) + session = cluster.connect() connection_name = 'from_session' conn.register_connection(connection_name, session=session) + self.assertIsNotNone(conn.get_connection(connection_name).cluster.metadata.get_host("127.0.0.1")) + self.addCleanup(conn.unregister_connection, connection_name) + cluster.shutdown() + + def test_connection_from_hosts(self): + """ + Test to ensure that you can register a connection from a list of hosts + @since 3.8 + @jira_ticket PYTHON-692 + @expected_result queries should execute appropriately + + @test_category object_mapper + """ + connection_name = 'from_hosts' + conn.register_connection(connection_name, hosts=['127.0.0.1']) + self.assertIsNotNone(conn.get_connection(connection_name).cluster.metadata.get_host("127.0.0.1")) self.addCleanup(conn.unregister_connection, connection_name) def test_connection_param_validation(self): @@ -241,7 +258,8 @@ def test_connection_param_validation(self): @test_category object_mapper """ - session = Cluster(['127.0.0.1']).connect() + cluster = Cluster(['127.0.0.1']) + session = cluster.connect() with self.assertRaises(CQLEngineException): conn.register_connection("bad_coonection1", session=session, consistency="not_null") with self.assertRaises(CQLEngineException): @@ -253,6 +271,8 @@ def test_connection_param_validation(self): with self.assertRaises(CQLEngineException): conn.register_connection("bad_coonection5", hosts="not_null", session=session) + cluster.shutdown() + class BatchQueryConnectionTests(BaseCassEngTestCase): From 14093fca2216fca0d7c31af70f193b2291776683 Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 9 Feb 2017 17:48:27 -0500 Subject: [PATCH 0992/2005] Fixed timer logic to behave properly with ConstantSpeculativeExecutionPolicy --- cassandra/cluster.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 5d037a1eac..6088c17831 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3322,8 +3322,8 @@ def _on_speculative_execute(self): if self._time_remaining <= 0: self._on_timeout() return - if not self.send_request(error_no_hosts=False): - self._start_timer() + self.send_request(error_no_hosts=False) + self._start_timer() def _make_query_plan(self): From 0cee87569686200395ec17f71537a8f49c5b99d1 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 5 Dec 2016 14:41:03 -0500 Subject: [PATCH 0993/2005] implement and test MonotonicTimestampGenerator --- cassandra/cluster.py | 34 +++++- cassandra/timestamps.py | 107 +++++++++++++++++++ docs/api/cassandra/cluster.rst | 2 + docs/api/cassandra/timestamps.rst | 14 +++ tests/unit/test_timestamps.py | 168 ++++++++++++++++++++++++++++++ 5 files changed, 323 insertions(+), 2 deletions(-) create mode 100644 cassandra/timestamps.py create mode 100644 docs/api/cassandra/timestamps.rst create mode 100644 tests/unit/test_timestamps.py diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 92cc39377e..af8e3f4b8c 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -70,6 +70,7 @@ from cassandra.query import (SimpleStatement, PreparedStatement, BoundStatement, BatchStatement, bind_params, QueryTrace, named_tuple_factory, dict_factory, tuple_factory, FETCH_SIZE_UNSET) +from cassandra.timestamps import MonotonicTimestampGenerator def _is_eventlet_monkey_patched(): @@ -771,7 +772,8 @@ def __init__(self, prepare_on_all_hosts=True, reprepare_on_up=True, execution_profiles=None, - allow_beta_protocol_version=False): + allow_beta_protocol_version=False, + timestamp_generator=None): """ ``executor_threads`` defines the number of threads in a pool for handling asynchronous tasks such as extablishing connection pools or refreshing metadata. @@ -830,6 +832,13 @@ def __init__(self, if connection_class is not None: self.connection_class = connection_class + if timestamp_generator is not None: + if not callable(timestamp_generator): + raise ValueError("timestamp_generator must be callable") + self.timestamp_generator = timestamp_generator + else: + self.timestamp_generator = MonotonicTimestampGenerator() + self.profile_manager = ProfileManager() self.profile_manager.profiles[EXEC_PROFILE_DEFAULT] = ExecutionProfile(self.load_balancing_policy, self.default_retry_policy, @@ -1893,6 +1902,27 @@ def default_serial_consistency_level(self, cl): .. versionadded:: 2.1.0 """ + timestamp_generator = None + """ + When :attr:`use_client_timestamp` is set, sessions call this object and use + the result as the timestamp. (Note that timestamps specified within a CQL + query will override this timestamp.) By default, a new + :class:`~.MonotonicTimestampGenerator` is created for + each :class:`Cluster` instance. + + Applications can set this value for custom timestamp behavior. For + example, an application could share a timestamp generator across + :class:`Cluster` objects to guarantee that the application will use unique, + increasing timestamps across clusters, or set it to to ``lambda: + int(time.time() * 1e6)`` if losing records over clock inconsistencies is + acceptable for the application. Custom :attr:`timestamp_generator` s should + be callable, and calling them should return an integer representing seconds + since some point in time, typically UNIX epoch. + + .. versionadded:: 3.8.0 + """ + + encoder = None """ A :class:`~cassandra.encoder.Encoder` instance that will be used when @@ -2085,7 +2115,7 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time start_time = time.time() if self._protocol_version >= 3 and self.use_client_timestamp: - timestamp = int(start_time * 1e6) + timestamp = self.cluster.timestamp_generator() else: timestamp = None diff --git a/cassandra/timestamps.py b/cassandra/timestamps.py new file mode 100644 index 0000000000..737f894a2f --- /dev/null +++ b/cassandra/timestamps.py @@ -0,0 +1,107 @@ +# Copyright 2013-2016 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This module contains utilities for generating timestamps for client-side +timestamp specification. +""" + +import logging +import time +from threading import Lock + +log = logging.getLogger(__name__) + +class MonotonicTimestampGenerator(object): + """ + An object that, when called, returns ``int(time.time() * 1e6)`` when + possible, but, if the value returned by ``time.time`` doesn't increase, + drifts into the future and logs warnings. + Exposed configuration attributes can be configured with arguments to + ``__init__`` or by changing attributes on an initialized object. + + .. versionadded:: 3.8.0 + """ + + warn_on_drift = True + """ + If true, log warnings when timestamps drift into the future as allowed by + :attr:`warning_threshold` and :attr:`warning_interval`. + """ + + warning_threshold = 0 + """ + This object will only issue warnings when the returned timestamp drifts + more than ``warning_threshold`` seconds into the future. + """ + + warning_interval = 0 + """ + This object will only issue warnings every ``warning_interval`` seconds. + """ + + def __init__(self, warn_on_drift=True, warning_threshold=0, warning_interval=0): + self.lock = Lock() + with self.lock: + self.last = 0 + self._last_warn = 0 + self.warn_on_drift = warn_on_drift + self.warning_threshold = warning_threshold + self.warning_interval = warning_interval + + def _next_timestamp(self, now, last): + """ + Returns the timestamp that should be used if ``now`` is the current + time and ``last`` is the last timestamp returned by this object. + Intended for internal and testing use only; to generate timestamps, + call an instantiated ``MonotonicTimestampGenerator`` object. + + :param int now: an integer to be used as the current time, typically + representing the current time in seconds since the UNIX epoch + :param int last: an integer representing the last timestamp returned by + this object + """ + if now > last: + self.last = now + return now + else: + self._maybe_warn(now=now) + self.last = last + 1 + return self.last + + def __call__(self): + """ + Makes ``MonotonicTimestampGenerator`` objects callable; defers + internally to _next_timestamp. + """ + with self.lock: + return self._next_timestamp(now=int(time.time() * 1e6), + last=self.last) + + def _maybe_warn(self, now): + # should be called from inside the self.lock. + diff = self.last - now + since_last_warn = now - self._last_warn + + warn = (self.warn_on_drift and + (diff >= self.warning_threshold) and + (since_last_warn >= self.warning_interval)) + if warn: + log.warn( + "Clock skew detected: current tick ({now}) was {diff} " + "microseconds behind the last generated timestamp " + "({last}), returned timestamps will be artificially " + "incremented to guarantee monotonicity.".format( + now=now, diff=diff, last=self.last)) + self._last_warn = now diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 05d66278d0..3d8917784f 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -134,6 +134,8 @@ .. autoattribute:: use_client_timestamp + .. autoattribute:: timestamp_generator + .. autoattribute:: encoder .. autoattribute:: client_protocol_handler diff --git a/docs/api/cassandra/timestamps.rst b/docs/api/cassandra/timestamps.rst new file mode 100644 index 0000000000..7c7f534aea --- /dev/null +++ b/docs/api/cassandra/timestamps.rst @@ -0,0 +1,14 @@ +``cassandra.timestamps`` - Timestamp Generation +============================================= + +.. module:: cassandra.timestamps + +.. autoclass:: MonotonicTimestampGenerator (warn_on_drift=True, warning_threshold=0, warning_interval=0) + + .. autoattribute:: warn_on_drift + + .. autoattribute:: warning_threshold + + .. autoattribute:: warning_interval + + .. automethod:: _next_timestamp diff --git a/tests/unit/test_timestamps.py b/tests/unit/test_timestamps.py new file mode 100644 index 0000000000..c38e8d523d --- /dev/null +++ b/tests/unit/test_timestamps.py @@ -0,0 +1,168 @@ +# Copyright 2013-2016 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +import mock + +from cassandra import timestamps + + +class _TimestampTestMixin(object): + + @mock.patch('cassandra.timestamps.time') + def _call_and_check_results(self, + patched_time_module, + system_time_expected_stamp_pairs, + timestamp_generator=None): + """ + For each element in an iterable of (system_time, expected_timestamp) + pairs, call a :class:`cassandra.timestamps.MonotonicTimestampGenerator` + with system_times as the underlying time.time() result, then assert + that the result is expected_timestamp. Skips the check if + expected_timestamp is None. + """ + patched_time_module.time = mock.Mock() + system_times, expected_timestamps = zip(*system_time_expected_stamp_pairs) + + patched_time_module.time.side_effect = system_times + tsg = timestamp_generator or timestamps.MonotonicTimestampGenerator() + + for expected in expected_timestamps: + actual = tsg() + if expected is not None: + self.assertEqual(actual, expected) + + # assert we patched timestamps.time.time correctly + with self.assertRaises(StopIteration): + tsg() + + +class TestTimestampGeneratorOutput(unittest.TestCase, _TimestampTestMixin): + """ + Mock time.time and test the output of MonotonicTimestampGenerator.__call__ + given different patterns of changing results. + """ + + def test_timestamps_during_and_after_same_system_time(self): + """ + Timestamps should increase monotonically over repeated system time. + + Test that MonotonicTimestampGenerator's output increases by 1 when the + underlying system time is the same, then returns to normal when the + system time increases again. + """ + self._call_and_check_results( + system_time_expected_stamp_pairs=( + (15.0, 15 * 1e6), + (15.0, 15 * 1e6 + 1), + (15.0, 15 * 1e6 + 2), + (15.01, 15.01 * 1e6)) + ) + + def test_timestamps_during_and_after_backwards_system_time(self): + """ + Timestamps should increase monotonically over system time going backwards. + + Test that MonotonicTimestampGenerator's output increases by 1 when the + underlying system time goes backward, then returns to normal when the + system time increases again. + """ + self._call_and_check_results( + system_time_expected_stamp_pairs=( + (15.0, 15 * 1e6), + (13.0, 15 * 1e6 + 1), + (14.0, 15 * 1e6 + 2), + (13.5, 15 * 1e6 + 3), + (15.01, 15.01 * 1e6)) + ) + + +class TestTimestampGeneratorLogging(unittest.TestCase, _TimestampTestMixin): + + def setUp(self): + self.log_patcher = mock.patch('cassandra.timestamps.log') + self.addCleanup(self.log_patcher.stop) + self.patched_timestamp_log = self.log_patcher.start() + + def assertLastCallArgRegex(self, call, pattern): + last_warn_args, last_warn_kwargs = call + self.assertEqual(len(last_warn_args), 1) + self.assertEqual(len(last_warn_kwargs), 0) + self.assertRegexpMatches( + last_warn_args[0], + pattern, + ) + + def test_basic_log_content(self): + tsg = timestamps.MonotonicTimestampGenerator() + tsg._last_warn = 12 + + tsg._next_timestamp(20, tsg.last) + self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 0) + tsg._next_timestamp(16, tsg.last) + + self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 1) + self.assertLastCallArgRegex( + self.patched_timestamp_log.warn.call_args, + r'Clock skew detected:.*\b16\b.*\b4\b.*\b20\b' + ) + + def test_disable_logging(self): + no_warn_tsg = timestamps.MonotonicTimestampGenerator(warn_on_drift=False) + + no_warn_tsg.last = 100 + no_warn_tsg._next_timestamp(99, no_warn_tsg.last) + self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 0) + + def test_warning_threshold_respected_no_logging(self): + tsg = timestamps.MonotonicTimestampGenerator( + warning_threshold=2, + ) + tsg.last, tsg._last_warn = 100, 97 + tsg._next_timestamp(98, tsg.last) + self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 0) + + def test_warning_threshold_respected_logs(self): + tsg = timestamps.MonotonicTimestampGenerator( + warning_threshold=1 + ) + tsg.last, tsg._last_warn = 100, 97 + tsg._next_timestamp(98, tsg.last) + self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 1) + + def test_warning_interval_respected_no_logging(self): + tsg = timestamps.MonotonicTimestampGenerator( + warning_interval=2 + ) + tsg.last = 100 + tsg._next_timestamp(70, tsg.last) + self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 1) + + tsg._next_timestamp(71, tsg.last) + self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 1) + + def test_warning_interval_respected_logs(self): + tsg = timestamps.MonotonicTimestampGenerator( + warning_interval=1 + ) + tsg.last = 100 + tsg._next_timestamp(70, tsg.last) + self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 1) + + tsg._next_timestamp(72, tsg.last) + self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 2) From 0c259705fd7e3f722f6d003f90a9c58097bbeb27 Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 23 Jan 2017 16:28:23 -0500 Subject: [PATCH 0994/2005] Fix, test, and document for monotonic timestamps --- cassandra/timestamps.py | 4 +- tests/unit/test_timestamps.py | 124 +++++++++++++++++++++++++++++++--- 2 files changed, 116 insertions(+), 12 deletions(-) diff --git a/cassandra/timestamps.py b/cassandra/timestamps.py index 737f894a2f..385b8c501b 100644 --- a/cassandra/timestamps.py +++ b/cassandra/timestamps.py @@ -95,8 +95,8 @@ def _maybe_warn(self, now): since_last_warn = now - self._last_warn warn = (self.warn_on_drift and - (diff >= self.warning_threshold) and - (since_last_warn >= self.warning_interval)) + (diff > self.warning_threshold * 1e6) and + (since_last_warn >= self.warning_interval * 1e6)) if warn: log.warn( "Clock skew detected: current tick ({now}) was {diff} " diff --git a/tests/unit/test_timestamps.py b/tests/unit/test_timestamps.py index c38e8d523d..c2f8b93da8 100644 --- a/tests/unit/test_timestamps.py +++ b/tests/unit/test_timestamps.py @@ -20,7 +20,8 @@ import mock from cassandra import timestamps - +import time +from threading import Thread, Lock class _TimestampTestMixin(object): @@ -60,11 +61,13 @@ class TestTimestampGeneratorOutput(unittest.TestCase, _TimestampTestMixin): def test_timestamps_during_and_after_same_system_time(self): """ - Timestamps should increase monotonically over repeated system time. - Test that MonotonicTimestampGenerator's output increases by 1 when the underlying system time is the same, then returns to normal when the system time increases again. + + @since 3.8.0 + @expected_result Timestamps should increase monotonically over repeated system time. + @test_category timing """ self._call_and_check_results( system_time_expected_stamp_pairs=( @@ -76,11 +79,13 @@ def test_timestamps_during_and_after_same_system_time(self): def test_timestamps_during_and_after_backwards_system_time(self): """ - Timestamps should increase monotonically over system time going backwards. - Test that MonotonicTimestampGenerator's output increases by 1 when the underlying system time goes backward, then returns to normal when the system time increases again. + + @since 3.8.0 + @expected_result Timestamps should increase monotonically over system time going backwards. + @test_category timing """ self._call_and_check_results( system_time_expected_stamp_pairs=( @@ -92,7 +97,7 @@ def test_timestamps_during_and_after_backwards_system_time(self): ) -class TestTimestampGeneratorLogging(unittest.TestCase, _TimestampTestMixin): +class TestTimestampGeneratorLogging(unittest.TestCase): def setUp(self): self.log_patcher = mock.patch('cassandra.timestamps.log') @@ -109,7 +114,17 @@ def assertLastCallArgRegex(self, call, pattern): ) def test_basic_log_content(self): + """ + Tests there are logs + + @since 3.8.0 + @jira_ticket PYTHON-676 + @expected_result logs + + @test_category timing + """ tsg = timestamps.MonotonicTimestampGenerator() + #The units of _last_warn is seconds tsg._last_warn = 12 tsg._next_timestamp(20, tsg.last) @@ -123,6 +138,15 @@ def test_basic_log_content(self): ) def test_disable_logging(self): + """ + Tests there are no logs when there is a clock skew if logging is disabled + + @since 3.8.0 + @jira_ticket PYTHON-676 + @expected_result no logs + + @test_category timing + """ no_warn_tsg = timestamps.MonotonicTimestampGenerator(warn_on_drift=False) no_warn_tsg.last = 100 @@ -130,24 +154,51 @@ def test_disable_logging(self): self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 0) def test_warning_threshold_respected_no_logging(self): + """ + Tests there are no logs if `warning_threshold` is not exceeded + + @since 3.8.0 + @jira_ticket PYTHON-676 + @expected_result no logs + + @test_category timing + """ tsg = timestamps.MonotonicTimestampGenerator( - warning_threshold=2, + warning_threshold=2e-6, ) tsg.last, tsg._last_warn = 100, 97 tsg._next_timestamp(98, tsg.last) self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 0) def test_warning_threshold_respected_logs(self): + """ + Tests there are logs if `warning_threshold` is exceeded + + @since 3.8.0 + @jira_ticket PYTHON-676 + @expected_result logs + + @test_category timing + """ tsg = timestamps.MonotonicTimestampGenerator( - warning_threshold=1 + warning_threshold=1e-6 ) tsg.last, tsg._last_warn = 100, 97 tsg._next_timestamp(98, tsg.last) self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 1) def test_warning_interval_respected_no_logging(self): + """ + Tests there is only one log in the interval `warning_interval` + + @since 3.8.0 + @jira_ticket PYTHON-676 + @expected_result one log + + @test_category timing + """ tsg = timestamps.MonotonicTimestampGenerator( - warning_interval=2 + warning_interval=2e-6 ) tsg.last = 100 tsg._next_timestamp(70, tsg.last) @@ -157,8 +208,18 @@ def test_warning_interval_respected_no_logging(self): self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 1) def test_warning_interval_respected_logs(self): + """ + Tests there are logs again if the + clock skew happens after`warning_interval` + + @since 3.8.0 + @jira_ticket PYTHON-676 + @expected_result logs + + @test_category timing + """ tsg = timestamps.MonotonicTimestampGenerator( - warning_interval=1 + warning_interval=1e-6 ) tsg.last = 100 tsg._next_timestamp(70, tsg.last) @@ -166,3 +227,46 @@ def test_warning_interval_respected_logs(self): tsg._next_timestamp(72, tsg.last) self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 2) + + +class TestTimestampGeneratorMultipleThreads(unittest.TestCase): + + def test_should_generate_incrementing_timestamps_for_all_threads(self): + """ + Tests when time is "stopped", values are assigned incrementally + + @since 3.8.0 + @jira_ticket PYTHON-676 + @expected_result the returned values increase + + @test_category timing + """ + lock = Lock() + + def request_time(): + for _ in range(timestamp_to_generate): + timestamp = tsg() + with lock: + generated_timestamps.append(timestamp) + + tsg = timestamps.MonotonicTimestampGenerator(warning_threshold=1) + fixed_time = 1 + num_threads = 5 + + timestamp_to_generate = 1000 + generated_timestamps = [] + + with mock.patch('time.time', new=mock.Mock(return_value=fixed_time)): + threads = [] + for _ in range(num_threads): + threads.append(Thread(target=request_time)) + + for t in threads: + t.start() + + for t in threads: + t.join() + + self.assertEqual(len(generated_timestamps), num_threads * timestamp_to_generate) + for i, timestamp in enumerate(sorted(generated_timestamps)): + self.assertEqual(int(i + 1e6), timestamp) From 2bbe1b0d6648a7e8f18a19a154cdfc7099b52a49 Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 14 Feb 2017 10:37:44 -0500 Subject: [PATCH 0995/2005] Fixed for a connection closed during initial handshake --- cassandra/io/asyncorereactor.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index af7de4a805..9fd0d15a4e 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -322,6 +322,11 @@ def close(self): if not self.is_defunct: self.error_all_requests( ConnectionShutdown("Connection to %s was closed" % self.host)) + + #This happens when the connection is shutdown while waiting for the ReadyMessage + if not self.connected_event.is_set(): + self.last_error = ConnectionShutdown("Connection to %s was closed" % self.host) + # don't leave in-progress operations hanging self.connected_event.set() From aeb71aad7769efd563f115bf69a8f7e585564fd6 Mon Sep 17 00:00:00 2001 From: Shevchenko Vitaliy Date: Wed, 15 Feb 2017 14:54:23 +0200 Subject: [PATCH 0996/2005] Fix RuntimeError caused by change dict size during iteration --- cassandra/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 754e595286..2ccbc18fd4 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2242,7 +2242,7 @@ def prepare_on_all_hosts(self, query, excluded_host): Intended for internal use only. """ futures = [] - for host in self._pools.keys(): + for host in tuple(self._pools.keys()): if host != excluded_host and host.is_up: future = ResponseFuture(self, PrepareMessage(query=query), None, self.default_timeout) From ee677dbe8c5494e928d7d1ef6147aec18afc096f Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 16 Feb 2017 13:58:19 -0500 Subject: [PATCH 0997/2005] 3.8.0 CHANGELOG --- CHANGELOG.rst | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index eff7b04e68..7bdb5f61d7 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,39 @@ +3.8.0 +===== + +Features +-------- + +* Quote index names in metadata CQL generation (PYTHON-616) +* On column deserialization failure, keep error message consistent between python and cython (PYTHON-631) +* TokenAwarePolicy always sends requests to the same replica for a given key (PYTHON-643) +* Added cql types to result set (PYTHON-648) +* Add __len__ to BatchStatement (PYTHON-650) +* Duration Type for Cassandra (PYTHON-655) +* Send flags with PREPARE message in v5 (PYTHON-684) + +Bug Fixes +--------- + +* Potential Timing issue if application exits prior to session pool initialization (PYTHON-636) +* "Host X.X.X.X has been marked down" without any exceptions (PYTHON-640) +* NoHostAvailable or OperationTimedOut when using execute_concurrent with a generator that inserts into more than one table (PYTHON-642) +* ResponseFuture creates Timers and don't cancel them even when result is received which leads to memory leaks (PYTHON-644) +* Driver cannot connect to Cassandra version > 3 (PYTHON-646) +* Unable to import model using UserType without setuping connection since 3.7 (PYTHON-649) +* Don't prepare queries on ignored hosts on_up (PYTHON-669) +* Sockets associated with sessions not getting cleaned up on session.shutdown() (PYTHON-673) +* Make client timestamps strictly monotonic (PYTHON-676) +* cassandra.cqlengine.connection.register_connection broken when hosts=None (PYTHON-692) + +Other +----- + +* Create a cqlengine doc section explaining None semantics (PYTHON-623) +* Resolve warnings in documentation generation (PYTHON-645) +* Cython dependency (PYTHON-686) +* Drop Support for Python 2.6 (PYTHON-690) + 3.7.1 ===== October 26, 2016 From e9391eb0f3f3660c4903468378629faa24ff45ee Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 21 Feb 2017 11:34:05 -0500 Subject: [PATCH 0998/2005] set defaults for timestamp generator logging --- cassandra/timestamps.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cassandra/timestamps.py b/cassandra/timestamps.py index 385b8c501b..e9602e161f 100644 --- a/cassandra/timestamps.py +++ b/cassandra/timestamps.py @@ -40,15 +40,17 @@ class MonotonicTimestampGenerator(object): :attr:`warning_threshold` and :attr:`warning_interval`. """ - warning_threshold = 0 + warning_threshold = 1 """ This object will only issue warnings when the returned timestamp drifts more than ``warning_threshold`` seconds into the future. + Defaults to 1 second. """ - warning_interval = 0 + warning_interval = 1 """ This object will only issue warnings every ``warning_interval`` seconds. + Defaults to 1 second. """ def __init__(self, warn_on_drift=True, warning_threshold=0, warning_interval=0): From 03a2636f33b9e8ea2d704ba16914b766b135f7e0 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 21 Feb 2017 13:41:59 -0500 Subject: [PATCH 0999/2005] add documentation for Cluster.timestamp_generator --- cassandra/cluster.py | 11 +++++++++++ docs/api/cassandra/cluster.rst | 2 ++ 2 files changed, 13 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 754e595286..26c36d9169 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -661,6 +661,17 @@ def default_retry_policy(self, policy): establishment, options passing, and authentication. """ + timestamp_generator = None + """ + An object, shared between all sessions created by this cluster instance, + that generates timestamps when client-side timestamp generation is enabled. + By default, each :class:`Cluster` uses a new + :class:`~.MonotonicTimestampGenerator`. + + Applications can set this value for custom timestamp behavior. See the + documentation for :meth:`Session.timestamp_generator`. + """ + @property def schema_metadata_enabled(self): """ diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 3d8917784f..c37851ad2d 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -64,6 +64,8 @@ .. autoattribute:: token_metadata_enabled :annotation: = True + .. autoattribute:: timestamp_generator + .. automethod:: connect .. automethod:: shutdown From 0cc95ace265ffe227b87aea315f29c61bf82624e Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 21 Feb 2017 14:34:21 -0500 Subject: [PATCH 1000/2005] 3.8.0 version --- cassandra/__init__.py | 2 +- docs.yaml | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 4771118b86..f8f8014cdc 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 7, 1, 'post0') +__version_info__ = (3, 8, 0) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs.yaml b/docs.yaml index a8fa9a4338..86cd85b496 100644 --- a/docs.yaml +++ b/docs.yaml @@ -11,6 +11,8 @@ sections: type: sphinx directory: docs versions: + - name: 3.8 + ref: 3.8-doc - name: 3.7 ref: 3.7-doc - name: 3.6 From 5c5a53ef8de50a1da48964e9bc8e196f3780ec4b Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 24 Feb 2017 10:30:24 -0500 Subject: [PATCH 1001/2005] version 3.8.0.post0 --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index f8f8014cdc..4d5d3b488f 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 8, 0) +__version_info__ = (3, 8, 0, 'post0') __version__ = '.'.join(map(str, __version_info__)) From 018c7e70c821e6c40e752ca184e601db5b91cb3f Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 27 Feb 2017 18:55:50 -0500 Subject: [PATCH 1002/2005] Added tests for wrinte_uint --- .../standard/test_custom_protocol_handler.py | 57 +++++++++++++++++-- tests/unit/test_protocol.py | 36 ++++++++++++ 2 files changed, 89 insertions(+), 4 deletions(-) create mode 100644 tests/unit/test_protocol.py diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index 79b9d84636..2da9a73cc1 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -17,15 +17,19 @@ except ImportError: import unittest # noqa -from cassandra.protocol import ProtocolHandler, ResultMessage, UUIDType, read_int, EventMessage -from cassandra.query import tuple_factory -from cassandra.cluster import Cluster -from tests.integration import use_singledc, PROTOCOL_VERSION, drop_keyspace_shutdown_cluster +from cassandra.protocol import ProtocolHandler, ResultMessage, QueryMessage, UUIDType, read_int +from cassandra.query import tuple_factory, SimpleStatement +from cassandra.cluster import Cluster, ResponseFuture +from cassandra import ProtocolVersion, ConsistencyLevel + +from tests.integration import use_singledc, PROTOCOL_VERSION, drop_keyspace_shutdown_cluster, \ + greaterthanorequalcass30, execute_with_long_wait_retry from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES from tests.integration.standard.utils import create_table_with_all_types, get_all_primitive_params from six import binary_type import uuid +import mock def setup_module(): @@ -117,6 +121,51 @@ def test_custom_raw_row_results_all_types(self): self.assertEqual(len(CustomResultMessageTracked.checked_rev_row_set), len(PRIMITIVE_DATATYPES)-1) cluster.shutdown() + @greaterthanorequalcass30 + def test_protocol_divergence_v4_fail_by_flag_uses_int(self): + """ + Test to validate that the _PAGE_SIZE_FLAG is not treated correctly in V4 if the flags are + written using write_uint instead of write_int + + @since 3.9 + @jira_ticket PYTHON-713 + @expected_result the fetch_size=1 parameter will be ignored + + @test_category connection + """ + self._protocol_divergence_fail_by_flag_uses_int(ProtocolVersion.V4, uses_int_query_flag=False, + int_flag=True) + + + def _send_query_message(self, session, timeout, **kwargs): + query = "SELECT * FROM test3rf.test" + message = QueryMessage(query=query, **kwargs) + future = ResponseFuture(session, message, query=None, timeout=timeout) + future.send_request() + return future + + def _protocol_divergence_fail_by_flag_uses_int(self, version, uses_int_query_flag, int_flag = True, beta=False): + cluster = Cluster(protocol_version=version, allow_beta_protocol_version=beta) + session = cluster.connect() + + query_one = SimpleStatement("INSERT INTO test3rf.test (k, v) VALUES (1, 1)") + query_two = SimpleStatement("INSERT INTO test3rf.test (k, v) VALUES (2, 2)") + + execute_with_long_wait_retry(session, query_one) + execute_with_long_wait_retry(session, query_two) + + with mock.patch('cassandra.protocol.ProtocolVersion.uses_int_query_flags', new=mock.Mock(return_value=int_flag)): + future = self._send_query_message(session, 10, + consistency_level=ConsistencyLevel.ONE, fetch_size=1) + + response = future.result() + + # This means the flag are not handled as they are meant by the server if uses_int=False + self.assertEqual(response.has_more_pages, uses_int_query_flag) + + execute_with_long_wait_retry(session, SimpleStatement("TRUNCATE test3rf.test")) + cluster.shutdown() + class CustomResultMessageRaw(ResultMessage): """ diff --git a/tests/unit/test_protocol.py b/tests/unit/test_protocol.py new file mode 100644 index 0000000000..c2223e9dd8 --- /dev/null +++ b/tests/unit/test_protocol.py @@ -0,0 +1,36 @@ + + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + + +from mock import Mock + +from cassandra import ProtocolVersion +from cassandra.protocol import PrepareMessage + + +class MessageTest(unittest.TestCase): + def test_prepare_flag(self): + """ + Test to check the prepare flag is properly set, This should only happen for V5 at the moment. + + @since 3.9 + @jira_ticket PYTHON-713 + @expected_result the values are correctly written + + @test_category connection + """ + message = PrepareMessage("a") + io = Mock() + for version in ProtocolVersion.SUPPORTED_VERSIONS: + message.send_body(io, version) + if ProtocolVersion.uses_prepare_flags(version): + # This should pass after PYTHON-696 + self.assertEqual(len(io.write.mock_calls), 3) + # self.assertEqual(uint32_unpack(io.write.mock_calls[2][1][0]) & _WITH_SERIAL_CONSISTENCY_FLAG, 1) + else: + self.assertEqual(len(io.write.mock_calls), 2) + io.reset_mock() \ No newline at end of file From 6045c40d37439c65c10e47c2fa1b9cecd0352953 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 4 Jan 2017 15:35:09 -0600 Subject: [PATCH 1003/2005] Reformated tests from test_protocol --- tests/unit/test_protocol.py | 67 +++++++++++++++++++++++++++++++++---- 1 file changed, 61 insertions(+), 6 deletions(-) diff --git a/tests/unit/test_protocol.py b/tests/unit/test_protocol.py index c2223e9dd8..7e6a5b3702 100644 --- a/tests/unit/test_protocol.py +++ b/tests/unit/test_protocol.py @@ -1,18 +1,73 @@ - try: import unittest2 as unittest except ImportError: import unittest # noqa - from mock import Mock - from cassandra import ProtocolVersion -from cassandra.protocol import PrepareMessage - +from cassandra.protocol import PrepareMessage, QueryMessage, ExecuteMessage class MessageTest(unittest.TestCase): + + def test_prepare_message(self): + """ + Test to check the appropriate calls are made + + @since 3.9 + @jira_ticket PYTHON-713 + @expected_result the values are correctly written + + @test_category connection + """ + message = PrepareMessage("a") + io = Mock() + + message.send_body(io,4) + self._check_calls(io, [(b'\x00\x00\x00\x01',), (b'a',)]) + + io.reset_mock() + message.send_body(io,5) + + self._check_calls(io, [(b'\x00\x00\x00\x01',), (b'a',), (b'\x00\x00\x00\x00',)]) + + def test_execute_message(self): + message = ExecuteMessage('1',[],4) + io = Mock() + + message.send_body(io,4) + self._check_calls(io, [(b'\x00\x01',), (b'1',), (b'\x00\x04',), (b'\x01',), (b'\x00\x00',)]) + + io.reset_mock() + message.send_body(io, 5) + + self._check_calls(io, [(b'\x00\x01',), (b'1',), (b'\x00\x04',), (b'\x00\x00\x00\x01',), (b'\x00\x00',)]) + + def test_query_message(self): + """ + Test to check the appropriate calls are made + + @since 3.9 + @jira_ticket PYTHON-713 + @expected_result the values are correctly written + + @test_category connection + """ + message = QueryMessage("a",3) + io = Mock() + + message.send_body(io,4) + self._check_calls(io, [(b'\x00\x00\x00\x01',), (b'a',), (b'\x00\x03',), (b'\x00',)]) + + io.reset_mock() + message.send_body(io,5) + self._check_calls(io, [(b'\x00\x00\x00\x01',), (b'a',), (b'\x00\x03',), (b'\x00\x00\x00\x00',)]) + + def _check_calls(self, io, expected): + self.assertEqual(len(io.write.mock_calls), len(expected)) + for call, expect in zip(io.write.mock_calls, expected): + self.assertEqual(call[1], expect) + def test_prepare_flag(self): """ Test to check the prepare flag is properly set, This should only happen for V5 at the moment. @@ -33,4 +88,4 @@ def test_prepare_flag(self): # self.assertEqual(uint32_unpack(io.write.mock_calls[2][1][0]) & _WITH_SERIAL_CONSISTENCY_FLAG, 1) else: self.assertEqual(len(io.write.mock_calls), 2) - io.reset_mock() \ No newline at end of file + io.reset_mock() From 3c25965d046830fe064f9fb409b29f959bb51165 Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 28 Feb 2017 17:01:21 -0500 Subject: [PATCH 1004/2005] Changed bit_length function to support python2 in Windows --- cassandra/marshal.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cassandra/marshal.py b/cassandra/marshal.py index 5ce40a5313..1d7e29d58a 100644 --- a/cassandra/marshal.py +++ b/cassandra/marshal.py @@ -68,12 +68,19 @@ def varint_unpack(term): # noqa return val +def bit_length(n): + if six.PY3 or isinstance(n, int): + return int.bit_length(n) + else: + return long.bit_length(n) + + def varint_pack(big): pos = True if big == 0: return b'\x00' if big < 0: - bytelength = int.bit_length(abs(big) - 1) // 8 + 1 + bytelength = bit_length(abs(big) - 1) // 8 + 1 big = (1 << bytelength * 8) + big pos = False revbytes = bytearray() From 56889b2025197a3d3b8102501c6aee9b9979d340 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 28 Feb 2017 13:51:05 -0500 Subject: [PATCH 1005/2005] add total_ordering to Time/Date/Host/Token PYTHON-714 --- cassandra/metadata.py | 10 ++-------- cassandra/pool.py | 2 ++ cassandra/util.py | 3 +++ 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index e410631ae7..96b70ae83b 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -15,6 +15,7 @@ from binascii import unhexlify from bisect import bisect_right from collections import defaultdict, Mapping +from functools import total_ordering from hashlib import md5 from itertools import islice, cycle import json @@ -1492,6 +1493,7 @@ def get_replicas(self, keyspace, token): return [] +@total_ordering class Token(object): """ Abstract class representing a token. @@ -1512,14 +1514,6 @@ def from_key(cls, key): def from_string(cls, token_string): raise NotImplementedError() - def __cmp__(self, other): - if self.value < other.value: - return -1 - elif self.value == other.value: - return 0 - else: - return 1 - def __eq__(self, other): return self.value == other.value diff --git a/cassandra/pool.py b/cassandra/pool.py index c6801de345..e570b0154b 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -16,6 +16,7 @@ Connection pooling and host management. """ +from functools import total_ordering import logging import socket import time @@ -41,6 +42,7 @@ class NoConnectionsAvailable(Exception): pass +@total_ordering class Host(object): """ Represents a single Cassandra node. diff --git a/cassandra/util.py b/cassandra/util.py index 924b5c7905..b73baab7f3 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -15,6 +15,7 @@ from __future__ import with_statement import calendar import datetime +from functools import total_ordering import random import six import uuid @@ -861,6 +862,7 @@ def _serialize_key(self, key): long = int +@total_ordering class Time(object): ''' Idealized time, independent of day. @@ -985,6 +987,7 @@ def __str__(self): self.second, self.nanosecond) +@total_ordering class Date(object): ''' Idealized date: year, month, day From c6df661907a9dab0c789df1fc35ee7d449e55530 Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 1 Mar 2017 18:20:13 -0500 Subject: [PATCH 1006/2005] Added tests for type ordering --- tests/unit/test_types.py | 136 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 136 insertions(+) diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index 0b0f5efda5..9533dc3259 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -33,6 +33,10 @@ read_stringmap, read_inet, write_inet, read_string, write_longstring) from cassandra.query import named_tuple_factory +from cassandra.pool import Host +from cassandra.policies import SimpleConvictionPolicy, ConvictionPolicy +from cassandra.util import Date, Time +from cassandra.metadata import Token class TypeTests(unittest.TestCase): @@ -248,3 +252,135 @@ def test_cql_quote(self): self.assertEqual(cql_quote(u'test'), "'test'") self.assertEqual(cql_quote('test'), "'test'") self.assertEqual(cql_quote(0), '0') + + +class TestOrdering(unittest.TestCase): + def _check_order_consistency(self, smaller, bigger, equal=False): + self.assertLessEqual(smaller, bigger) + self.assertGreaterEqual(bigger, smaller) + if equal: + self.assertEqual(smaller, bigger) + else: + self.assertNotEqual(smaller, bigger) + self.assertLess(smaller, bigger) + self.assertGreater(bigger, smaller) + + def _shuffle_lists(self, *args): + return [item for sublist in zip(*args) for item in sublist] + + def _check_sequence_consistency(self, ordered_sequence, equal=False): + for i, el in enumerate(ordered_sequence): + for previous in ordered_sequence[:i]: + self._check_order_consistency(previous, el, equal) + for posterior in ordered_sequence[i + 1:]: + self._check_order_consistency(el, posterior, equal) + + def test_host_order(self): + """ + Test Host class is ordered consistently + + @since 3.9 + @jira_ticket PYTHON-714 + @expected_result the hosts are ordered correctly + + @test_category data_types + """ + hosts = [Host(addr, SimpleConvictionPolicy) for addr in + ("127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4")] + hosts_equal = [Host(addr, SimpleConvictionPolicy) for addr in + ("127.0.0.1", "127.0.0.1")] + hosts_equal_conviction = [Host("127.0.0.1", SimpleConvictionPolicy), Host("127.0.0.1", ConvictionPolicy)] + self._check_sequence_consistency(hosts) + self._check_sequence_consistency(hosts_equal, equal=True) + self._check_sequence_consistency(hosts_equal_conviction, equal=True) + + def test_date_order(self): + """ + Test Date class is ordered consistently + + @since 3.9 + @jira_ticket PYTHON-714 + @expected_result the dates are ordered correctly + + @test_category data_types + """ + dates_from_string = [Date("2017-01-01"), Date("2017-01-05"), Date("2017-01-09"), Date("2017-01-13")] + dates_from_string_equal = [Date("2017-01-01"), Date("2017-01-01")] + self._check_sequence_consistency(dates_from_string) + self._check_sequence_consistency(dates_from_string_equal, equal=True) + + date_format = "%Y-%m-%d" + + dates_from_value = [ + Date((datetime.datetime.strptime(dtstr, date_format) - + datetime.datetime(1970, 1, 1)).days) + for dtstr in ("2017-01-02", "2017-01-06", "2017-01-10", "2017-01-14") + ] + dates_from_value_equal = [Date(1), Date(1)] + self._check_sequence_consistency(dates_from_value) + self._check_sequence_consistency(dates_from_value_equal, equal=True) + + dates_from_datetime = [Date(datetime.datetime.strptime(dtstr, date_format)) + for dtstr in ("2017-01-03", "2017-01-07", "2017-01-11", "2017-01-15")] + dates_from_datetime_equal = [Date(datetime.datetime.strptime("2017-01-01", date_format)), + Date(datetime.datetime.strptime("2017-01-01", date_format))] + self._check_sequence_consistency(dates_from_datetime) + self._check_sequence_consistency(dates_from_datetime_equal, equal=True) + + dates_from_date = [ + Date(datetime.datetime.strptime(dtstr, date_format).date()) for dtstr in + ("2017-01-04", "2017-01-08", "2017-01-12", "2017-01-16") + ] + dates_from_date_equal = [datetime.datetime.strptime(dtstr, date_format) for dtstr in + ("2017-01-09", "2017-01-9")] + + self._check_sequence_consistency(dates_from_date) + self._check_sequence_consistency(dates_from_date_equal, equal=True) + + self._check_sequence_consistency(self._shuffle_lists(dates_from_string, dates_from_value, + dates_from_datetime, dates_from_date)) + + def test_timer_order(self): + """ + Test Time class is ordered consistently + + @since 3.9 + @jira_ticket PYTHON-714 + @expected_result the times are ordered correctly + + @test_category data_types + """ + time_from_int = [Time(1000), Time(4000), Time(7000), Time(10000)] + time_from_int_equal = [Time(1), Time(1)] + self._check_sequence_consistency(time_from_int) + self._check_sequence_consistency(time_from_int_equal, equal=True) + + time_from_datetime = [Time(datetime.time(hour=0, minute=0, second=0, microsecond=us)) + for us in (2, 5, 8, 11)] + time_from_datetime_equal = [Time(datetime.time(hour=0, minute=0, second=0, microsecond=us)) + for us in (1, 1)] + self._check_sequence_consistency(time_from_datetime) + self._check_sequence_consistency(time_from_datetime_equal, equal=True) + + time_from_string = [Time("00:00:00.000003000"), Time("00:00:00.000006000"), + Time("00:00:00.000009000"), Time("00:00:00.000012000")] + time_from_string_equal = [Time("00:00:00.000004000"), Time("00:00:00.000004000")] + self._check_sequence_consistency(time_from_string) + self._check_sequence_consistency(time_from_string_equal, equal=True) + + self._check_sequence_consistency(self._shuffle_lists(time_from_int, time_from_datetime, time_from_string)) + + def test_token_order(self): + """ + Test Token class is ordered consistently + + @since 3.9 + @jira_ticket PYTHON-714 + @expected_result the tokens are ordered correctly + + @test_category data_types + """ + tokens = [Token(1), Token(2), Token(3), Token(4)] + tokens_equal = [Token(1), Token(1)] + self._check_sequence_consistency(tokens) + self._check_sequence_consistency(tokens_equal, equal=True) From 45fdc5c5450f734f6775d2ebdc2a4068330a18b6 Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 28 Feb 2017 14:58:53 -0500 Subject: [PATCH 1007/2005] Cherry-picked the windows support branch of dse --- tests/__init__.py | 7 ++++ tests/integration/__init__.py | 16 ++++++++ tests/integration/cqlengine/__init__.py | 5 ++- .../columns/test_container_columns.py | 9 ++++- .../cqlengine/connections/test_connection.py | 10 ++++- .../integration/cqlengine/test_connections.py | 28 ++++++------- .../standard/test_authentication.py | 32 +++++++++------ .../standard/test_client_warnings.py | 4 +- tests/integration/standard/test_cluster.py | 40 ++++++++++++------- tests/integration/standard/test_connection.py | 9 +++-- .../standard/test_custom_payload.py | 5 ++- .../standard/test_custom_protocol_handler.py | 1 + tests/integration/standard/test_metadata.py | 14 ++++--- tests/integration/standard/test_metrics.py | 6 +-- tests/integration/standard/test_policies.py | 4 ++ .../standard/test_prepared_statements.py | 6 ++- tests/integration/standard/test_query.py | 10 +++-- tests/unit/test_concurrent.py | 9 ++++- 18 files changed, 149 insertions(+), 66 deletions(-) diff --git a/tests/__init__.py b/tests/__init__.py index abfb8bf792..199ec56164 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -12,9 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa import logging import sys import socket +import platform log = logging.getLogger() log.setLevel('DEBUG') @@ -41,3 +46,5 @@ def is_gevent_monkey_patched(): def is_monkey_patched(): return is_gevent_monkey_patched() or is_eventlet_monkey_patched() + +notwindows = unittest.skipUnless(not "Windows" in platform.system(), "This test is not adecuate for windows") diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 4996af2356..5db4e04faf 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -115,6 +115,7 @@ def _get_cass_version_from_dse(dse_version): return cass_ver +CASSANDRA_IP = os.getenv('CASSANDRA_IP', '127.0.0.1') CASSANDRA_DIR = os.getenv('CASSANDRA_DIR', None) DSE_VERSION = os.getenv('DSE_VERSION', None) DSE_CRED = os.getenv('DSE_CREDS', None) @@ -141,6 +142,18 @@ def _get_cass_version_from_dse(dse_version): CCM_KWARGS['dse_credentials_file'] = DSE_CRED +#This changes the default contact_point parameter in Cluster +def set_default_cass_ip(): + if CASSANDRA_IP.startswith("127.0.0."): + return + defaults = list(Cluster.__init__.__defaults__) + defaults = [[CASSANDRA_IP]] + defaults[1:] + try: + Cluster.__init__.__defaults__ = tuple(defaults) + except: + Cluster.__init__.__func__.__defaults__ = tuple(defaults) + + def get_default_protocol(): if Version(CASSANDRA_VERSION) >= Version('2.2'): @@ -208,6 +221,7 @@ def get_unsupported_upper_protocol(): PROTOCOL_VERSION = int(os.getenv('PROTOCOL_VERSION', default_protocol_version)) +local = unittest.skipUnless(CASSANDRA_IP.startswith("127.0.0."), 'Tests only runs against local C*') notprotocolv1 = unittest.skipUnless(PROTOCOL_VERSION > 1, 'Protocol v1 not supported') lessthenprotocolv4 = unittest.skipUnless(PROTOCOL_VERSION < 4, 'Protocol versions 4 or greater not supported') greaterthanprotocolv3 = unittest.skipUnless(PROTOCOL_VERSION >= 4, 'Protocol versions less than 4 are not supported') @@ -299,6 +313,8 @@ def is_current_cluster(cluster_name, node_counts): def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[]): + set_default_cass_ip() + global CCM_CLUSTER if USE_CASS_EXTERNAL: if CCM_CLUSTER: diff --git a/tests/integration/cqlengine/__init__.py b/tests/integration/cqlengine/__init__.py index ef61eed114..760702fc1b 100644 --- a/tests/integration/cqlengine/__init__.py +++ b/tests/integration/cqlengine/__init__.py @@ -24,7 +24,7 @@ from cassandra.cqlengine.management import create_keyspace_simple, CQLENG_ALLOW_SCHEMA_MANAGEMENT import cassandra -from tests.integration import get_server_versions, use_single_node, PROTOCOL_VERSION +from tests.integration import get_server_versions, use_single_node, PROTOCOL_VERSION, CASSANDRA_IP, set_default_cass_ip DEFAULT_KEYSPACE = 'cqlengine_test' @@ -35,6 +35,7 @@ def setup_package(): warnings.simplefilter('always') # for testing warnings, make sure all are let through os.environ[CQLENG_ALLOW_SCHEMA_MANAGEMENT] = '1' + set_default_cass_ip() use_single_node() setup_connection(DEFAULT_KEYSPACE) @@ -52,7 +53,7 @@ def is_prepend_reversed(): def setup_connection(keyspace_name): - connection.setup(['127.0.0.1'], + connection.setup([CASSANDRA_IP], consistency=ConsistencyLevel.ONE, protocol_version=PROTOCOL_VERSION, default_keyspace=keyspace_name) diff --git a/tests/integration/cqlengine/columns/test_container_columns.py b/tests/integration/cqlengine/columns/test_container_columns.py index b2034bb11b..57f97ceaa0 100644 --- a/tests/integration/cqlengine/columns/test_container_columns.py +++ b/tests/integration/cqlengine/columns/test_container_columns.py @@ -20,11 +20,13 @@ import traceback from uuid import uuid4 -from cassandra import WriteTimeout +from cassandra import WriteTimeout, OperationTimedOut import cassandra.cqlengine.columns as columns from cassandra.cqlengine.functions import get_total_seconds from cassandra.cqlengine.models import Model, ValidationError from cassandra.cqlengine.management import sync_table, drop_table + +from tests.integration import CASSANDRA_IP from tests.integration.cqlengine import is_prepend_reversed from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration import greaterthancass20, CASSANDRA_VERSION @@ -134,8 +136,11 @@ def test_element_count_validation(self): break except WriteTimeout: ex_type, ex, tb = sys.exc_info() - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb + except OperationTimedOut: + #This will happen if the host is remote + self.assertFalse(CASSANDRA_IP.startswith("127.0.0.")) self.assertRaises(ValidationError, TestSetModel.create, **{'text_set': set(str(uuid4()) for i in range(65536))}) def test_partial_updates(self): diff --git a/tests/integration/cqlengine/connections/test_connection.py b/tests/integration/cqlengine/connections/test_connection.py index 5f4bfe66a6..667a03048e 100644 --- a/tests/integration/cqlengine/connections/test_connection.py +++ b/tests/integration/cqlengine/connections/test_connection.py @@ -24,7 +24,7 @@ from cassandra.cluster import Cluster from cassandra.query import dict_factory -from tests.integration import PROTOCOL_VERSION, execute_with_long_wait_retry +from tests.integration import PROTOCOL_VERSION, execute_with_long_wait_retry, local from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration.cqlengine import DEFAULT_KEYSPACE, setup_connection from cassandra.cqlengine import models @@ -95,10 +95,18 @@ def test_connection_session_switch(self): self.assertEqual(1, TestConnectModel.objects.count()) self.assertEqual(TestConnectModel.objects.first(), TCM2) +<<<<<<< HEAD +======= + @local +>>>>>>> 1d91fa8... Added support for testing remotely and on Windows def test_connection_setup_with_setup(self): connection.setup(hosts=None, default_keyspace=None) self.assertIsNotNone(connection.get_connection("default").cluster.metadata.get_host("127.0.0.1")) +<<<<<<< HEAD +======= + @local +>>>>>>> 1d91fa8... Added support for testing remotely and on Windows def test_connection_setup_with_default(self): connection.default() self.assertIsNotNone(connection.get_connection("default").cluster.metadata.get_host("127.0.0.1")) diff --git a/tests/integration/cqlengine/test_connections.py b/tests/integration/cqlengine/test_connections.py index e50fb1567d..c658ef1867 100644 --- a/tests/integration/cqlengine/test_connections.py +++ b/tests/integration/cqlengine/test_connections.py @@ -23,6 +23,7 @@ from tests.integration.cqlengine import setup_connection, DEFAULT_KEYSPACE from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration.cqlengine.query import test_queryset +from tests.integration import local, CASSANDRA_IP class TestModel(Model): @@ -44,7 +45,6 @@ class AnotherTestModel(Model): count = columns.Integer() text = columns.Text() - class ContextQueryConnectionTests(BaseCassEngTestCase): @classmethod @@ -53,8 +53,8 @@ def setUpClass(cls): create_keyspace_simple('ks1', 1) conn.unregister_connection('default') - conn.register_connection('fake_cluster', ['127.0.0.100'], lazy_connect=True, retry_connect=True, default=True) - conn.register_connection('cluster', ['127.0.0.1']) + conn.register_connection('fake_cluster', ['1.2.3.4'], lazy_connect=True, retry_connect=True, default=True) + conn.register_connection('cluster', [CASSANDRA_IP]) with ContextQuery(TestModel, connection='cluster') as tm: sync_table(tm) @@ -141,7 +141,7 @@ def setUpClass(cls): super(ManagementConnectionTests, cls).setUpClass() conn.unregister_connection('default') conn.register_connection('fake_cluster', ['127.0.0.100'], lazy_connect=True, retry_connect=True, default=True) - conn.register_connection('cluster', ['127.0.0.1']) + conn.register_connection('cluster', [CASSANDRA_IP]) @classmethod def tearDownClass(cls): @@ -227,11 +227,11 @@ def test_connection_creation_from_session(self): @test_category object_mapper """ - cluster = Cluster(['127.0.0.1']) + cluster = Cluster([CASSANDRA_IP]) session = cluster.connect() connection_name = 'from_session' conn.register_connection(connection_name, session=session) - self.assertIsNotNone(conn.get_connection(connection_name).cluster.metadata.get_host("127.0.0.1")) + self.assertIsNotNone(conn.get_connection(connection_name).cluster.metadata.get_host(CASSANDRA_IP)) self.addCleanup(conn.unregister_connection, connection_name) cluster.shutdown() @@ -245,8 +245,8 @@ def test_connection_from_hosts(self): @test_category object_mapper """ connection_name = 'from_hosts' - conn.register_connection(connection_name, hosts=['127.0.0.1']) - self.assertIsNotNone(conn.get_connection(connection_name).cluster.metadata.get_host("127.0.0.1")) + conn.register_connection(connection_name, hosts=[CASSANDRA_IP]) + self.assertIsNotNone(conn.get_connection(connection_name).cluster.metadata.get_host(CASSANDRA_IP)) self.addCleanup(conn.unregister_connection, connection_name) def test_connection_param_validation(self): @@ -258,7 +258,7 @@ def test_connection_param_validation(self): @test_category object_mapper """ - cluster = Cluster(['127.0.0.1']) + cluster = Cluster([CASSANDRA_IP]) session = cluster.connect() with self.assertRaises(CQLEngineException): conn.register_connection("bad_coonection1", session=session, consistency="not_null") @@ -275,6 +275,8 @@ def test_connection_param_validation(self): cluster.shutdown() + cluster.shutdown() + class BatchQueryConnectionTests(BaseCassEngTestCase): conns = ['cluster'] @@ -289,7 +291,7 @@ def setUpClass(cls): conn.unregister_connection('default') conn.register_connection('fake_cluster', ['127.0.0.100'], lazy_connect=True, retry_connect=True, default=True) - conn.register_connection('cluster', ['127.0.0.1']) + conn.register_connection('cluster', [CASSANDRA_IP]) @classmethod def tearDownClass(cls): @@ -415,7 +417,6 @@ def test_batch_query_connection_override(self): with BatchQuery(connection='cluster') as b: obj1.batch(b).using(connection='test').save() - class UsingDescriptorTests(BaseCassEngTestCase): conns = ['cluster'] @@ -427,7 +428,7 @@ def setUpClass(cls): conn.unregister_connection('default') conn.register_connection('fake_cluster', ['127.0.0.100'], lazy_connect=True, retry_connect=True, default=True) - conn.register_connection('cluster', ['127.0.0.1']) + conn.register_connection('cluster', [CASSANDRA_IP]) @classmethod def tearDownClass(cls): @@ -527,13 +528,12 @@ def __init__(self, *args, **kwargs): super(ModelQuerySetNew, self).__init__(*args, **kwargs) self._connection = "cluster" - class BaseConnectionTestNoDefault(object): conns = ['cluster'] @classmethod def setUpClass(cls): - conn.register_connection('cluster', ['127.0.0.1']) + conn.register_connection('cluster', [CASSANDRA_IP]) test_queryset.TestModel.__queryset__ = ModelQuerySetNew test_queryset.IndexedTestModel.__queryset__ = ModelQuerySetNew test_queryset.IndexedCollectionsTestModel.__queryset__ = ModelQuerySetNew diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index 473b398a12..5429eeef44 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -18,7 +18,8 @@ from cassandra.cluster import Cluster, NoHostAvailable from cassandra.auth import PlainTextAuthProvider, SASLClient, SaslAuthProvider -from tests.integration import use_singledc, get_cluster, remove_cluster, PROTOCOL_VERSION +from tests.integration import use_singledc, get_cluster, remove_cluster, PROTOCOL_VERSION, CASSANDRA_IP, \ + set_default_cass_ip from tests.integration.util import assert_quiescent_pool_state try: @@ -29,18 +30,25 @@ log = logging.getLogger(__name__) +#This can be tested for remote hosts, but the cluster has to be configured accordingly +#@local + + def setup_module(): - use_singledc(start=False) - ccm_cluster = get_cluster() - ccm_cluster.stop() - config_options = {'authenticator': 'PasswordAuthenticator', - 'authorizer': 'CassandraAuthorizer'} - ccm_cluster.set_configuration_options(config_options) - log.debug("Starting ccm test cluster with %s", config_options) - ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True) - # there seems to be some race, with some versions of C* taking longer to - # get the auth (and default user) setup. Sleep here to give it a chance - time.sleep(10) + if CASSANDRA_IP.startswith("127.0.0."): + use_singledc(start=False) + ccm_cluster = get_cluster() + ccm_cluster.stop() + config_options = {'authenticator': 'PasswordAuthenticator', + 'authorizer': 'CassandraAuthorizer'} + ccm_cluster.set_configuration_options(config_options) + log.debug("Starting ccm test cluster with %s", config_options) + ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True) + # there seems to be some race, with some versions of C* taking longer to + # get the auth (and default user) setup. Sleep here to give it a chance + time.sleep(10) + else: + set_default_cass_ip() def teardown_module(): diff --git a/tests/integration/standard/test_client_warnings.py b/tests/integration/standard/test_client_warnings.py index a463578f4e..24a54498e5 100644 --- a/tests/integration/standard/test_client_warnings.py +++ b/tests/integration/standard/test_client_warnings.py @@ -21,7 +21,7 @@ from cassandra.query import BatchStatement from cassandra.cluster import Cluster -from tests.integration import use_singledc, PROTOCOL_VERSION +from tests.integration import use_singledc, PROTOCOL_VERSION, local def setup_module(): @@ -93,6 +93,7 @@ def test_warning_with_trace(self): self.assertRegexpMatches(future.warnings[0], 'Batch.*exceeding.*') self.assertIsNotNone(future.get_query_trace()) + @local def test_warning_with_custom_payload(self): """ Test to validate client warning with custom payload @@ -111,6 +112,7 @@ def test_warning_with_custom_payload(self): self.assertRegexpMatches(future.warnings[0], 'Batch.*exceeding.*') self.assertDictEqual(future.custom_payload, payload) + @local def test_warning_with_trace_and_custom_payload(self): """ Test to validate client warning with tracing and client warning diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 47e76b78fd..b40eb32bcc 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -32,8 +32,9 @@ WhiteListRoundRobinPolicy, AddressTranslator) from cassandra.query import SimpleStatement, TraceUnavailable, tuple_factory -from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, CASSANDRA_VERSION, execute_until_pass, execute_with_long_wait_retry, get_node,\ - MockLoggingHandler, get_unsupported_lower_protocol, get_unsupported_upper_protocol, protocolv5 + +from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, CASSANDRA_VERSION, DSE_VERSION, execute_until_pass, execute_with_long_wait_retry, get_node,\ + MockLoggingHandler, get_unsupported_lower_protocol, get_unsupported_upper_protocol, protocolv5, local, CASSANDRA_IP from tests.integration.util import assert_quiescent_pool_state import sys @@ -56,7 +57,7 @@ def distance(self, host): class ClusterTests(unittest.TestCase): - + @local def test_ignored_host_up(self): """ Test to ensure that is_up is not set by default on ignored hosts @@ -77,6 +78,7 @@ def test_ignored_host_up(self): self.assertIsNone(host.is_up) cluster.shutdown() + @local def test_host_resolution(self): """ Test to insure A records are resolved appropriately. @@ -90,6 +92,7 @@ def test_host_resolution(self): cluster = Cluster(contact_points=["localhost"], protocol_version=PROTOCOL_VERSION, connect_timeout=1) self.assertTrue('127.0.0.1' in cluster.contact_points_resolved) + @local def test_host_duplication(self): """ Ensure that duplicate hosts in the contact points are surfaced in the cluster metadata @@ -109,6 +112,7 @@ def test_host_duplication(self): self.assertEqual(len(cluster.metadata.all_hosts()), 3) cluster.shutdown() + @local def test_raise_error_on_control_connection_timeout(self): """ Test for initial control connection timeout @@ -436,9 +440,9 @@ def test_refresh_schema_type(self): self.assertEqual(original_type_meta.as_cql_query(), current_type_meta.as_cql_query()) cluster.shutdown() + @local def test_refresh_schema_no_wait(self): - - contact_points = ['127.0.0.1'] + contact_points = [CASSANDRA_IP] cluster = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=10, contact_points=contact_points, load_balancing_policy=WhiteListRoundRobinPolicy(contact_points)) session = cluster.connect() @@ -447,6 +451,7 @@ def test_refresh_schema_no_wait(self): new_schema_ver = uuid4() session.execute("UPDATE system.local SET schema_version=%s WHERE key='local'", (new_schema_ver,)) + try: agreement_timeout = 1 @@ -689,6 +694,7 @@ def test_pool_management(self): cluster.shutdown() + @local def test_profile_load_balancing(self): """ Tests that profile load balancing policies are honored. @@ -700,7 +706,7 @@ def test_profile_load_balancing(self): @test_category config_profiles """ query = "select release_version from system.local" - node1 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1'])) + node1 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy([CASSANDRA_IP])) with Cluster(execution_profiles={'node1': node1}) as cluster: session = cluster.connect(wait_for_all_pools=True) @@ -713,7 +719,7 @@ def test_profile_load_balancing(self): self.assertEqual(queried_hosts, expected_hosts) # by name we should only hit the one - expected_hosts = set(h for h in cluster.metadata.all_hosts() if h.address == '127.0.0.1') + expected_hosts = set(h for h in cluster.metadata.all_hosts() if h.address == CASSANDRA_IP) queried_hosts = set() for _ in cluster.metadata.all_hosts(): rs = session.execute(query, execution_profile='node1') @@ -837,6 +843,7 @@ def test_missing_exec_prof(self): with self.assertRaises(ValueError): session.execute(query, execution_profile='rr3') + @local def test_profile_pool_management(self): """ Tests that changes to execution profiles correctly impact our cluster's pooling @@ -863,6 +870,7 @@ def test_profile_pool_management(self): pools = session.get_pool_state() self.assertEqual(set(h.address for h in pools), set(('127.0.0.1', '127.0.0.2', '127.0.0.3'))) + @local def test_add_profile_timeout(self): """ Tests that EP Timeouts are honored. @@ -889,10 +897,12 @@ def test_add_profile_timeout(self): try: self.assertRaises(cassandra.OperationTimedOut, cluster.add_execution_profile, 'node2', node2, pool_wait_timeout=sys.float_info.min) + break except Exception: end = time.time() self.assertAlmostEqual(start, end, 1) - break + else: + raise Exception("add_execution_profile didn't timeout after {0} retries".format(max_retry_count)) class LocalHostAdressTranslator(AddressTranslator): @@ -904,7 +914,7 @@ def translate(self, addr): new_addr = self.addr_map.get(addr) return new_addr - +@local class TestAddressTranslation(unittest.TestCase): def test_address_translator_basic(self): @@ -948,11 +958,11 @@ def test_address_translator_with_mixed_nodes(self): self.assertEqual(adder_map.get(str(host)), host.broadcast_address) c.shutdown() - +@local class ContextManagementTest(unittest.TestCase): - - load_balancing_policy = WhiteListRoundRobinPolicy(['127.0.0.1']) - cluster_kwargs = {'load_balancing_policy': load_balancing_policy, + load_balancing_policy = WhiteListRoundRobinPolicy([CASSANDRA_IP]) + cluster_kwargs = {'execution_profiles': {EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy= + load_balancing_policy)}, 'schema_metadata_enabled': False, 'token_metadata_enabled': False} @@ -1068,7 +1078,7 @@ def test_down_event_with_active_connection(self): time.sleep(.01) self.assertTrue(was_marked_down) - +@local class DontPrepareOnIgnoredHostsTest(unittest.TestCase): ignored_addresses = ['127.0.0.3'] @@ -1106,7 +1116,7 @@ def test_prepare_on_ignored_hosts(self): self.assertEqual(call(unignored_address), c) cluster.shutdown() - +@local class DuplicateRpcTest(unittest.TestCase): load_balancing_policy = WhiteListRoundRobinPolicy(['127.0.0.1']) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 538cf406f0..44f730ab96 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -32,7 +32,7 @@ from cassandra.pool import HostConnectionPool from tests import is_monkey_patched -from tests.integration import use_singledc, PROTOCOL_VERSION, get_node +from tests.integration import use_singledc, PROTOCOL_VERSION, get_node, CASSANDRA_IP, local try: from cassandra.io.libevreactor import LibevConnection @@ -49,7 +49,8 @@ class ConnectionTimeoutTest(unittest.TestCase): def setUp(self): self.defaultInFlight = Connection.max_in_flight Connection.max_in_flight = 2 - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1'])) + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, load_balancing_policy= + WhiteListRoundRobinPolicy([CASSANDRA_IP])) self.session = self.cluster.connect() def tearDown(self): @@ -104,6 +105,7 @@ def setUp(self): def tearDown(self): self.cluster.shutdown() + @local def test_heart_beat_timeout(self): # Setup a host listener to ensure the nodes don't go down test_listener = TestHostListener() @@ -190,7 +192,8 @@ def get_connection(self, timeout=5): e = None for i in range(5): try: - conn = self.klass.factory(host='127.0.0.1', timeout=timeout, protocol_version=PROTOCOL_VERSION) + contact_point = CASSANDRA_IP + conn = self.klass.factory(host=contact_point, timeout=timeout, protocol_version=PROTOCOL_VERSION) break except (OperationTimedOut, NoHostAvailable) as e: continue diff --git a/tests/integration/standard/test_custom_payload.py b/tests/integration/standard/test_custom_payload.py index 3d4b849661..4ca49f0d8e 100644 --- a/tests/integration/standard/test_custom_payload.py +++ b/tests/integration/standard/test_custom_payload.py @@ -23,11 +23,14 @@ from cassandra.query import (SimpleStatement, BatchStatement, BatchType) from cassandra.cluster import Cluster -from tests.integration import use_singledc, PROTOCOL_VERSION +from tests.integration import use_singledc, PROTOCOL_VERSION, local def setup_module(): use_singledc() +#These test rely on the custom payload being returned but by default C* +#ignores all the payloads. +@local class CustomPayloadTests(unittest.TestCase): def setUp(self): diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index 2da9a73cc1..fdc5e4f35b 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -69,6 +69,7 @@ def test_custom_raw_uuid_row_results(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect(keyspace="custserdes") session.row_factory = tuple_factory + result = session.execute("SELECT schema_version FROM system.local") uuid_type = result[0][0] self.assertEqual(type(uuid_type), uuid.UUID) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index c54fc1ff22..4adfd6c171 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -37,7 +37,7 @@ BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, BasicExistingKeyspaceUnitTestCase, drop_keyspace_shutdown_cluster, CASSANDRA_VERSION, BasicExistingSegregatedKeyspaceUnitTestCase, dseonly, DSE_VERSION, - get_supported_protocol_versions, greaterthanorequalcass30) + get_supported_protocol_versions, greaterthanorequalcass30, local) from tests.integration import greaterthancass21 @@ -48,13 +48,14 @@ def setup_module(): class HostMetatDataTests(BasicExistingKeyspaceUnitTestCase): + @local def test_broadcast_listen_address(self): """ Check to ensure that the broadcast and listen adresss is populated correctly @since 3.3 @jira_ticket PYTHON-332 - @expected_result They are populated for C*> 2.0.16, 2.1.6, 2.2.0 + @expected_result They are populated for C*> 2.1.6, 2.2.0 @test_category metadata """ @@ -81,7 +82,7 @@ def test_host_release_version(self): for host in self.cluster.metadata.all_hosts(): self.assertTrue(host.release_version.startswith(CASSANDRA_VERSION)) - +@local class MetaDataRemovalTest(unittest.TestCase): def setUp(self): @@ -1090,6 +1091,7 @@ def test_case_sensitivity(self): ksname = 'AnInterestingKeyspace' cfname = 'AnInterestingTable' + session.execute("DROP KEYSPACE IF EXISTS {0}".format(ksname)) session.execute(""" CREATE KEYSPACE "%s" WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} @@ -1146,6 +1148,7 @@ def test_already_exists_exceptions(self): self.assertRaises(AlreadyExists, session.execute, ddl % (ksname, cfname)) cluster.shutdown() + @local def test_replicas(self): """ Ensure cluster.metadata.get_replicas return correctly when not attached to keyspace @@ -1184,6 +1187,7 @@ def test_token_map(self): self.assertEqual(set(get_replicas('test1rf', token)), set([owners[(i + 1) % 3]])) cluster.shutdown() + @local def test_legacy_tables(self): if CASS_SERVER_VERSION < (2, 1, 0): @@ -1450,7 +1454,7 @@ class TokenMetadataTest(unittest.TestCase): """ Test of TokenMap creation and other behavior. """ - + @local def test_token(self): expected_node_count = len(get_cluster().nodes) @@ -2117,8 +2121,6 @@ def setup_class(cls): cls.parser_class = get_schema_parser(connection, str(CASS_SERVER_VERSION[0]), timeout=20).__class__ cls.cluster.control_connection.reconnect = Mock() - - @classmethod def teardown_class(cls): drop_keyspace_shutdown_cluster(cls.keyspace_name, cls.session, cls.cluster) diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 1f133a431d..625617f696 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -28,12 +28,12 @@ from cassandra.cluster import Cluster, NoHostAvailable from tests.integration import get_cluster, get_node, use_singledc, PROTOCOL_VERSION, execute_until_pass from greplin import scales -from tests.integration import BasicSharedKeyspaceUnitTestCaseWTable, BasicExistingKeyspaceUnitTestCase +from tests.integration import BasicSharedKeyspaceUnitTestCaseWTable, BasicExistingKeyspaceUnitTestCase, local def setup_module(): use_singledc() - +@local class MetricsTests(unittest.TestCase): def setUp(self): @@ -178,7 +178,7 @@ def test_unavailable(self): class MetricsNamespaceTest(BasicSharedKeyspaceUnitTestCaseWTable): - + @local def test_metrics_per_cluster(self): """ Test to validate that metrics can be scopped to invdividual clusters diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index 31bcd6a968..43f5b0f368 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -18,11 +18,13 @@ import unittest2 as unittest except ImportError: import unittest # noqa + from cassandra import OperationTimedOut from cassandra.cluster import ExecutionProfile from cassandra.query import SimpleStatement from cassandra.policies import ConstantSpeculativeExecutionPolicy, RoundRobinPolicy from tests.integration import BasicSharedKeyspaceUnitTestCase, greaterthancass21 +from tests import notwindows def setup_module(): @@ -52,6 +54,8 @@ def setUp(self): self.cluster.add_execution_profile("spec_ep_rr", spec_ep_rr) self.cluster.add_execution_profile("spec_ep_rr_lim", spec_ep_rr_lim) + #This doesn't work well with Windows clock granularity + @notwindows @greaterthancass21 def test_speculative_execution(self): """ diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index ea7588a3b8..50a0ed4127 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -48,7 +48,11 @@ def test_basic(self): """ Test basic PreparedStatement usage """ - + self.session.execute( + """ + DROP KEYSPACE IF EXISTS preparedtests + """ + ) self.session.execute( """ CREATE KEYSPACE preparedtests diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index e655312fbd..b51a806b93 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -25,7 +25,9 @@ BatchStatement, BatchType, dict_factory, TraceUnavailable) from cassandra.cluster import Cluster, NoHostAvailable from cassandra.policies import HostDistance, RoundRobinPolicy -from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, get_server_versions, greaterthanprotocolv3, MockLoggingHandler, get_supported_protocol_versions +from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, get_server_versions, \ + greaterthanprotocolv3, MockLoggingHandler, get_supported_protocol_versions, local +from tests import notwindows import time import re @@ -115,6 +117,7 @@ def test_trace_ignores_row_factory(self): for event in trace.events: str(event) + @local @greaterthanprotocolv3 def test_client_ip_in_trace(self): """ @@ -179,6 +182,7 @@ def test_trace_cl(self): self.assertIsNotNone(response_future.get_query_trace(max_wait=2.0, query_cl=ConsistencyLevel.ANY).trace_id) self.assertIsNotNone(response_future.get_query_trace(max_wait=2.0, query_cl=ConsistencyLevel.QUORUM).trace_id) + @notwindows def test_incomplete_query_trace(self): """ Tests to ensure that partial tracing works. @@ -430,9 +434,9 @@ def test_prepared_metadata_generation(self): future = session.execute_async(select_statement) results = future.result() if base_line is None: - base_line = results[0].__dict__.keys() + base_line = results[0]._asdict().keys() else: - self.assertEqual(base_line, results[0].__dict__.keys()) + self.assertEqual(base_line, results[0]._asdict().keys()) cluster.shutdown() diff --git a/tests/unit/test_concurrent.py b/tests/unit/test_concurrent.py index f46761094d..b0b0f05a9e 100644 --- a/tests/unit/test_concurrent.py +++ b/tests/unit/test_concurrent.py @@ -24,6 +24,7 @@ import threading from six.moves.queue import PriorityQueue import sys +import platform from cassandra.cluster import Cluster, Session from cassandra.concurrent import execute_concurrent, execute_concurrent_with_args @@ -115,7 +116,6 @@ def run(self): self._stopper.wait(.001) return - class ConcurrencyTest((unittest.TestCase)): def test_results_ordering_forward(self): @@ -231,7 +231,12 @@ def validate_result_ordering(self, results): for success, result in results: self.assertTrue(success) current_time_added = list(result)[0] - self.assertLess(last_time_added, current_time_added) + + #Windows clock granularity makes this equal most of the times + if "Windows" in platform.system(): + self.assertLessEqual(last_time_added, current_time_added) + else: + self.assertLess(last_time_added, current_time_added) last_time_added = current_time_added def test_recursion_limited(self): From 8621f4507cb685212b6cf502201183b884a8119b Mon Sep 17 00:00:00 2001 From: Julien Duponchelle Date: Tue, 7 Mar 2017 14:29:45 +0100 Subject: [PATCH 1008/2005] Fixe a documentation typo --- cassandra/cqlengine/columns.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 064e4a9105..649dd3a9fd 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -477,7 +477,7 @@ def __init__(self, instance, column, value): class Counter(Integer): """ - Stores a counter that can be inremented and decremented + Stores a counter that can be incremented and decremented """ db_type = 'counter' From d42c64c728b39635a0739e69cd6c0428966e91fc Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 6 Mar 2017 18:11:13 -0500 Subject: [PATCH 1009/2005] Merged from dse --- tests/integration/__init__.py | 5 +- .../standard/test_authentication.py | 51 ++++++++++--------- 2 files changed, 31 insertions(+), 25 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 4996af2356..4944bb9c0b 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -450,8 +450,9 @@ def drop_keyspace_shutdown_cluster(keyspace_name, session, cluster): ex_type, ex, tb = sys.exc_info() log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb - log.warn("Shutting down cluster") - cluster.shutdown() + finally: + log.warn("Shutting down cluster") + cluster.shutdown() def setup_keyspace(ipformat=None, wait=True): diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index 473b398a12..7241b00b04 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -99,38 +99,43 @@ def test_auth_connect(self): def test_connect_wrong_pwd(self): cluster = self.cluster_as('cassandra', 'wrong_pass') - self.assertRaisesRegexp(NoHostAvailable, - '.*AuthenticationFailed.*Bad credentials.*Username and/or ' - 'password are incorrect.*', - cluster.connect) - assert_quiescent_pool_state(self, cluster) - cluster.shutdown() + try: + self.assertRaisesRegexp(NoHostAvailable, + '.*AuthenticationFailed.', + cluster.connect) + assert_quiescent_pool_state(self, cluster) + finally: + cluster.shutdown() def test_connect_wrong_username(self): cluster = self.cluster_as('wrong_user', 'cassandra') - self.assertRaisesRegexp(NoHostAvailable, - '.*AuthenticationFailed.*Bad credentials.*Username and/or ' - 'password are incorrect.*', - cluster.connect) - assert_quiescent_pool_state(self, cluster) - cluster.shutdown() + try: + self.assertRaisesRegexp(NoHostAvailable, + '.*AuthenticationFailed.*', + cluster.connect) + assert_quiescent_pool_state(self, cluster) + finally: + cluster.shutdown() def test_connect_empty_pwd(self): cluster = self.cluster_as('Cassandra', '') - self.assertRaisesRegexp(NoHostAvailable, - '.*AuthenticationFailed.*Bad credentials.*Username and/or ' - 'password are incorrect.*', - cluster.connect) - assert_quiescent_pool_state(self, cluster) - cluster.shutdown() + try: + self.assertRaisesRegexp(NoHostAvailable, + '.*AuthenticationFailed.*', + cluster.connect) + assert_quiescent_pool_state(self, cluster) + finally: + cluster.shutdown() def test_connect_no_auth_provider(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION) - self.assertRaisesRegexp(NoHostAvailable, - '.*AuthenticationFailed.*Remote end requires authentication.*', - cluster.connect) - assert_quiescent_pool_state(self, cluster) - cluster.shutdown() + try: + self.assertRaisesRegexp(NoHostAvailable, + '.*AuthenticationFailed.*', + cluster.connect) + assert_quiescent_pool_state(self, cluster) + finally: + cluster.shutdown() class SaslAuthenticatorTests(AuthenticationTests): From d4d4d81aa80dc0da80cfc37b10922338587a1cef Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 10 Mar 2017 20:29:53 -0500 Subject: [PATCH 1010/2005] Added fix for gevent and eventlet --- cassandra/io/eventletreactor.py | 2 +- cassandra/io/geventreactor.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index cf1616d45b..2b1ac1c156 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -138,7 +138,7 @@ def handle_read(self): self.defunct(err) return # leave the read loop - if self._iobuf.tell(): + if buf and self._iobuf.tell(): self.process_io_buffer() else: log.debug("Connection %s closed by server", self) diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index bf0a4cc181..7f05e2ba73 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -125,7 +125,7 @@ def handle_read(self): self.defunct(err) return # leave the read loop - if self._iobuf.tell(): + if buf and self._iobuf.tell(): self.process_io_buffer() else: log.debug("Connection %s closed by server", self) From dd97735533a65ce4e7136c9c1c9ce1d76fdb553b Mon Sep 17 00:00:00 2001 From: ittus Date: Tue, 14 Mar 2017 10:07:32 +0800 Subject: [PATCH 1011/2005] Fixed docs import BatchQuery --- docs/cqlengine/batches.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/cqlengine/batches.rst b/docs/cqlengine/batches.rst index 29520674d7..c2b711a51f 100644 --- a/docs/cqlengine/batches.rst +++ b/docs/cqlengine/batches.rst @@ -12,7 +12,7 @@ Batch Query General Use Pattern .. code-block:: python - from cassandra.cqlengine import BatchQuery + from cassandra.cqlengine.query import BatchQuery #using a context manager with BatchQuery() as b: From cbde7a7c9350e6cc968992d9a7b10861060c243d Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 14 Mar 2017 13:40:55 -0400 Subject: [PATCH 1012/2005] Skipped ALTER TYPE tests for C* 3.1 --- build.yaml | 2 +- tests/integration/standard/test_metadata.py | 3 ++- tests/integration/standard/test_udts.py | 9 ++++++--- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/build.yaml b/build.yaml index 0b387d5808..e73bfbcab0 100644 --- a/build.yaml +++ b/build.yaml @@ -14,7 +14,7 @@ cassandra: - 2.1 - 2.2 - 3.0 - - 3.7 + - 3.10 env: EVENT_LOOP_MANAGER: - libev diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index c54fc1ff22..7c95c2ab31 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -37,7 +37,7 @@ BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, BasicExistingKeyspaceUnitTestCase, drop_keyspace_shutdown_cluster, CASSANDRA_VERSION, BasicExistingSegregatedKeyspaceUnitTestCase, dseonly, DSE_VERSION, - get_supported_protocol_versions, greaterthanorequalcass30) + get_supported_protocol_versions, greaterthanorequalcass30, lessthancass30) from tests.integration import greaterthancass21 @@ -2470,6 +2470,7 @@ def test_base_table_column_addition_mv(self): mv_alltime_fouls_comumn = self.cluster.metadata.keyspaces[self.keyspace_name].views["alltimehigh"].columns['fouls'] self.assertEqual(mv_alltime_fouls_comumn.cql_type, 'int') + @lessthancass30 def test_base_table_type_alter_mv(self): """ test to ensure that materialized view metadata is properly updated when a type in the base table diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index 41957f1227..fe02050291 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -26,9 +26,10 @@ from cassandra.query import dict_factory from cassandra.util import OrderedMap -from tests.integration import use_singledc, PROTOCOL_VERSION, execute_until_pass, BasicSegregatedKeyspaceUnitTestCase, greaterthancass20, greaterthanorequalcass36 -from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, PRIMITIVE_DATATYPES_KEYS, COLLECTION_TYPES, \ - get_sample, get_collection_sample +from tests.integration import use_singledc, PROTOCOL_VERSION, execute_until_pass, BasicSegregatedKeyspaceUnitTestCase, \ + greaterthancass20, greaterthanorequalcass36, lessthancass30 +from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, PRIMITIVE_DATATYPES_KEYS, \ + COLLECTION_TYPES, get_sample, get_collection_sample nested_collection_udt = namedtuple('nested_collection_udt', ['m', 't', 'l', 's']) nested_collection_udt_nested = namedtuple('nested_collection_udt_nested', ['m', 't', 'l', 's', 'u']) @@ -691,6 +692,7 @@ def test_non_alphanum_identifiers(self): self.assertEqual(k[0], 'alphanum') self.assertEqual(k.field_0_, 'alphanum') # named tuple with positional field name + @lessthancass30 def test_type_alteration(self): s = self.session type_name = "type_name" @@ -723,6 +725,7 @@ def test_type_alteration(self): self.assertEqual(val['v0'], 3) self.assertEqual(val['v1'], six.b('\xde\xad\xbe\xef')) + @lessthancass30 def test_alter_udt(self): """ Test to ensure that altered UDT's are properly surfaced without needing to restart the underlying session. From 49196074bf54513fffadb484f77f5bd8e5b4e413 Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 14 Mar 2017 14:08:35 -0400 Subject: [PATCH 1013/2005] Fixed AsyncoreConnectionTests.test_multiple_connections test --- tests/integration/standard/test_connection.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 538cf406f0..272c4346bf 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -24,7 +24,7 @@ import time from cassandra import ConsistencyLevel, OperationTimedOut -from cassandra.cluster import NoHostAvailable, Cluster +from cassandra.cluster import NoHostAvailable, ConnectionShutdown, Cluster from cassandra.io.asyncorereactor import AsyncoreConnection from cassandra.protocol import QueryMessage from cassandra.connection import Connection @@ -192,7 +192,7 @@ def get_connection(self, timeout=5): try: conn = self.klass.factory(host='127.0.0.1', timeout=timeout, protocol_version=PROTOCOL_VERSION) break - except (OperationTimedOut, NoHostAvailable) as e: + except (OperationTimedOut, NoHostAvailable, ConnectionShutdown) as e: continue if conn: From a12f5adb07304a47c27845e02ecb64128270fcd4 Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 14 Mar 2017 15:39:09 -0400 Subject: [PATCH 1014/2005] Fixed merge conflict --- tests/integration/cqlengine/connections/test_connection.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tests/integration/cqlengine/connections/test_connection.py b/tests/integration/cqlengine/connections/test_connection.py index 667a03048e..65102b5298 100644 --- a/tests/integration/cqlengine/connections/test_connection.py +++ b/tests/integration/cqlengine/connections/test_connection.py @@ -95,18 +95,12 @@ def test_connection_session_switch(self): self.assertEqual(1, TestConnectModel.objects.count()) self.assertEqual(TestConnectModel.objects.first(), TCM2) -<<<<<<< HEAD -======= @local ->>>>>>> 1d91fa8... Added support for testing remotely and on Windows def test_connection_setup_with_setup(self): connection.setup(hosts=None, default_keyspace=None) self.assertIsNotNone(connection.get_connection("default").cluster.metadata.get_host("127.0.0.1")) -<<<<<<< HEAD -======= @local ->>>>>>> 1d91fa8... Added support for testing remotely and on Windows def test_connection_setup_with_default(self): connection.default() self.assertIsNotNone(connection.get_connection("default").cluster.metadata.get_host("127.0.0.1")) From 90de49628e754415e1621667b89670ffb53ade09 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 15 Mar 2017 12:04:43 -0400 Subject: [PATCH 1015/2005] Ensure Evenlet greenthreads exit gracefully --- cassandra/io/eventletreactor.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index cf1616d45b..4553975113 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -19,6 +19,7 @@ import eventlet from eventlet.green import socket from eventlet.queue import Queue +from greenlet import GreenletExit import logging from threading import Event import time @@ -80,7 +81,6 @@ def service_timeouts(cls): def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) - self._write_queue = Queue() self._connect_socket() @@ -126,6 +126,8 @@ def handle_write(self): log.debug("Exception during socket send for %s: %s", self, err) self.defunct(err) return # Leave the write loop + except GreenletExit: # graceful greenthread exit + return def handle_read(self): while True: @@ -137,6 +139,8 @@ def handle_read(self): self, err) self.defunct(err) return # leave the read loop + except GreenletExit: # graceful greenthread exit + return if self._iobuf.tell(): self.process_io_buffer() From 8252196a0d9e169d1182de908b8335f1730dfdb2 Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 15 Mar 2017 14:01:20 -0400 Subject: [PATCH 1016/2005] Fixed typo --- tests/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/__init__.py b/tests/__init__.py index 199ec56164..c21074e1e6 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -47,4 +47,4 @@ def is_gevent_monkey_patched(): def is_monkey_patched(): return is_gevent_monkey_patched() or is_eventlet_monkey_patched() -notwindows = unittest.skipUnless(not "Windows" in platform.system(), "This test is not adecuate for windows") +notwindows = unittest.skipUnless(not "Windows" in platform.system(), "This test is not adequate for windows") From 13f7925eeb60fc315b7dd0447dca9f7309b49319 Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 16 Mar 2017 09:17:54 -0400 Subject: [PATCH 1017/2005] Added more values to the decimal conversion tests --- tests/unit/test_marshalling.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index 1f0dde4b6d..d515a2aa2e 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -140,7 +140,7 @@ def test_date(self): def test_decimal(self): # testing implicit numeric conversion # int, tuple(sign, digits, exp), float - converted_types = (10001, (0, (1, 0, 0, 0, 0, 1), -3), 100.1) + converted_types = (10001, (0, (1, 0, 0, 0, 0, 1), -3), 100.1, -87.629798) if sys.version_info < (2, 7): # Decimal in Python 2.6 does not accept floats for lossless initialization From 990e7dd936dc5ed5c075de71e84193719da78d67 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 16 Mar 2017 14:01:43 -0400 Subject: [PATCH 1018/2005] version 3.8.1 --- CHANGELOG.rst | 11 +++++++++++ cassandra/__init__.py | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 7bdb5f61d7..adba5264cc 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,14 @@ +3.8.1 +===== +March 16, 2017 + +Bug Fixes +--------- + +* implement __le__/__ge__/__ne__ on some custom types (PYTHON-714) +* Fix bug in eventlet and gevent reactors that could cause hangs (PYTHON-721) +* Fix DecimalType regression (PYTHON-724) + 3.8.0 ===== diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 4d5d3b488f..2108c8f535 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 8, 0, 'post0') +__version_info__ = (3, 8, 1) __version__ = '.'.join(map(str, __version_info__)) From b21e9ddc958f942a854039e216b30aa45b04d094 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 16 Mar 2017 14:51:07 -0400 Subject: [PATCH 1019/2005] post-release version (3.8.1) --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 2108c8f535..edb99c609e 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 8, 1) +__version_info__ = (3, 8, 1, 'post0') __version__ = '.'.join(map(str, __version_info__)) From 9ac54596e16935fde3442898fbd1f95d75cbb8bc Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 6 Mar 2017 16:19:44 -0500 Subject: [PATCH 1020/2005] Fixed timestamp log warning --- cassandra/timestamps.py | 4 ++-- tests/unit/test_timestamps.py | 14 ++++++++++---- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/cassandra/timestamps.py b/cassandra/timestamps.py index e9602e161f..87cf47b4ec 100644 --- a/cassandra/timestamps.py +++ b/cassandra/timestamps.py @@ -53,7 +53,7 @@ class MonotonicTimestampGenerator(object): Defaults to 1 second. """ - def __init__(self, warn_on_drift=True, warning_threshold=0, warning_interval=0): + def __init__(self, warn_on_drift=True, warning_threshold=warning_threshold, warning_interval=warning_interval): self.lock = Lock() with self.lock: self.last = 0 @@ -97,7 +97,7 @@ def _maybe_warn(self, now): since_last_warn = now - self._last_warn warn = (self.warn_on_drift and - (diff > self.warning_threshold * 1e6) and + (diff >= self.warning_threshold * 1e6) and (since_last_warn >= self.warning_interval * 1e6)) if warn: log.warn( diff --git a/tests/unit/test_timestamps.py b/tests/unit/test_timestamps.py index c2f8b93da8..67a82a621c 100644 --- a/tests/unit/test_timestamps.py +++ b/tests/unit/test_timestamps.py @@ -123,7 +123,10 @@ def test_basic_log_content(self): @test_category timing """ - tsg = timestamps.MonotonicTimestampGenerator() + tsg = timestamps.MonotonicTimestampGenerator( + warning_threshold=1e-6, + warning_interval=1e-6 + ) #The units of _last_warn is seconds tsg._last_warn = 12 @@ -181,7 +184,8 @@ def test_warning_threshold_respected_logs(self): @test_category timing """ tsg = timestamps.MonotonicTimestampGenerator( - warning_threshold=1e-6 + warning_threshold=1e-6, + warning_interval=1e-6 ) tsg.last, tsg._last_warn = 100, 97 tsg._next_timestamp(98, tsg.last) @@ -198,6 +202,7 @@ def test_warning_interval_respected_no_logging(self): @test_category timing """ tsg = timestamps.MonotonicTimestampGenerator( + warning_threshold=1e-6, warning_interval=2e-6 ) tsg.last = 100 @@ -219,7 +224,8 @@ def test_warning_interval_respected_logs(self): @test_category timing """ tsg = timestamps.MonotonicTimestampGenerator( - warning_interval=1e-6 + warning_interval=1e-6, + warning_threshold=1e-6, ) tsg.last = 100 tsg._next_timestamp(70, tsg.last) @@ -249,7 +255,7 @@ def request_time(): with lock: generated_timestamps.append(timestamp) - tsg = timestamps.MonotonicTimestampGenerator(warning_threshold=1) + tsg = timestamps.MonotonicTimestampGenerator() fixed_time = 1 num_threads = 5 From 6cc62f7dcd04b2bc6bb175f00a029c11cabeda61 Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 6 Mar 2017 10:36:29 -0500 Subject: [PATCH 1021/2005] Merged cqlengine/connection.py --- cassandra/cqlengine/connection.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 100e8d5a59..5673caf46b 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -184,6 +184,8 @@ def register_connection(name, hosts=None, consistency=None, lazy_connect=False, conn = Connection.from_session(name, session=session) conn.setup_session() else: # use hosts argument + if consistency is None: + consistency = ConsistencyLevel.LOCAL_ONE conn = Connection( name, hosts=hosts, consistency=consistency, lazy_connect=lazy_connect, From 8d8a7363bca95de08addcea431041aed2ea7e721 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 17 Mar 2017 16:41:10 -0400 Subject: [PATCH 1022/2005] use explicit values for MTG.__init__ --- cassandra/timestamps.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/timestamps.py b/cassandra/timestamps.py index 87cf47b4ec..9dfe807494 100644 --- a/cassandra/timestamps.py +++ b/cassandra/timestamps.py @@ -53,7 +53,7 @@ class MonotonicTimestampGenerator(object): Defaults to 1 second. """ - def __init__(self, warn_on_drift=True, warning_threshold=warning_threshold, warning_interval=warning_interval): + def __init__(self, warn_on_drift=True, warning_threshold=1, warning_interval=1): self.lock = Lock() with self.lock: self.last = 0 From 4bec82485e23b7b50a6310fdc7e2d9d40d439e46 Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 17 Mar 2017 16:43:15 -0400 Subject: [PATCH 1023/2005] Added missing import --- cassandra/cqlengine/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 5673caf46b..6ac389c7ee 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -17,7 +17,7 @@ import six import threading -from cassandra.cluster import Cluster, _NOT_SET, NoHostAvailable, UserTypeDoesNotExist +from cassandra.cluster import Cluster, _NOT_SET, NoHostAvailable, UserTypeDoesNotExist, ConsistencyLevel from cassandra.query import SimpleStatement, dict_factory from cassandra.cqlengine import CQLEngineException From afd1cb09ca68529b468760ba1da275bef5c27736 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 20 Mar 2017 12:31:36 -0400 Subject: [PATCH 1024/2005] make cluster.connect() fails when no connection succeed --- cassandra/cluster.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 26c36d9169..fa58ebe602 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1975,6 +1975,12 @@ def __init__(self, cluster, hosts, keyspace=None): while futures.not_done and not any(f.result() for f in futures.done): futures = wait_futures(futures.not_done, return_when=FIRST_COMPLETED) + if not any(f.result() for f in self._initial_connect_futures): + msg = "Unable to connect to any servers" + if self.keyspace: + msg += " using keyspace '%s'" % self.keyspace + raise NoHostAvailable(msg, [h.address for h in hosts]) + def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_payload=None, execution_profile=EXEC_PROFILE_DEFAULT, paging_state=None): """ Execute the given query and synchronously wait for the response. From 1bc0cf9e1c85565c63e70a6ce6273aac2fd10c65 Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 17 Mar 2017 11:44:02 -0400 Subject: [PATCH 1025/2005] Updated build.yaml --- build.yaml | 10 +++---- tests/integration/long/test_failure_types.py | 4 +-- tests/integration/standard/test_query.py | 30 +++++++++++++++----- 3 files changed, 30 insertions(+), 14 deletions(-) diff --git a/build.yaml b/build.yaml index e73bfbcab0..5d36e56d56 100644 --- a/build.yaml +++ b/build.yaml @@ -10,11 +10,11 @@ python: os: - ubuntu/trusty64 cassandra: - - 2.0 - - 2.1 - - 2.2 - - 3.0 - - 3.10 + - '2.0' + - '2.1' + - '2.2' + - '3.0' + - '3.11' env: EVENT_LOOP_MANAGER: - libev diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py index 1a81aa0ce1..7f0bed23b4 100644 --- a/tests/integration/long/test_failure_types.py +++ b/tests/integration/long/test_failure_types.py @@ -157,9 +157,9 @@ def _perform_cql_statement(self, text, consistency_level, expected_exception, se self.execute_helper(session, statement) if self.support_v5 and (isinstance(cm.exception, WriteFailure) or isinstance(cm.exception, ReadFailure)): if isinstance(cm.exception, ReadFailure): - self.assertEqual(cm.exception.error_code_map.values()[0], 1) + self.assertEqual(list(cm.exception.error_code_map.values())[0], 1) else: - self.assertEqual(cm.exception.error_code_map.values()[0], 0) + self.assertEqual(list(cm.exception.error_code_map.values())[0], 0) def test_write_failures_from_coordinator(self): """ diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index b51a806b93..e9fe02d5ef 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -20,21 +20,29 @@ except ImportError: import unittest # noqa import logging +from cassandra import ProtocolVersion from cassandra import ConsistencyLevel, Unavailable, InvalidRequest, cluster from cassandra.query import (PreparedStatement, BoundStatement, SimpleStatement, BatchStatement, BatchType, dict_factory, TraceUnavailable) from cassandra.cluster import Cluster, NoHostAvailable from cassandra.policies import HostDistance, RoundRobinPolicy from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, get_server_versions, \ - greaterthanprotocolv3, MockLoggingHandler, get_supported_protocol_versions, local + greaterthanprotocolv3, MockLoggingHandler, get_supported_protocol_versions, local, get_cluster, setup_keyspace from tests import notwindows import time import re - def setup_module(): - use_singledc() + use_singledc(start=False) + ccm_cluster = get_cluster() + ccm_cluster.clear() + # This is necessary because test_too_many_statements may + # timeout otherwise + config_options = {'write_request_timeout_in_ms': '20000'} + ccm_cluster.set_configuration_options(config_options) + ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True) + setup_keyspace() global CASS_SERVER_VERSION CASS_SERVER_VERSION = get_server_versions()[0] @@ -424,7 +432,9 @@ def test_prepared_metadata_generation(self): base_line = None for proto_version in get_supported_protocol_versions(): - cluster = Cluster(protocol_version=proto_version) + beta_flag = True if proto_version in ProtocolVersion.BETA_VERSIONS else False + cluster = Cluster(protocol_version=proto_version, allow_beta_protocol_version=beta_flag) + session = cluster.connect() select_statement = session.prepare("SELECT * FROM system.local") if proto_version == 1: @@ -462,10 +472,16 @@ def test_prepare_on_all_hosts(self): mock_handler = MockLoggingHandler() logger = logging.getLogger(cluster.__name__) logger.addHandler(mock_handler) + self.assertGreaterEqual(len(clus.metadata.all_hosts()), 3) select_statement = session.prepare("SELECT * FROM system.local") - session.execute(select_statement) - session.execute(select_statement) - session.execute(select_statement) + reponse_first = session.execute(select_statement) + reponse_second = session.execute(select_statement) + reponse_third = session.execute(select_statement) + + self.assertEqual(len({reponse_first.response_future.attempted_hosts[0], + reponse_second.response_future.attempted_hosts[0], + reponse_third.response_future.attempted_hosts[0]}), 3) + self.assertEqual(2, mock_handler.get_message_count('debug', "Re-preparing")) finally: clus.shutdown() From 71e030c289abf75f9a5e0dd80b61e856dce08438 Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 16 Mar 2017 14:48:17 -0400 Subject: [PATCH 1026/2005] Fixed some failing tests in Jenkins --- tests/integration/__init__.py | 5 +++-- tests/integration/long/test_failure_types.py | 2 -- tests/integration/long/test_loadbalancingpolicies.py | 4 ++-- tests/integration/long/test_schema.py | 3 ++- tests/integration/standard/test_cluster.py | 10 ++++++---- tests/integration/standard/test_metrics.py | 2 ++ tests/unit/test_marshalling.py | 8 -------- 7 files changed, 15 insertions(+), 19 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 98d407c5b6..d3102f6717 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -28,7 +28,8 @@ from subprocess import call from itertools import groupby -from cassandra import OperationTimedOut, ReadTimeout, ReadFailure, WriteTimeout, WriteFailure, AlreadyExists +from cassandra import OperationTimedOut, ReadTimeout, ReadFailure, WriteTimeout, WriteFailure, AlreadyExists, \ + InvalidRequest from cassandra.cluster import Cluster from cassandra.protocol import ConfigurationException from cassandra.policies import RoundRobinPolicy @@ -411,7 +412,7 @@ def execute_until_pass(session, query): while tries < 100: try: return session.execute(query) - except (ConfigurationException, AlreadyExists): + except (ConfigurationException, AlreadyExists, InvalidRequest): log.warn("Received already exists from query {0} not exiting".format(query)) # keyspace/table was already created/dropped return diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py index 7f0bed23b4..fe7610eefd 100644 --- a/tests/integration/long/test_failure_types.py +++ b/tests/integration/long/test_failure_types.py @@ -322,8 +322,6 @@ def setUp(self): """ Setup sessions and pause node1 """ - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) - self.session = self.cluster.connect() # self.node1, self.node2, self.node3 = get_cluster().nodes.values() self.node1 = get_node(1) diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index a93419ea85..9a1f7010b2 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -650,5 +650,5 @@ def test_white_list(self): self.fail() except NoHostAvailable: pass - - cluster.shutdown() + finally: + cluster.shutdown() diff --git a/tests/integration/long/test_schema.py b/tests/integration/long/test_schema.py index d2131ed8cf..49f88fb4d6 100644 --- a/tests/integration/long/test_schema.py +++ b/tests/integration/long/test_schema.py @@ -140,7 +140,8 @@ def test_for_schema_disagreement_attribute(self): self.check_and_wait_for_agreement(session, rs, False) rs = session.execute("DROP KEYSPACE test_schema_disagreement") self.check_and_wait_for_agreement(session, rs, False) - + cluster.shutdown() + # These should have schema agreement cluster = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=100) session = cluster.connect() diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index b40eb32bcc..1c66c8654f 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -813,7 +813,9 @@ def test_clone_shared_lbp(self): rr1 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy()) exec_profiles = {'rr1': rr1} with Cluster(execution_profiles=exec_profiles) as cluster: - session = cluster.connect() + session = cluster.connect(wait_for_all_pools=True) + self.assertGreater(len(cluster.metadata.all_hosts()), 1, "We only have one host connected at this point") + rr1_clone = session.execution_profile_clone_update('rr1', row_factory=tuple_factory) cluster.add_execution_profile("rr1_clone", rr1_clone) rr1_queried_hosts = set() @@ -895,10 +897,11 @@ def test_add_profile_timeout(self): for i in range(max_retry_count): start = time.time() try: - self.assertRaises(cassandra.OperationTimedOut, cluster.add_execution_profile, 'node2', + self.assertRaises(cassandra.OperationTimedOut, cluster.add_execution_profile, + 'profile_{0}'.format(i), node2, pool_wait_timeout=sys.float_info.min) break - except Exception: + except AssertionError: end = time.time() self.assertAlmostEqual(start, end, 1) else: @@ -1176,7 +1179,6 @@ def test_invalid_protocol_version_beta_option(self): cluster.connect() except Exception as e: self.fail("Unexpected error encountered {0}".format(e.message)) - cluster.shutdown() @protocolv5 def test_valid_protocol_version_beta_options_connect(self): diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 625617f696..b6ef97b708 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -143,6 +143,8 @@ def test_unavailable(self): self.assertTrue(results) # Stop node gracefully + # Sometimes this commands continues with the other nodes having not noticed + # 1 is down, and a Timeout error is returned instead of an Unavailable get_node(1).stop(wait=True, wait_other_notice=True) try: diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index d515a2aa2e..9a7787fb31 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -142,14 +142,6 @@ def test_decimal(self): # int, tuple(sign, digits, exp), float converted_types = (10001, (0, (1, 0, 0, 0, 0, 1), -3), 100.1, -87.629798) - if sys.version_info < (2, 7): - # Decimal in Python 2.6 does not accept floats for lossless initialization - # Just verifying expected exception here - f = converted_types[-1] - self.assertIsInstance(f, float) - self.assertRaises(TypeError, DecimalType.to_binary, f, ProtocolVersion.MAX_SUPPORTED) - converted_types = converted_types[:-1] - for proto_ver in range(1, ProtocolVersion.MAX_SUPPORTED + 1): for n in converted_types: expected = Decimal(n) From 78660f455a32bb73e7b7b601ad51bb919bcbd191 Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 22 Mar 2017 12:43:01 -0400 Subject: [PATCH 1027/2005] Testing for PYTHON-665 - Fixed failing unit tests after Session.__init__ change - Added integration test for PYTHON-665 --- tests/integration/standard/test_cluster.py | 25 +++++++++++++- tests/unit/test_cluster.py | 39 ++++++++++++++-------- tests/unit/test_concurrent.py | 6 +++- tests/unit/utils.py | 18 ++++++++++ 4 files changed, 72 insertions(+), 16 deletions(-) create mode 100644 tests/unit/utils.py diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index b40eb32bcc..79da6560b7 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -25,11 +25,12 @@ import logging import cassandra -from cassandra.cluster import Cluster, NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT +from cassandra.cluster import Cluster, Session, NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.concurrent import execute_concurrent from cassandra.policies import (RoundRobinPolicy, ExponentialReconnectionPolicy, RetryPolicy, SimpleConvictionPolicy, HostDistance, WhiteListRoundRobinPolicy, AddressTranslator) +from cassandra.pool import Host from cassandra.query import SimpleStatement, TraceUnavailable, tuple_factory @@ -177,6 +178,28 @@ def test_basic(self): cluster.shutdown() + def test_session_host_parameter(self): + """ + Test for protocol negotiation + + Very that NoHostAvailable is risen in Session.__init__ when there are no valid connections and that + no error is arisen otherwise, despite maybe being some invalid hosts + + @since 3.9 + @jira_ticket PYTHON-665 + @expected_result NoHostAvailable when the driver is unable to connect to a valid host, + no exception otherwise + + @test_category connection + """ + with self.assertRaises(NoHostAvailable): + Session(Cluster(), []) + with self.assertRaises(NoHostAvailable): + Session(Cluster(), [Host("1.2.3.4", SimpleConvictionPolicy)]) + session = Session(Cluster(), [Host(x, SimpleConvictionPolicy) for x in + ("127.0.0.1", "127.0.0.2", "1.2.3.4")]) + session.shutdown() + def test_protocol_negotiation(self): """ Test for protocol negotiation diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index b3337f6c4d..2532d8c76d 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -21,10 +21,12 @@ from cassandra import ConsistencyLevel, DriverException, Timeout, Unavailable, RequestExecutionException, ReadTimeout, WriteTimeout, CoordinationFailure, ReadFailure, WriteFailure, FunctionFailure, AlreadyExists,\ InvalidRequest, Unauthorized, AuthenticationFailed, OperationTimedOut, UnsupportedOperation, RequestValidationException, ConfigurationException from cassandra.cluster import _Scheduler, Session, Cluster, _NOT_SET, default_lbp_factory, \ - ExecutionProfile, _ConfigMode, EXEC_PROFILE_DEFAULT -from cassandra.policies import HostDistance, RetryPolicy, RoundRobinPolicy, DowngradingConsistencyRetryPolicy + ExecutionProfile, _ConfigMode, EXEC_PROFILE_DEFAULT, NoHostAvailable +from cassandra.policies import HostDistance, RetryPolicy, RoundRobinPolicy, \ + DowngradingConsistencyRetryPolicy, SimpleConvictionPolicy from cassandra.query import SimpleStatement, named_tuple_factory, tuple_factory - +from cassandra.pool import Host +from tests.unit.utils import mock_session_pools class ExceptionTypeTest(unittest.TestCase): @@ -121,7 +123,7 @@ def test_event_delay_timing(self, *_): class SessionTest(unittest.TestCase): # TODO: this suite could be expanded; for now just adding a test covering a PR - + @mock_session_pools def test_default_serial_consistency_level(self, *_): """ Make sure default_serial_consistency_level passes through to a query message. @@ -129,7 +131,7 @@ def test_default_serial_consistency_level(self, *_): PR #510 """ - s = Session(Cluster(protocol_version=4), []) + s = Session(Cluster(protocol_version=4), [Host("127.0.0.1", SimpleConvictionPolicy)]) # default is None self.assertIsNone(s.default_serial_consistency_level) @@ -159,21 +161,23 @@ def _verify_response_future_profile(self, rf, prof): self.assertEqual(rf.timeout, prof.request_timeout) self.assertEqual(rf.row_factory, prof.row_factory) + @mock_session_pools def test_default_exec_parameters(self): cluster = Cluster() self.assertEqual(cluster._config_mode, _ConfigMode.UNCOMMITTED) self.assertEqual(cluster.load_balancing_policy.__class__, default_lbp_factory().__class__) self.assertEqual(cluster.default_retry_policy.__class__, RetryPolicy) - session = Session(cluster, hosts=[]) + session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)]) self.assertEqual(session.default_timeout, 10.0) self.assertEqual(session.default_consistency_level, ConsistencyLevel.LOCAL_ONE) self.assertEqual(session.default_serial_consistency_level, None) self.assertEqual(session.row_factory, named_tuple_factory) + @mock_session_pools def test_default_legacy(self): cluster = Cluster(load_balancing_policy=RoundRobinPolicy(), default_retry_policy=DowngradingConsistencyRetryPolicy()) self.assertEqual(cluster._config_mode, _ConfigMode.LEGACY) - session = Session(cluster, hosts=[]) + session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)]) session.default_timeout = 3.7 session.default_consistency_level = ConsistencyLevel.ALL session.default_serial_consistency_level = ConsistencyLevel.SERIAL @@ -183,10 +187,11 @@ def test_default_legacy(self): session.default_timeout, session.row_factory) self._verify_response_future_profile(rf, expected_profile) + @mock_session_pools def test_default_profile(self): non_default_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(3)]) cluster = Cluster(execution_profiles={'non-default': non_default_profile}) - session = Session(cluster, hosts=[]) + session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)]) self.assertEqual(cluster._config_mode, _ConfigMode.PROFILES) @@ -197,10 +202,11 @@ def test_default_profile(self): rf = session.execute_async("query", execution_profile='non-default') self._verify_response_future_profile(rf, non_default_profile) + @mock_session_pools def test_statement_params_override_legacy(self): cluster = Cluster(load_balancing_policy=RoundRobinPolicy(), default_retry_policy=DowngradingConsistencyRetryPolicy()) self.assertEqual(cluster._config_mode, _ConfigMode.LEGACY) - session = Session(cluster, hosts=[]) + session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)]) ss = SimpleStatement("query", retry_policy=DowngradingConsistencyRetryPolicy(), consistency_level=ConsistencyLevel.ALL, serial_consistency_level=ConsistencyLevel.SERIAL) @@ -217,10 +223,11 @@ def test_statement_params_override_legacy(self): serial_consistency_level=ss._serial_consistency_level) self._verify_response_future_profile(rf, expected_profile) + @mock_session_pools def test_statement_params_override_profile(self): non_default_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(3)]) cluster = Cluster(execution_profiles={'non-default': non_default_profile}) - session = Session(cluster, hosts=[]) + session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)]) self.assertEqual(cluster._config_mode, _ConfigMode.PROFILES) @@ -240,6 +247,7 @@ def test_statement_params_override_profile(self): ss.consistency_level, ss._serial_consistency_level, my_timeout, non_default_profile.row_factory) self._verify_response_future_profile(rf, expected_profile) + @mock_session_pools def test_no_profile_with_legacy(self): # don't construct with both self.assertRaises(ValueError, Cluster, load_balancing_policy=RoundRobinPolicy(), execution_profiles={'a': ExecutionProfile()}) @@ -253,7 +261,7 @@ def test_no_profile_with_legacy(self): # session settings lock out profiles cluster = Cluster() - session = Session(cluster, hosts=[]) + session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)]) for attr, value in (('default_timeout', 1), ('default_consistency_level', ConsistencyLevel.ANY), ('default_serial_consistency_level', ConsistencyLevel.SERIAL), @@ -265,6 +273,7 @@ def test_no_profile_with_legacy(self): # don't accept profile self.assertRaises(ValueError, session.execute_async, "query", execution_profile='some name here') + @mock_session_pools def test_no_legacy_with_profile(self): cluster_init = Cluster(execution_profiles={'name': ExecutionProfile()}) cluster_add = Cluster() @@ -275,18 +284,19 @@ def test_no_legacy_with_profile(self): for attr, value in (('default_retry_policy', RetryPolicy()), ('load_balancing_policy', default_lbp_factory())): self.assertRaises(ValueError, setattr, cluster, attr, value) - session = Session(cluster, hosts=[]) + session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)]) for attr, value in (('default_timeout', 1), ('default_consistency_level', ConsistencyLevel.ANY), ('default_serial_consistency_level', ConsistencyLevel.SERIAL), ('row_factory', tuple_factory)): self.assertRaises(ValueError, setattr, session, attr, value) + @mock_session_pools def test_profile_name_value(self): internalized_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(3)]) cluster = Cluster(execution_profiles={'by-name': internalized_profile}) - session = Session(cluster, hosts=[]) + session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)]) self.assertEqual(cluster._config_mode, _ConfigMode.PROFILES) rf = session.execute_async("query", execution_profile='by-name') @@ -296,10 +306,11 @@ def test_profile_name_value(self): rf = session.execute_async("query", execution_profile=by_value) self._verify_response_future_profile(rf, by_value) + @mock_session_pools def test_exec_profile_clone(self): cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(), 'one': ExecutionProfile()}) - session = Session(cluster, hosts=[]) + session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)]) profile_attrs = {'request_timeout': 1, 'consistency_level': ConsistencyLevel.ANY, diff --git a/tests/unit/test_concurrent.py b/tests/unit/test_concurrent.py index b0b0f05a9e..ca7bd09623 100644 --- a/tests/unit/test_concurrent.py +++ b/tests/unit/test_concurrent.py @@ -28,6 +28,9 @@ from cassandra.cluster import Cluster, Session from cassandra.concurrent import execute_concurrent, execute_concurrent_with_args +from cassandra.pool import Host +from cassandra.policies import SimpleConvictionPolicy +from tests.unit.utils import mock_session_pools class MockResponseResponseFuture(): @@ -239,6 +242,7 @@ def validate_result_ordering(self, results): self.assertLess(last_time_added, current_time_added) last_time_added = current_time_added + @mock_session_pools def test_recursion_limited(self): """ Verify that recursion is controlled when raise_on_first_error=False and something is wrong with the query. @@ -246,7 +250,7 @@ def test_recursion_limited(self): PYTHON-585 """ max_recursion = sys.getrecursionlimit() - s = Session(Cluster(), []) + s = Session(Cluster(), [Host("127.0.0.1", SimpleConvictionPolicy)]) self.assertRaises(TypeError, execute_concurrent_with_args, s, "doesn't matter", [('param',)] * max_recursion, raise_on_first_error=True) results = execute_concurrent_with_args(s, "doesn't matter", [('param',)] * max_recursion, raise_on_first_error=False) # previously diff --git a/tests/unit/utils.py b/tests/unit/utils.py new file mode 100644 index 0000000000..b3ac113194 --- /dev/null +++ b/tests/unit/utils.py @@ -0,0 +1,18 @@ +from concurrent.futures import Future +from functools import wraps +from cassandra.cluster import Session +from mock import patch + +def mock_session_pools(f): + """ + Helper decorator that allows tests to initialize :class:.`Session` objects + without actually connecting to a Cassandra cluster. + """ + @wraps(f) + def wrapper(*args, **kwargs): + with patch.object(Session, "add_or_renew_pool") as mocked_add_or_renew_pool: + future = Future() + future.set_result(object()) + mocked_add_or_renew_pool.return_value = future + f(*args, **kwargs) + return wrapper From a16f5f7b85536014a855a92168229e2ae887703d Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 23 Mar 2017 09:23:52 -0400 Subject: [PATCH 1028/2005] Added protocol version to test_session_host_parameter --- tests/integration/standard/test_cluster.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 79da6560b7..e64d741e30 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -193,10 +193,10 @@ def test_session_host_parameter(self): @test_category connection """ with self.assertRaises(NoHostAvailable): - Session(Cluster(), []) + Session(Cluster(protocol_version=PROTOCOL_VERSION), []) with self.assertRaises(NoHostAvailable): - Session(Cluster(), [Host("1.2.3.4", SimpleConvictionPolicy)]) - session = Session(Cluster(), [Host(x, SimpleConvictionPolicy) for x in + Session(Cluster(protocol_version=PROTOCOL_VERSION), [Host("1.2.3.4", SimpleConvictionPolicy)]) + session = Session(Cluster(protocol_version=PROTOCOL_VERSION), [Host(x, SimpleConvictionPolicy) for x in ("127.0.0.1", "127.0.0.2", "1.2.3.4")]) session.shutdown() From 2f47415816e30181db0cb3a0afdb0f0ff4d51297 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 24 Mar 2017 13:05:10 -0400 Subject: [PATCH 1029/2005] a cqlengine column with a default value should not be updated if not specified explicitly --- cassandra/cqlengine/models.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 88bd535595..42b2832ebf 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -407,6 +407,8 @@ def __init__(self, **values): value = column.to_python(value) value_mngr = column.value_manager(self, column, value) value_mngr.explicit = name in values + if not value_mngr.explicit and column.has_default: + value_mngr.previous_value = value self._values[name] = value_mngr def __repr__(self): From d1460529946568fc1ff1b3a24d9a7ad177745624 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 24 Mar 2017 16:33:20 -0400 Subject: [PATCH 1030/2005] Fix some issues related to default values --- cassandra/cqlengine/columns.py | 2 +- tests/integration/cqlengine/model/test_model_io.py | 10 +++++++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 649dd3a9fd..be204a891e 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -37,7 +37,7 @@ def __init__(self, instance, column, value): @property def deleted(self): - return self.column._val_is_null(self.value) and (self.explicit or self.previous_value is not None) + return self.column._val_is_null(self.value) and (self.explicit or not self.column._val_is_null(self.previous_value)) @property def changed(self): diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index c5fd5e37ca..65bb4d214c 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -514,16 +514,20 @@ class TestDefaultValueTracking(Model): # yet. self.assertTrue(instance._values['id'].previous_value is None) self.assertTrue(instance._values['int1'].previous_value is None) - self.assertTrue(instance._values['int2'].previous_value is None) self.assertTrue(instance._values['int3'].previous_value is None) - self.assertTrue(instance._values['int4'].previous_value is None) self.assertTrue(instance._values['int5'].previous_value is None) self.assertTrue(instance._values['int6'].previous_value is None) + # When a column has a default value and that field has no explicit value specified at + # the instance creation, the previous_value should be set to the default value to + # avoid any undesired update + self.assertEqual(instance._values['int2'].previous_value, 456) + self.assertIsNotNone(instance._values['int4']) + # All explicitely set columns, and those with default values are # flagged has changed. self.assertTrue(set(instance.get_changed_columns()) == set([ - 'id', 'int1', 'int2', 'int3', 'int4', 'int5'])) + 'id', 'int1', 'int3', 'int5'])) def test_save_to_none(self): """ From 4aa6c6e98cffac94f6adbad9b022a2c0b46ae0a5 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 27 Mar 2017 11:53:05 -0400 Subject: [PATCH 1031/2005] repro PYTHON-657 --- .../cqlengine/model/test_model_io.py | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index 65bb4d214c..a1cce730dc 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -22,6 +22,7 @@ from decimal import Decimal from operator import itemgetter +import cassandra from cassandra.cqlengine import columns from cassandra.cqlengine import CQLEngineException from cassandra.cqlengine.management import sync_table @@ -482,6 +483,37 @@ def test_previous_value_tracking_on_instantiation(self): self.assertTrue(self.instance._values['count'].previous_value is None) self.assertTrue(self.instance.count is None) + def test_value_override_with_default(self): + """ + Updating a row with a new Model instance shouldn't set columns to defaults + + @since 3.9 + @jira_ticket PYTHON-657 + @expected_result column value should not change + + @test_category object_mapper + """ + class ModelWithDefault(Model): + id = columns.Integer(primary_key=True) + mf = columns.Map(columns.Integer, columns.Integer) + dummy = columns.Integer(default=42) + sync_table(ModelWithDefault) + + initial = ModelWithDefault(id=1, mf={0: 0}, dummy=0) + initial.save() + + session = cassandra.cluster.Cluster().connect() + session.execute('USE ' + DEFAULT_KEYSPACE) + self.assertEqual( + list(session.execute('SELECT * from model_with_default'))[0].dummy, 0 + ) + + second = ModelWithDefault(id=1) + second.update(mf={0: 1}) + self.assertEqual( + list(session.execute('SELECT * from model_with_default'))[0].dummy, 0 + ) + def test_previous_value_tracking_on_instantiation_with_default(self): class TestDefaultValueTracking(Model): From 589c15abeee37667824c270d8fc3db69912f63e8 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 22 Mar 2017 20:16:37 -0400 Subject: [PATCH 1032/2005] persisted values should not be set explicit --- cassandra/cqlengine/models.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 88bd535595..d0e459c199 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -490,6 +490,7 @@ def _construct_instance(cls, values): def _set_persisted(self): for v in self._values.values(): v.reset_previous_value() + v.explicit = False self._is_persisted = True def _can_update(self): From ca9a54707303ed0c2897beac1bb7f16407e3cfca Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 30 Mar 2017 14:40:45 -0400 Subject: [PATCH 1033/2005] initialize libevreactor for cluster unit tests These tests weren't failing on CI, because CI runs all the unit tests together, and LibevConnection.initialize_reactor is called in the setup for other tests, in particular tests.unit.io.test_libev{reactor,timer}. --- tests/unit/test_cluster.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 2532d8c76d..438f5b3388 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -28,6 +28,15 @@ from cassandra.pool import Host from tests.unit.utils import mock_session_pools +try: + from dse.io.libevreactor import LibevConnection +except ImportError: + LibevConnection = None # noqa + +def setUp(): + LibevConnection.initialize_reactor() + + class ExceptionTypeTest(unittest.TestCase): def test_exception_types(self): From 8bc8c54b430a558bdfae380ede30e3dd7017897b Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 30 Mar 2017 15:47:27 -0400 Subject: [PATCH 1034/2005] tentative fix for cqlengine tracking issues --- cassandra/cqlengine/columns.py | 3 ++- cassandra/cqlengine/models.py | 11 +++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index be204a891e..35a70e9023 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -47,7 +47,7 @@ def changed(self): :rtype: boolean """ - return self.value != self.previous_value + return self.explicit and self.value != self.previous_value def reset_previous_value(self): self.previous_value = deepcopy(self.value) @@ -57,6 +57,7 @@ def getval(self): def setval(self, val): self.value = val + self.explicit = True def delval(self): self.value = None diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 280f716130..f60190873c 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -407,8 +407,6 @@ def __init__(self, **values): value = column.to_python(value) value_mngr = column.value_manager(self, column, value) value_mngr.explicit = name in values - if not value_mngr.explicit and column.has_default: - value_mngr.previous_value = value self._values[name] = value_mngr def __repr__(self): @@ -490,7 +488,8 @@ def _construct_instance(cls, values): return instance def _set_persisted(self): - for v in self._values.values(): + # ensure we don't modify to any values not affected by the last save/update + for v in [v for v in self._values.values() if v.changed]: v.reset_previous_value() v.explicit = False self._is_persisted = True @@ -591,6 +590,10 @@ def _raw_column_family_name(cls): return cls._table_name + def _set_column_value(self, name, value): + """Function to change a column value without changing then value manager states""" + self._values[name].value = value # internal assignement, skip the main setter + def validate(self): """ Cleans and validates the field values @@ -600,7 +603,7 @@ def validate(self): if v is None and not self._values[name].explicit and col.has_default: v = col.get_default() val = col.validate(v) - setattr(self, name, val) + self._set_column_value(name, val) # Let an instance be used like a dict of its columns keys/values def __iter__(self): From a0abe34c55803c154b68e700d18263919f9d29d4 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 30 Mar 2017 16:17:18 -0400 Subject: [PATCH 1035/2005] typo --- cassandra/cqlengine/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index f60190873c..9bb9104934 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -591,7 +591,7 @@ def _raw_column_family_name(cls): return cls._table_name def _set_column_value(self, name, value): - """Function to change a column value without changing then value manager states""" + """Function to change a column value without changing the value manager states""" self._values[name].value = value # internal assignement, skip the main setter def validate(self): From 27c33beb3b9ee8023b299f6b6b57715fa7abab49 Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 31 Mar 2017 10:05:12 -0400 Subject: [PATCH 1036/2005] Changed import in tests --- tests/unit/test_cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 438f5b3388..abc179a7bc 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -29,7 +29,7 @@ from tests.unit.utils import mock_session_pools try: - from dse.io.libevreactor import LibevConnection + from cassandra.io.libevreactor import LibevConnection except ImportError: LibevConnection = None # noqa From 8025dc6c71dfe0975ec4f6c19b6f4d7f308de8e2 Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 31 Mar 2017 11:26:48 -0400 Subject: [PATCH 1037/2005] Skipped some tests if LibevConnection is not installed --- tests/unit/test_cluster.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index abc179a7bc..d4f5b8d49a 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -33,9 +33,6 @@ except ImportError: LibevConnection = None # noqa -def setUp(): - LibevConnection.initialize_reactor() - class ExceptionTypeTest(unittest.TestCase): @@ -131,6 +128,11 @@ def test_event_delay_timing(self, *_): class SessionTest(unittest.TestCase): + def setUp(self): + if LibevConnection is None: + raise unittest.SkipTest('libev does not appear to be installed correctly') + LibevConnection.initialize_reactor() + # TODO: this suite could be expanded; for now just adding a test covering a PR @mock_session_pools def test_default_serial_consistency_level(self, *_): @@ -161,6 +163,10 @@ def test_default_serial_consistency_level(self, *_): class ExecutionProfileTest(unittest.TestCase): + def setUp(self): + if LibevConnection is None: + raise unittest.SkipTest('libev does not appear to be installed correctly') + LibevConnection.initialize_reactor() def _verify_response_future_profile(self, rf, prof): self.assertEqual(rf._load_balancer, prof.load_balancing_policy) From 7716cc98abe09986f6e4a93bbc1410eac937f429 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 31 Mar 2017 14:09:27 -0400 Subject: [PATCH 1038/2005] cqlengine complex logic to determine if a container change or not --- cassandra/cqlengine/columns.py | 15 ++++++++++++++- .../integration/cqlengine/model/test_model_io.py | 8 ++------ 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 35a70e9023..79787c2dad 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -47,7 +47,20 @@ def changed(self): :rtype: boolean """ - return self.explicit and self.value != self.previous_value + if self.explicit: + return self.value != self.previous_value + + if isinstance(self.column, BaseContainerColumn): + default_value = self.column.get_default() + if self.column._val_is_null(default_value): + return not self.column._val_is_null(self.value) and self.value != self.previous_value + else: + if self.previous_value is None: + return self.value != default_value + else: + return self.value != self.previous_value + + return False def reset_previous_value(self): self.previous_value = deepcopy(self.value) diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index a1cce730dc..fe4faf7afa 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -546,16 +546,12 @@ class TestDefaultValueTracking(Model): # yet. self.assertTrue(instance._values['id'].previous_value is None) self.assertTrue(instance._values['int1'].previous_value is None) + self.assertTrue(instance._values['int2'].previous_value is None) self.assertTrue(instance._values['int3'].previous_value is None) + self.assertTrue(instance._values['int4'].previous_value is None) self.assertTrue(instance._values['int5'].previous_value is None) self.assertTrue(instance._values['int6'].previous_value is None) - # When a column has a default value and that field has no explicit value specified at - # the instance creation, the previous_value should be set to the default value to - # avoid any undesired update - self.assertEqual(instance._values['int2'].previous_value, 456) - self.assertIsNotNone(instance._values['int4']) - # All explicitely set columns, and those with default values are # flagged has changed. self.assertTrue(set(instance.get_changed_columns()) == set([ From 45ceafead312941f0cb4d0963eb29b6aa67d134b Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 31 Mar 2017 14:51:06 -0400 Subject: [PATCH 1039/2005] Patched only time in eventlet and gevent unit tests --- tests/__init__.py | 10 ++++++++++ tests/unit/io/eventlet_utils.py | 18 ++++++++++++++---- tests/unit/io/test_eventletreactor.py | 25 ++++++++++++++++++++----- tests/unit/io/test_geventreactor.py | 12 ++++++------ 4 files changed, 50 insertions(+), 15 deletions(-) diff --git a/tests/__init__.py b/tests/__init__.py index c21074e1e6..852de9a820 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -44,6 +44,16 @@ def is_gevent_monkey_patched(): return socket.socket is gevent.socket.socket +def is_gevent_time_monkey_patched(): + import gevent.monkey + return "time" in gevent.monkey.saved + + +def is_eventlet_time_monkey_patched(): + import eventlet + return eventlet.patcher.is_monkey_patched('time') + + def is_monkey_patched(): return is_gevent_monkey_patched() or is_eventlet_monkey_patched() diff --git a/tests/unit/io/eventlet_utils.py b/tests/unit/io/eventlet_utils.py index e06d3f777f..05ef6bc939 100644 --- a/tests/unit/io/eventlet_utils.py +++ b/tests/unit/io/eventlet_utils.py @@ -16,13 +16,20 @@ import os import select import socket -import thread -import Queue +try: + import thread + import Queue + import __builtin__ +except: + import _thread as thread + import queue as Queue + import builtins + import threading -import __builtin__ import ssl import time - +import eventlet +from imp import reload def eventlet_un_patch_all(): """ @@ -34,4 +41,7 @@ def eventlet_un_patch_all(): for to_unpatch in modules_to_unpatch: reload(to_unpatch) +def restore_saved_module(module): + reload(module) + del eventlet.patcher.already_patched[module.__name__] diff --git a/tests/unit/io/test_eventletreactor.py b/tests/unit/io/test_eventletreactor.py index 6aa76fa790..44af1f40e8 100644 --- a/tests/unit/io/test_eventletreactor.py +++ b/tests/unit/io/test_eventletreactor.py @@ -19,8 +19,10 @@ import unittest # noqa from tests.unit.io.utils import submit_and_wait_for_completion, TimerCallback -from tests import is_eventlet_monkey_patched +from tests import is_eventlet_time_monkey_patched, is_gevent_time_monkey_patched +from tests.unit.io.eventlet_utils import restore_saved_module import time +from eventlet import monkey_patch try: from cassandra.io.eventletreactor import EventletConnection @@ -29,12 +31,25 @@ class EventletTimerTest(unittest.TestCase): + need_unpatch = False + + @classmethod + def setUpClass(cls): + if is_eventlet_time_monkey_patched(): + return # no dynamic patching if we have eventlet applied + if EventletConnection is not None: + if not is_gevent_time_monkey_patched(): + cls.need_unpatch = True + monkey_patch(time=True) + + @classmethod + def tearDownClass(cls): + if cls.need_unpatch: + restore_saved_module(time) def setUp(self): - if EventletConnection is None: - raise unittest.SkipTest("Eventlet libraries not available") - if not is_eventlet_monkey_patched(): - raise unittest.SkipTest("Can't test eventlet without monkey patching") + if not is_eventlet_time_monkey_patched(): + raise unittest.SkipTest("Can't test gevent without monkey patching") EventletConnection.initialize_reactor() def test_multi_timer_validation(self, *args): diff --git a/tests/unit/io/test_geventreactor.py b/tests/unit/io/test_geventreactor.py index 4a8ba30748..ed5f9281dc 100644 --- a/tests/unit/io/test_geventreactor.py +++ b/tests/unit/io/test_geventreactor.py @@ -19,12 +19,12 @@ import time from tests.unit.io.utils import submit_and_wait_for_completion, TimerCallback -from tests import is_gevent_monkey_patched, is_eventlet_monkey_patched +from tests import is_gevent_time_monkey_patched, is_eventlet_monkey_patched try: from cassandra.io.geventreactor import GeventConnection import gevent.monkey - from gevent_utils import gevent_un_patch_all + from tests.unit.io.gevent_utils import restore_saved_module except ImportError: GeventConnection = None # noqa @@ -38,17 +38,17 @@ def setUpClass(cls): if is_eventlet_monkey_patched(): return # no dynamic patching if we have eventlet applied if GeventConnection is not None: - if not is_gevent_monkey_patched(): + if not is_gevent_time_monkey_patched(): cls.need_unpatch = True - gevent.monkey.patch_all() + gevent.monkey.patch_time() @classmethod def tearDownClass(cls): if cls.need_unpatch: - gevent_un_patch_all() + restore_saved_module("time") def setUp(self): - if not is_gevent_monkey_patched(): + if not is_gevent_time_monkey_patched(): raise unittest.SkipTest("Can't test gevent without monkey patching") GeventConnection.initialize_reactor() From 4c9ede85a61985ae77b2890587552363be29569a Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 31 Mar 2017 15:10:00 -0400 Subject: [PATCH 1040/2005] Reorganized eventlet and gevent tests in unit tests --- tests/unit/io/test_eventletreactor.py | 57 +++++--------------------- tests/unit/io/test_geventreactor.py | 59 +++++---------------------- tests/unit/io/utils.py | 36 ++++++++++++++++ 3 files changed, 56 insertions(+), 96 deletions(-) diff --git a/tests/unit/io/test_eventletreactor.py b/tests/unit/io/test_eventletreactor.py index 44af1f40e8..761f9cf676 100644 --- a/tests/unit/io/test_eventletreactor.py +++ b/tests/unit/io/test_eventletreactor.py @@ -18,7 +18,7 @@ except ImportError: import unittest # noqa -from tests.unit.io.utils import submit_and_wait_for_completion, TimerCallback +from tests.unit.io.utils import TimerConnectionTests from tests import is_eventlet_time_monkey_patched, is_gevent_time_monkey_patched from tests.unit.io.eventlet_utils import restore_saved_module import time @@ -30,53 +30,16 @@ EventletConnection = None # noqa -class EventletTimerTest(unittest.TestCase): - need_unpatch = False - - @classmethod - def setUpClass(cls): - if is_eventlet_time_monkey_patched(): - return # no dynamic patching if we have eventlet applied - if EventletConnection is not None: - if not is_gevent_time_monkey_patched(): - cls.need_unpatch = True - monkey_patch(time=True) - - @classmethod - def tearDownClass(cls): - if cls.need_unpatch: - restore_saved_module(time) - +class EventletTimerTest(unittest.TestCase, TimerConnectionTests): def setUp(self): - if not is_eventlet_time_monkey_patched(): - raise unittest.SkipTest("Can't test gevent without monkey patching") + if not EventletConnection: + raise unittest.SkipTest("Can't test eventlet without monkey patching") + monkey_patch(time=True) EventletConnection.initialize_reactor() - def test_multi_timer_validation(self, *args): - """ - Verify that timer timeouts are honored appropriately - """ - # Tests timers submitted in order at various timeouts - submit_and_wait_for_completion(self, EventletConnection, 0, 100, 1, 100) - # Tests timers submitted in reverse order at various timeouts - submit_and_wait_for_completion(self, EventletConnection, 100, 0, -1, 100) - # Tests timers submitted in varying order at various timeouts - submit_and_wait_for_completion(self, EventletConnection, 0, 100, 1, 100, True) - - def test_timer_cancellation(self): - """ - Verify that timer cancellation is honored - """ + def tearDown(self): + restore_saved_module(time) + EventletConnection._timers = None - # Various lists for tracking callback stage - timeout = .1 - callback = TimerCallback(timeout) - timer = EventletConnection.create_timer(timeout, callback.invoke) - timer.cancel() - # Release context allow for timer thread to run. - time.sleep(.2) - timer_manager = EventletConnection._timers - # Assert that the cancellation was honored - self.assertFalse(timer_manager._queue) - self.assertFalse(timer_manager._new_timers) - self.assertFalse(callback.was_invoked()) + def getClass(self): + return EventletConnection diff --git a/tests/unit/io/test_geventreactor.py b/tests/unit/io/test_geventreactor.py index ed5f9281dc..f0ee1ef2e3 100644 --- a/tests/unit/io/test_geventreactor.py +++ b/tests/unit/io/test_geventreactor.py @@ -17,9 +17,9 @@ except ImportError: import unittest # noqa -import time -from tests.unit.io.utils import submit_and_wait_for_completion, TimerCallback + from tests import is_gevent_time_monkey_patched, is_eventlet_monkey_patched +from tests.unit.io.utils import TimerConnectionTests try: from cassandra.io.geventreactor import GeventConnection @@ -29,55 +29,16 @@ GeventConnection = None # noqa -class GeventTimerTest(unittest.TestCase): - - need_unpatch = False - - @classmethod - def setUpClass(cls): - if is_eventlet_monkey_patched(): - return # no dynamic patching if we have eventlet applied - if GeventConnection is not None: - if not is_gevent_time_monkey_patched(): - cls.need_unpatch = True - gevent.monkey.patch_time() - - @classmethod - def tearDownClass(cls): - if cls.need_unpatch: - restore_saved_module("time") - +class GeventTimerTest(unittest.TestCase, TimerConnectionTests): def setUp(self): - if not is_gevent_time_monkey_patched(): + if not GeventConnection: raise unittest.SkipTest("Can't test gevent without monkey patching") + gevent.monkey.patch_time() GeventConnection.initialize_reactor() - def test_multi_timer_validation(self): - """ - Verify that timer timeouts are honored appropriately - """ - - # Tests timers submitted in order at various timeouts - submit_and_wait_for_completion(self, GeventConnection, 0, 100, 1, 100) - # Tests timers submitted in reverse order at various timeouts - submit_and_wait_for_completion(self, GeventConnection, 100, 0, -1, 100) - # Tests timers submitted in varying order at various timeouts - submit_and_wait_for_completion(self, GeventConnection, 0, 100, 1, 100, True), - - def test_timer_cancellation(self): - """ - Verify that timer cancellation is honored - """ + def tearDown(self): + restore_saved_module("time") + GeventConnection._timers = None - # Various lists for tracking callback stage - timeout = .1 - callback = TimerCallback(timeout) - timer = GeventConnection.create_timer(timeout, callback.invoke) - timer.cancel() - # Release context allow for timer thread to run. - time.sleep(.2) - timer_manager = GeventConnection._timers - # Assert that the cancellation was honored - self.assertFalse(timer_manager._queue) - self.assertFalse(timer_manager._new_timers) - self.assertFalse(callback.was_invoked()) + def getClass(self): + return GeventConnection diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index 58ed78ea26..3a3179dc82 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -12,6 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + import time @@ -103,3 +108,34 @@ def submit_and_wait_for_completion(unit_test, connection, start, end, increment, # ensure they are all called back in a timely fashion for callback in completed_callbacks: unit_test.assertAlmostEqual(callback.expected_wait, callback.get_wait_time(), delta=.15) + + +class TimerConnectionTests(object): + def test_multi_timer_validation(self): + """ + Verify that timer timeouts are honored appropriately + """ + # Tests timers submitted in order at various timeouts + submit_and_wait_for_completion(self, self.getClass(), 0, 100, 1, 100) + # Tests timers submitted in reverse order at various timeouts + submit_and_wait_for_completion(self, self.getClass(), 100, 0, -1, 100) + # Tests timers submitted in varying order at various timeouts + submit_and_wait_for_completion(self, self.getClass(), 0, 100, 1, 100, True), + + def test_timer_cancellation(self): + """ + Verify that timer cancellation is honored + """ + + # Various lists for tracking callback stage + timeout = .1 + callback = TimerCallback(timeout) + timer = self.getClass().create_timer(timeout, callback.invoke) + timer.cancel() + # Release context allow for timer thread to run. + time.sleep(.2) + timer_manager = self.getClass()._timers + # Assert that the cancellation was honored + self.assertFalse(timer_manager._queue) + self.assertFalse(timer_manager._new_timers) + self.assertFalse(callback.was_invoked()) From a6991be255c6b5cdff9da9def988a8f31c3aad4c Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 31 Mar 2017 16:24:16 -0400 Subject: [PATCH 1041/2005] Skip tests if it's pypy --- tests/__init__.py | 1 + tests/unit/io/test_eventletreactor.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/__init__.py b/tests/__init__.py index 852de9a820..348e1df833 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -58,3 +58,4 @@ def is_monkey_patched(): return is_gevent_monkey_patched() or is_eventlet_monkey_patched() notwindows = unittest.skipUnless(not "Windows" in platform.system(), "This test is not adequate for windows") +notpypy = unittest.skipUnless(not '__pypy__' in sys.builtin_module_names, "This tests is not suitable for pypy") diff --git a/tests/unit/io/test_eventletreactor.py b/tests/unit/io/test_eventletreactor.py index 761f9cf676..bfc7a528cd 100644 --- a/tests/unit/io/test_eventletreactor.py +++ b/tests/unit/io/test_eventletreactor.py @@ -19,8 +19,8 @@ import unittest # noqa from tests.unit.io.utils import TimerConnectionTests -from tests import is_eventlet_time_monkey_patched, is_gevent_time_monkey_patched from tests.unit.io.eventlet_utils import restore_saved_module +from tests import notpypy import time from eventlet import monkey_patch @@ -31,7 +31,9 @@ class EventletTimerTest(unittest.TestCase, TimerConnectionTests): + @notpypy def setUp(self): + #There are some issues with some versions of pypy and eventlet if not EventletConnection: raise unittest.SkipTest("Can't test eventlet without monkey patching") monkey_patch(time=True) From 1c9b379059eda71c6dd39becb2ed0b9007e7624b Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 31 Mar 2017 19:28:58 -0400 Subject: [PATCH 1042/2005] Skipped by default eventlet and gevent tests in Unit tests --- tests/__init__.py | 5 +++++ tests/unit/io/test_eventletreactor.py | 19 ++++++++++++------- tests/unit/io/test_geventreactor.py | 5 ++++- tests/unit/io/test_libevtimer.py | 1 + 4 files changed, 22 insertions(+), 8 deletions(-) diff --git a/tests/__init__.py b/tests/__init__.py index 348e1df833..80fec0cca7 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -20,6 +20,7 @@ import sys import socket import platform +import os log = logging.getLogger() log.setLevel('DEBUG') @@ -57,5 +58,9 @@ def is_eventlet_time_monkey_patched(): def is_monkey_patched(): return is_gevent_monkey_patched() or is_eventlet_monkey_patched() + +MONKEY_PATCH_LOOP = bool(os.getenv('MONKEY_PATCH_LOOP', False)) + notwindows = unittest.skipUnless(not "Windows" in platform.system(), "This test is not adequate for windows") notpypy = unittest.skipUnless(not '__pypy__' in sys.builtin_module_names, "This tests is not suitable for pypy") +notmonkeypatch = unittest.skipUnless(MONKEY_PATCH_LOOP, "Skpping this test because monkey patching is required") \ No newline at end of file diff --git a/tests/unit/io/test_eventletreactor.py b/tests/unit/io/test_eventletreactor.py index bfc7a528cd..01445735b1 100644 --- a/tests/unit/io/test_eventletreactor.py +++ b/tests/unit/io/test_eventletreactor.py @@ -19,29 +19,34 @@ import unittest # noqa from tests.unit.io.utils import TimerConnectionTests -from tests.unit.io.eventlet_utils import restore_saved_module -from tests import notpypy +from tests.unit.io.eventlet_utils import restore_saved_module, eventlet_un_patch_all +from tests import notpypy, MONKEY_PATCH_LOOP import time -from eventlet import monkey_patch +from eventlet import monkey_patch, kill +import threading try: from cassandra.io.eventletreactor import EventletConnection except ImportError: EventletConnection = None # noqa - class EventletTimerTest(unittest.TestCase, TimerConnectionTests): + # There are some issues with some versions of pypy and eventlet @notpypy def setUp(self): - #There are some issues with some versions of pypy and eventlet + if not MONKEY_PATCH_LOOP: + raise unittest.SkipTest("Skpping because monkey patching may affect the rest of the tests") + if not EventletConnection: raise unittest.SkipTest("Can't test eventlet without monkey patching") - monkey_patch(time=True) + + monkey_patch(thread=False, time=True) EventletConnection.initialize_reactor() def tearDown(self): - restore_saved_module(time) + kill(EventletConnection._timeout_watcher) EventletConnection._timers = None + restore_saved_module(time) def getClass(self): return EventletConnection diff --git a/tests/unit/io/test_geventreactor.py b/tests/unit/io/test_geventreactor.py index f0ee1ef2e3..4bcfa315bf 100644 --- a/tests/unit/io/test_geventreactor.py +++ b/tests/unit/io/test_geventreactor.py @@ -18,8 +18,8 @@ import unittest # noqa -from tests import is_gevent_time_monkey_patched, is_eventlet_monkey_patched from tests.unit.io.utils import TimerConnectionTests +from tests import MONKEY_PATCH_LOOP try: from cassandra.io.geventreactor import GeventConnection @@ -31,6 +31,9 @@ class GeventTimerTest(unittest.TestCase, TimerConnectionTests): def setUp(self): + if not MONKEY_PATCH_LOOP: + raise unittest.SkipTest("Skpping because monkey patching may affect the rest of the tests") + if not GeventConnection: raise unittest.SkipTest("Can't test gevent without monkey patching") gevent.monkey.patch_time() diff --git a/tests/unit/io/test_libevtimer.py b/tests/unit/io/test_libevtimer.py index 4b21d03170..fb491962d8 100644 --- a/tests/unit/io/test_libevtimer.py +++ b/tests/unit/io/test_libevtimer.py @@ -22,6 +22,7 @@ import time from tests.unit.io.utils import submit_and_wait_for_completion, TimerCallback +from tests.unit.io.utils import TimerConnectionTests from tests import is_monkey_patched From 0542c9bf9d3281e9676c1314b50b877101699fe4 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Sat, 1 Apr 2017 11:40:12 -0400 Subject: [PATCH 1043/2005] minor code change --- cassandra/cqlengine/columns.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 79787c2dad..bfa6a14a18 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -54,11 +54,10 @@ def changed(self): default_value = self.column.get_default() if self.column._val_is_null(default_value): return not self.column._val_is_null(self.value) and self.value != self.previous_value - else: - if self.previous_value is None: - return self.value != default_value - else: - return self.value != self.previous_value + elif self.previous_value is None: + return self.value != default_value + + return self.value != self.previous_value return False From ae4fbd34a92193f4e485d943cf84064c04ae1b17 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 3 Apr 2017 15:07:31 -0400 Subject: [PATCH 1044/2005] Add map item deletion in cqlengine --- cassandra/cqlengine/query.py | 13 +++++++++++-- cassandra/cqlengine/statements.py | 12 ++++++++++-- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index fd001979f8..2dae671ff5 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -1252,6 +1252,9 @@ class Row(Model): # add items to a map Row.objects(row_id=5).update(map_column__update={1: 2, 3: 4}) + + # remove items from a map + Row.objects(row_id=5).update(map_column__remove={1, 2}) """ if not values: return @@ -1270,8 +1273,14 @@ class Row(Model): if col.is_primary_key: raise ValidationError("Cannot apply update to primary key '{0}' for {1}.{2}".format(col_name, self.__module__, self.model.__name__)) - # we should not provide default values in this use case. - val = col.validate(val) + if col_op == 'remove' and isinstance(col, columns.Map): + if not isinstance(val, set): + raise ValidationError( + "Cannot apply update operation '{0}' on column '{1}' with value '{2}'. A set is required.".format(col_op, col_name, val)) + val = {v: None for v in val} + else: + # we should not provide default values in this use case. + val = col.validate(val) if val is None: nulled_columns.add(col_name) diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 44ae165e8b..90e7bb3f39 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -359,10 +359,13 @@ class MapUpdateClause(ContainerUpdateClause): col_type = columns.Map _updates = None + _removals = None def _analyze(self): if self._operation == "update": self._updates = self.value.keys() + elif self._operation == "remove": + self._removals = {v for v in self.value.keys()} else: if self.previous is None: self._updates = sorted([k for k, v in self.value.items()]) @@ -373,12 +376,14 @@ def _analyze(self): def get_context_size(self): if self.is_assignment: return 1 - return len(self._updates or []) * 2 + return int((len(self._updates or []) * 2) + int(bool(self._removals))) def update_context(self, ctx): ctx_id = self.context_id if self.is_assignment: ctx[str(ctx_id)] = {} + elif self._removals is not None: + ctx[str(ctx_id)] = self._removals else: for key in self._updates or []: val = self.value.get(key) @@ -390,7 +395,7 @@ def update_context(self, ctx): def is_assignment(self): if not self._analyzed: self._analyze() - return self.previous is None and not self._updates + return self.previous is None and not self._updates and not self._removals def __unicode__(self): qs = [] @@ -398,6 +403,9 @@ def __unicode__(self): ctx_id = self.context_id if self.is_assignment: qs += ['"{0}" = %({1})s'.format(self.field, ctx_id)] + elif self._removals is not None: + qs += ['"{0}" = "{0}" - %({1})s'.format(self.field, ctx_id)] + ctx_id += 1 else: for _ in self._updates or []: qs += ['"{0}"[%({1})s] = %({2})s'.format(self.field, ctx_id, ctx_id + 1)] From c5cc20e62659fb4eabbeaa6d70bbb590fb21a0da Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 4 Apr 2017 11:44:51 -0400 Subject: [PATCH 1045/2005] add tests for PYTHON-688 --- .../cqlengine/query/test_updates.py | 46 +++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/tests/integration/cqlengine/query/test_updates.py b/tests/integration/cqlengine/query/test_updates.py index 747f6d3abe..4dc0ad1274 100644 --- a/tests/integration/cqlengine/query/test_updates.py +++ b/tests/integration/cqlengine/query/test_updates.py @@ -242,6 +242,52 @@ def test_map_update_none_deletes_key(self): obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) self.assertEqual(obj.text_map, {"foo": '1'}) + @execute_count(5) + def test_map_update_remove(self): + """ + Test that map item removal with update(__remove=...) works + + @jira_ticket PYTHON-688 + """ + partition = uuid4() + cluster = 1 + TestQueryUpdateModel.objects.create( + partition=partition, + cluster=cluster, + text_map={"foo": '1', "bar": '2'} + ) + TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update( + text_map__remove={"bar"} + ) + obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) + self.assertEqual(obj.text_map, {"foo": '1'}) + + TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update( + text_map__remove={"foo"} + ) + self.assertEqual( + TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster).text_map, + {} + ) + + def test_map_remove_rejects_non_sets(self): + """ + Map item removal requires a set to match the CQL API + + @jira_ticket PYTHON-688 + """ + partition = uuid4() + cluster = 1 + TestQueryUpdateModel.objects.create( + partition=partition, + cluster=cluster, + text_map={"foo": '1', "bar": '2'} + ) + with self.assertRaises(ValidationError): + TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update( + text_map__remove=["bar"] + ) + class StaticDeleteModel(Model): example_id = columns.Integer(partition_key=True, primary_key=True, default=uuid4) From ea8c943381b7ca45191701b0f3ffeaaed066763d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 4 Apr 2017 13:34:49 -0500 Subject: [PATCH 1046/2005] save ResponseFuture callbacks even when run_now PYTHON-733 Fixes an issue where, if the first page of a response is received before add_callback, the callback is not saved, and will not work upon receiving future pages. --- cassandra/cluster.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index fa58ebe602..00369ffabc 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3881,10 +3881,9 @@ def add_callback(self, fn, *args, **kwargs): """ run_now = False with self._callback_lock: + self._callbacks.append((fn, args, kwargs)) if self._final_result is not _NOT_SET: run_now = True - else: - self._callbacks.append((fn, args, kwargs)) if run_now: fn(self._final_result, *args, **kwargs) return self @@ -3897,10 +3896,9 @@ def add_errback(self, fn, *args, **kwargs): """ run_now = False with self._callback_lock: + self._errbacks.append((fn, args, kwargs)) if self._final_exception: run_now = True - else: - self._errbacks.append((fn, args, kwargs)) if run_now: fn(self._final_exception, *args, **kwargs) return self From 256cb9bc745120caa265a58179b8216b5827eef5 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 4 Apr 2017 15:37:50 -0400 Subject: [PATCH 1047/2005] document choice to always register callback funcs --- cassandra/cluster.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 00369ffabc..660abaad5f 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3881,6 +3881,9 @@ def add_callback(self, fn, *args, **kwargs): """ run_now = False with self._callback_lock: + # Always add fn to self._callbacks, even when we're about to + # execute it, to prevent races with functions like + # start_fetching_next_page that reset _final_result self._callbacks.append((fn, args, kwargs)) if self._final_result is not _NOT_SET: run_now = True @@ -3896,6 +3899,9 @@ def add_errback(self, fn, *args, **kwargs): """ run_now = False with self._callback_lock: + # Always add fn to self._errbacks, even when we're about to execute + # it, to prevent races with functions like start_fetching_next_page + # that reset _final_exception self._errbacks.append((fn, args, kwargs)) if self._final_exception: run_now = True From 0609b1125f3459bb8c0bc6815443f1160f4df29a Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 4 Apr 2017 17:02:21 -0400 Subject: [PATCH 1048/2005] Fix persistance behavior after a save when a column has a default value --- cassandra/cqlengine/query.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index fd001979f8..0f9d4ea2bf 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -1448,6 +1448,9 @@ def save(self): if self.instance._values[name].changed: nulled_fields.add(col.db_field_name) continue + if col.has_default and not self.instance._values[name].changed: + # Ensure default columns included in a save() are marked as explicit, to get them *persisted* properly + self.instance._values[name].explicit = True insert.add_assignment(col, getattr(self.instance, name, None)) # skip query execution if it's empty From 07fd138a56053033b3ced8c33d457d008b2ab2b7 Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 4 Apr 2017 16:17:12 -0400 Subject: [PATCH 1049/2005] Added some more asserts to test_map_update_remove in tests --- tests/integration/cqlengine/query/test_updates.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/integration/cqlengine/query/test_updates.py b/tests/integration/cqlengine/query/test_updates.py index 4dc0ad1274..135d826cc5 100644 --- a/tests/integration/cqlengine/query/test_updates.py +++ b/tests/integration/cqlengine/query/test_updates.py @@ -257,13 +257,14 @@ def test_map_update_remove(self): text_map={"foo": '1', "bar": '2'} ) TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update( - text_map__remove={"bar"} + text_map__remove={"bar"}, + text_map__update={"foz": '4', "foo": '2'} ) obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) - self.assertEqual(obj.text_map, {"foo": '1'}) + self.assertEqual(obj.text_map, {"foo": '2', "foz": '4'}) TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update( - text_map__remove={"foo"} + text_map__remove={"foo", "foz"} ) self.assertEqual( TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster).text_map, From 57cf4dc3927d140822e6b1d3949583e48597b4af Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 4 Apr 2017 17:14:44 -0400 Subject: [PATCH 1050/2005] prevent callbacks being executed twice addresses comment from @bjmb --- cassandra/cluster.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 660abaad5f..cf85870d32 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3728,13 +3728,20 @@ def _set_final_result(self, response): with self._callback_lock: self._final_result = response + # save off current callbacks inside lock for execution outside it + # -- prevents case where _final_result is set, then a callback is + # added and executed on the spot, then executed again as a + # registered callback + to_call = tuple( + partial(fn, response, *args, **kwargs) + for (fn, args, kwargs) in self._callbacks + ) self._event.set() # apply each callback - for callback in self._callbacks: - fn, args, kwargs = callback - fn(response, *args, **kwargs) + for callback_partial in to_call: + callback_partial() def _set_final_exception(self, response): self._cancel_timer() From e83180eb54f4c48e3011b8d911f1f5e3cb4968f9 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 4 Apr 2017 17:29:36 -0400 Subject: [PATCH 1051/2005] fix race condition in error callbacks --- cassandra/cluster.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index cf85870d32..14ce63f800 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3750,11 +3750,19 @@ def _set_final_exception(self, response): with self._callback_lock: self._final_exception = response + # save off current errbacks inside lock for execution outside it -- + # prevents case where _final_exception is set, then an errback is + # added and executed on the spot, then executed again as a + # registered errback + to_call = tuple( + partial(fn, response, *args, **kwargs) + for (fn, args, kwargs) in self._errbacks + ) self._event.set() - for errback in self._errbacks: - fn, args, kwargs = errback - fn(response, *args, **kwargs) + # apply each callback + for callback_partial in to_call: + callback_partial() def _retry(self, reuse_connection, consistency_level, host): if self._final_exception: From fea53e572d5385bc9946bf98a9ad9ece2d52f3f1 Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 4 Apr 2017 18:02:07 -0400 Subject: [PATCH 1052/2005] Added some tests for PYTHON-733 --- .../integration/standard/test_query_paging.py | 27 ++++++++++++++++--- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/tests/integration/standard/test_query_paging.py b/tests/integration/standard/test_query_paging.py index 30f1316adf..145dd8d5e4 100644 --- a/tests/integration/standard/test_query_paging.py +++ b/tests/integration/standard/test_query_paging.py @@ -254,6 +254,15 @@ def test_async_paging_verify_writes(self): self.assertSequenceEqual(range(1, 101), value_array) def test_paging_callbacks(self): + """ + Test to validate callback api + @since 3.9.0 + @jira_ticket PYTHON-733 + @expected_result callbacks shouldn't be called twice per message + and the fetch_size should be handled in a transparent way to the user + + @test_category queries + """ statements_and_params = zip(cycle(["INSERT INTO test3rf.test (k, v) VALUES (%s, 0)"]), [(i, ) for i in range(100)]) execute_concurrent(self.session, list(statements_and_params)) @@ -266,8 +275,10 @@ def test_paging_callbacks(self): event = Event() counter = count() + number_of_calls = count() - def handle_page(rows, future, counter): + def handle_page(rows, future, counter, number_of_calls): + next(number_of_calls) for row in rows: next(counter) @@ -280,26 +291,34 @@ def handle_error(err): event.set() self.fail(err) - future.add_callbacks(callback=handle_page, callback_args=(future, counter), errback=handle_error) + future.add_callbacks(callback=handle_page, callback_args=(future, counter, number_of_calls), + errback=handle_error) event.wait() + self.assertEqual(next(number_of_calls), 100 // fetch_size + 1) self.assertEqual(next(counter), 100) # simple statement future = self.session.execute_async(SimpleStatement("SELECT * FROM test3rf.test"), timeout=20) event.clear() counter = count() + number_of_calls = count() - future.add_callbacks(callback=handle_page, callback_args=(future, counter), errback=handle_error) + future.add_callbacks(callback=handle_page, callback_args=(future, counter, number_of_calls), + errback=handle_error) event.wait() + self.assertEqual(next(number_of_calls), 100 // fetch_size + 1) self.assertEqual(next(counter), 100) # prepared statement future = self.session.execute_async(prepared, timeout=20) event.clear() counter = count() + number_of_calls = count() - future.add_callbacks(callback=handle_page, callback_args=(future, counter), errback=handle_error) + future.add_callbacks(callback=handle_page, callback_args=(future, counter, number_of_calls), + errback=handle_error) event.wait() + self.assertEqual(next(number_of_calls), 100 // fetch_size + 1) self.assertEqual(next(counter), 100) def test_concurrent_with_paging(self): From 5ecef596ec3869c020eb72dbac45049ab4c86fe4 Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 3 Apr 2017 14:42:04 -0400 Subject: [PATCH 1053/2005] Added some tests for PYTHON-657 --- .../cqlengine/model/test_model_io.py | 32 ---- .../cqlengine/model/test_updates.py | 159 ++++++++++++++++++ 2 files changed, 159 insertions(+), 32 deletions(-) diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index fe4faf7afa..36d75055ca 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -30,7 +30,6 @@ from cassandra.cqlengine.models import Model from cassandra.query import SimpleStatement from cassandra.util import Date, Time -from cassandra.cqltypes import Int32Type from cassandra.cqlengine.statements import SelectStatement, DeleteStatement, WhereClause from cassandra.cqlengine.operators import EqualsOperator @@ -483,37 +482,6 @@ def test_previous_value_tracking_on_instantiation(self): self.assertTrue(self.instance._values['count'].previous_value is None) self.assertTrue(self.instance.count is None) - def test_value_override_with_default(self): - """ - Updating a row with a new Model instance shouldn't set columns to defaults - - @since 3.9 - @jira_ticket PYTHON-657 - @expected_result column value should not change - - @test_category object_mapper - """ - class ModelWithDefault(Model): - id = columns.Integer(primary_key=True) - mf = columns.Map(columns.Integer, columns.Integer) - dummy = columns.Integer(default=42) - sync_table(ModelWithDefault) - - initial = ModelWithDefault(id=1, mf={0: 0}, dummy=0) - initial.save() - - session = cassandra.cluster.Cluster().connect() - session.execute('USE ' + DEFAULT_KEYSPACE) - self.assertEqual( - list(session.execute('SELECT * from model_with_default'))[0].dummy, 0 - ) - - second = ModelWithDefault(id=1) - second.update(mf={0: 1}) - self.assertEqual( - list(session.execute('SELECT * from model_with_default'))[0].dummy, 0 - ) - def test_previous_value_tracking_on_instantiation_with_default(self): class TestDefaultValueTracking(Model): diff --git a/tests/integration/cqlengine/model/test_updates.py b/tests/integration/cqlengine/model/test_updates.py index bc39d142cf..054929c965 100644 --- a/tests/integration/cqlengine/model/test_updates.py +++ b/tests/integration/cqlengine/model/test_updates.py @@ -134,3 +134,162 @@ def test_primary_key_update_failure(self): m0 = TestUpdateModel.create(count=5, text='monkey') with self.assertRaises(ValidationError): m0.update(partition=uuid4()) + + +class ModelWithDefault(Model): + id = columns.Integer(primary_key=True) + mf = columns.Map(columns.Integer, columns.Integer) + dummy = columns.Integer(default=42) + + +class ModelWithDefaultCollection(Model): + id = columns.Integer(primary_key=True) + mf = columns.Map(columns.Integer, columns.Integer, default={2:2}) + dummy = columns.Integer(default=42) + + +class ModelWithDefaultTests(BaseCassEngTestCase): + def setUp(self): + sync_table(ModelWithDefault) + + def tearDown(self): + drop_table(ModelWithDefault) + + def _assert_model_with_default_row(self, obj, expected): + self.assertListEqual([obj.id, obj.dummy, obj.mf], expected) + + def test_value_override_with_default(self): + """ + Updating a row with a new Model instance shouldn't set columns to defaults + + @since 3.9 + @jira_ticket PYTHON-657 + @expected_result column value should not change + + @test_category object_mapper + """ + initial = ModelWithDefault(id=1, mf={0: 0}, dummy=0) + initial.save() + + self._assert_model_with_default_row(ModelWithDefault.objects.all().get(), + [1, 0, {0: 0}]) + + second = ModelWithDefault(id=1) + second.update(mf={0: 1}) + + self._assert_model_with_default_row(ModelWithDefault.objects.all().get(), + [1, 0, {0: 1}]) + + def test_value_is_written_if_is_default(self): + """ + Check if the we try to update with the default value, the update + happens correctly + @since 3.9 + @jira_ticket PYTHON-657 + @expected_result column value should be updated + + @test_category object_mapper + :return: + """ + initial = ModelWithDefault(id=1) + initial.mf = {0: 0} + initial.dummy = 42 + initial.update() + + self._assert_model_with_default_row(ModelWithDefault.objects.all().get(), + [1, 42, {0: 0}]) + + + def test_null_update_is_respected(self): + """ + Check if the we try to update with None under particular + circumstances, ir works correctly + @since 3.9 + @jira_ticket PYTHON-657 + @expected_result column value should be updated to None + + @test_category object_mapper + :return: + """ + ModelWithDefault.create(id=1, mf={0: 0}).save() + + q = ModelWithDefault.objects.all().allow_filtering() + obj = q.filter(id=1).get() + + obj.update(dummy=None) + + self._assert_model_with_default_row(ModelWithDefault.objects.all().get(), + [1, None, {0: 0}]) + + + def test_only_set_values_is_updated(self): + """ + Test the updates work as expected when an object is deleted + @since 3.9 + @jira_ticket PYTHON-657 + @expected_result the non updated column is None and the + updated column has the set value + + @test_category object_mapper + """ + + ModelWithDefault.create(id=1, mf={1: 1}, dummy=1).save() + + item = ModelWithDefault.filter(id=1).first() + ModelWithDefault.objects(id=1).delete() + item.mf = {1: 2} + + item.save() + + self._assert_model_with_default_row(ModelWithDefault.objects.all().get(), + [1, None, {1: 2}]) + + def test_collections(self): + """ + Test the updates work as expected when an object is deleted + @since 3.9 + @jira_ticket PYTHON-657 + @expected_result the non updated column is None and the + updated column has the set value + + @test_category object_mapper + """ + ModelWithDefault.create(id=1, mf={1: 1, 2: 1}, dummy=1).save() + item = ModelWithDefault.filter(id=1).first() + + item.update(mf={2:1}) + self._assert_model_with_default_row(ModelWithDefault.objects.all().get(), + [1, 1, {2: 1}]) + + def test_collection_with_default(self): + """ + Test the updates work as expected when an object is deleted + @since 3.9 + @jira_ticket PYTHON-657 + @expected_result the non updated column is None and the + updated column has the set value + + @test_category object_mapper + """ + sync_table(ModelWithDefaultCollection) + item = ModelWithDefaultCollection.create(id=1, mf={1: 1}, dummy=1).save() + self._assert_model_with_default_row(ModelWithDefaultCollection.objects.all().get(), + [1, 1, {1: 1}]) + + item.update(mf={2: 2}) + self._assert_model_with_default_row(ModelWithDefaultCollection.objects.all().get(), + [1, 1, {2: 2}]) + + item.update(mf=None) + self._assert_model_with_default_row(ModelWithDefaultCollection.objects.all().get(), + [1, 1, {}]) + + item = ModelWithDefaultCollection.create(id=2, dummy=2).save() + self._assert_model_with_default_row(ModelWithDefaultCollection.objects.all().get(id=2), + [2, 2, {2: 2}]) + + item.update(mf={1: 1, 4: 4}) + self._assert_model_with_default_row(ModelWithDefaultCollection.objects.all().get(id=2), + [2, 2, {1: 1, 4: 4}]) + + drop_table(ModelWithDefaultCollection) From 6f16e166c38c82a6783f09285fe90ef16c0fdba7 Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 4 Apr 2017 19:42:32 -0400 Subject: [PATCH 1054/2005] Skipped test test_map_update_remove if cassandra <= 2.0 --- tests/integration/cqlengine/query/test_updates.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/integration/cqlengine/query/test_updates.py b/tests/integration/cqlengine/query/test_updates.py index 135d826cc5..7ed433c0b3 100644 --- a/tests/integration/cqlengine/query/test_updates.py +++ b/tests/integration/cqlengine/query/test_updates.py @@ -21,7 +21,7 @@ from tests.integration.cqlengine import is_prepend_reversed from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration.cqlengine import execute_count - +from tests.integration import greaterthancass20 class TestQueryUpdateModel(Model): @@ -242,6 +242,7 @@ def test_map_update_none_deletes_key(self): obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) self.assertEqual(obj.text_map, {"foo": '1'}) + @greaterthancass20 @execute_count(5) def test_map_update_remove(self): """ From 99cc8e83a0edc662808b509ec328d889b85c72c5 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 4 Apr 2017 20:21:01 -0400 Subject: [PATCH 1055/2005] instance fetched from DB should be entirely set persisted --- cassandra/cqlengine/models.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 9bb9104934..79afae7030 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -484,12 +484,12 @@ def _construct_instance(cls, values): klass = cls instance = klass(**values) - instance._set_persisted() + instance._set_persisted(force=True) return instance - def _set_persisted(self): + def _set_persisted(self, force=False): # ensure we don't modify to any values not affected by the last save/update - for v in [v for v in self._values.values() if v.changed]: + for v in [v for v in self._values.values() if v.changed or force]: v.reset_previous_value() v.explicit = False self._is_persisted = True From 1999f6da6b326852e27ed765951eadf188f7420a Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 5 Apr 2017 09:46:00 -0400 Subject: [PATCH 1056/2005] Added tests for PYTHON-719 --- .../cqlengine/query/test_updates.py | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/tests/integration/cqlengine/query/test_updates.py b/tests/integration/cqlengine/query/test_updates.py index 747f6d3abe..adb543b1cd 100644 --- a/tests/integration/cqlengine/query/test_updates.py +++ b/tests/integration/cqlengine/query/test_updates.py @@ -242,6 +242,34 @@ def test_map_update_none_deletes_key(self): obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) self.assertEqual(obj.text_map, {"foo": '1'}) + @execute_count(3) + def test_an_extra_delete_is_not_sent(self): + """ + Test to ensure that an extra DELETE is not sent if an object is read + from the DB with a None value + + @since 3.9 + @jira_ticket PYTHON-719 + @expected_result only three queries are executed, the first one for + inserting the object, the second one for reading it, and the third + one for updating it + + @test_category object_mapper + """ + partition = uuid4() + cluster = 1 + + TestQueryUpdateModel.objects.create( + partition=partition, cluster=cluster) + + obj = TestQueryUpdateModel.objects( + partition=partition, cluster=cluster).first() + + self.assertFalse(any([obj._values[column].deleted for column in obj._values])) + + obj.text = 'foo' + obj.save() + class StaticDeleteModel(Model): example_id = columns.Integer(partition_key=True, primary_key=True, default=uuid4) From fba89cea8a11f79ea3f5c219448908551ce7a20a Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 5 Apr 2017 13:56:50 -0400 Subject: [PATCH 1057/2005] Acknowledged review for PYTHON-657 tests --- .../cqlengine/model/test_updates.py | 51 +++++++++---------- 1 file changed, 23 insertions(+), 28 deletions(-) diff --git a/tests/integration/cqlengine/model/test_updates.py b/tests/integration/cqlengine/model/test_updates.py index 054929c965..606c4346e1 100644 --- a/tests/integration/cqlengine/model/test_updates.py +++ b/tests/integration/cqlengine/model/test_updates.py @@ -155,9 +155,6 @@ def setUp(self): def tearDown(self): drop_table(ModelWithDefault) - def _assert_model_with_default_row(self, obj, expected): - self.assertListEqual([obj.id, obj.dummy, obj.mf], expected) - def test_value_override_with_default(self): """ Updating a row with a new Model instance shouldn't set columns to defaults @@ -171,14 +168,14 @@ def test_value_override_with_default(self): initial = ModelWithDefault(id=1, mf={0: 0}, dummy=0) initial.save() - self._assert_model_with_default_row(ModelWithDefault.objects.all().get(), - [1, 0, {0: 0}]) + self.assertEqual(ModelWithDefault.objects().all().get()._as_dict(), + {'id': 1, 'dummy': 0, 'mf': {0: 0}}) second = ModelWithDefault(id=1) second.update(mf={0: 1}) - self._assert_model_with_default_row(ModelWithDefault.objects.all().get(), - [1, 0, {0: 1}]) + self.assertEqual(ModelWithDefault.objects().all().get()._as_dict(), + {'id': 1, 'dummy': 0, 'mf': {0: 1}}) def test_value_is_written_if_is_default(self): """ @@ -196,14 +193,13 @@ def test_value_is_written_if_is_default(self): initial.dummy = 42 initial.update() - self._assert_model_with_default_row(ModelWithDefault.objects.all().get(), - [1, 42, {0: 0}]) - + self.assertEqual(ModelWithDefault.objects().all().get()._as_dict(), + {'id': 1, 'dummy': 42, 'mf': {0: 0}}) def test_null_update_is_respected(self): """ Check if the we try to update with None under particular - circumstances, ir works correctly + circumstances, it works correctly @since 3.9 @jira_ticket PYTHON-657 @expected_result column value should be updated to None @@ -218,9 +214,8 @@ def test_null_update_is_respected(self): obj.update(dummy=None) - self._assert_model_with_default_row(ModelWithDefault.objects.all().get(), - [1, None, {0: 0}]) - + self.assertEqual(ModelWithDefault.objects().all().get()._as_dict(), + {'id': 1, 'dummy': None, 'mf': {0: 0}}) def test_only_set_values_is_updated(self): """ @@ -241,8 +236,8 @@ def test_only_set_values_is_updated(self): item.save() - self._assert_model_with_default_row(ModelWithDefault.objects.all().get(), - [1, None, {1: 2}]) + self.assertEqual(ModelWithDefault.objects().all().get()._as_dict(), + {'id': 1, 'dummy': None, 'mf': {1: 2}}) def test_collections(self): """ @@ -258,8 +253,8 @@ def test_collections(self): item = ModelWithDefault.filter(id=1).first() item.update(mf={2:1}) - self._assert_model_with_default_row(ModelWithDefault.objects.all().get(), - [1, 1, {2: 1}]) + self.assertEqual(ModelWithDefault.objects().all().get()._as_dict(), + {'id': 1, 'dummy': 1, 'mf': {2: 1}}) def test_collection_with_default(self): """ @@ -273,23 +268,23 @@ def test_collection_with_default(self): """ sync_table(ModelWithDefaultCollection) item = ModelWithDefaultCollection.create(id=1, mf={1: 1}, dummy=1).save() - self._assert_model_with_default_row(ModelWithDefaultCollection.objects.all().get(), - [1, 1, {1: 1}]) + self.assertEqual(ModelWithDefaultCollection.objects().all().get()._as_dict(), + {'id': 1, 'dummy': 1, 'mf': {1: 1}}) item.update(mf={2: 2}) - self._assert_model_with_default_row(ModelWithDefaultCollection.objects.all().get(), - [1, 1, {2: 2}]) + self.assertEqual(ModelWithDefaultCollection.objects().all().get()._as_dict(), + {'id': 1, 'dummy': 1, 'mf': {2: 2}}) item.update(mf=None) - self._assert_model_with_default_row(ModelWithDefaultCollection.objects.all().get(), - [1, 1, {}]) + self.assertEqual(ModelWithDefaultCollection.objects().all().get()._as_dict(), + {'id': 1, 'dummy': 1, 'mf': {}}) item = ModelWithDefaultCollection.create(id=2, dummy=2).save() - self._assert_model_with_default_row(ModelWithDefaultCollection.objects.all().get(id=2), - [2, 2, {2: 2}]) + self.assertEqual(ModelWithDefaultCollection.objects().all().get(id=2)._as_dict(), + {'id': 2, 'dummy': 2, 'mf': {2: 2}}) item.update(mf={1: 1, 4: 4}) - self._assert_model_with_default_row(ModelWithDefaultCollection.objects.all().get(id=2), - [2, 2, {1: 1, 4: 4}]) + self.assertEqual(ModelWithDefaultCollection.objects().all().get(id=2)._as_dict(), + {'id': 2, 'dummy': 2, 'mf': {1: 1, 4: 4}}) drop_table(ModelWithDefaultCollection) From 2e5b875b7f31a4105029974ad0ca29399bb80b72 Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 5 Apr 2017 14:29:56 -0400 Subject: [PATCH 1058/2005] Clarified some parts of the code in the tests --- tests/integration/cqlengine/query/test_updates.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/integration/cqlengine/query/test_updates.py b/tests/integration/cqlengine/query/test_updates.py index d214336a7e..3410e252b0 100644 --- a/tests/integration/cqlengine/query/test_updates.py +++ b/tests/integration/cqlengine/query/test_updates.py @@ -313,11 +313,12 @@ def test_an_extra_delete_is_not_sent(self): obj = TestQueryUpdateModel.objects( partition=partition, cluster=cluster).first() - self.assertFalse(any([obj._values[column].deleted for column in obj._values])) + self.assertFalse({k: v for (k, v) in obj._values.items() if v.deleted}) obj.text = 'foo' obj.save() - + #execute_count will check the execution count and + #assert no more calls than necessary where made class StaticDeleteModel(Model): example_id = columns.Integer(partition_key=True, primary_key=True, default=uuid4) From b7645369e7d85be0b89c45848e7ca264966f4628 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 6 Apr 2017 13:39:22 -0400 Subject: [PATCH 1059/2005] raise error on primary key update only if its value changed --- cassandra/cqlengine/models.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 79afae7030..29bd8f71dc 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -754,18 +754,24 @@ def update(self, **values): It is possible to do a blind update, that is, to update a field without having first selected the object out of the database. See :ref:`Blind Updates ` """ - for k, v in values.items(): - col = self._columns.get(k) + for column_id, v in values.items(): + col = self._columns.get(column_id) # check for nonexistant columns if col is None: - raise ValidationError("{0}.{1} has no column named: {2}".format(self.__module__, self.__class__.__name__, k)) + raise ValidationError( + "{0}.{1} has no column named: {2}".format( + self.__module__, self.__class__.__name__, column_id)) # check for primary key update attempts if col.is_primary_key: - raise ValidationError("Cannot apply update to primary key '{0}' for {1}.{2}".format(k, self.__module__, self.__class__.__name__)) + current_value = getattr(self, column_id) + if v != current_value: + raise ValidationError( + "Cannot apply update to primary key '{0}' for {1}.{2}".format( + column_id, self.__module__, self.__class__.__name__)) - setattr(self, k, v) + setattr(self, column_id, v) # handle polymorphic models if self._is_polymorphic: From 722d8455dee4bb138ada2079e19e694bd9ef80d9 Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 5 Apr 2017 11:40:44 -0400 Subject: [PATCH 1060/2005] Updated tox.ini to run Gevent and Eventlet tests independently --- appveyor/run_test.ps1 | 5 +++++ tox.ini | 32 +++++++++++++++++++++++++++++--- 2 files changed, 34 insertions(+), 3 deletions(-) diff --git a/appveyor/run_test.ps1 b/appveyor/run_test.ps1 index 34ba227c0a..553540d2e4 100644 --- a/appveyor/run_test.ps1 +++ b/appveyor/run_test.ps1 @@ -12,6 +12,11 @@ python -c "import platform; print(platform.architecture())" $wc = New-Object 'System.Net.WebClient' nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml .\tests\unit + +$env:MONKEY_PATCH_LOOP=1 +nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml .\tests\unit\io\test_geventreactor.py +nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml .\tests\unit\io\test_eventletreactor.py + echo "uploading unit results" $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\unit_results.xml)) diff --git a/tox.ini b/tox.ini index 3b68b73c8a..06bb82746b 100644 --- a/tox.ini +++ b/tox.ini @@ -7,13 +7,39 @@ deps = nose PyYAML six packaging + cython + gevent + eventlet + twisted <15.5.0 [testenv] deps = {[base]deps} - cython - py{27}: gevent - twisted <15.5.0 + setenv = LIBEV_EMBED=0 CARES_EMBED=0 changedir = {envtmpdir} commands = nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/ + + +[testenv:py27] +deps = {[base]deps} + +setenv = LIBEV_EMBED=0 + CARES_EMBED=0 + MONKEY_PATCH_LOOP=1 +changedir = {envtmpdir} +commands = + nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/io/test_eventletreactor.py + nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/io/test_geventreactor.py + + +[testenv:py34] +deps = {[base]deps} + +setenv = LIBEV_EMBED=0 + CARES_EMBED=0 + MONKEY_PATCH_LOOP=1 +changedir = {envtmpdir} +commands = + nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/io/test_eventletreactor.py + nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/io/test_geventreactor.py From 2056b145c2bc44410074fbc556fce4f95df955ce Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 6 Apr 2017 16:17:54 -0400 Subject: [PATCH 1061/2005] Fix tests --- tests/integration/cqlengine/model/test_updates.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/cqlengine/model/test_updates.py b/tests/integration/cqlengine/model/test_updates.py index 606c4346e1..668db434b5 100644 --- a/tests/integration/cqlengine/model/test_updates.py +++ b/tests/integration/cqlengine/model/test_updates.py @@ -91,10 +91,10 @@ def test_noop_model_direct_update(self): m0.update(count=5) assert execute.call_count == 0 - with self.assertRaises(ValidationError): + with patch.object(self.session, 'execute') as execute: m0.update(partition=m0.partition) - with self.assertRaises(ValidationError): + with patch.object(self.session, 'execute') as execute: m0.update(cluster=m0.cluster) def test_noop_model_assignation_update(self): From 9848ef3987d74d6cc6c98a4f2f3bd9497c312691 Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 6 Apr 2017 15:09:10 -0400 Subject: [PATCH 1062/2005] Acknowledge the some review comments for tests --- tests/__init__.py | 4 ++-- tests/unit/io/eventlet_utils.py | 5 +++-- tests/unit/io/test_eventletreactor.py | 27 ++++++++++++--------------- tests/unit/io/test_geventreactor.py | 19 +++++++------------ tests/unit/io/utils.py | 10 +++++----- tox.ini | 4 +++- 6 files changed, 32 insertions(+), 37 deletions(-) diff --git a/tests/__init__.py b/tests/__init__.py index 80fec0cca7..feba16fb9c 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -59,8 +59,8 @@ def is_monkey_patched(): return is_gevent_monkey_patched() or is_eventlet_monkey_patched() -MONKEY_PATCH_LOOP = bool(os.getenv('MONKEY_PATCH_LOOP', False)) +MONKEY_PATCH_LOOP = bool(os.getenv('MONKEY_PATCH_LOOP', False) == "1") notwindows = unittest.skipUnless(not "Windows" in platform.system(), "This test is not adequate for windows") -notpypy = unittest.skipUnless(not '__pypy__' in sys.builtin_module_names, "This tests is not suitable for pypy") +notpypy = unittest.skipUnless(not platform.python_implementation() == 'PyPy', "This tests is not suitable for pypy") notmonkeypatch = unittest.skipUnless(MONKEY_PATCH_LOOP, "Skpping this test because monkey patching is required") \ No newline at end of file diff --git a/tests/unit/io/eventlet_utils.py b/tests/unit/io/eventlet_utils.py index 05ef6bc939..2ed929c81d 100644 --- a/tests/unit/io/eventlet_utils.py +++ b/tests/unit/io/eventlet_utils.py @@ -20,10 +20,11 @@ import thread import Queue import __builtin__ -except: + #For python3 compatibility +except ImportError: import _thread as thread import queue as Queue - import builtins + import builtins as __builtin__ import threading import ssl diff --git a/tests/unit/io/test_eventletreactor.py b/tests/unit/io/test_eventletreactor.py index 01445735b1..69ee23e405 100644 --- a/tests/unit/io/test_eventletreactor.py +++ b/tests/unit/io/test_eventletreactor.py @@ -19,34 +19,31 @@ import unittest # noqa from tests.unit.io.utils import TimerConnectionTests -from tests.unit.io.eventlet_utils import restore_saved_module, eventlet_un_patch_all -from tests import notpypy, MONKEY_PATCH_LOOP +from tests.unit.io.eventlet_utils import restore_saved_module +from tests import notpypy +from tests import notmonkeypatch + import time from eventlet import monkey_patch, kill -import threading try: from cassandra.io.eventletreactor import EventletConnection except ImportError: EventletConnection = None # noqa +@unittest.skipUnless(EventletConnection is not None, "Skpping the eventlet tests because it's not installed") +@notmonkeypatch +# There are some issues with some versions of pypy and eventlet +@notpypy class EventletTimerTest(unittest.TestCase, TimerConnectionTests): - # There are some issues with some versions of pypy and eventlet - @notpypy - def setUp(self): - if not MONKEY_PATCH_LOOP: - raise unittest.SkipTest("Skpping because monkey patching may affect the rest of the tests") - - if not EventletConnection: - raise unittest.SkipTest("Can't test eventlet without monkey patching") - monkey_patch(thread=False, time=True) + def setUp(self): + self.connection_class = EventletConnection + # We only to patch the time module + monkey_patch(time=True) EventletConnection.initialize_reactor() def tearDown(self): kill(EventletConnection._timeout_watcher) EventletConnection._timers = None restore_saved_module(time) - - def getClass(self): - return EventletConnection diff --git a/tests/unit/io/test_geventreactor.py b/tests/unit/io/test_geventreactor.py index 4bcfa315bf..ad7f9de739 100644 --- a/tests/unit/io/test_geventreactor.py +++ b/tests/unit/io/test_geventreactor.py @@ -19,8 +19,7 @@ from tests.unit.io.utils import TimerConnectionTests -from tests import MONKEY_PATCH_LOOP - +from tests import notmonkeypatch try: from cassandra.io.geventreactor import GeventConnection import gevent.monkey @@ -28,20 +27,16 @@ except ImportError: GeventConnection = None # noqa - +@unittest.skipUnless(GeventConnection is not None, "Skpping the gevent tests because it's not installed") +@notmonkeypatch class GeventTimerTest(unittest.TestCase, TimerConnectionTests): - def setUp(self): - if not MONKEY_PATCH_LOOP: - raise unittest.SkipTest("Skpping because monkey patching may affect the rest of the tests") - if not GeventConnection: - raise unittest.SkipTest("Can't test gevent without monkey patching") + def setUp(self): + self.connection_class = GeventConnection + #We only to patch the time module gevent.monkey.patch_time() GeventConnection.initialize_reactor() def tearDown(self): restore_saved_module("time") - GeventConnection._timers = None - - def getClass(self): - return GeventConnection + GeventConnection._timers = None \ No newline at end of file diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index 3a3179dc82..4533f77143 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -116,11 +116,11 @@ def test_multi_timer_validation(self): Verify that timer timeouts are honored appropriately """ # Tests timers submitted in order at various timeouts - submit_and_wait_for_completion(self, self.getClass(), 0, 100, 1, 100) + submit_and_wait_for_completion(self, self.connection_class, 0, 100, 1, 100) # Tests timers submitted in reverse order at various timeouts - submit_and_wait_for_completion(self, self.getClass(), 100, 0, -1, 100) + submit_and_wait_for_completion(self, self.connection_class, 100, 0, -1, 100) # Tests timers submitted in varying order at various timeouts - submit_and_wait_for_completion(self, self.getClass(), 0, 100, 1, 100, True), + submit_and_wait_for_completion(self, self.connection_class, 0, 100, 1, 100, True), def test_timer_cancellation(self): """ @@ -130,11 +130,11 @@ def test_timer_cancellation(self): # Various lists for tracking callback stage timeout = .1 callback = TimerCallback(timeout) - timer = self.getClass().create_timer(timeout, callback.invoke) + timer = self.connection_class.create_timer(timeout, callback.invoke) timer.cancel() # Release context allow for timer thread to run. time.sleep(.2) - timer_manager = self.getClass()._timers + timer_manager = self.connection_class._timers # Assert that the cancellation was honored self.assertFalse(timer_manager._queue) self.assertFalse(timer_manager._new_timers) diff --git a/tox.ini b/tox.ini index 06bb82746b..9b22131533 100644 --- a/tox.ini +++ b/tox.ini @@ -8,7 +8,6 @@ deps = nose six packaging cython - gevent eventlet twisted <15.5.0 @@ -17,12 +16,14 @@ deps = {[base]deps} setenv = LIBEV_EMBED=0 CARES_EMBED=0 + MONKEY_PATCH_LOOP=0 changedir = {envtmpdir} commands = nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/ [testenv:py27] deps = {[base]deps} + gevent setenv = LIBEV_EMBED=0 CARES_EMBED=0 @@ -35,6 +36,7 @@ commands = [testenv:py34] deps = {[base]deps} + gevent setenv = LIBEV_EMBED=0 CARES_EMBED=0 From 6766d77b72e297a477d98eb257413df1da75fa7b Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 6 Apr 2017 17:12:13 -0400 Subject: [PATCH 1063/2005] Removed unused statements from appveyor, this tests where failing always because the need a cluster to test against --- appveyor/run_test.ps1 | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/appveyor/run_test.ps1 b/appveyor/run_test.ps1 index 553540d2e4..55774ee9af 100644 --- a/appveyor/run_test.ps1 +++ b/appveyor/run_test.ps1 @@ -20,21 +20,4 @@ nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results. echo "uploading unit results" $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\unit_results.xml)) -if($env:ci_type -eq 'standard' -Or $env:ci_type -eq 'long'){ - echo "Running CQLEngine integration tests" - nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=cqlengine_results.xml .\tests\integration\cqlengine - $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\cqlengine_results.xml)) - echo "uploading CQLEngine test results" - - echo "Running standard integration tests" - nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml .\tests\integration\standard - $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\standard_results.xml)) - echo "uploading standard integration test results" -} - -if($env:ci_type -eq 'long'){ - nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=cqlengine_results.xml .\tests\integration\cqlengine - $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\cqlengine_results.xml)) - echo "uploading standard integration test results" -} exit 0 From b044b34e809a975d72014a903dfce32e13962e0b Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 7 Apr 2017 10:48:31 -0400 Subject: [PATCH 1064/2005] Made copy of Cluster._pools when they are iterated --- cassandra/cluster.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 6d3dc76ac3..87213e3803 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1565,7 +1565,7 @@ def _ensure_core_connections(self): open, attempt to open connections until that number is met. """ for session in self.sessions: - for pool in session._pools.values(): + for pool in tuple(session._pools.values()): pool.ensure_core_connections() @staticmethod @@ -2304,7 +2304,7 @@ def shutdown(self): future.cancel() wait_futures(self._initial_connect_futures) - for pool in list(self._pools.values()): + for pool in tuple(self._pools.values()): pool.shutdown() def __enter__(self): @@ -2455,7 +2455,7 @@ def pool_finished_setting_keyspace(pool, host_errors): if not remaining_callbacks: callback(host_errors) - for pool in self._pools.values(): + for pool in tuple(self._pools.values()): pool._set_keyspace_for_all_conns(keyspace, pool_finished_setting_keyspace) def user_type_registered(self, keyspace, user_type, klass): @@ -2496,7 +2496,7 @@ def submit(self, fn, *args, **kwargs): return self.cluster.executor.submit(fn, *args, **kwargs) def get_pool_state(self): - return dict((host, pool.get_state()) for host, pool in self._pools.items()) + return dict((host, pool.get_state()) for host, pool in tuple(self._pools.items())) def get_pools(self): return self._pools.values() From 2859da85db902389122760d4a0e1bff06d7d493d Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 7 Apr 2017 11:27:32 -0400 Subject: [PATCH 1065/2005] Added tests for PYTHON-705 --- tests/integration/cqlengine/model/test_updates.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tests/integration/cqlengine/model/test_updates.py b/tests/integration/cqlengine/model/test_updates.py index 668db434b5..d6a9fdee45 100644 --- a/tests/integration/cqlengine/model/test_updates.py +++ b/tests/integration/cqlengine/model/test_updates.py @@ -61,6 +61,17 @@ def test_update_model(self): self.assertEqual(m2.count, m1.count) self.assertEqual(m2.text, m0.text) + #This shouldn't raise a Validation error as the PR is not changing + m0.update(partition=m0.partition, cluster=m0.cluster) + + #Assert a ValidationError is risen if the PR changes + with self.assertRaises(ValidationError): + m0.update(partition=m0.partition, cluster=20) + + # Assert a ValidationError is risen if the columns doesn't exist + with self.assertRaises(ValidationError): + m0.update(invalid_column=20) + def test_update_values(self): """ tests calling update on models with values passed in """ m0 = TestUpdateModel.create(count=5, text='monkey') From 22fe6f7b0b8f3b9529f6425de90de641fa5faece Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 7 Apr 2017 16:41:23 -0400 Subject: [PATCH 1066/2005] Fixed profile timeout test --- tests/integration/standard/test_cluster.py | 23 +++++++++++----------- 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 85568b5e08..ff513c1c01 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -906,18 +906,17 @@ def test_add_profile_timeout(self): @test_category config_profiles """ + max_retry_count = 10 + for i in range(max_retry_count): + node1 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1'])) + with Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: node1}) as cluster: + session = cluster.connect(wait_for_all_pools=True) + pools = session.get_pool_state() + self.assertGreater(len(cluster.metadata.all_hosts()), 2) + self.assertEqual(set(h.address for h in pools), set(('127.0.0.1',))) - node1 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1'])) - with Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: node1}) as cluster: - session = cluster.connect(wait_for_all_pools=True) - pools = session.get_pool_state() - self.assertGreater(len(cluster.metadata.all_hosts()), 2) - self.assertEqual(set(h.address for h in pools), set(('127.0.0.1',))) + node2 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.2', '127.0.0.3'])) - node2 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.2'])) - - max_retry_count = 10 - for i in range(max_retry_count): start = time.time() try: self.assertRaises(cassandra.OperationTimedOut, cluster.add_execution_profile, @@ -927,8 +926,8 @@ def test_add_profile_timeout(self): except AssertionError: end = time.time() self.assertAlmostEqual(start, end, 1) - else: - raise Exception("add_execution_profile didn't timeout after {0} retries".format(max_retry_count)) + else: + raise Exception("add_execution_profile didn't timeout after {0} retries".format(max_retry_count)) class LocalHostAdressTranslator(AddressTranslator): From 986127c413e723f2a3da288f7d2f1cb1ada00293 Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 31 Mar 2017 19:57:39 -0400 Subject: [PATCH 1067/2005] Added support for different loops in integrations tests --- build.yaml | 55 ++++++++++++++++++++++++++++++----- tests/integration/__init__.py | 30 +++++++++++++++++-- 2 files changed, 74 insertions(+), 11 deletions(-) diff --git a/build.yaml b/build.yaml index 5d36e56d56..996ebf8346 100644 --- a/build.yaml +++ b/build.yaml @@ -3,6 +3,49 @@ schedules: schedule: per_commit branches: include: [master, /python.*/] + env_vars: | + EVENT_LOOP_MANAGER='libev' + + nightly_libev: + schedule: nightly + branches: + include: [master] + env_vars: | + EVENT_LOOP_MANAGER='libev' + matrix: + exclude: + - cassandra: ['2.0', '2.1', '2.2', '3.0'] + + nightly_gevent: + schedule: nightly + branches: + include: [master] + env_vars: | + EVENT_LOOP_MANAGER='gevent' + matrix: + exclude: + - pythoon: 3.4 + - cassandra: ['2.0', '2.1', '2.2', '3.0'] + + nightly_eventlet: + schedule: nightly + branches: + include: [master] + env_vars: | + EVENT_LOOP_MANAGER='eventlet' + matrix: + exclude: + - cassandra: ['2.0', '2.1', '2.2', '3.0'] + + nightly_async: + schedule: nightly + branches: + include: [master] + env_vars: | + EVENT_LOOP_MANAGER='async' + matrix: + exclude: + - cassandra: ['2.0', '2.1', '2.2', '3.0'] python: - 2.7 @@ -16,8 +59,6 @@ cassandra: - '3.0' - '3.11' env: - EVENT_LOOP_MANAGER: - - libev CYTHON: - CYTHON - NO_CYTHON @@ -28,9 +69,7 @@ build: pip install git+https://github.com/pcmanus/ccm.git # Install dependencies - if [[ $EVENT_LOOP_MANAGER == 'libev' ]]; then - sudo apt-get install -y libev4 libev-dev - fi + sudo apt-get install -y libev4 libev-dev pip install -r test-requirements.txt pip install nose-ignore-docstring FORCE_CYTHON=False @@ -46,12 +85,12 @@ build: fi echo "==========RUNNING CQLENGINE TESTS==========" - CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true + EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true echo "==========RUNNING INTEGRATION TESTS==========" - CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true + EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true echo "==========RUNNING LONG INTEGRATION TESTS==========" - CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true + EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true - xunit: - "*_results.xml" diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index d3102f6717..f3200a5f0d 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -11,6 +11,32 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import os + +from cassandra.io.geventreactor import GeventConnection +from cassandra.io.libevreactor import LibevConnection +from cassandra.io.asyncorereactor import AsyncoreConnection +from cassandra.io.eventletreactor import EventletConnection +from cassandra.io.twistedreactor import TwistedConnection + +EVENT_LOOP_MANAGER = os.getenv('EVENT_LOOP_MANAGER', "gevent") +if EVENT_LOOP_MANAGER == "gevent": + import gevent.monkey + gevent.monkey.patch_all() + connection_class = GeventConnection +elif EVENT_LOOP_MANAGER == "eventlet": + from eventlet import monkey_patch + monkey_patch() + connection_class = EventletConnection +elif EVENT_LOOP_MANAGER == "async": + connection_class = AsyncoreConnection +elif EVENT_LOOP_MANAGER == "twisted": + connection_class = TwistedConnection +else: + connection_class = LibevConnection + +from cassandra.cluster import Cluster +Cluster.connection_class = connection_class try: import unittest2 as unittest @@ -18,7 +44,6 @@ import unittest # noqa from packaging.version import Version import logging -import os import socket import sys import time @@ -30,9 +55,8 @@ from cassandra import OperationTimedOut, ReadTimeout, ReadFailure, WriteTimeout, WriteFailure, AlreadyExists, \ InvalidRequest -from cassandra.cluster import Cluster + from cassandra.protocol import ConfigurationException -from cassandra.policies import RoundRobinPolicy try: from ccmlib.cluster import Cluster as CCMCluster From 9fdd93634ddfdc88cff02821045bf57ac3e276f8 Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 6 Apr 2017 16:59:19 -0400 Subject: [PATCH 1068/2005] Updated tox.ini and build.yaml for testing with python 3.6 --- build.yaml | 2 ++ tox.ini | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/build.yaml b/build.yaml index 5d36e56d56..424be9f58a 100644 --- a/build.yaml +++ b/build.yaml @@ -7,6 +7,8 @@ schedules: python: - 2.7 - 3.4 + - 3.6 + os: - ubuntu/trusty64 cassandra: diff --git a/tox.ini b/tox.ini index 3b68b73c8a..7e98d8e851 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py{27,33,34},pypy +envlist = py{27,33,34,36},pypy [base] deps = nose From 611daed724b316a54d381bb1b71d0242858b31c0 Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 11 Apr 2017 09:53:42 -0400 Subject: [PATCH 1069/2005] Updated build.yaml --- build.yaml | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/build.yaml b/build.yaml index 996ebf8346..19ad31059b 100644 --- a/build.yaml +++ b/build.yaml @@ -25,7 +25,7 @@ schedules: matrix: exclude: - pythoon: 3.4 - - cassandra: ['2.0', '2.1', '2.2', '3.0'] + cassandra: ['2.0', '2.1', '2.2', '3.0'] nightly_eventlet: schedule: nightly @@ -47,6 +47,16 @@ schedules: exclude: - cassandra: ['2.0', '2.1', '2.2', '3.0'] + nightly_twister: + schedule: nightly + branches: + include: [master] + env_vars: | + EVENT_LOOP_MANAGER='twisted' + matrix: + exclude: + - cassandra: ['2.0', '2.1', '2.2', '3.0'] + python: - 2.7 - 3.4 From 925deb9ded6371c9b743339620fb5dca5f9e1fc5 Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 11 Apr 2017 10:08:39 -0400 Subject: [PATCH 1070/2005] Added eventlet in the tox dependencies --- tox.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/tox.ini b/tox.ini index 7e98d8e851..06f2d64bab 100644 --- a/tox.ini +++ b/tox.ini @@ -7,6 +7,7 @@ deps = nose PyYAML six packaging + eventlet [testenv] deps = {[base]deps} From 17229e3181f1bd9c78d4c3a79a7b1a68c5b8876e Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 11 Apr 2017 14:02:49 -0400 Subject: [PATCH 1071/2005] version 3.9.0 --- CHANGELOG.rst | 22 ++++++++++++++++++++++ cassandra/__init__.py | 2 +- docs.yaml | 2 ++ 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index adba5264cc..f63ef99880 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,25 @@ +3.9.0 +===== + +Features +-------- +* cqlengine: remove elements by key from a map (PYTHON-688) + +Bug Fixes +--------- +* improve error handling when connecting to non-existent keyspace (PYTHON-665) +* Sockets associated with sessions not getting cleaned up on session.shutdown() (PYTHON-673) +* rare flake on integration.standard.test_cluster.ClusterTests.test_clone_shared_lbp (PYTHON-727) +* MontonicTimestampGenerator.__init__ ignores class defaults (PYTHON-728) +* race where callback or errback for request may not be called (PYTHON-733) +* cqlengine: model.update() should not update columns with a default value that hasn't changed (PYTHON-657) +* cqlengine: field value manager's explicit flag is True when queried back from cassandra (PYTHON-719) + +Other +----- +* Connection not closed in example_mapper (PYTHON-723) +* Remove mention of pre-2.0 C* versions from OSS 3.0+ docs (PYTHON-710) + 3.8.1 ===== March 16, 2017 diff --git a/cassandra/__init__.py b/cassandra/__init__.py index edb99c609e..76db7bbf25 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 8, 1, 'post0') +__version_info__ = (3, 9, 0) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs.yaml b/docs.yaml index 86cd85b496..e4c5c85779 100644 --- a/docs.yaml +++ b/docs.yaml @@ -11,6 +11,8 @@ sections: type: sphinx directory: docs versions: + - name: 3.9 + ref: 3.9-doc - name: 3.8 ref: 3.8-doc - name: 3.7 From 09a21cddaba70a37c53faadcac3f98a3c384be17 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 11 Apr 2017 14:28:49 -0400 Subject: [PATCH 1072/2005] post-release version (3.9.0) --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 76db7bbf25..c1c0350bc8 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 9, 0) +__version_info__ = (3, 9, 0, 'post0') __version__ = '.'.join(map(str, __version_info__)) From 6f677d25572132cdbc9f8f4cba40662a14da5b03 Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 11 Apr 2017 16:28:36 -0400 Subject: [PATCH 1073/2005] Set libev loop as default for tests --- tests/integration/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index f3200a5f0d..6cc309a5dc 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -19,7 +19,7 @@ from cassandra.io.eventletreactor import EventletConnection from cassandra.io.twistedreactor import TwistedConnection -EVENT_LOOP_MANAGER = os.getenv('EVENT_LOOP_MANAGER', "gevent") +EVENT_LOOP_MANAGER = os.getenv('EVENT_LOOP_MANAGER', "libev") if EVENT_LOOP_MANAGER == "gevent": import gevent.monkey gevent.monkey.patch_all() From 68fabee38d3dbc805c957501fcaddbcc4300cc17 Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 11 Apr 2017 14:27:49 -0400 Subject: [PATCH 1074/2005] Added Python 3.5 support in testing --- build.yaml | 1 + tests/unit/cython/utils.py | 5 ++++- tox.ini | 3 +-- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/build.yaml b/build.yaml index 424be9f58a..c1a0a76128 100644 --- a/build.yaml +++ b/build.yaml @@ -7,6 +7,7 @@ schedules: python: - 2.7 - 3.4 + - 3.5 - 3.6 os: diff --git a/tests/unit/cython/utils.py b/tests/unit/cython/utils.py index 916de13763..76d46ef8e8 100644 --- a/tests/unit/cython/utils.py +++ b/tests/unit/cython/utils.py @@ -13,7 +13,10 @@ # limitations under the License. from cassandra.cython_deps import HAVE_CYTHON, HAVE_NUMPY -from tests.integration import VERIFY_CYTHON +try: + from tests.integration import VERIFY_CYTHON +except ImportError: + VERIFY_CYTHON = False try: import unittest2 as unittest diff --git a/tox.ini b/tox.ini index 06f2d64bab..7204c882e3 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py{27,33,34,36},pypy +envlist = py{27,33,34,35,36},pypy [base] deps = nose @@ -7,7 +7,6 @@ deps = nose PyYAML six packaging - eventlet [testenv] deps = {[base]deps} From 6b3e34d3f4243031d4ef82d902ff8d606ddd4e17 Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 11 Apr 2017 19:00:52 -0400 Subject: [PATCH 1075/2005] Updated build.yaml --- build.yaml | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/build.yaml b/build.yaml index 9de03928b9..6cc2650d89 100644 --- a/build.yaml +++ b/build.yaml @@ -5,6 +5,9 @@ schedules: include: [master, /python.*/] env_vars: | EVENT_LOOP_MANAGER='libev' + matrix: + exclude: + - python: [3.4, 3.6] nightly_libev: schedule: nightly @@ -14,7 +17,7 @@ schedules: EVENT_LOOP_MANAGER='libev' matrix: exclude: - - cassandra: ['2.0', '2.1', '2.2', '3.0'] + - cassandra: ['2.0', '2.1', '3.0'] nightly_gevent: schedule: nightly @@ -24,8 +27,8 @@ schedules: EVENT_LOOP_MANAGER='gevent' matrix: exclude: - - pythoon: 3.4 - cassandra: ['2.0', '2.1', '2.2', '3.0'] + - python: 3.4 + cassandra: ['2.0', '2.1', '3.0'] nightly_eventlet: schedule: nightly @@ -35,7 +38,7 @@ schedules: EVENT_LOOP_MANAGER='eventlet' matrix: exclude: - - cassandra: ['2.0', '2.1', '2.2', '3.0'] + - cassandra: ['2.0', '2.1', '3.0'] nightly_async: schedule: nightly @@ -45,7 +48,7 @@ schedules: EVENT_LOOP_MANAGER='async' matrix: exclude: - - cassandra: ['2.0', '2.1', '2.2', '3.0'] + - cassandra: ['2.0', '2.1', '3.0'] nightly_twister: schedule: nightly @@ -55,7 +58,7 @@ schedules: EVENT_LOOP_MANAGER='twisted' matrix: exclude: - - cassandra: ['2.0', '2.1', '2.2', '3.0'] + - cassandra: ['2.0', '2.1', '3.0'] python: - 2.7 From f81b3e56e7bbcdde4062d3287b5371d1fff43ae7 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 13 Apr 2017 16:01:28 -0400 Subject: [PATCH 1076/2005] add prebuild steps for documentor --- docs.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs.yaml b/docs.yaml index e4c5c85779..e8adac73c5 100644 --- a/docs.yaml +++ b/docs.yaml @@ -10,6 +10,8 @@ sections: prefix: / type: sphinx directory: docs +prebuilds: + - python setup.py build_ext --inplace --force --no-murmur3 --no-cython versions: - name: 3.9 ref: 3.9-doc From 2b20575e62e8ff111751d09a3cfe712ffa6f70b8 Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 17 Apr 2017 10:22:53 -0400 Subject: [PATCH 1077/2005] Updated build.yaml --- build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.yaml b/build.yaml index 6cc2650d89..8a7efd8282 100644 --- a/build.yaml +++ b/build.yaml @@ -27,7 +27,7 @@ schedules: EVENT_LOOP_MANAGER='gevent' matrix: exclude: - - python: 3.4 + - python: [3.4, 3.6] cassandra: ['2.0', '2.1', '3.0'] nightly_eventlet: From d343aa53fd1d116ae78ffceef7a0de7c68e1c27c Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 14 Apr 2017 15:06:44 -0400 Subject: [PATCH 1078/2005] Acknowledge comments from the review, updated Gevent and Eventlet unit tests --- appveyor/run_test.ps1 | 1 + tests/__init__.py | 2 +- tests/unit/io/test_eventletreactor.py | 30 +++++++++++++-------------- tests/unit/io/test_geventreactor.py | 24 +++++++++++---------- tox.ini | 1 - 5 files changed, 29 insertions(+), 29 deletions(-) diff --git a/appveyor/run_test.ps1 b/appveyor/run_test.ps1 index 55774ee9af..9e809a3245 100644 --- a/appveyor/run_test.ps1 +++ b/appveyor/run_test.ps1 @@ -16,6 +16,7 @@ nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results. $env:MONKEY_PATCH_LOOP=1 nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml .\tests\unit\io\test_geventreactor.py nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml .\tests\unit\io\test_eventletreactor.py +Remove-Item $env:MONKEY_PATCH_LOOP echo "uploading unit results" $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\unit_results.xml)) diff --git a/tests/__init__.py b/tests/__init__.py index feba16fb9c..1dec045fd3 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -59,7 +59,7 @@ def is_monkey_patched(): return is_gevent_monkey_patched() or is_eventlet_monkey_patched() -MONKEY_PATCH_LOOP = bool(os.getenv('MONKEY_PATCH_LOOP', False) == "1") +MONKEY_PATCH_LOOP = bool(os.getenv('MONKEY_PATCH_LOOP', False)) notwindows = unittest.skipUnless(not "Windows" in platform.system(), "This test is not adequate for windows") notpypy = unittest.skipUnless(not platform.python_implementation() == 'PyPy', "This tests is not suitable for pypy") diff --git a/tests/unit/io/test_eventletreactor.py b/tests/unit/io/test_eventletreactor.py index 69ee23e405..040685e306 100644 --- a/tests/unit/io/test_eventletreactor.py +++ b/tests/unit/io/test_eventletreactor.py @@ -19,31 +19,29 @@ import unittest # noqa from tests.unit.io.utils import TimerConnectionTests -from tests.unit.io.eventlet_utils import restore_saved_module -from tests import notpypy -from tests import notmonkeypatch +from tests import notpypy, MONKEY_PATCH_LOOP, notmonkeypatch -import time -from eventlet import monkey_patch, kill +from eventlet import monkey_patch try: from cassandra.io.eventletreactor import EventletConnection except ImportError: EventletConnection = None # noqa -@unittest.skipUnless(EventletConnection is not None, "Skpping the eventlet tests because it's not installed") -@notmonkeypatch # There are some issues with some versions of pypy and eventlet @notpypy +@unittest.skipIf(EventletConnection is None, "Skpping the eventlet tests because it's not installed") +@notmonkeypatch class EventletTimerTest(unittest.TestCase, TimerConnectionTests): - - def setUp(self): - self.connection_class = EventletConnection - # We only to patch the time module - monkey_patch(time=True) + @classmethod + def setUpClass(cls): + # This is run even though the class is skipped, so we need + # to make sure no monkey patching is happening + if not MONKEY_PATCH_LOOP: + return + monkey_patch() + cls.connection_class = EventletConnection EventletConnection.initialize_reactor() - def tearDown(self): - kill(EventletConnection._timeout_watcher) - EventletConnection._timers = None - restore_saved_module(time) + # There is no unpatching because there is not a clear way + # of doing it reliably diff --git a/tests/unit/io/test_geventreactor.py b/tests/unit/io/test_geventreactor.py index ad7f9de739..1b2accf575 100644 --- a/tests/unit/io/test_geventreactor.py +++ b/tests/unit/io/test_geventreactor.py @@ -19,24 +19,26 @@ from tests.unit.io.utils import TimerConnectionTests -from tests import notmonkeypatch +from tests import MONKEY_PATCH_LOOP, notmonkeypatch try: from cassandra.io.geventreactor import GeventConnection import gevent.monkey - from tests.unit.io.gevent_utils import restore_saved_module except ImportError: GeventConnection = None # noqa -@unittest.skipUnless(GeventConnection is not None, "Skpping the gevent tests because it's not installed") + +@unittest.skipIf(GeventConnection is None, "Skpping the gevent tests because it's not installed") @notmonkeypatch class GeventTimerTest(unittest.TestCase, TimerConnectionTests): - - def setUp(self): - self.connection_class = GeventConnection - #We only to patch the time module - gevent.monkey.patch_time() + @classmethod + def setUpClass(cls): + # This is run even though the class is skipped, so we need + # to make sure no monkey patching is happening + if not MONKEY_PATCH_LOOP: + return + gevent.monkey.patch_all() + cls.connection_class = GeventConnection GeventConnection.initialize_reactor() - def tearDown(self): - restore_saved_module("time") - GeventConnection._timers = None \ No newline at end of file + # There is no unpatching because there is not a clear way + # of doing it reliably diff --git a/tox.ini b/tox.ini index 9b22131533..f55966d1d5 100644 --- a/tox.ini +++ b/tox.ini @@ -16,7 +16,6 @@ deps = {[base]deps} setenv = LIBEV_EMBED=0 CARES_EMBED=0 - MONKEY_PATCH_LOOP=0 changedir = {envtmpdir} commands = nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/ From a35bd194225467558d3b432f071e6858da65d2c1 Mon Sep 17 00:00:00 2001 From: Vitaliy Kozlovskiy Date: Wed, 19 Apr 2017 20:43:41 +0300 Subject: [PATCH 1079/2005] PEP 289 (just 1 line) (#739) --- cassandra/cqlengine/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 29bd8f71dc..b57187e984 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -504,7 +504,7 @@ def _can_update(self): if not self._is_persisted: return False - return all([not self._values[k].changed for k in self._primary_keys]) + return all(not self._values[k].changed for k in self._primary_keys) @classmethod def _get_keyspace(cls): From ba8728a3992e64d20569cb3265c41657a1454340 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 19 Apr 2017 13:46:42 -0500 Subject: [PATCH 1080/2005] update six dependency (#709) table extensions use viewkeys --- requirements.txt | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 54bd98a1c2..25c0d62c73 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -six >=1.6 +six >=1.9 futures <=2.2.0 # Futures is not required for Python 3, but it works up through 2.2.0 (after which it introduced breaking syntax). # This is left here to make sure install -r works with any runtime. When installing via setup.py, futures is omitted diff --git a/setup.py b/setup.py index 0540478b5b..e0eed9579a 100644 --- a/setup.py +++ b/setup.py @@ -396,7 +396,7 @@ def run_setup(extensions): else: sys.stderr.write("Bypassing Cython setup requirement\n") - dependencies = ['six >=1.6'] + dependencies = ['six >=1.9'] if not PY3: dependencies.append('futures') From 2e09a832d34e81b555ada4c1eec3a6a5d6b49781 Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 20 Apr 2017 14:35:18 -0400 Subject: [PATCH 1081/2005] Updated build.yaml to have a clean PYTHONPATH --- build.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/build.yaml b/build.yaml index 8a7efd8282..6f57d700a3 100644 --- a/build.yaml +++ b/build.yaml @@ -82,6 +82,7 @@ build: - script: | export JAVA_HOME=$CCM_JAVA_HOME export PATH=$JAVA_HOME/bin:$PATH + export PYTHONPATH="" pip install git+https://github.com/pcmanus/ccm.git # Install dependencies From f1058cbbe5e2e67e39f3cc70cb6f28690ebf25e0 Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 20 Apr 2017 16:58:35 -0400 Subject: [PATCH 1082/2005] Updated travis.yml, tox.ini --- .travis.yml | 25 +++++++++++++------------ tox.ini | 15 +-------------- 2 files changed, 14 insertions(+), 26 deletions(-) diff --git a/.travis.yml b/.travis.yml index 190dde47ba..622ddee052 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,16 +1,16 @@ language: python -python: 2.7 +python: + - "2.7" + - "3.3" + - "3.4" + - "3.5" + - "3.6" + - "pypy-5.4.1" + sudo: false env: - - TOX_ENV=py27 CASS_VER=12 CASS_DRIVER_NO_CYTHON=1 - - TOX_ENV=py27 CASS_VER=20 CASS_DRIVER_NO_CYTHON=1 - - TOX_ENV=py27 CASS_VER=21 CASS_DRIVER_NO_CYTHON=1 - - TOX_ENV=py27 CASS_VER=21 - - TOX_ENV=py33 CASS_VER=21 CASS_DRIVER_NO_CYTHON=1 - - TOX_ENV=py33 CASS_VER=21 - - TOX_ENV=py34 CASS_VER=21 CASS_DRIVER_NO_CYTHON=1 - - TOX_ENV=py34 CASS_VER=21 - - TOX_ENV=pypy CASS_VER=21 CASS_DRIVER_NO_CYTHON=1 + - CASS_DRIVER_NO_CYTHON=1 + - addons: apt: @@ -22,7 +22,8 @@ addons: - libev-dev install: - - pip install tox + - pip install tox-travis script: - - tox -e $TOX_ENV + - tox + - tox -e patched_loops diff --git a/tox.ini b/tox.ini index 8c2b4dab8f..53526cccb4 100644 --- a/tox.ini +++ b/tox.ini @@ -20,20 +20,7 @@ changedir = {envtmpdir} commands = nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/ -[testenv:py27] -deps = {[base]deps} - gevent - -setenv = LIBEV_EMBED=0 - CARES_EMBED=0 - MONKEY_PATCH_LOOP=1 -changedir = {envtmpdir} -commands = - nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/io/test_eventletreactor.py - nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/io/test_geventreactor.py - - -[testenv:py34] +[testenv:patched_loops] deps = {[base]deps} gevent From 6fb7c6f20502d16c8c752941528ee3a20951b38d Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 24 Apr 2017 12:09:10 -0400 Subject: [PATCH 1083/2005] Make sure all trace mutations are there before considering it as complete --- cassandra/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/query.py b/cassandra/query.py index 9865fb85b0..2edaf3b316 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -925,7 +925,7 @@ def populate(self, max_wait=2.0, wait_for_complete=True, query_cl=None): session_results = self._execute( SimpleStatement(self._SELECT_SESSIONS_FORMAT, consistency_level=query_cl), (self.trace_id,), time_spent, max_wait) - is_complete = session_results and session_results[0].duration is not None + is_complete = session_results and session_results[0].duration is not None and session_results[0].started_at is not None if not session_results or (wait_for_complete and not is_complete): time.sleep(self._BASE_RETRY_SLEEP * (2 ** attempt)) attempt += 1 From 5b143c9c128071cdbc8db40a4e38b13c9b550b9a Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 24 Apr 2017 12:38:37 -0400 Subject: [PATCH 1084/2005] add comment --- cassandra/query.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cassandra/query.py b/cassandra/query.py index 2edaf3b316..24a4a0ee05 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -925,6 +925,7 @@ def populate(self, max_wait=2.0, wait_for_complete=True, query_cl=None): session_results = self._execute( SimpleStatement(self._SELECT_SESSIONS_FORMAT, consistency_level=query_cl), (self.trace_id,), time_spent, max_wait) + # PYTHON-730: There is race condition that the duration mutation is written before started_at the for fast queries is_complete = session_results and session_results[0].duration is not None and session_results[0].started_at is not None if not session_results or (wait_for_complete and not is_complete): time.sleep(self._BASE_RETRY_SLEEP * (2 ** attempt)) From 4b7330ddf7441f839be02dc9dd3eecb3ff7909a3 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 21 Apr 2017 17:16:06 -0400 Subject: [PATCH 1085/2005] fixes for ExponentialReconnectionPolicy - documentation specified floats for base_delay and max_delay; fix tests to match - fix issue from PYTHON-707 --- cassandra/policies.py | 12 ++++++++++-- tests/unit/test_policies.py | 18 ++++++++++++++---- 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/cassandra/policies.py b/cassandra/policies.py index 9be0125f8f..108dcbaab5 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -564,9 +564,17 @@ def __init__(self, base_delay, max_delay, max_attempts=64): self.max_attempts = max_attempts def new_schedule(self): - i = 0 + i, overflowed = 0, False while self.max_attempts is None or i < self.max_attempts: - yield min(self.base_delay * (2 ** i), self.max_delay) + if overflowed: + yield self.max_delay + else: + try: + yield min(self.base_delay * (2 ** i), self.max_delay) + except OverflowError: + overflowed = True + yield self.max_delay + i += 1 diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 494b2e05b3..f1f4995d89 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -885,8 +885,8 @@ def test_bad_vals(self): self.assertRaises(ValueError, ExponentialReconnectionPolicy, 1, 2,-1) def test_schedule_no_max(self): - base_delay = 2 - max_delay = 100 + base_delay = 2.0 + max_delay = 100.0 test_iter = 10000 policy = ExponentialReconnectionPolicy(base_delay=base_delay, max_delay=max_delay, max_attempts=None) sched_slice = list(islice(policy.new_schedule(), 0, test_iter)) @@ -895,8 +895,8 @@ def test_schedule_no_max(self): self.assertEqual(len(sched_slice), test_iter) def test_schedule_with_max(self): - base_delay = 2 - max_delay = 100 + base_delay = 2.0 + max_delay = 100.0 max_attempts = 64 policy = ExponentialReconnectionPolicy(base_delay=base_delay, max_delay=max_delay, max_attempts=max_attempts) schedule = list(policy.new_schedule()) @@ -909,6 +909,16 @@ def test_schedule_with_max(self): else: self.assertEqual(delay, max_delay) + def test_schedule_exactly_one_attempt(self): + base_delay = 2.0 + max_delay = 100.0 + max_attempts = 1 + policy = ExponentialReconnectionPolicy( + base_delay=base_delay, max_delay=max_delay, max_attempts=max_attempts + ) + self.assertEqual(len(list(policy.new_schedule())), 1) + + ONE = ConsistencyLevel.ONE From 6a8fc3c814815787c1f7aaedd5c34a683cf8be42 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 24 Apr 2017 14:04:24 -0400 Subject: [PATCH 1086/2005] update supported server versions in README --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 01fd6d9fe3..d6509935d5 100644 --- a/README.rst +++ b/README.rst @@ -4,7 +4,7 @@ DataStax Python Driver for Apache Cassandra .. image:: https://travis-ci.org/datastax/python-driver.png?branch=master :target: https://travis-ci.org/datastax/python-driver -A modern, `feature-rich `_ and highly-tunable Python client library for Apache Cassandra (1.2+) and DataStax Enterprise (3.1+) using exclusively Cassandra's binary protocol and Cassandra Query Language v3. +A modern, `feature-rich `_ and highly-tunable Python client library for Apache Cassandra (2.1+) using exclusively Cassandra's binary protocol and Cassandra Query Language v3. The driver supports Python 2.7, 3.3, and 3.4. From d553848e7b38a601e5cb0e1d6051fbaddeebf56b Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 24 Apr 2017 14:05:39 -0400 Subject: [PATCH 1087/2005] update supported Python versions in README --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index d6509935d5..1f1e0bdf43 100644 --- a/README.rst +++ b/README.rst @@ -6,7 +6,7 @@ DataStax Python Driver for Apache Cassandra A modern, `feature-rich `_ and highly-tunable Python client library for Apache Cassandra (2.1+) using exclusively Cassandra's binary protocol and Cassandra Query Language v3. -The driver supports Python 2.7, 3.3, and 3.4. +The driver supports Python 2.7, 3.3, 3.4, 3.5, and 3.6. Feedback Requested ------------------ From a48f0857cddc51e6a2cc49ac4dcb1ec15b4cdd46 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 24 Apr 2017 14:06:02 -0400 Subject: [PATCH 1088/2005] update copyright --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 1f1e0bdf43..3ea0bf91cf 100644 --- a/README.rst +++ b/README.rst @@ -72,7 +72,7 @@ you can use `freenode's web-based client Date: Mon, 24 Apr 2017 14:11:48 -0400 Subject: [PATCH 1089/2005] refer users to Academy Slack --- README.rst | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/README.rst b/README.rst index 3ea0bf91cf..406da617bd 100644 --- a/README.rst +++ b/README.rst @@ -63,12 +63,9 @@ If you would like to contribute, please feel free to open a pull request. Getting Help ------------ -Your two best options for getting help with the driver are the +Your best options for getting help with the driver are the `mailing list `_ -and the IRC channel. - -For IRC, use the #datastax-drivers channel on irc.freenode.net. If you don't have an IRC client, -you can use `freenode's web-based client `_. +and the ``#datastax-drivers`` channel in the `DataStax Academy Slack `_. License ------- From ac946ea51c9bed38b8410a46574f2987c0af287d Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 24 Apr 2017 17:04:25 -0400 Subject: [PATCH 1090/2005] update year in licenses --- benchmarks/base.py | 2 +- benchmarks/callback_full_pipeline.py | 2 +- benchmarks/future_batches.py | 2 +- benchmarks/future_full_pipeline.py | 2 +- benchmarks/future_full_throttle.py | 2 +- benchmarks/sync.py | 2 +- cassandra/__init__.py | 2 +- cassandra/auth.py | 2 +- cassandra/buffer.pxd | 2 +- cassandra/bytesio.pxd | 2 +- cassandra/bytesio.pyx | 2 +- cassandra/cluster.py | 2 +- cassandra/cmurmur3.c | 2 +- cassandra/concurrent.py | 2 +- cassandra/connection.py | 2 +- cassandra/cqlengine/__init__.py | 2 +- cassandra/cqlengine/columns.py | 2 +- cassandra/cqlengine/connection.py | 2 +- cassandra/cqlengine/functions.py | 2 +- cassandra/cqlengine/management.py | 2 +- cassandra/cqlengine/models.py | 2 +- cassandra/cqlengine/named.py | 2 +- cassandra/cqlengine/operators.py | 2 +- cassandra/cqlengine/query.py | 2 +- cassandra/cqlengine/statements.py | 2 +- cassandra/cqltypes.py | 2 +- cassandra/cython_marshal.pyx | 2 +- cassandra/cython_utils.pyx | 2 +- cassandra/deserializers.pxd | 2 +- cassandra/deserializers.pyx | 2 +- cassandra/encoder.py | 2 +- cassandra/io/__init__.py | 2 +- cassandra/io/asyncorereactor.py | 2 +- cassandra/io/eventletreactor.py | 2 +- cassandra/io/geventreactor.py | 2 +- cassandra/io/libevreactor.py | 2 +- cassandra/io/twistedreactor.py | 2 +- cassandra/ioutils.pyx | 2 +- cassandra/marshal.py | 2 +- cassandra/metadata.py | 2 +- cassandra/metrics.py | 2 +- cassandra/numpy_parser.pyx | 2 +- cassandra/obj_parser.pyx | 2 +- cassandra/parsing.pxd | 2 +- cassandra/parsing.pyx | 2 +- cassandra/policies.py | 2 +- cassandra/pool.py | 2 +- cassandra/protocol.py | 2 +- cassandra/query.py | 2 +- cassandra/row_parser.pyx | 2 +- cassandra/timestamps.py | 2 +- cassandra/tuple.pxd | 2 +- cassandra/type_codes.pxd | 2 +- cassandra/util.py | 2 +- docs/conf.py | 2 +- example_core.py | 2 +- example_mapper.py | 2 +- examples/request_init_listener.py | 2 +- setup.py | 2 +- tests/__init__.py | 2 +- tests/integration/__init__.py | 2 +- tests/integration/cqlengine/__init__.py | 2 +- tests/integration/cqlengine/base.py | 2 +- tests/integration/cqlengine/columns/__init__.py | 2 +- tests/integration/cqlengine/columns/test_container_columns.py | 2 +- tests/integration/cqlengine/columns/test_counter_column.py | 2 +- tests/integration/cqlengine/columns/test_static_column.py | 2 +- tests/integration/cqlengine/columns/test_validation.py | 2 +- tests/integration/cqlengine/columns/test_value_io.py | 2 +- tests/integration/cqlengine/connections/__init__.py | 2 +- tests/integration/cqlengine/connections/test_connection.py | 2 +- tests/integration/cqlengine/management/__init__.py | 2 +- .../cqlengine/management/test_compaction_settings.py | 2 +- tests/integration/cqlengine/management/test_management.py | 2 +- tests/integration/cqlengine/model/__init__.py | 2 +- tests/integration/cqlengine/model/test_class_construction.py | 2 +- tests/integration/cqlengine/model/test_equality_operations.py | 2 +- tests/integration/cqlengine/model/test_model.py | 2 +- tests/integration/cqlengine/model/test_model_io.py | 2 +- tests/integration/cqlengine/model/test_polymorphism.py | 2 +- tests/integration/cqlengine/model/test_udts.py | 2 +- tests/integration/cqlengine/model/test_updates.py | 2 +- tests/integration/cqlengine/model/test_value_lists.py | 2 +- tests/integration/cqlengine/operators/__init__.py | 2 +- tests/integration/cqlengine/operators/test_where_operators.py | 2 +- tests/integration/cqlengine/query/__init__.py | 2 +- tests/integration/cqlengine/query/test_batch_query.py | 2 +- tests/integration/cqlengine/query/test_datetime_queries.py | 2 +- tests/integration/cqlengine/query/test_named.py | 2 +- tests/integration/cqlengine/query/test_queryoperators.py | 2 +- tests/integration/cqlengine/query/test_queryset.py | 2 +- tests/integration/cqlengine/query/test_updates.py | 2 +- tests/integration/cqlengine/statements/__init__.py | 2 +- .../integration/cqlengine/statements/test_assignment_clauses.py | 2 +- tests/integration/cqlengine/statements/test_base_clause.py | 2 +- tests/integration/cqlengine/statements/test_base_statement.py | 2 +- tests/integration/cqlengine/statements/test_delete_statement.py | 2 +- tests/integration/cqlengine/statements/test_insert_statement.py | 2 +- tests/integration/cqlengine/statements/test_select_statement.py | 2 +- tests/integration/cqlengine/statements/test_update_statement.py | 2 +- tests/integration/cqlengine/statements/test_where_clause.py | 2 +- tests/integration/cqlengine/test_batch_query.py | 2 +- tests/integration/cqlengine/test_connections.py | 2 +- tests/integration/cqlengine/test_consistency.py | 2 +- tests/integration/cqlengine/test_context_query.py | 2 +- tests/integration/cqlengine/test_ifexists.py | 2 +- tests/integration/cqlengine/test_ifnotexists.py | 2 +- tests/integration/cqlengine/test_lwt_conditional.py | 2 +- tests/integration/cqlengine/test_timestamp.py | 2 +- tests/integration/cqlengine/test_ttl.py | 2 +- tests/integration/datatype_utils.py | 2 +- tests/integration/long/__init__.py | 2 +- tests/integration/long/test_consistency.py | 2 +- tests/integration/long/test_failure_types.py | 2 +- tests/integration/long/test_ipv6.py | 2 +- tests/integration/long/test_large_data.py | 2 +- tests/integration/long/test_loadbalancingpolicies.py | 2 +- tests/integration/long/test_schema.py | 2 +- tests/integration/long/test_ssl.py | 2 +- tests/integration/long/utils.py | 2 +- tests/integration/standard/__init__.py | 2 +- tests/integration/standard/test_authentication.py | 2 +- tests/integration/standard/test_client_warnings.py | 2 +- tests/integration/standard/test_cluster.py | 2 +- tests/integration/standard/test_concurrent.py | 2 +- tests/integration/standard/test_connection.py | 2 +- tests/integration/standard/test_control_connection.py | 2 +- tests/integration/standard/test_custom_payload.py | 2 +- tests/integration/standard/test_custom_protocol_handler.py | 2 +- tests/integration/standard/test_metadata.py | 2 +- tests/integration/standard/test_metrics.py | 2 +- tests/integration/standard/test_policies.py | 2 +- tests/integration/standard/test_prepared_statements.py | 2 +- tests/integration/standard/test_query.py | 2 +- tests/integration/standard/test_query_paging.py | 2 +- tests/integration/standard/test_routing.py | 2 +- tests/integration/standard/test_row_factories.py | 2 +- tests/integration/standard/test_types.py | 2 +- tests/integration/standard/test_udts.py | 2 +- tests/integration/util.py | 2 +- tests/stress_tests/test_load.py | 2 +- tests/stress_tests/test_multi_inserts.py | 2 +- tests/unit/__init__.py | 2 +- tests/unit/cqlengine/__init__.py | 2 +- tests/unit/cqlengine/test_columns.py | 2 +- tests/unit/cqlengine/test_connection.py | 2 +- tests/unit/cqlengine/test_udt.py | 2 +- tests/unit/cython/__init__.py | 2 +- tests/unit/cython/bytesio_testhelper.pyx | 2 +- tests/unit/cython/test_bytesio.py | 2 +- tests/unit/cython/test_types.py | 2 +- tests/unit/cython/test_utils.py | 2 +- tests/unit/cython/types_testhelper.pyx | 2 +- tests/unit/cython/utils.py | 2 +- tests/unit/cython/utils_testhelper.pyx | 2 +- tests/unit/io/__init__.py | 2 +- tests/unit/io/eventlet_utils.py | 2 +- tests/unit/io/gevent_utils.py | 2 +- tests/unit/io/test_asyncorereactor.py | 2 +- tests/unit/io/test_eventletreactor.py | 2 +- tests/unit/io/test_geventreactor.py | 2 +- tests/unit/io/test_libevreactor.py | 2 +- tests/unit/io/test_libevtimer.py | 2 +- tests/unit/io/test_twistedreactor.py | 2 +- tests/unit/io/utils.py | 2 +- tests/unit/test_cluster.py | 2 +- tests/unit/test_concurrent.py | 2 +- tests/unit/test_connection.py | 2 +- tests/unit/test_control_connection.py | 2 +- tests/unit/test_exception.py | 2 +- tests/unit/test_host_connection_pool.py | 2 +- tests/unit/test_marshalling.py | 2 +- tests/unit/test_metadata.py | 2 +- tests/unit/test_orderedmap.py | 2 +- tests/unit/test_parameter_binding.py | 2 +- tests/unit/test_policies.py | 2 +- tests/unit/test_query.py | 2 +- tests/unit/test_response_future.py | 2 +- tests/unit/test_resultset.py | 2 +- tests/unit/test_sortedset.py | 2 +- tests/unit/test_time_util.py | 2 +- tests/unit/test_timestamps.py | 2 +- tests/unit/test_types.py | 2 +- tests/unit/test_util_types.py | 2 +- 184 files changed, 184 insertions(+), 184 deletions(-) diff --git a/benchmarks/base.py b/benchmarks/base.py index 10cce0c15c..fb97d64dc3 100644 --- a/benchmarks/base.py +++ b/benchmarks/base.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmarks/callback_full_pipeline.py b/benchmarks/callback_full_pipeline.py index 5f5cc35819..68b54f4d52 100644 --- a/benchmarks/callback_full_pipeline.py +++ b/benchmarks/callback_full_pipeline.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmarks/future_batches.py b/benchmarks/future_batches.py index 5600a3232d..d3c47e280c 100644 --- a/benchmarks/future_batches.py +++ b/benchmarks/future_batches.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmarks/future_full_pipeline.py b/benchmarks/future_full_pipeline.py index ecc2ce6fc6..58105f1197 100644 --- a/benchmarks/future_full_pipeline.py +++ b/benchmarks/future_full_pipeline.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmarks/future_full_throttle.py b/benchmarks/future_full_throttle.py index 8fbd06b500..d42c98be91 100644 --- a/benchmarks/future_full_throttle.py +++ b/benchmarks/future_full_throttle.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmarks/sync.py b/benchmarks/sync.py index 531e41fbe8..a9ea19ce58 100644 --- a/benchmarks/sync.py +++ b/benchmarks/sync.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/__init__.py b/cassandra/__init__.py index c1c0350bc8..fc271d8eba 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/auth.py b/cassandra/auth.py index b562728a24..773c77ac94 100644 --- a/cassandra/auth.py +++ b/cassandra/auth.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/buffer.pxd b/cassandra/buffer.pxd index f9976f09aa..248ae334ea 100644 --- a/cassandra/buffer.pxd +++ b/cassandra/buffer.pxd @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/bytesio.pxd b/cassandra/bytesio.pxd index a0bb083fac..6bc7510ec4 100644 --- a/cassandra/bytesio.pxd +++ b/cassandra/bytesio.pxd @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/bytesio.pyx b/cassandra/bytesio.pyx index 3334697023..eaa3d861ec 100644 --- a/cassandra/bytesio.pyx +++ b/cassandra/bytesio.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 87213e3803..397e3cd511 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cmurmur3.c b/cassandra/cmurmur3.c index 2f4cfa0fd6..b28ddb54cb 100644 --- a/cassandra/cmurmur3.c +++ b/cassandra/cmurmur3.c @@ -6,7 +6,7 @@ * * Copyright (c) 2011 Austin Appleby (Murmur3 routine) * Copyright (c) 2011 Patrick Hensley (Python wrapper, packaging) - * Copyright 2013-2016 DataStax (Minor modifications to match Cassandra's MM3 hashes) + * Copyright 2013-2017 DataStax (Minor modifications to match Cassandra's MM3 hashes) * */ diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index a08c0292e3..204bef964b 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/connection.py b/cassandra/connection.py index 399321fab1..216b79058c 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/__init__.py b/cassandra/cqlengine/__init__.py index 48b9da7bfb..2783513843 100644 --- a/cassandra/cqlengine/__init__.py +++ b/cassandra/cqlengine/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index bfa6a14a18..92d7c470e3 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 6ac389c7ee..8b7649988e 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/functions.py b/cassandra/cqlengine/functions.py index ee0fd10a4c..761d5437a3 100644 --- a/cassandra/cqlengine/functions.py +++ b/cassandra/cqlengine/functions.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index fe313214bb..a4d7e59084 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index b57187e984..0b6ad89a10 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/named.py b/cassandra/cqlengine/named.py index 14d14c402e..9acee17f9f 100644 --- a/cassandra/cqlengine/named.py +++ b/cassandra/cqlengine/named.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/operators.py b/cassandra/cqlengine/operators.py index c4f9828f5b..dcaf1081eb 100644 --- a/cassandra/cqlengine/operators.py +++ b/cassandra/cqlengine/operators.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 1d83c19b08..98154cbfb7 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 90e7bb3f39..b05a59059a 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 7604524729..735bf2a9e8 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cython_marshal.pyx b/cassandra/cython_marshal.pyx index 61b6daccc1..1c94921ccd 100644 --- a/cassandra/cython_marshal.pyx +++ b/cassandra/cython_marshal.pyx @@ -1,6 +1,6 @@ # -- cython: profile=True # -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cython_utils.pyx b/cassandra/cython_utils.pyx index 3c6fae036b..551e6a6a7b 100644 --- a/cassandra/cython_utils.pyx +++ b/cassandra/cython_utils.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/deserializers.pxd b/cassandra/deserializers.pxd index 0846417054..2fddf7cd4c 100644 --- a/cassandra/deserializers.pxd +++ b/cassandra/deserializers.pxd @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/deserializers.pyx b/cassandra/deserializers.pyx index 6c29ebdea1..a0d6c7a755 100644 --- a/cassandra/deserializers.pyx +++ b/cassandra/deserializers.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/encoder.py b/cassandra/encoder.py index 98d562d1bc..805b1917fe 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/io/__init__.py b/cassandra/io/__init__.py index 87fc3685e0..077defb32e 100644 --- a/cassandra/io/__init__.py +++ b/cassandra/io/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 9fd0d15a4e..66fce9bb49 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index 792c3aacd9..d85bef5d6e 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -1,5 +1,5 @@ # Copyright 2014 Symantec Corporation -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index 7f05e2ba73..53518f5531 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 39f871a135..5d163484f0 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/io/twistedreactor.py b/cassandra/io/twistedreactor.py index ccd976bd2d..ec872fdaff 100644 --- a/cassandra/io/twistedreactor.py +++ b/cassandra/io/twistedreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/ioutils.pyx b/cassandra/ioutils.pyx index c59a6a0cf4..5f56503140 100644 --- a/cassandra/ioutils.pyx +++ b/cassandra/ioutils.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/marshal.py b/cassandra/marshal.py index 1d7e29d58a..4b3faee627 100644 --- a/cassandra/marshal.py +++ b/cassandra/marshal.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 96b70ae83b..1a935db84e 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/metrics.py b/cassandra/metrics.py index d0c5b9e39c..deb6f07031 100644 --- a/cassandra/metrics.py +++ b/cassandra/metrics.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/numpy_parser.pyx b/cassandra/numpy_parser.pyx index ed755d00a4..fbb040102d 100644 --- a/cassandra/numpy_parser.pyx +++ b/cassandra/numpy_parser.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/obj_parser.pyx b/cassandra/obj_parser.pyx index 08dea2224e..d709a36dbf 100644 --- a/cassandra/obj_parser.pyx +++ b/cassandra/obj_parser.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/parsing.pxd b/cassandra/parsing.pxd index abfc74d12d..49fa8b6d0e 100644 --- a/cassandra/parsing.pxd +++ b/cassandra/parsing.pxd @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/parsing.pyx b/cassandra/parsing.pyx index 06cfe0bb8f..4fb033a33e 100644 --- a/cassandra/parsing.pyx +++ b/cassandra/parsing.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/policies.py b/cassandra/policies.py index 9be0125f8f..8a1998ff4c 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/pool.py b/cassandra/pool.py index e570b0154b..4784be5773 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 2be5c5654f..099dd601c9 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/query.py b/cassandra/query.py index 24a4a0ee05..4e3497485e 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/row_parser.pyx b/cassandra/row_parser.pyx index 0377e1224a..c85da3d5fb 100644 --- a/cassandra/row_parser.pyx +++ b/cassandra/row_parser.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/timestamps.py b/cassandra/timestamps.py index 9dfe807494..2ebf0b7f6c 100644 --- a/cassandra/timestamps.py +++ b/cassandra/timestamps.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/tuple.pxd b/cassandra/tuple.pxd index 840cb7eb0b..514bb52984 100644 --- a/cassandra/tuple.pxd +++ b/cassandra/tuple.pxd @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/type_codes.pxd b/cassandra/type_codes.pxd index b7e491f095..7078b61c6d 100644 --- a/cassandra/type_codes.pxd +++ b/cassandra/type_codes.pxd @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/util.py b/cassandra/util.py index b73baab7f3..822ea1ab63 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/docs/conf.py b/docs/conf.py index 167c7bd89b..99cc4c7e25 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -43,7 +43,7 @@ # General information about the project. project = u'Cassandra Driver' -copyright = u'2013-2016 DataStax' +copyright = u'2013-2017 DataStax' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/example_core.py b/example_core.py index 3235c79bad..bed7c259f2 100644 --- a/example_core.py +++ b/example_core.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/example_mapper.py b/example_mapper.py index 7cb580e890..9263c61c1e 100755 --- a/example_mapper.py +++ b/example_mapper.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/request_init_listener.py b/examples/request_init_listener.py index 7d662b151d..80616eaa44 100644 --- a/examples/request_init_listener.py +++ b/examples/request_init_listener.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/setup.py b/setup.py index e0eed9579a..93443a2139 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/__init__.py b/tests/__init__.py index 1dec045fd3..14ea6b5ba9 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 6cc309a5dc..9795cff16d 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/__init__.py b/tests/integration/cqlengine/__init__.py index 760702fc1b..8fd91ce5b0 100644 --- a/tests/integration/cqlengine/__init__.py +++ b/tests/integration/cqlengine/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/base.py b/tests/integration/cqlengine/base.py index 5536efb3d2..150b9ecfa6 100644 --- a/tests/integration/cqlengine/base.py +++ b/tests/integration/cqlengine/base.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/columns/__init__.py b/tests/integration/cqlengine/columns/__init__.py index 87fc3685e0..077defb32e 100644 --- a/tests/integration/cqlengine/columns/__init__.py +++ b/tests/integration/cqlengine/columns/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/columns/test_container_columns.py b/tests/integration/cqlengine/columns/test_container_columns.py index 57f97ceaa0..7ab5ded19c 100644 --- a/tests/integration/cqlengine/columns/test_container_columns.py +++ b/tests/integration/cqlengine/columns/test_container_columns.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/columns/test_counter_column.py b/tests/integration/cqlengine/columns/test_counter_column.py index addfd95a1f..a751ab8425 100644 --- a/tests/integration/cqlengine/columns/test_counter_column.py +++ b/tests/integration/cqlengine/columns/test_counter_column.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/columns/test_static_column.py b/tests/integration/cqlengine/columns/test_static_column.py index 543dc84732..08e8ce7f9f 100644 --- a/tests/integration/cqlengine/columns/test_static_column.py +++ b/tests/integration/cqlengine/columns/test_static_column.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 4980415208..625b072171 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/columns/test_value_io.py b/tests/integration/cqlengine/columns/test_value_io.py index 42dc2420e7..95d3cde886 100644 --- a/tests/integration/cqlengine/columns/test_value_io.py +++ b/tests/integration/cqlengine/columns/test_value_io.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/connections/__init__.py b/tests/integration/cqlengine/connections/__init__.py index 1c7af46e71..bcdbd2269e 100644 --- a/tests/integration/cqlengine/connections/__init__.py +++ b/tests/integration/cqlengine/connections/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/connections/test_connection.py b/tests/integration/cqlengine/connections/test_connection.py index 65102b5298..121b79d03f 100644 --- a/tests/integration/cqlengine/connections/test_connection.py +++ b/tests/integration/cqlengine/connections/test_connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/management/__init__.py b/tests/integration/cqlengine/management/__init__.py index 87fc3685e0..077defb32e 100644 --- a/tests/integration/cqlengine/management/__init__.py +++ b/tests/integration/cqlengine/management/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/management/test_compaction_settings.py b/tests/integration/cqlengine/management/test_compaction_settings.py index 4fe349e069..c0c1359a16 100644 --- a/tests/integration/cqlengine/management/test_compaction_settings.py +++ b/tests/integration/cqlengine/management/test_compaction_settings.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index f8b3f70316..a5fd39b9a3 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/__init__.py b/tests/integration/cqlengine/model/__init__.py index 87fc3685e0..077defb32e 100644 --- a/tests/integration/cqlengine/model/__init__.py +++ b/tests/integration/cqlengine/model/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/test_class_construction.py b/tests/integration/cqlengine/model/test_class_construction.py index e447056376..5124b1c091 100644 --- a/tests/integration/cqlengine/model/test_class_construction.py +++ b/tests/integration/cqlengine/model/test_class_construction.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/test_equality_operations.py b/tests/integration/cqlengine/model/test_equality_operations.py index 9391ce6a79..ba7b34d396 100644 --- a/tests/integration/cqlengine/model/test_equality_operations.py +++ b/tests/integration/cqlengine/model/test_equality_operations.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/test_model.py b/tests/integration/cqlengine/model/test_model.py index b31b8d5aee..19b818a069 100644 --- a/tests/integration/cqlengine/model/test_model.py +++ b/tests/integration/cqlengine/model/test_model.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index 36d75055ca..3b7c9586c1 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/test_polymorphism.py b/tests/integration/cqlengine/model/test_polymorphism.py index 18feb653c5..3400847a52 100644 --- a/tests/integration/cqlengine/model/test_polymorphism.py +++ b/tests/integration/cqlengine/model/test_polymorphism.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index bb876d800d..fff7001fcb 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/test_updates.py b/tests/integration/cqlengine/model/test_updates.py index d6a9fdee45..bfab3af58e 100644 --- a/tests/integration/cqlengine/model/test_updates.py +++ b/tests/integration/cqlengine/model/test_updates.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/test_value_lists.py b/tests/integration/cqlengine/model/test_value_lists.py index 8bd9b218f5..9027266bec 100644 --- a/tests/integration/cqlengine/model/test_value_lists.py +++ b/tests/integration/cqlengine/model/test_value_lists.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/operators/__init__.py b/tests/integration/cqlengine/operators/__init__.py index 1c7af46e71..bcdbd2269e 100644 --- a/tests/integration/cqlengine/operators/__init__.py +++ b/tests/integration/cqlengine/operators/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/operators/test_where_operators.py b/tests/integration/cqlengine/operators/test_where_operators.py index 0683d3901e..203ffe62a5 100644 --- a/tests/integration/cqlengine/operators/test_where_operators.py +++ b/tests/integration/cqlengine/operators/test_where_operators.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/query/__init__.py b/tests/integration/cqlengine/query/__init__.py index 87fc3685e0..077defb32e 100644 --- a/tests/integration/cqlengine/query/__init__.py +++ b/tests/integration/cqlengine/query/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/query/test_batch_query.py b/tests/integration/cqlengine/query/test_batch_query.py index dafad04a1e..91727c3836 100644 --- a/tests/integration/cqlengine/query/test_batch_query.py +++ b/tests/integration/cqlengine/query/test_batch_query.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/query/test_datetime_queries.py b/tests/integration/cqlengine/query/test_datetime_queries.py index 813dcad07a..2c08f6fe99 100644 --- a/tests/integration/cqlengine/query/test_datetime_queries.py +++ b/tests/integration/cqlengine/query/test_datetime_queries.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/query/test_named.py b/tests/integration/cqlengine/query/test_named.py index a02e0a4bf4..3fbe847ad1 100644 --- a/tests/integration/cqlengine/query/test_named.py +++ b/tests/integration/cqlengine/query/test_named.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/query/test_queryoperators.py b/tests/integration/cqlengine/query/test_queryoperators.py index 5741b5a2d7..f52db06fff 100644 --- a/tests/integration/cqlengine/query/test_queryoperators.py +++ b/tests/integration/cqlengine/query/test_queryoperators.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index ea303373b8..fbb23e18f1 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/query/test_updates.py b/tests/integration/cqlengine/query/test_updates.py index 3410e252b0..2daa3a48fb 100644 --- a/tests/integration/cqlengine/query/test_updates.py +++ b/tests/integration/cqlengine/query/test_updates.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/__init__.py b/tests/integration/cqlengine/statements/__init__.py index 1c7af46e71..bcdbd2269e 100644 --- a/tests/integration/cqlengine/statements/__init__.py +++ b/tests/integration/cqlengine/statements/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/test_assignment_clauses.py b/tests/integration/cqlengine/statements/test_assignment_clauses.py index 4fc3a28374..c8a242efa1 100644 --- a/tests/integration/cqlengine/statements/test_assignment_clauses.py +++ b/tests/integration/cqlengine/statements/test_assignment_clauses.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/test_base_clause.py b/tests/integration/cqlengine/statements/test_base_clause.py index 14d98782ea..ef85de2622 100644 --- a/tests/integration/cqlengine/statements/test_base_clause.py +++ b/tests/integration/cqlengine/statements/test_base_clause.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/test_base_statement.py b/tests/integration/cqlengine/statements/test_base_statement.py index fb98966854..029360771c 100644 --- a/tests/integration/cqlengine/statements/test_base_statement.py +++ b/tests/integration/cqlengine/statements/test_base_statement.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/test_delete_statement.py b/tests/integration/cqlengine/statements/test_delete_statement.py index aa449fc7c1..dc051b76ae 100644 --- a/tests/integration/cqlengine/statements/test_delete_statement.py +++ b/tests/integration/cqlengine/statements/test_delete_statement.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/test_insert_statement.py b/tests/integration/cqlengine/statements/test_insert_statement.py index 7cde948a3c..b788052b88 100644 --- a/tests/integration/cqlengine/statements/test_insert_statement.py +++ b/tests/integration/cqlengine/statements/test_insert_statement.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/test_select_statement.py b/tests/integration/cqlengine/statements/test_select_statement.py index 6d728acfbc..791b755049 100644 --- a/tests/integration/cqlengine/statements/test_select_statement.py +++ b/tests/integration/cqlengine/statements/test_select_statement.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/test_update_statement.py b/tests/integration/cqlengine/statements/test_update_statement.py index e81263ff4f..7be341727c 100644 --- a/tests/integration/cqlengine/statements/test_update_statement.py +++ b/tests/integration/cqlengine/statements/test_update_statement.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/test_where_clause.py b/tests/integration/cqlengine/statements/test_where_clause.py index e3d95d4fa4..c6d05b916e 100644 --- a/tests/integration/cqlengine/statements/test_where_clause.py +++ b/tests/integration/cqlengine/statements/test_where_clause.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/test_batch_query.py b/tests/integration/cqlengine/test_batch_query.py index 355a118235..424d0b27a7 100644 --- a/tests/integration/cqlengine/test_batch_query.py +++ b/tests/integration/cqlengine/test_batch_query.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/test_connections.py b/tests/integration/cqlengine/test_connections.py index c658ef1867..4be46ce4f6 100644 --- a/tests/integration/cqlengine/test_connections.py +++ b/tests/integration/cqlengine/test_connections.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/test_consistency.py b/tests/integration/cqlengine/test_consistency.py index d52a43e3de..cb04bd75ec 100644 --- a/tests/integration/cqlengine/test_consistency.py +++ b/tests/integration/cqlengine/test_consistency.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/test_context_query.py b/tests/integration/cqlengine/test_context_query.py index 0a29688d96..9a4fa05203 100644 --- a/tests/integration/cqlengine/test_context_query.py +++ b/tests/integration/cqlengine/test_context_query.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/test_ifexists.py b/tests/integration/cqlengine/test_ifexists.py index 6d693f7b45..4ff0aff49e 100644 --- a/tests/integration/cqlengine/test_ifexists.py +++ b/tests/integration/cqlengine/test_ifexists.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/test_ifnotexists.py b/tests/integration/cqlengine/test_ifnotexists.py index 8c5e89c185..0bd7fb9a21 100644 --- a/tests/integration/cqlengine/test_ifnotexists.py +++ b/tests/integration/cqlengine/test_ifnotexists.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/test_lwt_conditional.py b/tests/integration/cqlengine/test_lwt_conditional.py index 8395154c34..0c1e108b34 100644 --- a/tests/integration/cqlengine/test_lwt_conditional.py +++ b/tests/integration/cqlengine/test_lwt_conditional.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/test_timestamp.py b/tests/integration/cqlengine/test_timestamp.py index 9b3ffbeafa..90a6c2ea35 100644 --- a/tests/integration/cqlengine/test_timestamp.py +++ b/tests/integration/cqlengine/test_timestamp.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/test_ttl.py b/tests/integration/cqlengine/test_ttl.py index 3e16292781..ed7bb3d5cd 100644 --- a/tests/integration/cqlengine/test_ttl.py +++ b/tests/integration/cqlengine/test_ttl.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/datatype_utils.py b/tests/integration/datatype_utils.py index 8824053d2c..dc89971a93 100644 --- a/tests/integration/datatype_utils.py +++ b/tests/integration/datatype_utils.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/__init__.py b/tests/integration/long/__init__.py index caa7e71667..df218cf114 100644 --- a/tests/integration/long/__init__.py +++ b/tests/integration/long/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/test_consistency.py b/tests/integration/long/test_consistency.py index 98463b6052..2091859dbf 100644 --- a/tests/integration/long/test_consistency.py +++ b/tests/integration/long/test_consistency.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py index fe7610eefd..4af31b5229 100644 --- a/tests/integration/long/test_failure_types.py +++ b/tests/integration/long/test_failure_types.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/test_ipv6.py b/tests/integration/long/test_ipv6.py index 8808a90d9e..2943dbdef4 100644 --- a/tests/integration/long/test_ipv6.py +++ b/tests/integration/long/test_ipv6.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/test_large_data.py b/tests/integration/long/test_large_data.py index 13542916aa..bbd0cf23bd 100644 --- a/tests/integration/long/test_large_data.py +++ b/tests/integration/long/test_large_data.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index 9a1f7010b2..ed15fc9a06 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/test_schema.py b/tests/integration/long/test_schema.py index 49f88fb4d6..6e979a4541 100644 --- a/tests/integration/long/test_schema.py +++ b/tests/integration/long/test_schema.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index 7f90665745..a5d16c4544 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/utils.py b/tests/integration/long/utils.py index b7120ff46d..40ecc0bbf3 100644 --- a/tests/integration/long/utils.py +++ b/tests/integration/long/utils.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/__init__.py b/tests/integration/standard/__init__.py index ba5c6e26fd..b72d6c4e63 100644 --- a/tests/integration/standard/__init__.py +++ b/tests/integration/standard/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index 2ae34dc84d..1eb6fbf6c1 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_client_warnings.py b/tests/integration/standard/test_client_warnings.py index 24a54498e5..6c9f7c52a0 100644 --- a/tests/integration/standard/test_client_warnings.py +++ b/tests/integration/standard/test_client_warnings.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index ff513c1c01..da0a37447e 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_concurrent.py b/tests/integration/standard/test_concurrent.py index c1e12133ae..b11e031f87 100644 --- a/tests/integration/standard/test_concurrent.py +++ b/tests/integration/standard/test_concurrent.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index ebc89d65de..d7b1f49687 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_control_connection.py b/tests/integration/standard/test_control_connection.py index d486f35f8e..89042cd060 100644 --- a/tests/integration/standard/test_control_connection.py +++ b/tests/integration/standard/test_control_connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_custom_payload.py b/tests/integration/standard/test_custom_payload.py index 4ca49f0d8e..22bc45759b 100644 --- a/tests/integration/standard/test_custom_payload.py +++ b/tests/integration/standard/test_custom_payload.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index fdc5e4f35b..d9d8b6b2f0 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 2119f6f3a6..ba628b53d8 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index b6ef97b708..02689d10c6 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index 43f5b0f368..84f252cd19 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index 50a0ed4127..65886826c3 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index e9fe02d5ef..ee7db1a8b0 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_query_paging.py b/tests/integration/standard/test_query_paging.py index 145dd8d5e4..88694047a7 100644 --- a/tests/integration/standard/test_query_paging.py +++ b/tests/integration/standard/test_query_paging.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_routing.py b/tests/integration/standard/test_routing.py index b22184a044..c27a53684c 100644 --- a/tests/integration/standard/test_routing.py +++ b/tests/integration/standard/test_routing.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_row_factories.py b/tests/integration/standard/test_row_factories.py index df9765ee78..527ddeea32 100644 --- a/tests/integration/standard/test_row_factories.py +++ b/tests/integration/standard/test_row_factories.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index 40fb1c4c35..05dc608b6b 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index fe02050291..91cc12441d 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/util.py b/tests/integration/util.py index ff71185a4a..26f2ca7739 100644 --- a/tests/integration/util.py +++ b/tests/integration/util.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/stress_tests/test_load.py b/tests/stress_tests/test_load.py index 523ee2004b..14c8778b4c 100644 --- a/tests/stress_tests/test_load.py +++ b/tests/stress_tests/test_load.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/stress_tests/test_multi_inserts.py b/tests/stress_tests/test_multi_inserts.py index e39e73e8b7..8aa1d68143 100644 --- a/tests/stress_tests/test_multi_inserts.py +++ b/tests/stress_tests/test_multi_inserts.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index 87fc3685e0..077defb32e 100644 --- a/tests/unit/__init__.py +++ b/tests/unit/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cqlengine/__init__.py b/tests/unit/cqlengine/__init__.py index 87fc3685e0..077defb32e 100644 --- a/tests/unit/cqlengine/__init__.py +++ b/tests/unit/cqlengine/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cqlengine/test_columns.py b/tests/unit/cqlengine/test_columns.py index c0c9e21b25..47c55b9715 100644 --- a/tests/unit/cqlengine/test_columns.py +++ b/tests/unit/cqlengine/test_columns.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cqlengine/test_connection.py b/tests/unit/cqlengine/test_connection.py index 58728987f0..b6646ec756 100644 --- a/tests/unit/cqlengine/test_connection.py +++ b/tests/unit/cqlengine/test_connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cqlengine/test_udt.py b/tests/unit/cqlengine/test_udt.py index 950429a800..bedeef8e3d 100644 --- a/tests/unit/cqlengine/test_udt.py +++ b/tests/unit/cqlengine/test_udt.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cython/__init__.py b/tests/unit/cython/__init__.py index 87fc3685e0..077defb32e 100644 --- a/tests/unit/cython/__init__.py +++ b/tests/unit/cython/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cython/bytesio_testhelper.pyx b/tests/unit/cython/bytesio_testhelper.pyx index e86fdb73c2..b78b329170 100644 --- a/tests/unit/cython/bytesio_testhelper.pyx +++ b/tests/unit/cython/bytesio_testhelper.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cython/test_bytesio.py b/tests/unit/cython/test_bytesio.py index 0c2ae7bfa8..488f18bd97 100644 --- a/tests/unit/cython/test_bytesio.py +++ b/tests/unit/cython/test_bytesio.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cython/test_types.py b/tests/unit/cython/test_types.py index d9f3a746e1..04014934dd 100644 --- a/tests/unit/cython/test_types.py +++ b/tests/unit/cython/test_types.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cython/test_utils.py b/tests/unit/cython/test_utils.py index 209056f645..18d4b65312 100644 --- a/tests/unit/cython/test_utils.py +++ b/tests/unit/cython/test_utils.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cython/types_testhelper.pyx b/tests/unit/cython/types_testhelper.pyx index 3cd60c550f..538ce43c23 100644 --- a/tests/unit/cython/types_testhelper.pyx +++ b/tests/unit/cython/types_testhelper.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cython/utils.py b/tests/unit/cython/utils.py index 76d46ef8e8..6531894d5b 100644 --- a/tests/unit/cython/utils.py +++ b/tests/unit/cython/utils.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cython/utils_testhelper.pyx b/tests/unit/cython/utils_testhelper.pyx index 32816d3a31..489fbd1b7f 100644 --- a/tests/unit/cython/utils_testhelper.pyx +++ b/tests/unit/cython/utils_testhelper.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/__init__.py b/tests/unit/io/__init__.py index 87fc3685e0..077defb32e 100644 --- a/tests/unit/io/__init__.py +++ b/tests/unit/io/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/eventlet_utils.py b/tests/unit/io/eventlet_utils.py index 2ed929c81d..c06f4cdc0e 100644 --- a/tests/unit/io/eventlet_utils.py +++ b/tests/unit/io/eventlet_utils.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/gevent_utils.py b/tests/unit/io/gevent_utils.py index 12aab8d2f1..4d368ea6e9 100644 --- a/tests/unit/io/gevent_utils.py +++ b/tests/unit/io/gevent_utils.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index ab5bd64091..4be5a9b68e 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/test_eventletreactor.py b/tests/unit/io/test_eventletreactor.py index 040685e306..8d0ca21347 100644 --- a/tests/unit/io/test_eventletreactor.py +++ b/tests/unit/io/test_eventletreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/test_geventreactor.py b/tests/unit/io/test_geventreactor.py index 1b2accf575..9db1e2ef93 100644 --- a/tests/unit/io/test_geventreactor.py +++ b/tests/unit/io/test_geventreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index 309134c940..273dcc0035 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/test_libevtimer.py b/tests/unit/io/test_libevtimer.py index fb491962d8..e0799605dc 100644 --- a/tests/unit/io/test_libevtimer.py +++ b/tests/unit/io/test_libevtimer.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/test_twistedreactor.py b/tests/unit/io/test_twistedreactor.py index fd4181460a..76a766fe3d 100644 --- a/tests/unit/io/test_twistedreactor.py +++ b/tests/unit/io/test_twistedreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index 4533f77143..0c8ab4b24d 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index d4f5b8d49a..40924293fd 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_concurrent.py b/tests/unit/test_concurrent.py index ca7bd09623..f8c22cb831 100644 --- a/tests/unit/test_concurrent.py +++ b/tests/unit/test_concurrent.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index b8cb640b46..3209d312ef 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index e16562b475..e9efc2d021 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_exception.py b/tests/unit/test_exception.py index a88b5260fa..e01e782f83 100644 --- a/tests/unit/test_exception.py +++ b/tests/unit/test_exception.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_host_connection_pool.py b/tests/unit/test_host_connection_pool.py index 5fe230f402..e4ea0a98d8 100644 --- a/tests/unit/test_host_connection_pool.py +++ b/tests/unit/test_host_connection_pool.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index 9a7787fb31..c7b1c9ca4f 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index cd1c9118ed..a9a4852617 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_orderedmap.py b/tests/unit/test_orderedmap.py index 6fa43ccdfe..71470ec90a 100644 --- a/tests/unit/test_orderedmap.py +++ b/tests/unit/test_orderedmap.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_parameter_binding.py b/tests/unit/test_parameter_binding.py index 555dfe3834..ef03b3c5bc 100644 --- a/tests/unit/test_parameter_binding.py +++ b/tests/unit/test_parameter_binding.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 494b2e05b3..4a79e0f644 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_query.py b/tests/unit/test_query.py index 037710f650..e345a61527 100644 --- a/tests/unit/test_query.py +++ b/tests/unit/test_query.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index 6628686b9a..70cba84094 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_resultset.py b/tests/unit/test_resultset.py index c0fbad4a65..8812c76cf5 100644 --- a/tests/unit/test_resultset.py +++ b/tests/unit/test_resultset.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_sortedset.py b/tests/unit/test_sortedset.py index a2b264710f..91047d0013 100644 --- a/tests/unit/test_sortedset.py +++ b/tests/unit/test_sortedset.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_time_util.py b/tests/unit/test_time_util.py index 4455e58887..a683076fee 100644 --- a/tests/unit/test_time_util.py +++ b/tests/unit/test_time_util.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_timestamps.py b/tests/unit/test_timestamps.py index 67a82a621c..50c0ba92ec 100644 --- a/tests/unit/test_timestamps.py +++ b/tests/unit/test_timestamps.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index 9533dc3259..21e1a4165b 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_util_types.py b/tests/unit/test_util_types.py index 2b9bc14a90..de75829d81 100644 --- a/tests/unit/test_util_types.py +++ b/tests/unit/test_util_types.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. From c2e5b621029368223d5ff824f59b590feedea087 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 26 Apr 2017 11:24:50 -0400 Subject: [PATCH 1091/2005] add note re: DSE --- README.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.rst b/README.rst index 406da617bd..c586a76f56 100644 --- a/README.rst +++ b/README.rst @@ -8,6 +8,8 @@ A modern, `feature-rich `_ a The driver supports Python 2.7, 3.3, 3.4, 3.5, and 3.6. +If you require compatibility with DataStax Enterprise, use the `DataStax Enterprise Python Driver `_. + Feedback Requested ------------------ **Help us focus our efforts!** Provide your input on the `Platform and Runtime Survey `_ (we kept it short). From 3d30d2b6b208dd62a6c68bd1180329c2d570bcc3 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 26 Apr 2017 16:43:28 -0400 Subject: [PATCH 1092/2005] Raise TraceUnavailable if the future is not done --- cassandra/cluster.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 397e3cd511..d75eab6477 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -67,7 +67,7 @@ HostConnectionPool, HostConnection, NoConnectionsAvailable) from cassandra.query import (SimpleStatement, PreparedStatement, BoundStatement, - BatchStatement, bind_params, QueryTrace, + BatchStatement, bind_params, QueryTrace, TraceUnavailable, named_tuple_factory, dict_factory, tuple_factory, FETCH_SIZE_UNSET) from cassandra.timestamps import MonotonicTimestampGenerator @@ -3836,8 +3836,15 @@ def get_query_trace(self, max_wait=None, query_cl=ConsistencyLevel.LOCAL_ONE): details from Cassandra. If the trace is not available after `max_wait`, :exc:`cassandra.query.TraceUnavailable` will be raised. + If the ResponseFuture is not done (async execution) and you try to retrieve the trace, + :exc:`cassandra.query.TraceUnavailable` will be raised. + `query_cl` is the consistency level used to poll the trace tables. """ + if self._final_result is _NOT_SET: + raise TraceUnavailable( + "Trace information was not available. The ResponseFuture is not done.") + if self._query_traces: return self._get_query_trace(len(self._query_traces) - 1, max_wait, query_cl) From 2a13983ee2a9881f0b0e07124cd057fdbdb836ab Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 26 Apr 2017 16:49:32 -0400 Subject: [PATCH 1093/2005] fix condition --- cassandra/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index d75eab6477..c641b84e51 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3841,7 +3841,7 @@ def get_query_trace(self, max_wait=None, query_cl=ConsistencyLevel.LOCAL_ONE): `query_cl` is the consistency level used to poll the trace tables. """ - if self._final_result is _NOT_SET: + if self._final_result is _NOT_SET and self._final_exception is None: raise TraceUnavailable( "Trace information was not available. The ResponseFuture is not done.") From 21232c7b35ecbb9c41a9ee32a045e4a6cfeec80d Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 27 Apr 2017 15:19:24 -0400 Subject: [PATCH 1094/2005] Ensure UDT are validated --- cassandra/cqlengine/columns.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 92d7c470e3..8ebebf7c06 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -973,6 +973,13 @@ def cql_type(self): field_names=[c.db_field_name for c in self.user_type._fields.values()], field_types=[c.cql_type for c in self.user_type._fields.values()]) + def validate(self, value): + val = super(UserDefinedType, self).validate(value) + if val is None: + return + val.validate() + return val + def resolve_udts(col_def, out_list): for col in col_def.sub_types: From 81d2a9a98cfe728fd6c3c0eff24396fa88faa628 Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 27 Apr 2017 15:44:36 -0400 Subject: [PATCH 1095/2005] Added tests for python-196 --- tests/integration/standard/test_cluster.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index da0a37447e..ba9fab4c6e 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -580,7 +580,20 @@ def check_trace(trace): check_trace(future.get_query_trace()) cluster.shutdown() - def test_trace_timeout(self): + def test_trace_unavaiable(self): + """ + First checks that TraceUnavailable is arisen if the + max_wait parameter is negative + + Then checks that TraceUnavailable is arisen if the + result hasn't been set yet + + @since 3.10 + @jira_ticket PYTHON-196 + @expected_result TraceUnavailable is arisen in both cases + + @test_category query + """ cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect() @@ -589,6 +602,11 @@ def test_trace_timeout(self): future = session.execute_async(statement, trace=True) future.result() self.assertRaises(TraceUnavailable, future.get_query_trace, -1.0) + + query = SimpleStatement("SELECT * FROM system.local") + future = session.execute_async(query, trace=True) + self.assertRaises(TraceUnavailable, future.get_query_trace, max_wait=120) + cluster.shutdown() def test_string_coverage(self): From 9adf7048001f9d8d9dd060d2b252ea9def226a4b Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 28 Apr 2017 10:02:15 -0400 Subject: [PATCH 1096/2005] Fixed typo --- tests/integration/standard/test_cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index ba9fab4c6e..9b216b2c23 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -580,7 +580,7 @@ def check_trace(trace): check_trace(future.get_query_trace()) cluster.shutdown() - def test_trace_unavaiable(self): + def test_trace_unavailable(self): """ First checks that TraceUnavailable is arisen if the max_wait parameter is negative From d014f88349a568bebc4a02605b2b15ef958410b8 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 4 May 2017 09:37:18 -0700 Subject: [PATCH 1097/2005] remove uses of map that don't work in py3 --- cassandra/io/libevreactor.py | 4 +++- tests/unit/test_policies.py | 6 ++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 5d163484f0..aadbe3a53c 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -121,7 +121,9 @@ def _cleanup(self): for conn in self._live_conns | self._new_conns | self._closed_conns: conn.close() - map(lambda w: w.stop(), (w for w in (conn._write_watcher, conn._read_watcher) if w)) + for watcher in (conn._write_watcher, conn._read_watcher): + if watcher: + watcher.stop() self.notify() # wake the timer watcher log.debug("Waiting for event loop thread to join...") diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 4a79e0f644..e2df3056e1 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -108,8 +108,10 @@ def check_query_plan(): self.assertEqual(sorted(qplan), hosts) threads = [Thread(target=check_query_plan) for i in range(4)] - map(lambda t: t.start(), threads) - map(lambda t: t.join(), threads) + for t in threads: + t.start() + for t in threads: + t.join() def test_thread_safety_during_modification(self): hosts = range(100) From 373dc95556a3451e556e429dd8940c5ea4374e38 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 5 May 2017 14:16:18 -0400 Subject: [PATCH 1098/2005] Fix duration encoding for large values --- cassandra/marshal.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cassandra/marshal.py b/cassandra/marshal.py index 4b3faee627..712fa6e7f0 100644 --- a/cassandra/marshal.py +++ b/cassandra/marshal.py @@ -135,10 +135,13 @@ def vints_pack(values): num_extra_bytes = 0 num_bits = v.bit_length() # We need to reserve (num_extra_bytes+1) bits in the first byte - # ie. with 1 extra byte, the first byte needs to be something like '10XXXXXX' - while num_bits > (8-(num_extra_bytes+1)): + # ie. with 1 extra byte, the first byte needs to be something like '10XXXXXX' # 2 bits reserved + # ie. with 8 extra bytes, the first byte needs to be '11111111' # 8 bits reserved + reserved_bits = num_extra_bytes + 1 + while num_bits > (8-(reserved_bits)): num_extra_bytes += 1 num_bits -= 8 + reserved_bits = min(num_extra_bytes + 1, 8) revbytes.append(v & 0xff) v >>= 8 From d0a7d5f14cffa255db19b9a05c09fda18d33106f Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 8 May 2017 10:41:09 -0400 Subject: [PATCH 1099/2005] Added test reproducing using of nonexistent prepared statement in ResponseFuture --- tests/integration/standard/test_query.py | 71 ++++++++++++++++-------- 1 file changed, 49 insertions(+), 22 deletions(-) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index c8bd048e2f..2dcd9e7d94 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -393,17 +393,14 @@ def test_bound_keyspace(self): class ForcedHostSwitchPolicy(RoundRobinPolicy): def make_query_plan(self, working_keyspace=None, query=None): - if query is not None and "system.local" in str(query): - if hasattr(self, 'counter'): - self.counter += 1 - else: - self.counter = 0 - index = self.counter % 3 - a = list(self._live_hosts) - value = [a[index]] - return value + if hasattr(self, 'counter'): + self.counter += 1 else: - return list(self._live_hosts) + self.counter = 0 + index = self.counter % 3 + a = list(self._live_hosts) + value = [a[index]] + return value class PreparedStatementMetdataTest(unittest.TestCase): @@ -456,18 +453,48 @@ def test_prepare_on_all_hosts(self): clus = Cluster( load_balancing_policy=white_list, protocol_version=PROTOCOL_VERSION, prepare_on_all_hosts=False, reprepare_on_up=False) - try: - session = clus.connect(wait_for_all_pools=True) - mock_handler = MockLoggingHandler() - logger = logging.getLogger(cluster.__name__) - logger.addHandler(mock_handler) - select_statement = session.prepare("SELECT * FROM system.local") - session.execute(select_statement) - session.execute(select_statement) - session.execute(select_statement) - self.assertEqual(2, mock_handler.get_message_count('debug', "Re-preparing")) - finally: - clus.shutdown() + self.addCleanup(clus.shutdown) + + session = clus.connect(wait_for_all_pools=True) + mock_handler = MockLoggingHandler() + logger = logging.getLogger(cluster.__name__) + logger.addHandler(mock_handler) + select_statement = session.prepare("SELECT * FROM system.local") + session.execute(select_statement) + session.execute(select_statement) + session.execute(select_statement) + self.assertEqual(2, mock_handler.get_message_count('debug', "Re-preparing")) + + + def test_prepare_batch_statement(self): + """ + Test to validate a prepared statement used inside a batch statement is correctly handled + by the driver + + @since 3.10 + @jira_ticket PYTHON-706 + @expected_result queries will have to re-prepared on hosts that aren't the control connection + and the batch statement will be sent. + """ + white_list = ForcedHostSwitchPolicy() + clus = Cluster( + load_balancing_policy=white_list, + protocol_version=PROTOCOL_VERSION, prepare_on_all_hosts=False, + reprepare_on_up=False) + self.addCleanup(clus.shutdown) + + session = clus.connect(wait_for_all_pools=True) + + insert_statement = session.prepare("INSERT INTO test3rf.test (k, v) VALUES (?, ?)") + + # This is going to query a host where the query + # is not prepared + batch_statement = BatchStatement(consistency_level=ConsistencyLevel.ONE) + batch_statement.add(insert_statement, (1, 2)) + session.execute(batch_statement) + select_results = session.execute("SELECT * FROM test3rf.test WHERE k = 1") + first_row = select_results[0][:2] + self.assertEqual((1, 2), first_row) class PrintStatementTests(unittest.TestCase): From e80f4b6b6b90a8149a14c528ee9d206bedf7d3a1 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 8 May 2017 15:14:16 -0400 Subject: [PATCH 1100/2005] Add Duration support to cqlengine --- cassandra/cqlengine/columns.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 92d7c470e3..25854add93 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -22,6 +22,7 @@ from cassandra.cqltypes import SimpleDateType, _cqltypes, UserType from cassandra.cqlengine import ValidationError from cassandra.cqlengine.functions import get_total_seconds +from cassandra.util import Duration as _Duration log = logging.getLogger(__name__) @@ -598,6 +599,25 @@ def to_database(self, value): return value if isinstance(value, util.Time) else util.Time(value) +class Duration(Column): + """ + Stores a duration (months, days, nanoseconds) + + .. versionadded:: 3.10.0 + + requires C* 3.10+ and protocol v4+ + """ + db_type = 'duration' + + def validate(self, value): + val = super(Duration, self).validate(value) + if val is None: + return + if not isinstance(val, _Duration): + raise TypeError('{0} {1} is not a valid Duration.'.format(self.column_name, value)) + return val + + class UUID(Column): """ Stores a type 1 or 4 UUID From 7bc0bfd8cbae84a62bd6334f6f74198b760522f7 Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 11 Apr 2017 16:28:13 -0400 Subject: [PATCH 1101/2005] Refactored/Added some tests to CQLEngine --- cassandra/cqlengine/connection.py | 1 - tests/integration/cqlengine/__init__.py | 3 +- tests/integration/cqlengine/base.py | 13 + .../cqlengine/columns/test_validation.py | 207 ++++++++++++---- .../integration/cqlengine/model/test_udts.py | 229 ++++++++++-------- .../cqlengine/model/test_updates.py | 97 +++++--- .../cqlengine/query/test_queryoperators.py | 2 +- .../cqlengine/query/test_updates.py | 13 +- .../statements/test_base_statement.py | 79 ++++++ 9 files changed, 457 insertions(+), 187 deletions(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 8b7649988e..2f4890d2b9 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -337,7 +337,6 @@ def execute(query, params=None, consistency_level=None, timeout=NOT_SET, connect query = SimpleStatement(str(query), consistency_level=consistency_level, fetch_size=query.fetch_size) elif isinstance(query, six.string_types): query = SimpleStatement(query, consistency_level=consistency_level) - log.debug(format_log_context(query.query_string, connection=connection)) result = conn.session.execute(query, params, timeout=timeout) diff --git a/tests/integration/cqlengine/__init__.py b/tests/integration/cqlengine/__init__.py index 8fd91ce5b0..2cb2e28d3d 100644 --- a/tests/integration/cqlengine/__init__.py +++ b/tests/integration/cqlengine/__init__.py @@ -21,7 +21,7 @@ from cassandra import ConsistencyLevel from cassandra.cqlengine import connection -from cassandra.cqlengine.management import create_keyspace_simple, CQLENG_ALLOW_SCHEMA_MANAGEMENT +from cassandra.cqlengine.management import create_keyspace_simple, drop_keyspace, CQLENG_ALLOW_SCHEMA_MANAGEMENT import cassandra from tests.integration import get_server_versions, use_single_node, PROTOCOL_VERSION, CASSANDRA_IP, set_default_cass_ip @@ -45,7 +45,6 @@ def setup_package(): def teardown_package(): connection.unregister_connection("default") - def is_prepend_reversed(): # do we have https://issues.apache.org/jira/browse/CASSANDRA-8733 ? ver, _ = get_server_versions() diff --git a/tests/integration/cqlengine/base.py b/tests/integration/cqlengine/base.py index 150b9ecfa6..7be5894774 100644 --- a/tests/integration/cqlengine/base.py +++ b/tests/integration/cqlengine/base.py @@ -19,7 +19,20 @@ import sys from cassandra.cqlengine.connection import get_session +from cassandra.cqlengine.models import Model +from cassandra.cqlengine import columns +from uuid import uuid4 + +class TestQueryUpdateModel(Model): + + partition = columns.UUID(primary_key=True, default=uuid4) + cluster = columns.Integer(primary_key=True) + count = columns.Integer(required=False) + text = columns.Text(required=False, index=True) + text_set = columns.Set(columns.Text, required=False) + text_list = columns.List(columns.Text, required=False) + text_map = columns.Map(columns.Text, columns.Text, required=False) class BaseCassEngTestCase(unittest.TestCase): diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 625b072171..f03bcd5efe 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -18,30 +18,21 @@ import unittest # noqa import sys -from datetime import datetime, timedelta, date, tzinfo +from datetime import datetime, timedelta, date, tzinfo, time from decimal import Decimal as D from uuid import uuid4, uuid1 from cassandra import InvalidRequest -from cassandra.cqlengine.columns import TimeUUID -from cassandra.cqlengine.columns import Ascii -from cassandra.cqlengine.columns import Text -from cassandra.cqlengine.columns import Integer -from cassandra.cqlengine.columns import BigInt -from cassandra.cqlengine.columns import VarInt -from cassandra.cqlengine.columns import DateTime -from cassandra.cqlengine.columns import Date -from cassandra.cqlengine.columns import UUID -from cassandra.cqlengine.columns import Boolean -from cassandra.cqlengine.columns import Decimal -from cassandra.cqlengine.columns import Inet +from cassandra.cqlengine.columns import TimeUUID, Ascii, Text, Integer, BigInt, VarInt, DateTime, Date, UUID, Boolean, \ + Decimal, Inet, Time, UserDefinedType, Map, List, Set, Tuple, Double, Float from cassandra.cqlengine.connection import execute from cassandra.cqlengine.management import sync_table, drop_table from cassandra.cqlengine.models import Model, ValidationError +from cassandra.cqlengine.usertype import UserType from cassandra import util from tests.integration import PROTOCOL_VERSION -from tests.integration.cqlengine.base import BaseCassEngTestCase +from tests.integration.cqlengine.base import BaseCassEngTestCase, TestQueryUpdateModel class TestDatetime(BaseCassEngTestCase): @@ -62,7 +53,7 @@ def test_datetime_io(self): now = datetime.now() self.DatetimeTest.objects.create(test_id=0, created_at=now) dt2 = self.DatetimeTest.objects(test_id=0).first() - assert dt2.created_at.timetuple()[:6] == now.timetuple()[:6] + self.assertEqual(dt2.created_at.timetuple()[:6], now.timetuple()[:6]) def test_datetime_tzinfo_io(self): class TZ(tzinfo): @@ -74,21 +65,27 @@ def dst(self, date_time): now = datetime(1982, 1, 1, tzinfo=TZ()) dt = self.DatetimeTest.objects.create(test_id=1, created_at=now) dt2 = self.DatetimeTest.objects(test_id=1).first() - assert dt2.created_at.timetuple()[:6] == (now + timedelta(hours=1)).timetuple()[:6] + self.assertEqual(dt2.created_at.timetuple()[:6], (now + timedelta(hours=1)).timetuple()[:6]) def test_datetime_date_support(self): today = date.today() self.DatetimeTest.objects.create(test_id=2, created_at=today) dt2 = self.DatetimeTest.objects(test_id=2).first() - assert dt2.created_at.isoformat() == datetime(today.year, today.month, today.day).isoformat() + self.assertEqual(dt2.created_at.isoformat(), datetime(today.year, today.month, today.day).isoformat()) + + result = self.DatetimeTest.objects.all().allow_filtering().filter(test_id=2).first() + self.assertEqual(result.created_at, datetime.combine(today, datetime.min.time())) + + result = self.DatetimeTest.objects.all().allow_filtering().filter(test_id=2, created_at=today).first() + self.assertEqual(result.created_at, datetime.combine(today, datetime.min.time())) def test_datetime_none(self): dt = self.DatetimeTest.objects.create(test_id=3, created_at=None) dt2 = self.DatetimeTest.objects(test_id=3).first() - assert dt2.created_at is None + self.assertIsNone(dt2.created_at) dts = self.DatetimeTest.objects.filter(test_id=3).values_list('created_at') - assert dts[0][0] is None + self.assertIsNone(dts[0][0]) def test_datetime_invalid(self): dt_value= 'INVALID' @@ -99,13 +96,13 @@ def test_datetime_timestamp(self): dt_value = 1454520554 self.DatetimeTest.objects.create(test_id=5, created_at=dt_value) dt2 = self.DatetimeTest.objects(test_id=5).first() - assert dt2.created_at == datetime.utcfromtimestamp(dt_value) + self.assertEqual(dt2.created_at, datetime.utcfromtimestamp(dt_value)) def test_datetime_large(self): dt_value = datetime(2038, 12, 31, 10, 10, 10, 123000) self.DatetimeTest.objects.create(test_id=6, created_at=dt_value) dt2 = self.DatetimeTest.objects(test_id=6).first() - assert dt2.created_at == dt_value + self.assertEqual(dt2.created_at, dt_value) def test_datetime_truncate_microseconds(self): """ @@ -187,49 +184,170 @@ def test_varint_io(self): int2 = self.VarIntTest.objects(test_id=0).first() self.assertEqual(int1.bignum, int2.bignum) + with self.assertRaises(ValidationError): + self.VarIntTest.objects.create(test_id=0, bignum="not_a_number") -class TestDate(BaseCassEngTestCase): - class DateTest(Model): - - test_id = Integer(primary_key=True) - created_at = Date() +class DataType(): @classmethod def setUpClass(cls): if PROTOCOL_VERSION < 4: return - sync_table(cls.DateTest) + + class DataTypeTest(Model): + test_id = Integer(primary_key=True) + class_param = cls.db_klass() + + cls.model_class = DataTypeTest + sync_table(cls.model_class) @classmethod def tearDownClass(cls): if PROTOCOL_VERSION < 4: return - drop_table(cls.DateTest) + drop_table(cls.model_class) def setUp(self): if PROTOCOL_VERSION < 4: raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION)) - def test_date_io(self): - today = date.today() - self.DateTest.objects.create(test_id=0, created_at=today) - result = self.DateTest.objects(test_id=0).first() - self.assertEqual(result.created_at, util.Date(today)) + def _check_value_is_correct_in_db(self, value): + + if value is None: + result = self.model_class.objects.all().allow_filtering().filter(test_id=0).first() + self.assertIsNone(result.class_param) + + else: + if not isinstance(value, self.python_klass): + value_to_compare = self.python_klass(value) + else: + value_to_compare = value + + result = self.model_class.objects(test_id=0).first() + self.assertIsInstance(result.class_param, self.python_klass) + self.assertEqual(result.class_param, value_to_compare) + + result = self.model_class.objects.all().allow_filtering().filter(test_id=0).first() + self.assertIsInstance(result.class_param, self.python_klass) + self.assertEqual(result.class_param, value_to_compare) + + result = self.model_class.objects.all().allow_filtering().filter(test_id=0, class_param=value).first() + self.assertIsInstance(result.class_param, self.python_klass) + self.assertEqual(result.class_param, value_to_compare) + + return result def test_date_io_using_datetime(self): - now = datetime.utcnow() - self.DateTest.objects.create(test_id=0, created_at=now) - result = self.DateTest.objects(test_id=0).first() - self.assertIsInstance(result.created_at, util.Date) - self.assertEqual(result.created_at, util.Date(now)) + first_value = self.first_value + second_value = self.second_value + third_value = self.third_value + + self.model_class.objects.create(test_id=0, class_param=first_value) + result = self._check_value_is_correct_in_db(first_value) + result.delete() + + self.model_class.objects.create(test_id=0, class_param=second_value) + result = self._check_value_is_correct_in_db(second_value) + + result.update(class_param=third_value).save() + result = self._check_value_is_correct_in_db(third_value) + + result.update(class_param=None).save() + self._check_value_is_correct_in_db(None) def test_date_none(self): - self.DateTest.objects.create(test_id=1, created_at=None) - dt2 = self.DateTest.objects(test_id=1).first() - assert dt2.created_at is None + self.model_class.objects.create(test_id=1, class_param=None) + dt2 = self.model_class.objects(test_id=1).first() + self.assertIsNone(dt2.class_param) + + dts = self.model_class.objects(test_id=1).values_list('class_param') + self.assertIsNone(dts[0][0]) + - dts = self.DateTest.objects(test_id=1).values_list('created_at') - assert dts[0][0] is None +class TestDate(DataType, BaseCassEngTestCase): + @classmethod + def setUpClass(cls): + cls.db_klass, cls.python_klass = Date, util.Date + cls.first_value, cls.second_value, cls.third_value = \ + datetime.utcnow(), util.Date(datetime(1, 1, 1)), datetime(1, 1, 2) + super(TestDate, cls).setUpClass() + + +class TestTime(DataType, BaseCassEngTestCase): + @classmethod + def setUpClass(cls): + cls.db_klass, cls.python_klass = Time, util.Time + cls.first_value, cls.second_value, cls.third_value = \ + time(2, 12, 7, 48), util.Time(time(2, 12, 7, 49)), time(2, 12, 7, 50) + super(TestTime, cls).setUpClass() + + +class TestDateTime(DataType, BaseCassEngTestCase): + @classmethod + def setUpClass(cls): + cls.db_klass, cls.python_klass = DateTime, datetime + cls.first_value, cls.second_value, cls.third_value = \ + datetime(2017, 4, 13, 18, 34, 24, 317000), datetime(1, 1, 1), datetime(1, 1, 2) + super(TestDateTime, cls).setUpClass() + + +class TestBoolean(DataType, BaseCassEngTestCase): + @classmethod + def setUpClass(cls): + cls.db_klass, cls.python_klass = Boolean, bool + cls.first_value, cls.second_value, cls.third_value = True, False, True + super(TestBoolean, cls).setUpClass() + + +class User(UserType): + # We use Date and Time to ensure to_python + # is called for these columns + age = Integer() + date_param = Date() + map_param = Map(Integer, Time) + list_param = List(Date) + set_param = Set(Date) + tuple_param = Tuple(Date, Decimal, Boolean, VarInt, Double, UUID) + + +class UserModel(Model): + test_id = Integer(primary_key=True) + class_param = UserDefinedType(User) + + +class TestUDT(DataType, BaseCassEngTestCase): + @classmethod + def setUpClass(cls): + cls.db_klass, cls.python_klass = UserDefinedType, User + cls.first_value = User( + age=1, + date_param=datetime.utcnow(), + map_param={1: time(2, 12, 7, 50), 2: util.Time(time(2, 12, 7, 49))}, + list_param=[datetime(1, 1, 2), datetime(1, 1, 3)], + set_param=set((datetime(1, 1, 3), util.Date(datetime(1, 1, 1)))), + tuple_param=(datetime(1, 1, 3), 2, False, 1, 2.324, uuid4()) + ) + + cls.second_value = User( + age=1, + date_param=datetime.utcnow(), + map_param={1: time(2, 12, 7, 50), 2: util.Time(time(2, 12, 7, 49))}, + list_param=[datetime(1, 1, 2), datetime(1, 2, 3)], + set_param=set((datetime(1, 1, 3), util.Date(datetime(1, 1, 1)))), + tuple_param=(datetime(1, 1, 2), 2, False, 1, 2.324, uuid4()) + ) + + cls.third_value = User( + age=2, + date_param=datetime.utcnow(), + map_param={1: time(2, 12, 7, 51), 2: util.Time(time(2, 12, 7, 49))}, + list_param=[datetime(1, 1, 2), datetime(1, 1, 4)], + set_param=set((datetime(1, 1, 3), util.Date(datetime(1, 1, 2)))), + tuple_param=(datetime(1, 1, 2), 3, False, 1, 2.3214, uuid4()) + ) + + cls.model_class = UserModel + sync_table(cls.model_class) class TestDecimal(BaseCassEngTestCase): @@ -319,7 +437,7 @@ class TestInteger(BaseCassEngTestCase): class IntegerTest(Model): test_id = UUID(primary_key=True, default=lambda:uuid4()) - value = Integer(default=0, required=True) + value = Integer(default=0, required=True) def test_default_zero_fields_validate(self): """ Tests that integer columns with a default value of 0 validate """ @@ -644,4 +762,3 @@ def test_non_address_fails(self): # TODO: presently this only tests that the server blows it up. Is there supposed to be local validation? with self.assertRaises(InvalidRequest): self.InetTestModel.create(address="what is going on here?") - diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index fff7001fcb..e208787e30 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -24,11 +24,45 @@ from cassandra.cqlengine.models import Model from cassandra.cqlengine.usertype import UserType, UserTypeDefinitionException from cassandra.cqlengine import columns, connection -from cassandra.cqlengine.management import sync_table, sync_type, create_keyspace_simple, drop_keyspace, drop_table +from cassandra.cqlengine.management import sync_table, drop_table, sync_type, create_keyspace_simple, drop_keyspace +from cassandra.cqlengine import ValidationError from cassandra.util import Date, Time from tests.integration import PROTOCOL_VERSION from tests.integration.cqlengine.base import BaseCassEngTestCase +from tests.integration.cqlengine import DEFAULT_KEYSPACE + + +class User(UserType): + age = columns.Integer() + name = columns.Text() + + +class UserModel(Model): + id = columns.Integer(primary_key=True) + info = columns.UserDefinedType(User) + + +class AllDatatypes(UserType): + a = columns.Ascii() + b = columns.BigInt() + c = columns.Blob() + d = columns.Boolean() + e = columns.DateTime() + f = columns.Decimal() + g = columns.Double() + h = columns.Float() + i = columns.Inet() + j = columns.Integer() + k = columns.Text() + l = columns.TimeUUID() + m = columns.UUID() + n = columns.VarInt() + + +class AllDatatypesModel(Model): + id = columns.Integer(primary_key=True) + data = columns.UserDefinedType(AllDatatypes) class UserDefinedTypeTests(BaseCassEngTestCase): @@ -42,7 +76,7 @@ class User(UserType): age = columns.Integer() name = columns.Text() - sync_type("cqlengine_test", User) + sync_type(DEFAULT_KEYSPACE, User) user = User(age=42, name="John") self.assertEqual(42, user.age) self.assertEqual("John", user.name) @@ -53,8 +87,10 @@ class User(UserType): name = columns.Text() gender = columns.Text() - sync_type("cqlengine_test", User) - user = User(age=42, name="John", gender="male") + sync_type(DEFAULT_KEYSPACE, User) + user = User(age=42) + user["name"] = "John" + user["gender"] = "male" self.assertEqual(42, user.age) self.assertEqual("John", user.name) self.assertEqual("male", user.gender) @@ -64,19 +100,12 @@ class User(UserType): age = columns.Integer() name = columns.Text() - sync_type("cqlengine_test", User) + sync_type(DEFAULT_KEYSPACE, User) user = User(age=42, name="John", gender="male") with self.assertRaises(AttributeError): user.gender def test_can_insert_udts(self): - class User(UserType): - age = columns.Integer() - name = columns.Text() - - class UserModel(Model): - id = columns.Integer(primary_key=True) - info = columns.UserDefinedType(User) sync_table(UserModel) @@ -90,16 +119,9 @@ class UserModel(Model): self.assertTrue(type(john.info) is User) self.assertEqual(42, john.info.age) self.assertEqual("John", john.info.name) + drop_table(UserModel) def test_can_update_udts(self): - class User(UserType): - age = columns.Integer() - name = columns.Text() - - class UserModel(Model): - id = columns.Integer(primary_key=True) - info = columns.UserDefinedType(User) - sync_table(UserModel) user = User(age=42, name="John") @@ -113,18 +135,11 @@ class UserModel(Model): created_user.update() mary_info = UserModel.objects().first().info - self.assertEqual(22, mary_info.age) - self.assertEqual("Mary", mary_info.name) + self.assertEqual(22, mary_info["age"]) + self.assertEqual("Mary", mary_info["name"]) + drop_table(UserModel) def test_can_update_udts_with_nones(self): - class User(UserType): - age = columns.Integer() - name = columns.Text() - - class UserModel(Model): - id = columns.Integer(primary_key=True) - info = columns.UserDefinedType(User) - sync_table(UserModel) user = User(age=42, name="John") @@ -139,45 +154,43 @@ class UserModel(Model): john_info = UserModel.objects().first().info self.assertIsNone(john_info) + drop_table(UserModel) def test_can_create_same_udt_different_keyspaces(self): - class User(UserType): - age = columns.Integer() - name = columns.Text() - - sync_type("cqlengine_test", User) + sync_type(DEFAULT_KEYSPACE, User) create_keyspace_simple("simplex", 1) sync_type("simplex", User) drop_keyspace("simplex") def test_can_insert_partial_udts(self): - class User(UserType): + class UserGender(UserType): age = columns.Integer() name = columns.Text() gender = columns.Text() - class UserModel(Model): + class UserModelGender(Model): id = columns.Integer(primary_key=True) - info = columns.UserDefinedType(User) + info = columns.UserDefinedType(UserGender) - sync_table(UserModel) + sync_table(UserModelGender) - user = User(age=42, name="John") - UserModel.create(id=0, info=user) + user = UserGender(age=42, name="John") + UserModelGender.create(id=0, info=user) - john_info = UserModel.objects().first().info + john_info = UserModelGender.objects().first().info self.assertEqual(42, john_info.age) self.assertEqual("John", john_info.name) self.assertIsNone(john_info.gender) - user = User(age=42) - UserModel.create(id=0, info=user) + user = UserGender(age=42) + UserModelGender.create(id=0, info=user) - john_info = UserModel.objects().first().info + john_info = UserModelGender.objects().first().info self.assertEqual(42, john_info.age) self.assertIsNone(john_info.name) self.assertIsNone(john_info.gender) + drop_table(UserModelGender) def test_can_insert_nested_udts(self): class Depth_0(UserType): @@ -215,6 +228,8 @@ class DepthModel(Model): self.assertEqual(udts[2], output.v_2) self.assertEqual(udts[3], output.v_3) + drop_table(DepthModel) + def test_can_insert_udts_with_nones(self): """ Test for inserting all column types as empty into a UserType as None's @@ -230,27 +245,6 @@ def test_can_insert_udts_with_nones(self): @test_category data_types:udt """ - - class AllDatatypes(UserType): - a = columns.Ascii() - b = columns.BigInt() - c = columns.Blob() - d = columns.Boolean() - e = columns.DateTime() - f = columns.Decimal() - g = columns.Double() - h = columns.Float() - i = columns.Inet() - j = columns.Integer() - k = columns.Text() - l = columns.TimeUUID() - m = columns.UUID() - n = columns.VarInt() - - class AllDatatypesModel(Model): - id = columns.Integer(primary_key=True) - data = columns.UserDefinedType(AllDatatypes) - sync_table(AllDatatypesModel) input = AllDatatypes(a=None, b=None, c=None, d=None, e=None, f=None, g=None, h=None, i=None, j=None, k=None, @@ -262,6 +256,8 @@ class AllDatatypesModel(Model): output = AllDatatypesModel.objects().first().data self.assertEqual(input, output) + drop_table(AllDatatypesModel) + def test_can_insert_udts_with_all_datatypes(self): """ Test for inserting all column types into a UserType @@ -277,27 +273,6 @@ def test_can_insert_udts_with_all_datatypes(self): @test_category data_types:udt """ - - class AllDatatypes(UserType): - a = columns.Ascii() - b = columns.BigInt() - c = columns.Blob() - d = columns.Boolean() - e = columns.DateTime() - f = columns.Decimal() - g = columns.Double() - h = columns.Float() - i = columns.Inet() - j = columns.Integer() - k = columns.Text() - l = columns.TimeUUID() - m = columns.UUID() - n = columns.VarInt() - - class AllDatatypesModel(Model): - id = columns.Integer(primary_key=True) - data = columns.UserDefinedType(AllDatatypes) - sync_table(AllDatatypesModel) input = AllDatatypes(a='ascii', b=2 ** 63 - 1, c=bytearray(b'hello world'), d=True, @@ -313,6 +288,8 @@ class AllDatatypesModel(Model): for i in range(ord('a'), ord('a') + 14): self.assertEqual(input[chr(i)], output[chr(i)]) + drop_table(AllDatatypesModel) + def test_can_insert_udts_protocol_v4_datatypes(self): """ Test for inserting all protocol v4 column types into a UserType @@ -354,6 +331,8 @@ class Allv4DatatypesModel(Model): for i in range(ord('a'), ord('a') + 3): self.assertEqual(input[chr(i)], output[chr(i)]) + drop_table(Allv4DatatypesModel) + def test_nested_udts_inserts(self): """ Test for inserting collections of user types using cql engine. @@ -394,6 +373,8 @@ class Container(Model): names_output = Container.objects().first().names self.assertEqual(names_output, names) + drop_table(Container) + def test_udts_with_unicode(self): """ Test for inserting models with unicode and udt columns. @@ -410,10 +391,6 @@ def test_udts_with_unicode(self): ascii_name = 'normal name' unicode_name = u'Fran\u00E7ois' - class User(UserType): - age = columns.Integer() - name = columns.Text() - class UserModelText(Model): id = columns.Text(primary_key=True) info = columns.UserDefinedType(User) @@ -427,10 +404,9 @@ class UserModelText(Model): UserModelText.create(id=unicode_name, info=user_template_ascii) UserModelText.create(id=unicode_name, info=user_template_unicode) + drop_table(UserModelText) + def test_register_default_keyspace(self): - class User(UserType): - age = columns.Integer() - name = columns.Text() from cassandra.cqlengine import models from cassandra.cqlengine import connection @@ -495,6 +471,8 @@ class TheModel(Model): self.assertEqual(info.a, age) self.assertEqual(info.n, name) + drop_table(TheModel) + def test_db_field_overload(self): """ Tests for db_field UserTypeDefinitionException @@ -520,9 +498,6 @@ class something_silly_2(UserType): def test_set_udt_fields(self): # PYTHON-502 - class User(UserType): - age = columns.Integer() - name = columns.Text() u = User() u.age = 20 @@ -562,3 +537,63 @@ class OuterModel(Model): self.assertEqual(t.nested[0].default_text, "default text") self.assertIsNotNone(t.simple.test_id) self.assertEqual(t.simple.default_text, "default text") + + drop_table(OuterModel) + + def test_udt_validate(self): + """ + Test to verify restrictions are honored and that validate is called + for each member of the UDT when an updated is attempted + + @since 3.10 + @jira_ticket PYTHON-505 + @expected_result a validation error is arisen due to the name being + too long + + @test_category data_types:object_mapper + """ + class UserValidate(UserType): + age = columns.Integer() + name = columns.Text(max_length=2) + + class UserModelValidate(Model): + id = columns.Integer(primary_key=True) + info = columns.UserDefinedType(UserValidate) + + sync_table(UserModelValidate) + + user = UserValidate(age=1, name="Robert") + item = UserModelValidate(id=1, info=user) + with self.assertRaises(ValidationError): + item.save() + + drop_table(UserModelValidate) + + def test_udt_validate_with_default(self): + """ + Test to verify restrictions are honored and that validate is called + on the default value + + @since 3.10 + @jira_ticket PYTHON-505 + @expected_result a validation error is arisen due to the name being + too long + + @test_category data_types:object_mapper + """ + class UserValidateDefault(UserType): + age = columns.Integer() + name = columns.Text(max_length=2, default="Robert") + + class UserModelValidateDefault(Model): + id = columns.Integer(primary_key=True) + info = columns.UserDefinedType(UserValidateDefault) + + sync_table(UserModelValidateDefault) + + user = UserValidateDefault(age=1) + item = UserModelValidateDefault(id=1, info=user) + with self.assertRaises(ValidationError): + item.save() + + drop_table(UserModelValidateDefault) diff --git a/tests/integration/cqlengine/model/test_updates.py b/tests/integration/cqlengine/model/test_updates.py index bfab3af58e..79c1372ae9 100644 --- a/tests/integration/cqlengine/model/test_updates.py +++ b/tests/integration/cqlengine/model/test_updates.py @@ -21,7 +21,7 @@ from cassandra.cqlengine.models import Model from cassandra.cqlengine import columns from cassandra.cqlengine.management import sync_table, drop_table - +from cassandra.cqlengine.usertype import UserType class TestUpdateModel(Model): @@ -147,19 +147,40 @@ def test_primary_key_update_failure(self): m0.update(partition=uuid4()) +class UDT(UserType): + age = columns.Integer() + mf = columns.Map(columns.Integer, columns.Integer) + dummy_udt = columns.Integer(default=42) + + class ModelWithDefault(Model): - id = columns.Integer(primary_key=True) - mf = columns.Map(columns.Integer, columns.Integer) - dummy = columns.Integer(default=42) + id = columns.Integer(primary_key=True) + mf = columns.Map(columns.Integer, columns.Integer) + dummy = columns.Integer(default=42) + udt = columns.UserDefinedType(UDT) + udt_default = columns.UserDefinedType(UDT, default=UDT(age=1, mf={2:2})) + + +class UDTWithDefault(UserType): + age = columns.Integer() + mf = columns.Map(columns.Integer, columns.Integer, default={2:2}) + dummy_udt = columns.Integer(default=42) class ModelWithDefaultCollection(Model): - id = columns.Integer(primary_key=True) - mf = columns.Map(columns.Integer, columns.Integer, default={2:2}) - dummy = columns.Integer(default=42) + id = columns.Integer(primary_key=True) + mf = columns.Map(columns.Integer, columns.Integer, default={2:2}) + dummy = columns.Integer(default=42) + udt = columns.UserDefinedType(UDT) + udt_default = columns.UserDefinedType(UDT, default=UDT(age=1, mf={2: 2})) class ModelWithDefaultTests(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + cls.default_udt = UDT(age=1, mf={2:2}, dummy_udt=42) + def setUp(self): sync_table(ModelWithDefault) @@ -176,17 +197,19 @@ def test_value_override_with_default(self): @test_category object_mapper """ - initial = ModelWithDefault(id=1, mf={0: 0}, dummy=0) + first_udt = UDT(age=1, mf={2:2}, dummy_udt=0) + initial = ModelWithDefault(id=1, mf={0: 0}, dummy=0, udt=first_udt, udt_default=first_udt) initial.save() self.assertEqual(ModelWithDefault.objects().all().get()._as_dict(), - {'id': 1, 'dummy': 0, 'mf': {0: 0}}) + {'id': 1, 'dummy': 0, 'mf': {0: 0}, "udt": first_udt, "udt_default": first_udt}) + second_udt = UDT(age=1, mf={3: 3}, dummy_udt=12) second = ModelWithDefault(id=1) - second.update(mf={0: 1}) + second.update(mf={0: 1}, udt=second_udt) self.assertEqual(ModelWithDefault.objects().all().get()._as_dict(), - {'id': 1, 'dummy': 0, 'mf': {0: 1}}) + {'id': 1, 'dummy': 0, 'mf': {0: 1}, "udt": second_udt, "udt_default": first_udt}) def test_value_is_written_if_is_default(self): """ @@ -202,10 +225,11 @@ def test_value_is_written_if_is_default(self): initial = ModelWithDefault(id=1) initial.mf = {0: 0} initial.dummy = 42 + initial.udt_default = self.default_udt initial.update() self.assertEqual(ModelWithDefault.objects().all().get()._as_dict(), - {'id': 1, 'dummy': 42, 'mf': {0: 0}}) + {'id': 1, 'dummy': 42, 'mf': {0: 0}, "udt": None, "udt_default": self.default_udt}) def test_null_update_is_respected(self): """ @@ -223,10 +247,11 @@ def test_null_update_is_respected(self): q = ModelWithDefault.objects.all().allow_filtering() obj = q.filter(id=1).get() - obj.update(dummy=None) + updated_udt = UDT(age=1, mf={2:2}, dummy_udt=None) + obj.update(dummy=None, udt_default=updated_udt) self.assertEqual(ModelWithDefault.objects().all().get()._as_dict(), - {'id': 1, 'dummy': None, 'mf': {0: 0}}) + {'id': 1, 'dummy': None, 'mf': {0: 0}, "udt": None, "udt_default": updated_udt}) def test_only_set_values_is_updated(self): """ @@ -244,28 +269,32 @@ def test_only_set_values_is_updated(self): item = ModelWithDefault.filter(id=1).first() ModelWithDefault.objects(id=1).delete() item.mf = {1: 2} - + udt, default_udt = UDT(age=1, mf={2:3}), UDT(age=1, mf={2:3}) + item.udt, item.default_udt = udt, default_udt item.save() self.assertEqual(ModelWithDefault.objects().all().get()._as_dict(), - {'id': 1, 'dummy': None, 'mf': {1: 2}}) + {'id': 1, 'dummy': None, 'mf': {1: 2}, "udt": udt, "udt_default": default_udt}) def test_collections(self): """ - Test the updates work as expected when an object is deleted + Test the updates work as expected on Map objects @since 3.9 @jira_ticket PYTHON-657 - @expected_result the non updated column is None and the - updated column has the set value + @expected_result the row is updated when the Map object is + reduced @test_category object_mapper """ - ModelWithDefault.create(id=1, mf={1: 1, 2: 1}, dummy=1).save() + udt, udt_default = UDT(age=1, mf={1: 1, 2: 1}), UDT(age=1, mf={1: 1, 2: 1}) + + ModelWithDefault.create(id=1, mf={1: 1, 2: 1}, dummy=1, udt=udt, udt_default=udt_default).save() item = ModelWithDefault.filter(id=1).first() + udt, udt_default = UDT(age=1, mf={2: 1}), UDT(age=1, mf={2: 1}) item.update(mf={2:1}) self.assertEqual(ModelWithDefault.objects().all().get()._as_dict(), - {'id': 1, 'dummy': 1, 'mf': {2: 1}}) + {'id': 1, 'dummy': 1, 'mf': {2: 1}, "udt": udt, "udt_default": udt_default}) def test_collection_with_default(self): """ @@ -278,24 +307,32 @@ def test_collection_with_default(self): @test_category object_mapper """ sync_table(ModelWithDefaultCollection) - item = ModelWithDefaultCollection.create(id=1, mf={1: 1}, dummy=1).save() + + udt, udt_default = UDT(age=1, mf={6: 6}), UDT(age=1, mf={6: 6}) + + item = ModelWithDefaultCollection.create(id=1, mf={1: 1}, dummy=1, udt=udt, udt_default=udt_default).save() self.assertEqual(ModelWithDefaultCollection.objects().all().get()._as_dict(), - {'id': 1, 'dummy': 1, 'mf': {1: 1}}) + {'id': 1, 'dummy': 1, 'mf': {1: 1}, "udt": udt, "udt_default": udt_default}) - item.update(mf={2: 2}) + udt, udt_default = UDT(age=1, mf={5: 5}), UDT(age=1, mf={5: 5}) + item.update(mf={2: 2}, udt=udt, udt_default=udt_default) self.assertEqual(ModelWithDefaultCollection.objects().all().get()._as_dict(), - {'id': 1, 'dummy': 1, 'mf': {2: 2}}) + {'id': 1, 'dummy': 1, 'mf': {2: 2}, "udt": udt, "udt_default": udt_default}) - item.update(mf=None) + udt, udt_default = UDT(age=1, mf=None), UDT(age=1, mf=None) + expected_udt, expected_udt_default = UDT(age=1, mf={}), UDT(age=1, mf={}) + item.update(mf=None, udt=udt, udt_default=udt_default) self.assertEqual(ModelWithDefaultCollection.objects().all().get()._as_dict(), - {'id': 1, 'dummy': 1, 'mf': {}}) + {'id': 1, 'dummy': 1, 'mf': {}, "udt": expected_udt, "udt_default": expected_udt_default}) + udt_default = UDT(age=1, mf=None), UDT(age=1, mf={5:5}) item = ModelWithDefaultCollection.create(id=2, dummy=2).save() self.assertEqual(ModelWithDefaultCollection.objects().all().get(id=2)._as_dict(), - {'id': 2, 'dummy': 2, 'mf': {2: 2}}) + {'id': 2, 'dummy': 2, 'mf': {2: 2}, "udt": None, "udt_default": udt_default}) - item.update(mf={1: 1, 4: 4}) + udt, udt_default = UDT(age=1, mf={1: 1, 6: 6}), UDT(age=1, mf={1: 1, 6: 6}) + item.update(mf={1: 1, 4: 4}, udt=udt, udt_default=udt_default) self.assertEqual(ModelWithDefaultCollection.objects().all().get(id=2)._as_dict(), - {'id': 2, 'dummy': 2, 'mf': {1: 1, 4: 4}}) + {'id': 2, 'dummy': 2, 'mf': {1: 1, 4: 4}, "udt": udt, "udt_default": udt_default}) drop_table(ModelWithDefaultCollection) diff --git a/tests/integration/cqlengine/query/test_queryoperators.py b/tests/integration/cqlengine/query/test_queryoperators.py index f52db06fff..46d46d65e6 100644 --- a/tests/integration/cqlengine/query/test_queryoperators.py +++ b/tests/integration/cqlengine/query/test_queryoperators.py @@ -150,7 +150,7 @@ def test_named_table_pk_token_function(self): TokenTestModel.create(key=i, val=i) named = NamedTable(DEFAULT_KEYSPACE, TokenTestModel.__table_name__) - query = named.objects.all().limit(1) + query = named.all().limit(1) first_page = list(query) last = first_page[-1] self.assertTrue(len(first_page) is 1) diff --git a/tests/integration/cqlengine/query/test_updates.py b/tests/integration/cqlengine/query/test_updates.py index 2daa3a48fb..7c4917be7d 100644 --- a/tests/integration/cqlengine/query/test_updates.py +++ b/tests/integration/cqlengine/query/test_updates.py @@ -18,21 +18,12 @@ from cassandra.cqlengine.models import Model from cassandra.cqlengine.management import sync_table, drop_table from cassandra.cqlengine import columns + from tests.integration.cqlengine import is_prepend_reversed -from tests.integration.cqlengine.base import BaseCassEngTestCase +from tests.integration.cqlengine.base import BaseCassEngTestCase, TestQueryUpdateModel from tests.integration.cqlengine import execute_count from tests.integration import greaterthancass20 -class TestQueryUpdateModel(Model): - - partition = columns.UUID(primary_key=True, default=uuid4) - cluster = columns.Integer(primary_key=True) - count = columns.Integer(required=False) - text = columns.Text(required=False, index=True) - text_set = columns.Set(columns.Text, required=False) - text_list = columns.List(columns.Text, required=False) - text_map = columns.Map(columns.Text, columns.Text, required=False) - class QueryUpdateTests(BaseCassEngTestCase): diff --git a/tests/integration/cqlengine/statements/test_base_statement.py b/tests/integration/cqlengine/statements/test_base_statement.py index 029360771c..21388d0c7e 100644 --- a/tests/integration/cqlengine/statements/test_base_statement.py +++ b/tests/integration/cqlengine/statements/test_base_statement.py @@ -16,8 +16,19 @@ except ImportError: import unittest # noqa +from uuid import uuid4 + from cassandra.query import FETCH_SIZE_UNSET from cassandra.cqlengine.statements import BaseCQLStatement +from cassandra.cqlengine.management import sync_table, drop_table +from cassandra.cqlengine.statements import InsertStatement, UpdateStatement, SelectStatement, DeleteStatement, \ + WhereClause +from cassandra.cqlengine.operators import EqualsOperator +from cassandra.cqlengine.columns import Column + +from tests.integration.cqlengine.base import BaseCassEngTestCase, TestQueryUpdateModel +from tests.integration.cqlengine import DEFAULT_KEYSPACE +from cassandra.cqlengine.connection import execute class BaseStatementTest(unittest.TestCase): @@ -32,3 +43,71 @@ def test_fetch_size(self): stmt = BaseCQLStatement('table', None) self.assertEqual(stmt.fetch_size, FETCH_SIZE_UNSET) + + +class ExecuteStatementTest(BaseCassEngTestCase): + @classmethod + def setUpClass(cls): + super(ExecuteStatementTest, cls).setUpClass() + sync_table(TestQueryUpdateModel) + cls.table_name = '{0}.test_query_update_model'.format(DEFAULT_KEYSPACE) + + @classmethod + def tearDownClass(cls): + super(ExecuteStatementTest, cls).tearDownClass() + drop_table(TestQueryUpdateModel) + + def _verify_statement(self, original): + st = SelectStatement(self.table_name) + result = execute(st) + response = result[0] + + for assignment in original.assignments: + self.assertEqual(response[assignment.field], assignment.value) + self.assertEqual(len(response), 7) + + def test_insert_statement_execute(self): + """ + Test to verify the execution of BaseCQLStatements using connection.execute + + @since 3.10 + @jira_ticket PYTHON-505 + @expected_result inserts a row in C*, updates the rows and then deletes + all the rows using BaseCQLStatements + + @test_category data_types:object_mapper + """ + partition = uuid4() + cluster = 1 + + #Verifying insert statement + st = InsertStatement(self.table_name) + st.add_assignment(Column(db_field='partition'), partition) + st.add_assignment(Column(db_field='cluster'), cluster) + + st.add_assignment(Column(db_field='count'), 1) + st.add_assignment(Column(db_field='text'), "text_for_db") + st.add_assignment(Column(db_field='text_set'), set(("foo", "bar"))) + st.add_assignment(Column(db_field='text_list'), ["foo", "bar"]) + st.add_assignment(Column(db_field='text_map'), {"foo": '1', "bar": '2'}) + + execute(st) + self._verify_statement(st) + + # Verifying update statement + where = [WhereClause('partition', EqualsOperator(), partition), + WhereClause('cluster', EqualsOperator(), cluster)] + + st = UpdateStatement(self.table_name, where=where) + st.add_assignment(Column(db_field='count'), 2) + st.add_assignment(Column(db_field='text'), "text_for_db_update") + st.add_assignment(Column(db_field='text_set'), set(("foo_update", "bar_update"))) + st.add_assignment(Column(db_field='text_list'), ["foo_update", "bar_update"]) + st.add_assignment(Column(db_field='text_map'), {"foo": '3', "bar": '4'}) + + execute(st) + self._verify_statement(st) + + # Verifying delete statement + execute(DeleteStatement(self.table_name, where=where)) + self.assertEqual(TestQueryUpdateModel.objects.count(), 0) From 768e0207da10ec96458764e86db1469b93b65d0e Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 13 Apr 2017 19:11:26 -0400 Subject: [PATCH 1102/2005] Added some tests for comparison between columns --- .../cqlengine/columns/test_validation.py | 10 +++++----- tests/integration/cqlengine/model/test_model.py | 13 +++++++++++++ 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index f03bcd5efe..a9ba7001a3 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -278,7 +278,7 @@ class TestTime(DataType, BaseCassEngTestCase): def setUpClass(cls): cls.db_klass, cls.python_klass = Time, util.Time cls.first_value, cls.second_value, cls.third_value = \ - time(2, 12, 7, 48), util.Time(time(2, 12, 7, 49)), time(2, 12, 7, 50) + None, util.Time(time(2, 12, 7, 49)), time(2, 12, 7, 50) super(TestTime, cls).setUpClass() @@ -295,7 +295,7 @@ class TestBoolean(DataType, BaseCassEngTestCase): @classmethod def setUpClass(cls): cls.db_klass, cls.python_klass = Boolean, bool - cls.first_value, cls.second_value, cls.third_value = True, False, True + cls.first_value, cls.second_value, cls.third_value = None, False, True super(TestBoolean, cls).setUpClass() @@ -333,17 +333,17 @@ def setUpClass(cls): date_param=datetime.utcnow(), map_param={1: time(2, 12, 7, 50), 2: util.Time(time(2, 12, 7, 49))}, list_param=[datetime(1, 1, 2), datetime(1, 2, 3)], - set_param=set((datetime(1, 1, 3), util.Date(datetime(1, 1, 1)))), + set_param=None, tuple_param=(datetime(1, 1, 2), 2, False, 1, 2.324, uuid4()) ) cls.third_value = User( age=2, - date_param=datetime.utcnow(), + date_param=None, map_param={1: time(2, 12, 7, 51), 2: util.Time(time(2, 12, 7, 49))}, list_param=[datetime(1, 1, 2), datetime(1, 1, 4)], set_param=set((datetime(1, 1, 3), util.Date(datetime(1, 1, 2)))), - tuple_param=(datetime(1, 1, 2), 3, False, 1, 2.3214, uuid4()) + tuple_param=(None, 3, False, None, 2.3214, uuid4()) ) cls.model_class = UserModel diff --git a/tests/integration/cqlengine/model/test_model.py b/tests/integration/cqlengine/model/test_model.py index 19b818a069..6357eef00a 100644 --- a/tests/integration/cqlengine/model/test_model.py +++ b/tests/integration/cqlengine/model/test_model.py @@ -24,6 +24,7 @@ from cassandra.cqlengine.models import Model, ModelDefinitionException from uuid import uuid1 from tests.integration import pypy +from tests.integration.cqlengine.base import TestQueryUpdateModel class TestModel(unittest.TestCase): """ Tests the non-io functionality of models """ @@ -207,3 +208,15 @@ class DerivedTimeModel(TimeModelBase): DerivedTimeModel.create(uuid=uuid_value2, value="second") DerivedTimeModel.objects.filter(uuid=uuid_value) + +class TestColumnComparison(unittest.TestCase): + def test_comparison(self): + l = [TestQueryUpdateModel.partition.column, + TestQueryUpdateModel.cluster.column, + TestQueryUpdateModel.count.column, + TestQueryUpdateModel.text.column, + TestQueryUpdateModel.text_set.column, + TestQueryUpdateModel.text_list.column, + TestQueryUpdateModel.text_map.column] + + self.assertEqual(l, sorted(l)) \ No newline at end of file From 5ce17d52908fdbf45c803b129ae8dfe45f0b7b4f Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 14 Apr 2017 09:29:31 -0400 Subject: [PATCH 1103/2005] Skipped some tests depending on C* version --- .../integration/cqlengine/columns/test_validation.py | 11 +++++++---- tests/integration/cqlengine/model/test_model.py | 6 +++++- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index a9ba7001a3..c599f05898 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -31,7 +31,7 @@ from cassandra.cqlengine.usertype import UserType from cassandra import util -from tests.integration import PROTOCOL_VERSION +from tests.integration import PROTOCOL_VERSION, CASSANDRA_VERSION from tests.integration.cqlengine.base import BaseCassEngTestCase, TestQueryUpdateModel @@ -191,7 +191,7 @@ def test_varint_io(self): class DataType(): @classmethod def setUpClass(cls): - if PROTOCOL_VERSION < 4: + if PROTOCOL_VERSION < 4 or CASSANDRA_VERSION < "3.0": return class DataTypeTest(Model): @@ -203,12 +203,12 @@ class DataTypeTest(Model): @classmethod def tearDownClass(cls): - if PROTOCOL_VERSION < 4: + if PROTOCOL_VERSION < 4 or CASSANDRA_VERSION < "3.0": return drop_table(cls.model_class) def setUp(self): - if PROTOCOL_VERSION < 4: + if PROTOCOL_VERSION < 4 or CASSANDRA_VERSION < "3.0": raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION)) def _check_value_is_correct_in_db(self, value): @@ -318,6 +318,9 @@ class UserModel(Model): class TestUDT(DataType, BaseCassEngTestCase): @classmethod def setUpClass(cls): + if PROTOCOL_VERSION < 4 or CASSANDRA_VERSION < "3.0": + return + cls.db_klass, cls.python_klass = UserDefinedType, User cls.first_value = User( age=1, diff --git a/tests/integration/cqlengine/model/test_model.py b/tests/integration/cqlengine/model/test_model.py index 6357eef00a..1fb920cbf5 100644 --- a/tests/integration/cqlengine/model/test_model.py +++ b/tests/integration/cqlengine/model/test_model.py @@ -219,4 +219,8 @@ def test_comparison(self): TestQueryUpdateModel.text_list.column, TestQueryUpdateModel.text_map.column] - self.assertEqual(l, sorted(l)) \ No newline at end of file + self.assertEqual(l, sorted(l)) + self.assertNotEqual(TestQueryUpdateModel.partition.column, TestQueryUpdateModel.cluster.column) + self.assertLessEqual(TestQueryUpdateModel.partition.column, TestQueryUpdateModel.cluster.column) + self.assertGreater(TestQueryUpdateModel.cluster.column, TestQueryUpdateModel.partition.column) + self.assertGreaterEqual(TestQueryUpdateModel.cluster.column, TestQueryUpdateModel.partition.column) From 700b60abc44578f2a0cc5ec940900eb4d2612995 Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 14 Apr 2017 13:48:24 -0400 Subject: [PATCH 1104/2005] Added some for tests for CQLEngine coverage --- .../integration/cqlengine/model/test_model.py | 2 +- .../cqlengine/model/test_model_io.py | 41 +++++++++++++++++++ .../cqlengine/test_lwt_conditional.py | 21 ++++++++++ 3 files changed, 63 insertions(+), 1 deletion(-) diff --git a/tests/integration/cqlengine/model/test_model.py b/tests/integration/cqlengine/model/test_model.py index 1fb920cbf5..8d346f80c3 100644 --- a/tests/integration/cqlengine/model/test_model.py +++ b/tests/integration/cqlengine/model/test_model.py @@ -174,7 +174,7 @@ class IllegalFilterColumnModel(Model): my_primary_key = columns.Integer(primary_key=True) filter = columns.Text() -@pypy + class ModelOverWriteTest(unittest.TestCase): def test_model_over_write(self): diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index 3b7c9586c1..a8c46253a5 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -702,6 +702,13 @@ def test_query_with_date(self): self.assertTrue(inst.date == day) +class BasicModelNoRouting(Model): + __table_name__ = 'basic_model_no_routing' + __compute_routing_key__ = False + k = columns.Integer(primary_key=True) + v = columns.Integer() + + class BasicModel(Model): __table_name__ = 'basic_model_routing' k = columns.Integer(primary_key=True) @@ -729,6 +736,7 @@ class TestModelRoutingKeys(BaseCassEngTestCase): @classmethod def setUpClass(cls): super(TestModelRoutingKeys, cls).setUpClass() + sync_table(BasicModelNoRouting) sync_table(BasicModel) sync_table(BasicModelMulti) sync_table(ComplexModelRouting) @@ -736,10 +744,43 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): super(TestModelRoutingKeys, cls).tearDownClass() + drop_table(BasicModelNoRouting) drop_table(BasicModel) drop_table(BasicModelMulti) drop_table(ComplexModelRouting) + def test_routing_key_is_ignored(self): + """ + Compares the routing key generated by simple partition key using the model with the one generated by the equivalent + bound statement. It also verifies basic operations work with no routing key + @since 3.2 + @jira_ticket PYTHON-505 + @expected_result they shouldn't match + + @test_category object_mapper + """ + + prepared = self.session.prepare( + """ + INSERT INTO {0}.basic_model_no_routing (k, v) VALUES (?, ?) + """.format(DEFAULT_KEYSPACE)) + bound = prepared.bind((1, 2)) + + mrk = BasicModelNoRouting._routing_key_from_values([1], self.session.cluster.protocol_version) + simple = SimpleStatement("") + simple.routing_key = mrk + self.assertNotEqual(bound.routing_key, simple.routing_key) + + # Verify that basic create, update and delete work with no routing key + t = BasicModelNoRouting.create(k=2, v=3) + t.update(v=4).save() + f = BasicModelNoRouting.objects.filter(k=2).first() + self.assertEqual(t, f) + + t.delete() + self.assertEqual(BasicModelNoRouting.objects.count(), 0) + + def test_routing_key_generation_basic(self): """ Compares the routing key generated by simple partition key using the model with the one generated by the equivalent diff --git a/tests/integration/cqlengine/test_lwt_conditional.py b/tests/integration/cqlengine/test_lwt_conditional.py index 0c1e108b34..0b7d0e578b 100644 --- a/tests/integration/cqlengine/test_lwt_conditional.py +++ b/tests/integration/cqlengine/test_lwt_conditional.py @@ -249,3 +249,24 @@ def test_column_delete_after_update(self): self.assertIsNone(TestConditionalModel.objects(id=t.id).first().text) self.assertEqual(TestConditionalModel.objects(id=t.id).first().count, 6) + + def test_conditional_without_instance(self): + """ + Test to ensure that the iff method is honored if it's called + directly from the Model class + + @jira_ticket PYTHON-505 + @expected_result the value is updated + + @test_category object_mapper + """ + uuid = uuid4() + TestConditionalModel.create(id=uuid, text='test_for_cassandra', count=5) + + # This uses the iff method directly from the model class without + # an instance having been created + TestConditionalModel.iff(count=5).filter(id=uuid).update(text=None, count=6) + + t = TestConditionalModel.filter(id=uuid).first() + self.assertIsNone(t.text) + self.assertEqual(t.count, 6) From de43f22e642745c7548729b1675d76ed4434b1ea Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 17 Apr 2017 15:44:44 -0400 Subject: [PATCH 1105/2005] Acknowledge comments on the review for PYTHON-505 --- .../cqlengine/columns/test_validation.py | 58 ++++++++++++++----- .../integration/cqlengine/model/test_udts.py | 40 +++++-------- 2 files changed, 59 insertions(+), 39 deletions(-) diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index c599f05898..5998690cdc 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -23,8 +23,10 @@ from uuid import uuid4, uuid1 from cassandra import InvalidRequest -from cassandra.cqlengine.columns import TimeUUID, Ascii, Text, Integer, BigInt, VarInt, DateTime, Date, UUID, Boolean, \ - Decimal, Inet, Time, UserDefinedType, Map, List, Set, Tuple, Double, Float +from cassandra.cqlengine.columns import (TimeUUID, Ascii, Text, Integer, BigInt, + VarInt, DateTime, Date, UUID, Boolean, + Decimal, Inet, Time, UserDefinedType, + Map, List, Set, Tuple, Double) from cassandra.cqlengine.connection import execute from cassandra.cqlengine.management import sync_table, drop_table from cassandra.cqlengine.models import Model, ValidationError @@ -237,7 +239,7 @@ def _check_value_is_correct_in_db(self, value): return result - def test_date_io_using_datetime(self): + def test_param_io(self): first_value = self.first_value second_value = self.second_value third_value = self.third_value @@ -255,7 +257,7 @@ def test_date_io_using_datetime(self): result.update(class_param=None).save() self._check_value_is_correct_in_db(None) - def test_date_none(self): + def test_param_none(self): self.model_class.objects.create(test_id=1, class_param=None) dt2 = self.model_class.objects(test_id=1).first() self.assertIsNone(dt2.class_param) @@ -267,35 +269,61 @@ def test_date_none(self): class TestDate(DataType, BaseCassEngTestCase): @classmethod def setUpClass(cls): - cls.db_klass, cls.python_klass = Date, util.Date - cls.first_value, cls.second_value, cls.third_value = \ - datetime.utcnow(), util.Date(datetime(1, 1, 1)), datetime(1, 1, 2) + cls.db_klass, cls.python_klass = ( + Date, + util.Date + ) + + cls.first_value, cls.second_value, cls.third_value = ( + datetime.utcnow(), + util.Date(datetime(1, 1, 1)), + datetime(1, 1, 2) + ) super(TestDate, cls).setUpClass() class TestTime(DataType, BaseCassEngTestCase): @classmethod def setUpClass(cls): - cls.db_klass, cls.python_klass = Time, util.Time - cls.first_value, cls.second_value, cls.third_value = \ - None, util.Time(time(2, 12, 7, 49)), time(2, 12, 7, 50) + cls.db_klass, cls.python_klass = ( + Time, + util.Time + ) + cls.first_value, cls.second_value, cls.third_value = ( + None, + util.Time(time(2, 12, 7, 49)), + time(2, 12, 7, 50) + ) super(TestTime, cls).setUpClass() class TestDateTime(DataType, BaseCassEngTestCase): @classmethod def setUpClass(cls): - cls.db_klass, cls.python_klass = DateTime, datetime - cls.first_value, cls.second_value, cls.third_value = \ - datetime(2017, 4, 13, 18, 34, 24, 317000), datetime(1, 1, 1), datetime(1, 1, 2) + cls.db_klass, cls.python_klass = ( + DateTime, + datetime + ) + cls.first_value, cls.second_value, cls.third_value = ( + datetime(2017, 4, 13, 18, 34, 24, 317000), + datetime(1, 1, 1), + datetime(1, 1, 2) + ) super(TestDateTime, cls).setUpClass() class TestBoolean(DataType, BaseCassEngTestCase): @classmethod def setUpClass(cls): - cls.db_klass, cls.python_klass = Boolean, bool - cls.first_value, cls.second_value, cls.third_value = None, False, True + cls.db_klass, cls.python_klass = ( + Boolean, + bool + ) + cls.first_value, cls.second_value, cls.third_value = ( + None, + False, + True + ) super(TestBoolean, cls).setUpClass() diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index e208787e30..b232a7bc1e 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -108,6 +108,7 @@ class User(UserType): def test_can_insert_udts(self): sync_table(UserModel) + self.addCleanup(drop_table, UserModel) user = User(age=42, name="John") UserModel.create(id=0, info=user) @@ -119,10 +120,10 @@ def test_can_insert_udts(self): self.assertTrue(type(john.info) is User) self.assertEqual(42, john.info.age) self.assertEqual("John", john.info.name) - drop_table(UserModel) def test_can_update_udts(self): sync_table(UserModel) + self.addCleanup(drop_table, UserModel) user = User(age=42, name="John") created_user = UserModel.create(id=0, info=user) @@ -137,10 +138,10 @@ def test_can_update_udts(self): mary_info = UserModel.objects().first().info self.assertEqual(22, mary_info["age"]) self.assertEqual("Mary", mary_info["name"]) - drop_table(UserModel) def test_can_update_udts_with_nones(self): sync_table(UserModel) + self.addCleanup(drop_table, UserModel) user = User(age=42, name="John") created_user = UserModel.create(id=0, info=user) @@ -154,7 +155,6 @@ def test_can_update_udts_with_nones(self): john_info = UserModel.objects().first().info self.assertIsNone(john_info) - drop_table(UserModel) def test_can_create_same_udt_different_keyspaces(self): sync_type(DEFAULT_KEYSPACE, User) @@ -174,6 +174,7 @@ class UserModelGender(Model): info = columns.UserDefinedType(UserGender) sync_table(UserModelGender) + self.addCleanup(drop_table, UserModelGender) user = UserGender(age=42, name="John") UserModelGender.create(id=0, info=user) @@ -190,7 +191,6 @@ class UserModelGender(Model): self.assertEqual(42, john_info.age) self.assertIsNone(john_info.name) self.assertIsNone(john_info.gender) - drop_table(UserModelGender) def test_can_insert_nested_udts(self): class Depth_0(UserType): @@ -214,6 +214,7 @@ class DepthModel(Model): v_3 = columns.UserDefinedType(Depth_3) sync_table(DepthModel) + self.addCleanup(drop_table, DepthModel) udts = [Depth_0(age=42, name="John")] udts.append(Depth_1(value=udts[0])) @@ -228,8 +229,6 @@ class DepthModel(Model): self.assertEqual(udts[2], output.v_2) self.assertEqual(udts[3], output.v_3) - drop_table(DepthModel) - def test_can_insert_udts_with_nones(self): """ Test for inserting all column types as empty into a UserType as None's @@ -246,6 +245,7 @@ def test_can_insert_udts_with_nones(self): @test_category data_types:udt """ sync_table(AllDatatypesModel) + self.addCleanup(drop_table, AllDatatypesModel) input = AllDatatypes(a=None, b=None, c=None, d=None, e=None, f=None, g=None, h=None, i=None, j=None, k=None, l=None, m=None, n=None) @@ -256,8 +256,6 @@ def test_can_insert_udts_with_nones(self): output = AllDatatypesModel.objects().first().data self.assertEqual(input, output) - drop_table(AllDatatypesModel) - def test_can_insert_udts_with_all_datatypes(self): """ Test for inserting all column types into a UserType @@ -274,6 +272,7 @@ def test_can_insert_udts_with_all_datatypes(self): @test_category data_types:udt """ sync_table(AllDatatypesModel) + self.addCleanup(drop_table, AllDatatypesModel) input = AllDatatypes(a='ascii', b=2 ** 63 - 1, c=bytearray(b'hello world'), d=True, e=datetime.utcfromtimestamp(872835240), f=Decimal('12.3E+7'), g=2.39, @@ -288,8 +287,6 @@ def test_can_insert_udts_with_all_datatypes(self): for i in range(ord('a'), ord('a') + 14): self.assertEqual(input[chr(i)], output[chr(i)]) - drop_table(AllDatatypesModel) - def test_can_insert_udts_protocol_v4_datatypes(self): """ Test for inserting all protocol v4 column types into a UserType @@ -321,6 +318,7 @@ class Allv4DatatypesModel(Model): data = columns.UserDefinedType(Allv4Datatypes) sync_table(Allv4DatatypesModel) + self.addCleanup(drop_table, Allv4DatatypesModel) input = Allv4Datatypes(a=Date(date(1970, 1, 1)), b=32523, c=Time(time(16, 47, 25, 7)), d=123) Allv4DatatypesModel.create(id=0, data=input) @@ -331,8 +329,6 @@ class Allv4DatatypesModel(Model): for i in range(ord('a'), ord('a') + 3): self.assertEqual(input[chr(i)], output[chr(i)]) - drop_table(Allv4DatatypesModel) - def test_nested_udts_inserts(self): """ Test for inserting collections of user types using cql engine. @@ -366,6 +362,8 @@ class Container(Model): # Create table, insert data sync_table(Container) + self.addCleanup(drop_table, Container) + Container.create(id=UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), names=names) # Validate input and output matches @@ -373,8 +371,6 @@ class Container(Model): names_output = Container.objects().first().names self.assertEqual(names_output, names) - drop_table(Container) - def test_udts_with_unicode(self): """ Test for inserting models with unicode and udt columns. @@ -396,6 +392,8 @@ class UserModelText(Model): info = columns.UserDefinedType(User) sync_table(UserModelText) + self.addCleanup(drop_table, UserModelText) + # Two udt instances one with a unicode one with ascii user_template_ascii = User(age=25, name=ascii_name) user_template_unicode = User(age=25, name=unicode_name) @@ -404,8 +402,6 @@ class UserModelText(Model): UserModelText.create(id=unicode_name, info=user_template_ascii) UserModelText.create(id=unicode_name, info=user_template_unicode) - drop_table(UserModelText) - def test_register_default_keyspace(self): from cassandra.cqlengine import models @@ -443,6 +439,7 @@ class TheModel(Model): info = columns.UserDefinedType(db_field_different) sync_table(TheModel) + self.addCleanup(drop_table, TheModel) cluster = connection.get_cluster() type_meta = cluster.metadata.keyspaces[TheModel._get_keyspace()].user_types[db_field_different.type_name()] @@ -471,8 +468,6 @@ class TheModel(Model): self.assertEqual(info.a, age) self.assertEqual(info.n, name) - drop_table(TheModel) - def test_db_field_overload(self): """ Tests for db_field UserTypeDefinitionException @@ -528,6 +523,7 @@ class OuterModel(Model): simple = columns.UserDefinedType(NestedUdt) sync_table(OuterModel) + self.addCleanup(drop_table, OuterModel) t = OuterModel.create(name='test1') t.nested = [NestedUdt(something='test')] @@ -538,8 +534,6 @@ class OuterModel(Model): self.assertIsNotNone(t.simple.test_id) self.assertEqual(t.simple.default_text, "default text") - drop_table(OuterModel) - def test_udt_validate(self): """ Test to verify restrictions are honored and that validate is called @@ -561,14 +555,13 @@ class UserModelValidate(Model): info = columns.UserDefinedType(UserValidate) sync_table(UserModelValidate) + self.addCleanup(drop_table, UserModelValidate) user = UserValidate(age=1, name="Robert") item = UserModelValidate(id=1, info=user) with self.assertRaises(ValidationError): item.save() - drop_table(UserModelValidate) - def test_udt_validate_with_default(self): """ Test to verify restrictions are honored and that validate is called @@ -590,10 +583,9 @@ class UserModelValidateDefault(Model): info = columns.UserDefinedType(UserValidateDefault) sync_table(UserModelValidateDefault) + self.addCleanup(drop_table, UserModelValidateDefault) user = UserValidateDefault(age=1) item = UserModelValidateDefault(id=1, info=user) with self.assertRaises(ValidationError): item.save() - - drop_table(UserModelValidateDefault) From 989963b6a12853438ede0afb24376a78d4954b17 Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 20 Apr 2017 15:30:34 -0400 Subject: [PATCH 1106/2005] Added some comments to clarify some parts of the tests --- .../cqlengine/columns/test_validation.py | 21 +++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 5998690cdc..23a2ee1179 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -211,14 +211,23 @@ def tearDownClass(cls): def setUp(self): if PROTOCOL_VERSION < 4 or CASSANDRA_VERSION < "3.0": - raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION)) + raise unittest.SkipTest("Protocol v4 datatypes " + "require native protocol 4+ and C* version >=3.0, " + "currently using protocol {0} and C* version {1}". + format(PROTOCOL_VERSION, CASSANDRA_VERSION)) def _check_value_is_correct_in_db(self, value): - + """ + Check that different ways of reading the value + from the model class give the same expected result + """ if value is None: result = self.model_class.objects.all().allow_filtering().filter(test_id=0).first() self.assertIsNone(result.class_param) + result = self.model_class.objects(test_id=0).first() + self.assertIsNone(result.class_param) + else: if not isinstance(value, self.python_klass): value_to_compare = self.python_klass(value) @@ -244,20 +253,28 @@ def test_param_io(self): second_value = self.second_value third_value = self.third_value + # Check value is correctly written/read from the DB self.model_class.objects.create(test_id=0, class_param=first_value) result = self._check_value_is_correct_in_db(first_value) result.delete() + # Check the previous value has been correctly deleted and write a new value self.model_class.objects.create(test_id=0, class_param=second_value) result = self._check_value_is_correct_in_db(second_value) + # Check the value can be correctly updated from the Model class result.update(class_param=third_value).save() result = self._check_value_is_correct_in_db(third_value) + # Check None is correctly written to the DB result.update(class_param=None).save() self._check_value_is_correct_in_db(None) def test_param_none(self): + """ + Test that None value is correctly written to the db + and then is correctly read + """ self.model_class.objects.create(test_id=1, class_param=None) dt2 = self.model_class.objects(test_id=1).first() self.assertIsNone(dt2.class_param) From 09130b39c28eece4765793cce162c5ffe5ffe94c Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 20 Apr 2017 15:34:36 -0400 Subject: [PATCH 1107/2005] Fixed some tests --- cassandra/cqlengine/columns.py | 16 ++++++++++++++++ cassandra/util.py | 3 +++ 2 files changed, 19 insertions(+) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 8ebebf7c06..00676e2616 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -579,6 +579,15 @@ def to_database(self, value): d = value if isinstance(value, util.Date) else util.Date(value) return d.days_from_epoch + SimpleDateType.EPOCH_OFFSET_DAYS + def to_python(self, value): + value = super(Date, self).to_database(value) + if value is None: + return + if isinstance(value, util.Date): + return value + if isinstance(value, datetime): + value = value.date() + return util.Date(value) class Time(Column): """ @@ -597,6 +606,13 @@ def to_database(self, value): # str(util.Time) yields desired CQL encoding return value if isinstance(value, util.Time) else util.Time(value) + def to_python(self, value): + value = super(Time, self).to_database(value) + if value is None: + return + if isinstance(value, util.Time): + return value + return util.Time(value) class UUID(Column): """ diff --git a/cassandra/util.py b/cassandra/util.py index 822ea1ab63..9277103398 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -1065,6 +1065,9 @@ def __eq__(self, other): except Exception: return False + def __ne__(self, other): + return not self.__eq__(other) + def __lt__(self, other): if not isinstance(other, Date): return NotImplemented From fedb68989d72d26c31627ec09badd9885e8f800d Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 20 Apr 2017 15:51:00 -0400 Subject: [PATCH 1108/2005] Fixed the validation UDT data tests --- cassandra/util.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cassandra/util.py b/cassandra/util.py index 9277103398..829658d6ad 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -974,6 +974,9 @@ def __eq__(self, other): datetime.time(hour=self.hour, minute=self.minute, second=self.second, microsecond=self.nanosecond // Time.MICRO) == other + def __ne__(self, other): + return not self.__eq__(other) + def __lt__(self, other): if not isinstance(other, Time): return NotImplemented @@ -1067,7 +1070,7 @@ def __eq__(self, other): def __ne__(self, other): return not self.__eq__(other) - + def __lt__(self, other): if not isinstance(other, Date): return NotImplemented From eac82517d143fdbeb1cb2e88caa352b990ab4d32 Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 20 Apr 2017 18:41:20 -0400 Subject: [PATCH 1109/2005] Skipped tests for 2.0, 2.1 and 2.2 C* versions --- tests/integration/cqlengine/columns/test_validation.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 23a2ee1179..8eb0d2708c 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -33,8 +33,8 @@ from cassandra.cqlengine.usertype import UserType from cassandra import util -from tests.integration import PROTOCOL_VERSION, CASSANDRA_VERSION -from tests.integration.cqlengine.base import BaseCassEngTestCase, TestQueryUpdateModel +from tests.integration import PROTOCOL_VERSION, CASSANDRA_VERSION, greaterthanorequalcass30 +from tests.integration.cqlengine.base import BaseCassEngTestCase class TestDatetime(BaseCassEngTestCase): @@ -69,6 +69,7 @@ def dst(self, date_time): dt2 = self.DatetimeTest.objects(test_id=1).first() self.assertEqual(dt2.created_at.timetuple()[:6], (now + timedelta(hours=1)).timetuple()[:6]) + @greaterthanorequalcass30 def test_datetime_date_support(self): today = date.today() self.DatetimeTest.objects.create(test_id=2, created_at=today) From e1d7e19725a970f64111936b3bebea7a76e5aa95 Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 24 Apr 2017 16:22:08 -0400 Subject: [PATCH 1110/2005] Acknowledged review comments, removed unnecessary lines in to_python methods --- cassandra/cqlengine/columns.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 00676e2616..80b2e5c477 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -570,7 +570,6 @@ class Date(Column): db_type = 'date' def to_database(self, value): - value = super(Date, self).to_database(value) if value is None: return @@ -580,7 +579,6 @@ def to_database(self, value): return d.days_from_epoch + SimpleDateType.EPOCH_OFFSET_DAYS def to_python(self, value): - value = super(Date, self).to_database(value) if value is None: return if isinstance(value, util.Date): From 3691659ec2b5c75f61601ed75896c0f61e511c4e Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 25 Apr 2017 11:46:24 -0400 Subject: [PATCH 1111/2005] Fixed test_collections test --- tests/integration/cqlengine/model/test_updates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/cqlengine/model/test_updates.py b/tests/integration/cqlengine/model/test_updates.py index 79c1372ae9..15f744c4d4 100644 --- a/tests/integration/cqlengine/model/test_updates.py +++ b/tests/integration/cqlengine/model/test_updates.py @@ -292,7 +292,7 @@ def test_collections(self): item = ModelWithDefault.filter(id=1).first() udt, udt_default = UDT(age=1, mf={2: 1}), UDT(age=1, mf={2: 1}) - item.update(mf={2:1}) + item.update(mf={2:1}, udt=udt, udt_default=udt_default) self.assertEqual(ModelWithDefault.objects().all().get()._as_dict(), {'id': 1, 'dummy': 1, 'mf': {2: 1}, "udt": udt, "udt_default": udt_default}) From a3aaac38d6d938f5d53507fa469306fce5435281 Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 28 Apr 2017 11:29:49 -0400 Subject: [PATCH 1112/2005] Fixed variable name typo in cqlengine tests, simplified some statements --- .../cqlengine/model/test_updates.py | 36 +++++++++---------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/tests/integration/cqlengine/model/test_updates.py b/tests/integration/cqlengine/model/test_updates.py index 15f744c4d4..c22668bcf8 100644 --- a/tests/integration/cqlengine/model/test_updates.py +++ b/tests/integration/cqlengine/model/test_updates.py @@ -179,7 +179,7 @@ class ModelWithDefaultTests(BaseCassEngTestCase): @classmethod def setUpClass(cls): - cls.default_udt = UDT(age=1, mf={2:2}, dummy_udt=42) + cls.udt_default = UDT(age=1, mf={2:2}, dummy_udt=42) def setUp(self): sync_table(ModelWithDefault) @@ -201,14 +201,14 @@ def test_value_override_with_default(self): initial = ModelWithDefault(id=1, mf={0: 0}, dummy=0, udt=first_udt, udt_default=first_udt) initial.save() - self.assertEqual(ModelWithDefault.objects().all().get()._as_dict(), + self.assertEqual(ModelWithDefault.all().get()._as_dict(), {'id': 1, 'dummy': 0, 'mf': {0: 0}, "udt": first_udt, "udt_default": first_udt}) second_udt = UDT(age=1, mf={3: 3}, dummy_udt=12) second = ModelWithDefault(id=1) second.update(mf={0: 1}, udt=second_udt) - self.assertEqual(ModelWithDefault.objects().all().get()._as_dict(), + self.assertEqual(ModelWithDefault.all().get()._as_dict(), {'id': 1, 'dummy': 0, 'mf': {0: 1}, "udt": second_udt, "udt_default": first_udt}) def test_value_is_written_if_is_default(self): @@ -218,18 +218,16 @@ def test_value_is_written_if_is_default(self): @since 3.9 @jira_ticket PYTHON-657 @expected_result column value should be updated - - @test_category object_mapper :return: """ initial = ModelWithDefault(id=1) initial.mf = {0: 0} initial.dummy = 42 - initial.udt_default = self.default_udt + initial.udt_default = self.udt_default initial.update() - self.assertEqual(ModelWithDefault.objects().all().get()._as_dict(), - {'id': 1, 'dummy': 42, 'mf': {0: 0}, "udt": None, "udt_default": self.default_udt}) + self.assertEqual(ModelWithDefault.all().get()._as_dict(), + {'id': 1, 'dummy': 42, 'mf': {0: 0}, "udt": None, "udt_default": self.udt_default}) def test_null_update_is_respected(self): """ @@ -250,7 +248,7 @@ def test_null_update_is_respected(self): updated_udt = UDT(age=1, mf={2:2}, dummy_udt=None) obj.update(dummy=None, udt_default=updated_udt) - self.assertEqual(ModelWithDefault.objects().all().get()._as_dict(), + self.assertEqual(ModelWithDefault.all().get()._as_dict(), {'id': 1, 'dummy': None, 'mf': {0: 0}, "udt": None, "udt_default": updated_udt}) def test_only_set_values_is_updated(self): @@ -269,12 +267,12 @@ def test_only_set_values_is_updated(self): item = ModelWithDefault.filter(id=1).first() ModelWithDefault.objects(id=1).delete() item.mf = {1: 2} - udt, default_udt = UDT(age=1, mf={2:3}), UDT(age=1, mf={2:3}) - item.udt, item.default_udt = udt, default_udt + udt, udt_default = UDT(age=1, mf={2:3}), UDT(age=1, mf={2:3}) + item.udt, item.udt_default = udt, udt_default item.save() - self.assertEqual(ModelWithDefault.objects().all().get()._as_dict(), - {'id': 1, 'dummy': None, 'mf': {1: 2}, "udt": udt, "udt_default": default_udt}) + self.assertEqual(ModelWithDefault.all().get()._as_dict(), + {'id': 1, 'dummy': None, 'mf': {1: 2}, "udt": udt, "udt_default": udt_default}) def test_collections(self): """ @@ -293,7 +291,7 @@ def test_collections(self): udt, udt_default = UDT(age=1, mf={2: 1}), UDT(age=1, mf={2: 1}) item.update(mf={2:1}, udt=udt, udt_default=udt_default) - self.assertEqual(ModelWithDefault.objects().all().get()._as_dict(), + self.assertEqual(ModelWithDefault.all().get()._as_dict(), {'id': 1, 'dummy': 1, 'mf': {2: 1}, "udt": udt, "udt_default": udt_default}) def test_collection_with_default(self): @@ -311,28 +309,28 @@ def test_collection_with_default(self): udt, udt_default = UDT(age=1, mf={6: 6}), UDT(age=1, mf={6: 6}) item = ModelWithDefaultCollection.create(id=1, mf={1: 1}, dummy=1, udt=udt, udt_default=udt_default).save() - self.assertEqual(ModelWithDefaultCollection.objects().all().get()._as_dict(), + self.assertEqual(ModelWithDefaultCollection.all().get()._as_dict(), {'id': 1, 'dummy': 1, 'mf': {1: 1}, "udt": udt, "udt_default": udt_default}) udt, udt_default = UDT(age=1, mf={5: 5}), UDT(age=1, mf={5: 5}) item.update(mf={2: 2}, udt=udt, udt_default=udt_default) - self.assertEqual(ModelWithDefaultCollection.objects().all().get()._as_dict(), + self.assertEqual(ModelWithDefaultCollection.all().get()._as_dict(), {'id': 1, 'dummy': 1, 'mf': {2: 2}, "udt": udt, "udt_default": udt_default}) udt, udt_default = UDT(age=1, mf=None), UDT(age=1, mf=None) expected_udt, expected_udt_default = UDT(age=1, mf={}), UDT(age=1, mf={}) item.update(mf=None, udt=udt, udt_default=udt_default) - self.assertEqual(ModelWithDefaultCollection.objects().all().get()._as_dict(), + self.assertEqual(ModelWithDefaultCollection.all().get()._as_dict(), {'id': 1, 'dummy': 1, 'mf': {}, "udt": expected_udt, "udt_default": expected_udt_default}) udt_default = UDT(age=1, mf=None), UDT(age=1, mf={5:5}) item = ModelWithDefaultCollection.create(id=2, dummy=2).save() - self.assertEqual(ModelWithDefaultCollection.objects().all().get(id=2)._as_dict(), + self.assertEqual(ModelWithDefaultCollection.all().get(id=2)._as_dict(), {'id': 2, 'dummy': 2, 'mf': {2: 2}, "udt": None, "udt_default": udt_default}) udt, udt_default = UDT(age=1, mf={1: 1, 6: 6}), UDT(age=1, mf={1: 1, 6: 6}) item.update(mf={1: 1, 4: 4}, udt=udt, udt_default=udt_default) - self.assertEqual(ModelWithDefaultCollection.objects().all().get(id=2)._as_dict(), + self.assertEqual(ModelWithDefaultCollection.all().get(id=2)._as_dict(), {'id': 2, 'dummy': 2, 'mf': {1: 1, 4: 4}, "udt": udt, "udt_default": udt_default}) drop_table(ModelWithDefaultCollection) From c571d36777f864fde2cb7dc42f2e026c09172936 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 2 May 2017 10:59:26 -0400 Subject: [PATCH 1113/2005] Fix UDT changed behavior with a default value --- cassandra/cqlengine/columns.py | 13 +++++++--- .../cqlengine/model/test_updates.py | 25 +++++++++++++------ 2 files changed, 27 insertions(+), 11 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 80b2e5c477..9e7c2cad47 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -235,8 +235,6 @@ def to_database(self, value): """ Converts python value into database value """ - if value is None and self.has_default: - return self.get_default() return value @property @@ -948,7 +946,16 @@ def to_database(self, value): class UDTValueManager(BaseValueManager): @property def changed(self): - return self.value != self.previous_value or (self.value is not None and self.value.has_changed_fields()) + if self.explicit: + return self.value != self.previous_value + + default_value = self.column.get_default() + if not self.column._val_is_null(default_value): + return self.value != default_value + elif self.previous_value is None: + return not self.column._val_is_null(self.value) and self.value.has_changed_fields() + + return False def reset_previous_value(self): if self.value is not None: diff --git a/tests/integration/cqlengine/model/test_updates.py b/tests/integration/cqlengine/model/test_updates.py index c22668bcf8..2f966e7ece 100644 --- a/tests/integration/cqlengine/model/test_updates.py +++ b/tests/integration/cqlengine/model/test_updates.py @@ -183,9 +183,11 @@ def setUpClass(cls): def setUp(self): sync_table(ModelWithDefault) + sync_table(ModelWithDefaultCollection) def tearDown(self): drop_table(ModelWithDefault) + drop_table(ModelWithDefaultCollection) def test_value_override_with_default(self): """ @@ -309,28 +311,35 @@ def test_collection_with_default(self): udt, udt_default = UDT(age=1, mf={6: 6}), UDT(age=1, mf={6: 6}) item = ModelWithDefaultCollection.create(id=1, mf={1: 1}, dummy=1, udt=udt, udt_default=udt_default).save() - self.assertEqual(ModelWithDefaultCollection.all().get()._as_dict(), + self.assertEqual(ModelWithDefaultCollection.objects.get(id=1)._as_dict(), {'id': 1, 'dummy': 1, 'mf': {1: 1}, "udt": udt, "udt_default": udt_default}) udt, udt_default = UDT(age=1, mf={5: 5}), UDT(age=1, mf={5: 5}) item.update(mf={2: 2}, udt=udt, udt_default=udt_default) - self.assertEqual(ModelWithDefaultCollection.all().get()._as_dict(), + self.assertEqual(ModelWithDefaultCollection.objects.get(id=1)._as_dict(), {'id': 1, 'dummy': 1, 'mf': {2: 2}, "udt": udt, "udt_default": udt_default}) udt, udt_default = UDT(age=1, mf=None), UDT(age=1, mf=None) expected_udt, expected_udt_default = UDT(age=1, mf={}), UDT(age=1, mf={}) item.update(mf=None, udt=udt, udt_default=udt_default) - self.assertEqual(ModelWithDefaultCollection.all().get()._as_dict(), + self.assertEqual(ModelWithDefaultCollection.objects.get(id=1)._as_dict(), {'id': 1, 'dummy': 1, 'mf': {}, "udt": expected_udt, "udt_default": expected_udt_default}) - udt_default = UDT(age=1, mf=None), UDT(age=1, mf={5:5}) - item = ModelWithDefaultCollection.create(id=2, dummy=2).save() - self.assertEqual(ModelWithDefaultCollection.all().get(id=2)._as_dict(), + udt_default = UDT(age=1, mf={2:2}, dummy_udt=42) + item = ModelWithDefaultCollection.create(id=2, dummy=2) + self.assertEqual(ModelWithDefaultCollection.objects.get(id=2)._as_dict(), {'id': 2, 'dummy': 2, 'mf': {2: 2}, "udt": None, "udt_default": udt_default}) udt, udt_default = UDT(age=1, mf={1: 1, 6: 6}), UDT(age=1, mf={1: 1, 6: 6}) item.update(mf={1: 1, 4: 4}, udt=udt, udt_default=udt_default) - self.assertEqual(ModelWithDefaultCollection.all().get(id=2)._as_dict(), + self.assertEqual(ModelWithDefaultCollection.objects.get(id=2)._as_dict(), {'id': 2, 'dummy': 2, 'mf': {1: 1, 4: 4}, "udt": udt, "udt_default": udt_default}) - drop_table(ModelWithDefaultCollection) + item.update(udt_default=None) + self.assertEqual(ModelWithDefaultCollection.objects.get(id=2)._as_dict(), + {'id': 2, 'dummy': 2, 'mf': {1: 1, 4: 4}, "udt": udt, "udt_default": None}) + + udt_default = UDT(age=1, mf={2:2}) + item.update(udt_default=udt_default) + self.assertEqual(ModelWithDefaultCollection.objects.get(id=2)._as_dict(), + {'id': 2, 'dummy': 2, 'mf': {1: 1, 4: 4}, "udt": udt, "udt_default": udt_default}) From 815250655b82fd72c29dd3bd846a99600767794f Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 8 May 2017 16:33:48 -0400 Subject: [PATCH 1114/2005] Added to_python and to_database methods to UserDefinedType --- cassandra/cqlengine/columns.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 9e7c2cad47..a5afb66e9c 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -1001,6 +1001,22 @@ def validate(self, value): val.validate() return val + def to_python(self, value): + copied_value = deepcopy(value) + for name, field in self.user_type._fields.items(): + if copied_value[name] is not None or isinstance(field, BaseContainerColumn): + copied_value[name] = field.to_python(copied_value[name]) + + return copied_value + + def to_database(self, value): + copied_value = deepcopy(value) + for name, field in self.user_type._fields.items(): + if copied_value[name] is not None or isinstance(field, BaseContainerColumn): + copied_value[name] = field.to_database(copied_value[name]) + + return copied_value + def resolve_udts(col_def, out_list): for col in col_def.sub_types: From 96cfe3deea8ae14594a85ae5c698668dccd461dc Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 8 May 2017 17:00:10 -0400 Subject: [PATCH 1115/2005] Added tests for to_python, to_database method of UserDefinedType --- cassandra/cqlengine/columns.py | 6 +++++ .../cqlengine/model/test_updates.py | 27 +++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index a5afb66e9c..3470a44a6e 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -1002,6 +1002,9 @@ def validate(self, value): return val def to_python(self, value): + if value is None: + return + copied_value = deepcopy(value) for name, field in self.user_type._fields.items(): if copied_value[name] is not None or isinstance(field, BaseContainerColumn): @@ -1010,6 +1013,9 @@ def to_python(self, value): return copied_value def to_database(self, value): + if value is None: + return + copied_value = deepcopy(value) for name, field in self.user_type._fields.items(): if copied_value[name] is not None or isinstance(field, BaseContainerColumn): diff --git a/tests/integration/cqlengine/model/test_updates.py b/tests/integration/cqlengine/model/test_updates.py index 2f966e7ece..0de6543e21 100644 --- a/tests/integration/cqlengine/model/test_updates.py +++ b/tests/integration/cqlengine/model/test_updates.py @@ -151,6 +151,7 @@ class UDT(UserType): age = columns.Integer() mf = columns.Map(columns.Integer, columns.Integer) dummy_udt = columns.Integer(default=42) + time_col = columns.Time() class ModelWithDefault(Model): @@ -343,3 +344,29 @@ def test_collection_with_default(self): item.update(udt_default=udt_default) self.assertEqual(ModelWithDefaultCollection.objects.get(id=2)._as_dict(), {'id': 2, 'dummy': 2, 'mf': {1: 1, 4: 4}, "udt": udt, "udt_default": udt_default}) + + + def test_udt_to_python(self): + """ + Test the to_python and to_database are correctly called on UDTs + @since 3.10 + @jira_ticket PYTHON-743 + @expected_result the int value is correctly converted to utils.Time + and written to C* + + @test_category object_mapper + """ + item = ModelWithDefault(id=1) + item.save() + + # We update time_col this way because we want to hit + # the to_python method from UserDefinedType, otherwise to_python + # would be called in UDT.__init__ + user_to_update = UDT() + user_to_update.time_col = 10 + + item.update(udt=user_to_update) + + udt, udt_default = UDT(time_col=10), UDT(age=1, mf={2:2}) + self.assertEqual(ModelWithDefault.objects.get(id=1)._as_dict(), + {'id': 1, 'dummy': 42, 'mf': {}, "udt": udt, "udt_default": udt_default}) From ffcb70c6010ed1d2a12190ed292283320b7a1434 Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 9 May 2017 08:42:02 -0400 Subject: [PATCH 1116/2005] Skipped test class is C* is lower than 2.2 --- tests/integration/cqlengine/model/test_updates.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/integration/cqlengine/model/test_updates.py b/tests/integration/cqlengine/model/test_updates.py index 0de6543e21..0f55ef476e 100644 --- a/tests/integration/cqlengine/model/test_updates.py +++ b/tests/integration/cqlengine/model/test_updates.py @@ -17,6 +17,7 @@ from mock import patch from cassandra.cqlengine import ValidationError +from tests.integration import greaterthancass21 from tests.integration.cqlengine.base import BaseCassEngTestCase from cassandra.cqlengine.models import Model from cassandra.cqlengine import columns @@ -175,7 +176,7 @@ class ModelWithDefaultCollection(Model): udt = columns.UserDefinedType(UDT) udt_default = columns.UserDefinedType(UDT, default=UDT(age=1, mf={2: 2})) - +@greaterthancass21 class ModelWithDefaultTests(BaseCassEngTestCase): @classmethod From d1638b1a043a4e52d220d90d4ea156c4ab8637bf Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 9 May 2017 14:12:36 -0400 Subject: [PATCH 1117/2005] Cleaned up some parts of the tests --- .../cqlengine/columns/test_validation.py | 2 +- .../cqlengine/model/test_model_io.py | 10 +++---- .../integration/cqlengine/model/test_udts.py | 26 +++++++++---------- .../cqlengine/model/test_updates.py | 12 ++++----- .../cqlengine/query/test_queryoperators.py | 4 +-- 5 files changed, 27 insertions(+), 27 deletions(-) diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 8eb0d2708c..8c2f54b081 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -771,7 +771,7 @@ def test_extra_field(self): sync_table(self.TestModel) self.TestModel.create() execute("ALTER TABLE {0} add blah int".format(self.TestModel.column_family_name(include_keyspace=True))) - self.TestModel.objects().all() + self.TestModel.objects.all() class TestTimeUUIDFromDatetime(BaseCassEngTestCase): diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index a8c46253a5..c7e161139a 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -210,7 +210,7 @@ class AllDatatypesModel(Model): m=UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), n=int(str(2147483647) + '000')) self.assertEqual(1, AllDatatypesModel.objects.count()) - output = AllDatatypesModel.objects().first() + output = AllDatatypesModel.objects.first() for i, i_char in enumerate(range(ord('a'), ord('a') + 14)): self.assertEqual(input[i], output[chr(i_char)]) @@ -263,7 +263,7 @@ class v4DatatypesModel(Model): v4DatatypesModel.create(id=0, a=date(1970, 1, 1), b=32523, c=time(16, 47, 25, 7), d=123) self.assertEqual(1, v4DatatypesModel.objects.count()) - output = v4DatatypesModel.objects().first() + output = v4DatatypesModel.objects.first() for i, i_char in enumerate(range(ord('a'), ord('a') + 3)): self.assertEqual(input[i], output[chr(i_char)]) @@ -288,16 +288,16 @@ class FloatingPointModel(Model): sync_table(FloatingPointModel) FloatingPointModel.create(id=0, f=2.39) - output = FloatingPointModel.objects().first() + output = FloatingPointModel.objects.first() self.assertEqual(2.390000104904175, output.f) # float loses precision FloatingPointModel.create(id=0, f=3.4028234663852886e+38, d=2.39) - output = FloatingPointModel.objects().first() + output = FloatingPointModel.objects.first() self.assertEqual(3.4028234663852886e+38, output.f) self.assertEqual(2.39, output.d) # double retains precision FloatingPointModel.create(id=0, d=3.4028234663852886e+38) - output = FloatingPointModel.objects().first() + output = FloatingPointModel.objects.first() self.assertEqual(3.4028234663852886e+38, output.d) diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index b232a7bc1e..fe0096a2d8 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -115,7 +115,7 @@ def test_can_insert_udts(self): self.assertEqual(1, UserModel.objects.count()) - john = UserModel.objects().first() + john = UserModel.objects.first() self.assertEqual(0, john.id) self.assertTrue(type(john.info) is User) self.assertEqual(42, john.info.age) @@ -128,14 +128,14 @@ def test_can_update_udts(self): user = User(age=42, name="John") created_user = UserModel.create(id=0, info=user) - john_info = UserModel.objects().first().info + john_info = UserModel.objects.first().info self.assertEqual(42, john_info.age) self.assertEqual("John", john_info.name) created_user.info = User(age=22, name="Mary") created_user.update() - mary_info = UserModel.objects().first().info + mary_info = UserModel.objects.first().info self.assertEqual(22, mary_info["age"]) self.assertEqual("Mary", mary_info["name"]) @@ -146,14 +146,14 @@ def test_can_update_udts_with_nones(self): user = User(age=42, name="John") created_user = UserModel.create(id=0, info=user) - john_info = UserModel.objects().first().info + john_info = UserModel.objects.first().info self.assertEqual(42, john_info.age) self.assertEqual("John", john_info.name) created_user.info = None created_user.update() - john_info = UserModel.objects().first().info + john_info = UserModel.objects.first().info self.assertIsNone(john_info) def test_can_create_same_udt_different_keyspaces(self): @@ -179,7 +179,7 @@ class UserModelGender(Model): user = UserGender(age=42, name="John") UserModelGender.create(id=0, info=user) - john_info = UserModelGender.objects().first().info + john_info = UserModelGender.objects.first().info self.assertEqual(42, john_info.age) self.assertEqual("John", john_info.name) self.assertIsNone(john_info.gender) @@ -187,7 +187,7 @@ class UserModelGender(Model): user = UserGender(age=42) UserModelGender.create(id=0, info=user) - john_info = UserModelGender.objects().first().info + john_info = UserModelGender.objects.first().info self.assertEqual(42, john_info.age) self.assertIsNone(john_info.name) self.assertIsNone(john_info.gender) @@ -222,7 +222,7 @@ class DepthModel(Model): udts.append(Depth_3(value=udts[2])) DepthModel.create(id=0, v_0=udts[0], v_1=udts[1], v_2=udts[2], v_3=udts[3]) - output = DepthModel.objects().first() + output = DepthModel.objects.first() self.assertEqual(udts[0], output.v_0) self.assertEqual(udts[1], output.v_1) @@ -253,7 +253,7 @@ def test_can_insert_udts_with_nones(self): self.assertEqual(1, AllDatatypesModel.objects.count()) - output = AllDatatypesModel.objects().first().data + output = AllDatatypesModel.objects.first().data self.assertEqual(input, output) def test_can_insert_udts_with_all_datatypes(self): @@ -282,7 +282,7 @@ def test_can_insert_udts_with_all_datatypes(self): AllDatatypesModel.create(id=0, data=input) self.assertEqual(1, AllDatatypesModel.objects.count()) - output = AllDatatypesModel.objects().first().data + output = AllDatatypesModel.objects.first().data for i in range(ord('a'), ord('a') + 14): self.assertEqual(input[chr(i)], output[chr(i)]) @@ -324,7 +324,7 @@ class Allv4DatatypesModel(Model): Allv4DatatypesModel.create(id=0, data=input) self.assertEqual(1, Allv4DatatypesModel.objects.count()) - output = Allv4DatatypesModel.objects().first().data + output = Allv4DatatypesModel.objects.first().data for i in range(ord('a'), ord('a') + 3): self.assertEqual(input[chr(i)], output[chr(i)]) @@ -368,7 +368,7 @@ class Container(Model): # Validate input and output matches self.assertEqual(1, Container.objects.count()) - names_output = Container.objects().first().names + names_output = Container.objects.first().names self.assertEqual(names_output, names) def test_udts_with_unicode(self): @@ -458,7 +458,7 @@ class TheModel(Model): self.assertEqual(1, TheModel.objects.count()) - john = TheModel.objects().first() + john = TheModel.objects.first() self.assertEqual(john.id, id) info = john.info self.assertIsInstance(info, db_field_different) diff --git a/tests/integration/cqlengine/model/test_updates.py b/tests/integration/cqlengine/model/test_updates.py index 0f55ef476e..15fe7bf3a2 100644 --- a/tests/integration/cqlengine/model/test_updates.py +++ b/tests/integration/cqlengine/model/test_updates.py @@ -205,14 +205,14 @@ def test_value_override_with_default(self): initial = ModelWithDefault(id=1, mf={0: 0}, dummy=0, udt=first_udt, udt_default=first_udt) initial.save() - self.assertEqual(ModelWithDefault.all().get()._as_dict(), + self.assertEqual(ModelWithDefault.get()._as_dict(), {'id': 1, 'dummy': 0, 'mf': {0: 0}, "udt": first_udt, "udt_default": first_udt}) second_udt = UDT(age=1, mf={3: 3}, dummy_udt=12) second = ModelWithDefault(id=1) second.update(mf={0: 1}, udt=second_udt) - self.assertEqual(ModelWithDefault.all().get()._as_dict(), + self.assertEqual(ModelWithDefault.get()._as_dict(), {'id': 1, 'dummy': 0, 'mf': {0: 1}, "udt": second_udt, "udt_default": first_udt}) def test_value_is_written_if_is_default(self): @@ -230,7 +230,7 @@ def test_value_is_written_if_is_default(self): initial.udt_default = self.udt_default initial.update() - self.assertEqual(ModelWithDefault.all().get()._as_dict(), + self.assertEqual(ModelWithDefault.get()._as_dict(), {'id': 1, 'dummy': 42, 'mf': {0: 0}, "udt": None, "udt_default": self.udt_default}) def test_null_update_is_respected(self): @@ -252,7 +252,7 @@ def test_null_update_is_respected(self): updated_udt = UDT(age=1, mf={2:2}, dummy_udt=None) obj.update(dummy=None, udt_default=updated_udt) - self.assertEqual(ModelWithDefault.all().get()._as_dict(), + self.assertEqual(ModelWithDefault.get()._as_dict(), {'id': 1, 'dummy': None, 'mf': {0: 0}, "udt": None, "udt_default": updated_udt}) def test_only_set_values_is_updated(self): @@ -275,7 +275,7 @@ def test_only_set_values_is_updated(self): item.udt, item.udt_default = udt, udt_default item.save() - self.assertEqual(ModelWithDefault.all().get()._as_dict(), + self.assertEqual(ModelWithDefault.get()._as_dict(), {'id': 1, 'dummy': None, 'mf': {1: 2}, "udt": udt, "udt_default": udt_default}) def test_collections(self): @@ -295,7 +295,7 @@ def test_collections(self): udt, udt_default = UDT(age=1, mf={2: 1}), UDT(age=1, mf={2: 1}) item.update(mf={2:1}, udt=udt, udt_default=udt_default) - self.assertEqual(ModelWithDefault.all().get()._as_dict(), + self.assertEqual(ModelWithDefault.get()._as_dict(), {'id': 1, 'dummy': 1, 'mf': {2: 1}, "udt": udt, "udt_default": udt_default}) def test_collection_with_default(self): diff --git a/tests/integration/cqlengine/query/test_queryoperators.py b/tests/integration/cqlengine/query/test_queryoperators.py index 46d46d65e6..52a824fa1d 100644 --- a/tests/integration/cqlengine/query/test_queryoperators.py +++ b/tests/integration/cqlengine/query/test_queryoperators.py @@ -75,10 +75,10 @@ def tearDown(self): @execute_count(15) def test_token_function(self): """ Tests that token functions work properly """ - assert TokenTestModel.objects().count() == 0 + assert TokenTestModel.objects.count() == 0 for i in range(10): TokenTestModel.create(key=i, val=i) - assert TokenTestModel.objects().count() == 10 + assert TokenTestModel.objects.count() == 10 seen_keys = set() last_token = None for instance in TokenTestModel.objects().limit(5): From 7a2937931231268e3d736ca0209911f2e16388f5 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 9 May 2017 17:12:52 -0400 Subject: [PATCH 1118/2005] raise an exception if a number cannot be encoded as vint --- cassandra/marshal.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cassandra/marshal.py b/cassandra/marshal.py index 712fa6e7f0..f8f5123033 100644 --- a/cassandra/marshal.py +++ b/cassandra/marshal.py @@ -127,8 +127,8 @@ def vints_unpack(term): # noqa def vints_pack(values): revbytes = bytearray() values = [int(v) for v in values[::-1]] - for v in values: - v = encode_zig_zag(v) + for value in values: + v = encode_zig_zag(value) if v < 128: revbytes.append(v) else: @@ -145,6 +145,9 @@ def vints_pack(values): revbytes.append(v & 0xff) v >>= 8 + if num_extra_bytes > 8: + raise ValueError('Value %d is too big and cannot be encoded as vint' % value) + # We can now store the last bits in the first byte n = 8 - num_extra_bytes v |= (0xff >> n << n) From b077240c70de64af7452242269cf1011cf068d6c Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 9 May 2017 11:36:11 -0400 Subject: [PATCH 1119/2005] Added tests for PYTHON-747, encoding of Duration type --- tests/integration/__init__.py | 1 + tests/integration/standard/test_types.py | 53 +++++++++++++++++++++++- 2 files changed, 52 insertions(+), 2 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 9795cff16d..b39cd887f9 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -256,6 +256,7 @@ def get_unsupported_upper_protocol(): greaterthancass21 = unittest.skipUnless(CASSANDRA_VERSION >= '2.2', 'Cassandra version 2.2 or greater required') greaterthanorequalcass30 = unittest.skipUnless(CASSANDRA_VERSION >= '3.0', 'Cassandra version 3.0 or greater required') greaterthanorequalcass36 = unittest.skipUnless(CASSANDRA_VERSION >= '3.6', 'Cassandra version 3.6 or greater required') +greaterthanorequalcass3_10 = unittest.skipUnless(CASSANDRA_VERSION >= '3.10', 'Cassandra version 3.10 or greater required') lessthancass30 = unittest.skipUnless(CASSANDRA_VERSION < '3.0', 'Cassandra version less then 3.0 required') dseonly = unittest.skipUnless(DSE_VERSION, "Test is only applicalbe to DSE clusters") pypy = unittest.skipUnless(platform.python_implementation() == "PyPy", "Test is skipped unless it's on PyPy") diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index 05dc608b6b..a19f623bef 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -27,11 +27,11 @@ from cassandra.concurrent import execute_concurrent_with_args from cassandra.cqltypes import Int32Type, EMPTY from cassandra.query import dict_factory, ordered_dict_factory -from cassandra.util import sortedset +from cassandra.util import sortedset, Duration from tests.unit.cython.utils import cythontest from tests.integration import use_singledc, PROTOCOL_VERSION, execute_until_pass, notprotocolv1, \ - BasicSharedKeyspaceUnitTestCase, greaterthancass21, lessthancass30 + BasicSharedKeyspaceUnitTestCase, greaterthancass21, lessthancass30, greaterthanorequalcass3_10 from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, COLLECTION_TYPES, PRIMITIVE_DATATYPES_KEYS, \ get_sample, get_collection_sample @@ -793,6 +793,55 @@ def test_cython_decimal(self): finally: self.session.execute("DROP TABLE {0}".format(self.function_table_name)) + @greaterthanorequalcass3_10 + def test_smoke_duration_values(self): + """ + Test to write several Duration values to the database and verify + they can be read correctly. The verify than an exception is arisen + if the value is too big + + @since 3.10 + @jira_ticket PYTHON-747 + @expected_result the read value in C* matches the written one + + @test_category data_types serialization + """ + self.session.execute(""" + CREATE TABLE duration_smoke (k int primary key, v duration) + """) + self.addCleanup(self.session.execute, "DROP TABLE duration_smoke") + + prepared = self.session.prepare(""" + INSERT INTO duration_smoke (k, v) + VALUES (?, ?) + """) + + nanosecond_smoke_values = [0, -1, 1, 100, 1000, 1000000, 1000000000, + 10000000000000,-9223372036854775807, 9223372036854775807, + int("7FFFFFFFFFFFFFFF", 16), int("-7FFFFFFFFFFFFFFF", 16)] + month_day_smoke_values = [0, -1, 1, 100, 1000, 1000000, 1000000000, + int("7FFFFFFF", 16), int("-7FFFFFFF", 16)] + + for nanosecond_value in nanosecond_smoke_values: + for month_day_value in month_day_smoke_values: + + # Must have the same sign + if (month_day_value <= 0) != (nanosecond_value <= 0): + continue + + self.session.execute(prepared, (1, Duration(month_day_value, month_day_value, nanosecond_value))) + results = self.session.execute("SELECT * FROM duration_smoke") + + v = results[0][1] + self.assertEqual(Duration(month_day_value, month_day_value, nanosecond_value), v, + "Error encoding value {0},{0},{1}".format(month_day_value, nanosecond_value)) + + self.assertRaises(ValueError, self.session.execute, prepared, + (1, Duration(0, 0, int("8FFFFFFFFFFFFFF0", 16)))) + self.assertRaises(ValueError, self.session.execute, prepared, + (1, Duration(0, int("8FFFFFFFFFFFFFF0", 16), 0))) + self.assertRaises(ValueError, self.session.execute, prepared, + (1, Duration(int("8FFFFFFFFFFFFFF0", 16), 0, 0))) class TypeTestsProtocol(BasicSharedKeyspaceUnitTestCase): From 09b881f29f7377b40392dba27fce6a2bf59a8eac Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 10 May 2017 16:00:39 -0400 Subject: [PATCH 1120/2005] Updated build.yaml --- build.yaml | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/build.yaml b/build.yaml index 6f57d700a3..2cb0fc4fd4 100644 --- a/build.yaml +++ b/build.yaml @@ -1,26 +1,34 @@ schedules: - commit: + commit_master: schedule: per_commit branches: - include: [master, /python.*/] + include: [master] env_vars: | EVENT_LOOP_MANAGER='libev' matrix: exclude: - python: [3.4, 3.6] - nightly_libev: - schedule: nightly + commit_branches: + schedule: per_commit branches: - include: [master] + include: [/python.*/] env_vars: | EVENT_LOOP_MANAGER='libev' matrix: exclude: + - python: [3.4, 3.6] - cassandra: ['2.0', '2.1', '3.0'] + nightly_libev: + schedule: weekly + branches: + include: [master] + env_vars: | + EVENT_LOOP_MANAGER='libev' + nightly_gevent: - schedule: nightly + schedule: weekly branches: include: [master] env_vars: | @@ -28,37 +36,27 @@ schedules: matrix: exclude: - python: [3.4, 3.6] - cassandra: ['2.0', '2.1', '3.0'] nightly_eventlet: - schedule: nightly + schedule: weekly branches: include: [master] env_vars: | EVENT_LOOP_MANAGER='eventlet' - matrix: - exclude: - - cassandra: ['2.0', '2.1', '3.0'] nightly_async: - schedule: nightly + schedule: weekly branches: include: [master] env_vars: | EVENT_LOOP_MANAGER='async' - matrix: - exclude: - - cassandra: ['2.0', '2.1', '3.0'] nightly_twister: - schedule: nightly + schedule: weekly branches: include: [master] env_vars: | EVENT_LOOP_MANAGER='twisted' - matrix: - exclude: - - cassandra: ['2.0', '2.1', '3.0'] python: - 2.7 From 1aa33f0e13146ff7128c749a5c3abb2413acfd17 Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 12 May 2017 14:27:25 -0400 Subject: [PATCH 1121/2005] Updated build.yaml --- build.yaml | 19 ++++++++++++------- tests/integration/__init__.py | 12 ++++++------ 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/build.yaml b/build.yaml index 2cb0fc4fd4..6169b58a6a 100644 --- a/build.yaml +++ b/build.yaml @@ -20,14 +20,14 @@ schedules: - python: [3.4, 3.6] - cassandra: ['2.0', '2.1', '3.0'] - nightly_libev: + weekly_libev: schedule: weekly branches: include: [master] env_vars: | EVENT_LOOP_MANAGER='libev' - nightly_gevent: + weekly_gevent: schedule: weekly branches: include: [master] @@ -35,23 +35,23 @@ schedules: EVENT_LOOP_MANAGER='gevent' matrix: exclude: - - python: [3.4, 3.6] + - python: [3.4, 3.5, 3.6] - nightly_eventlet: + weekly_eventlet: schedule: weekly branches: include: [master] env_vars: | EVENT_LOOP_MANAGER='eventlet' - nightly_async: + weekly_async: schedule: weekly branches: include: [master] env_vars: | EVENT_LOOP_MANAGER='async' - nightly_twister: + weekly_twister: schedule: weekly branches: include: [master] @@ -63,19 +63,22 @@ python: - 3.4 - 3.5 - 3.6 - + os: - ubuntu/trusty64 + cassandra: - '2.0' - '2.1' - '2.2' - '3.0' - '3.11' + env: CYTHON: - CYTHON - NO_CYTHON + build: - script: | export JAVA_HOME=$CCM_JAVA_HOME @@ -85,6 +88,7 @@ build: pip install git+https://github.com/pcmanus/ccm.git # Install dependencies sudo apt-get install -y libev4 libev-dev + pip install -r test-requirements.txt pip install nose-ignore-docstring FORCE_CYTHON=False @@ -99,6 +103,7 @@ build: python setup.py build_ext --inplace --no-cython fi + echo "Running with event loop manager: $EVENT_LOOP_MANAGER" echo "==========RUNNING CQLENGINE TESTS==========" EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index b39cd887f9..ad5d1da379 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -13,26 +13,26 @@ # limitations under the License. import os -from cassandra.io.geventreactor import GeventConnection -from cassandra.io.libevreactor import LibevConnection -from cassandra.io.asyncorereactor import AsyncoreConnection -from cassandra.io.eventletreactor import EventletConnection -from cassandra.io.twistedreactor import TwistedConnection - EVENT_LOOP_MANAGER = os.getenv('EVENT_LOOP_MANAGER', "libev") if EVENT_LOOP_MANAGER == "gevent": import gevent.monkey gevent.monkey.patch_all() + from cassandra.io.geventreactor import GeventConnection connection_class = GeventConnection elif EVENT_LOOP_MANAGER == "eventlet": from eventlet import monkey_patch monkey_patch() + + from cassandra.io.eventletreactor import EventletConnection connection_class = EventletConnection elif EVENT_LOOP_MANAGER == "async": + from cassandra.io.asyncorereactor import AsyncoreConnection connection_class = AsyncoreConnection elif EVENT_LOOP_MANAGER == "twisted": + from cassandra.io.twistedreactor import TwistedConnection connection_class = TwistedConnection else: + from cassandra.io.libevreactor import LibevConnection connection_class = LibevConnection from cassandra.cluster import Cluster From 816e1cae3a268a93f132e911ed66763016137c93 Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 15 May 2017 12:24:46 -0400 Subject: [PATCH 1122/2005] Fixed test_thread_safety in unit tests --- tests/unit/test_policies.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index e2df3056e1..38973d2691 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -105,7 +105,7 @@ def test_thread_safety(self): def check_query_plan(): for i in range(100): qplan = list(policy.make_query_plan()) - self.assertEqual(sorted(qplan), hosts) + self.assertEqual(sorted(qplan), list(hosts)) threads = [Thread(target=check_query_plan) for i in range(4)] for t in threads: From 8796f30bf9b2a6ac4c2204e3551a8b397ccc6140 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 16 May 2017 11:10:33 -0400 Subject: [PATCH 1123/2005] correct index.rst --- docs/index.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index 762daad60c..3f9ab66bd4 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -2,9 +2,9 @@ Python Cassandra Driver ======================= A Python client driver for `Apache Cassandra `_. This driver works exclusively with the Cassandra Query Language v3 (CQL3) -and Cassandra's native protocol. Cassandra 1.2+ is supported. +and Cassandra's native protocol. Cassandra 2.1+ is supported. -The driver supports Python 2.6, 2.7, 3.3, and 3.4. +The driver supports Python 2.6, 2.7, 3.3, 3.4, 3.5, and 3.6. This driver is open source under the `Apache v2 License `_. From 06835c9b68fd9872f7073f546e79759bb8453512 Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 16 May 2017 14:10:32 -0400 Subject: [PATCH 1124/2005] Added tests for PYTHON 707 --- tests/unit/test_policies.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 2fc2504c80..33f913cc93 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -920,6 +920,30 @@ def test_schedule_exactly_one_attempt(self): ) self.assertEqual(len(list(policy.new_schedule())), 1) + def test_schedule_overflow(self): + """ + Test to verify an OverflowError is handled correctly + in the ExponentialReconnectionPolicy + @since 3.10 + @jira_ticket PYTHON-707 + @expected_result all numbers should be less than sys.float_info.max + since that's the biggest max we can possibly have as that argument must be a float. + Note that is possible for a float to be inf. + + @test_category policy + """ + + # This should lead to overflow + # Note that this may not happen in the fist iterations + # as sys.float_info.max * 2 = inf + base_delay = sys.float_info.max - 1 + max_delay = sys.float_info.max + max_attempts = 2**12 + policy = ExponentialReconnectionPolicy(base_delay=base_delay, max_delay=max_delay, max_attempts=max_attempts) + schedule = list(policy.new_schedule()) + for number in schedule: + self.assertLessEqual(number, sys.float_info.max) + ONE = ConsistencyLevel.ONE From a16b841ef8969a69a0ccf583a884c63313f47bdc Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 16 May 2017 15:08:54 -0400 Subject: [PATCH 1125/2005] Added tests for PYTHON-750 --- .../cqlengine/columns/test_validation.py | 18 ++++++++++++++++-- .../cqlengine/model/test_model_io.py | 9 ++++++--- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 8c2f54b081..1f6fa42588 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -26,14 +26,14 @@ from cassandra.cqlengine.columns import (TimeUUID, Ascii, Text, Integer, BigInt, VarInt, DateTime, Date, UUID, Boolean, Decimal, Inet, Time, UserDefinedType, - Map, List, Set, Tuple, Double) + Map, List, Set, Tuple, Double, Duration) from cassandra.cqlengine.connection import execute from cassandra.cqlengine.management import sync_table, drop_table from cassandra.cqlengine.models import Model, ValidationError from cassandra.cqlengine.usertype import UserType from cassandra import util -from tests.integration import PROTOCOL_VERSION, CASSANDRA_VERSION, greaterthanorequalcass30 +from tests.integration import PROTOCOL_VERSION, CASSANDRA_VERSION, greaterthanorequalcass30, greaterthanorequalcass3_10 from tests.integration.cqlengine.base import BaseCassEngTestCase @@ -344,6 +344,20 @@ def setUpClass(cls): ) super(TestBoolean, cls).setUpClass() +@greaterthanorequalcass3_10 +class TestDuration(DataType, BaseCassEngTestCase): + @classmethod + def setUpClass(cls): + cls.db_klass, cls.python_klass = ( + Duration, + util.Duration + ) + cls.first_value, cls.second_value, cls.third_value = ( + util.Duration(0, 0, 0), + util.Duration(1, 2, 3), + util.Duration(0, 0, 0) + ) + super(TestDuration, cls).setUpClass() class User(UserType): # We use Date and Time to ensure to_python diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index c7e161139a..e2776e297d 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -29,11 +29,11 @@ from cassandra.cqlengine.management import drop_table from cassandra.cqlengine.models import Model from cassandra.query import SimpleStatement -from cassandra.util import Date, Time +from cassandra.util import Date, Time, Duration from cassandra.cqlengine.statements import SelectStatement, DeleteStatement, WhereClause from cassandra.cqlengine.operators import EqualsOperator -from tests.integration import PROTOCOL_VERSION +from tests.integration import PROTOCOL_VERSION, greaterthanorequalcass3_10 from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration.cqlengine import DEFAULT_KEYSPACE @@ -163,6 +163,7 @@ def test_a_sensical_error_is_raised_if_you_try_to_create_a_table_twice(self): sync_table(TestModel) sync_table(TestModel) + @greaterthanorequalcass3_10 def test_can_insert_model_with_all_column_types(self): """ Test for inserting all column types into a Model @@ -195,6 +196,7 @@ class AllDatatypesModel(Model): l = columns.TimeUUID() m = columns.UUID() n = columns.VarInt() + o = columns.Duration() sync_table(AllDatatypesModel) @@ -207,7 +209,8 @@ class AllDatatypesModel(Model): e=datetime.utcfromtimestamp(872835240), f=Decimal('12.3E+7'), g=2.39, h=3.4028234663852886e+38, i='123.123.123.123', j=2147483647, k='text', l=UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), - m=UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), n=int(str(2147483647) + '000')) + m=UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), n=int(str(2147483647) + '000'), + o=Duration(2, 3, 4)) self.assertEqual(1, AllDatatypesModel.objects.count()) output = AllDatatypesModel.objects.first() From 6cb229010b99291195242b356307e3303a62913b Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 10 May 2017 11:12:06 -0400 Subject: [PATCH 1126/2005] Added test for PYTHON-747 --- tests/integration/standard/test_connection.py | 46 +++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index d7b1f49687..c4b750b829 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -22,6 +22,7 @@ import sys from threading import Thread, Event import time +import weakref from cassandra import ConsistencyLevel, OperationTimedOut from cassandra.cluster import NoHostAvailable, ConnectionShutdown, Cluster @@ -36,6 +37,7 @@ try: from cassandra.io.libevreactor import LibevConnection + from cassandra.io.libevreactor import _cleanup as libev__cleanup except ImportError: LibevConnection = None @@ -398,3 +400,47 @@ def setUp(self): raise unittest.SkipTest( 'libev does not appear to be installed properly') ConnectionTests.setUp(self) + + def test_watchers_are_finished(self): + """ + Test for asserting that watchers are closed in LibevConnection + + It will open a connection to the Cluster and then abruptly clean it simulating, + a process termination without calling cluster.shutdown(), which would trigger + LibevConnection._libevloop._cleanup. Then it will check the watchers have been closed + Finally it will restore the LibevConnection reactor so it doesn't affect + the rest of the tests + + @since 3.10 + @jira_ticket PYTHON-747 + @expected_result the watchers are closed + + @test_category connection + """ + + # conn._write_watcher and conn._read_watcher will be closed + # when the request is finished so it may not be _cleanup the + # one who ends up cleaning them everytime. + for _ in range(10): + cluster = Cluster(connection_class=LibevConnection) + session = cluster.connect(wait_for_all_pools=True) + + session.execute_async("SELECT * FROM system.local LIMIT 1") + # We have to make a copy because the connections shouldn't + # be alive when we verify them + live_connections = set(LibevConnection._libevloop._live_conns) + + # This simulates the process ending without cluster.shutdown() + # being called, then with atexit _cleanup for libevreactor would + # be called + libev__cleanup(weakref.ref(LibevConnection._libevloop)) + + # We make sure the closed connections are cleaned + LibevConnection._libevloop._loop_will_run(None) + for conn in live_connections: + for watcher in (conn._write_watcher, conn._read_watcher): + self.assertIsNone(watcher) + + # Restart the reactor + LibevConnection._libevloop = None + LibevConnection.initialize_reactor() From 45689d08ef84011940bb877ad5145f7a2d32cbd2 Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 17 May 2017 08:57:04 -0400 Subject: [PATCH 1127/2005] Added tests with Batch statements with prepared statement after ALTER --- .../standard/test_prepared_statements.py | 2 +- tests/integration/standard/test_query.py | 55 ++++++++++++++++++- 2 files changed, 54 insertions(+), 3 deletions(-) diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index 65886826c3..a3a291cf69 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -390,7 +390,7 @@ def test_raise_error_on_prepared_statement_execution_dropped_table(self): with self.assertRaises(InvalidRequest): self.session.execute(prepared, [0]) - # TODO revisit this test + # TODO revisit this test, it on hold now due to CASSANDRA-10786 @unittest.skip def test_invalidated_result_metadata(self): """ diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index a4d15a2b7b..360edb0c8a 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -494,19 +494,70 @@ def test_prepare_batch_statement(self): reprepare_on_up=False) self.addCleanup(clus.shutdown) + table = "test3rf.%s" % self._testMethodName.lower() + session = clus.connect(wait_for_all_pools=True) - insert_statement = session.prepare("INSERT INTO test3rf.test (k, v) VALUES (?, ?)") + session.execute("DROP TABLE IF EXISTS %s" % table) + session.execute("CREATE TABLE %s (k int PRIMARY KEY, v int )" % table) + + insert_statement = session.prepare("INSERT INTO %s (k, v) VALUES (?, ?)" % table) # This is going to query a host where the query # is not prepared batch_statement = BatchStatement(consistency_level=ConsistencyLevel.ONE) batch_statement.add(insert_statement, (1, 2)) session.execute(batch_statement) - select_results = session.execute("SELECT * FROM test3rf.test WHERE k = 1") + select_results = session.execute("SELECT * FROM %s WHERE k = 1" % table) first_row = select_results[0][:2] self.assertEqual((1, 2), first_row) + def test_prepare_batch_statement_after_alter(self): + """ + Test to validate a prepared statement used inside a batch statement is correctly handled + by the driver. The metadata might be updated when a table is altered. This tests combines + queries not being prepared and an update of the prepared statement metadata + + @since 3.10 + @jira_ticket PYTHON-706 + @expected_result queries will have to re-prepared on hosts that aren't the control connection + and the batch statement will be sent. + """ + white_list = ForcedHostSwitchPolicy() + clus = Cluster( + load_balancing_policy=white_list, + protocol_version=PROTOCOL_VERSION, prepare_on_all_hosts=False, + reprepare_on_up=False) + self.addCleanup(clus.shutdown) + + table = "test3rf.%s" % self._testMethodName.lower() + + session = clus.connect(wait_for_all_pools=True) + + session.execute("DROP TABLE IF EXISTS %s" % table) + session.execute("CREATE TABLE %s (k int PRIMARY KEY, a int, b int, d int)" % table) + insert_statement = session.prepare("INSERT INTO %s (k, b, d) VALUES (?, ?, ?)" % table) + + # Altering the table might trigger an update in the insert metadata + session.execute("ALTER TABLE %s ADD c int" % table) + + values_to_insert = [(1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 6)] + + # We query the three hosts in order (due to the ForcedHostSwitchPolicy) + # the first three queries will have to be repreapred and the rest should + # work as normal batch prepared statements + for i in range(10): + value_to_insert = values_to_insert[i % len(values_to_insert)] + batch_statement = BatchStatement(consistency_level=ConsistencyLevel.ONE) + batch_statement.add(insert_statement, value_to_insert) + session.execute(batch_statement) + + select_results = session.execute("SELECT * FROM %s" % table) + expected_results = [(1, None, 2, None, 3), (2, None, 3, None, 4), + (3, None, 4, None, 5), (4, None, 5, None, 6)] + + self.assertEqual(set(expected_results), set(select_results._current_rows)) + class PrintStatementTests(unittest.TestCase): """ From 29ccd1564d28b32620d08d17a89c532781cf0f38 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 17 May 2017 11:22:36 -0400 Subject: [PATCH 1128/2005] ConnectionHeartbeat should mark down HostConnection on OTO --- cassandra/cluster.py | 4 +--- cassandra/connection.py | 2 +- cassandra/pool.py | 30 +++++++++++++++++++++++------- 3 files changed, 25 insertions(+), 11 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index c641b84e51..d9074f7979 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1395,10 +1395,8 @@ def on_down(self, host, is_host_addition, expect_host_to_be_down=False): """ if self.is_shutdown: return - with host.lock: was_up = host.is_up - # ignore down signals if we have open pools to the host # this is to avoid closing pools when a control connection host became isolated if self._discount_down_events and self.profile_manager.distance(host) != HostDistance.IGNORED: @@ -3134,7 +3132,7 @@ def get_connections(self): c = getattr(self, '_connection', None) return [c] if c else [] - def return_connection(self, connection): + def return_connection(self, connection, mark_host_down=False): # noqa if connection is self._connection and (connection.is_defunct or connection.is_closed): self.reconnect() diff --git a/cassandra/connection.py b/cassandra/connection.py index 216b79058c..cad889eeeb 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -1007,7 +1007,7 @@ def run(self): for connection, owner, exc in failed_connections: self._raise_if_stopped() connection.defunct(exc) - owner.return_connection(connection) + owner.return_connection(connection, mark_host_down=True) except self.ShutdownException: pass except Exception: diff --git a/cassandra/pool.py b/cassandra/pool.py index 4784be5773..9d9a9aa788 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -359,18 +359,29 @@ def borrow_connection(self, timeout): raise NoConnectionsAvailable("All request IDs are currently in use") - def return_connection(self, connection): + def return_connection(self, connection, mark_host_down=False): with connection.lock: connection.in_flight -= 1 with self._stream_available_condition: self._stream_available_condition.notify() - if (connection.is_defunct or connection.is_closed) and not connection.signaled_error: - log.debug("Defunct or closed connection (%s) returned to pool, potentially " - "marking host %s as down", id(connection), self.host) - is_down = self._session.cluster.signal_connection_failure( - self.host, connection.last_error, is_host_addition=False) - connection.signaled_error = True + if connection.is_defunct or connection.is_closed: + if connection.signaled_error and not mark_host_down: + return + + is_down = False + if not connection.signaled_error: + log.debug("Defunct or closed connection (%s) returned to pool, potentially " + "marking host %s as down", id(connection), self.host) + is_down = self._session.cluster.signal_connection_failure( + self.host, connection.last_error, is_host_addition=False) + connection.signaled_error = True + + # Force mark down a host on error, used by the ConnectionHeartbeat + if mark_host_down and not is_down: + is_down = True + self._session.cluster.on_down(self.host, is_host_addition=False) + if is_down: self.shutdown() else: @@ -382,6 +393,11 @@ def return_connection(self, connection): self._session.submit(self._replace, connection) def _replace(self, connection): + with self._lock: + if self.is_shutdown: + self._is_replacing = False + return + log.debug("Replacing connection (%s) to %s", id(connection), self.host) try: conn = self._session.cluster.connection_factory(self.host.address) From 790e97202ecf44cdf1e1a529a3b2e74c5f9b3d4b Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 17 May 2017 15:21:48 -0400 Subject: [PATCH 1129/2005] make _time_remaining a property --- cassandra/cluster.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 7440901e40..55e9228f04 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3319,7 +3319,6 @@ def __init__(self, session, message, query, timeout, metrics=None, prepared_stat self.message = message self.query = query self.timeout = timeout - self._time_remaining = timeout self._retry_policy = retry_policy self._metrics = metrics self.prepared_statement = prepared_statement @@ -3334,6 +3333,12 @@ def __init__(self, session, message, query, timeout, metrics=None, prepared_stat self.attempted_hosts = [] self._start_timer() + @property + def _time_remaining(self): + if self.timeout is None: + return None + return (self._start_time + self.timeout) - time.time() + def _start_timer(self): if self._timer is None: spec_delay = self._spec_execution_plan.next_execution(self._current_host) @@ -3364,8 +3369,6 @@ def _on_speculative_execute(self): self._timer = None if not self._event.is_set(): if self._time_remaining is not None: - elapsed = time.time() - self._start_time - self._time_remaining -= elapsed if self._time_remaining <= 0: self._on_timeout() return From 1b6a8cecc04e89f8b80e13c23d3402e4c8b29b6d Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 17 May 2017 15:29:07 -0400 Subject: [PATCH 1130/2005] fix HostConnectionPool --- cassandra/pool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/pool.py b/cassandra/pool.py index 9d9a9aa788..0fab068b4c 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -642,7 +642,7 @@ def _wait_for_conn(self, timeout): raise NoConnectionsAvailable() - def return_connection(self, connection): + def return_connection(self, connection, mark_host_down=False): #noqa with connection.lock: connection.in_flight -= 1 in_flight = connection.in_flight From fc2bea54163e70cdfd93976c892c30871f5f5701 Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 17 May 2017 16:37:11 -0400 Subject: [PATCH 1131/2005] Skipped TestDuration in cqlengine tests for some C* versions --- .../cqlengine/columns/test_validation.py | 28 ++++++++++++------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 1f6fa42588..2ee7072197 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -348,16 +348,24 @@ def setUpClass(cls): class TestDuration(DataType, BaseCassEngTestCase): @classmethod def setUpClass(cls): - cls.db_klass, cls.python_klass = ( - Duration, - util.Duration - ) - cls.first_value, cls.second_value, cls.third_value = ( - util.Duration(0, 0, 0), - util.Duration(1, 2, 3), - util.Duration(0, 0, 0) - ) - super(TestDuration, cls).setUpClass() + # setUpClass is executed despite the whole class being skipped + if CASSANDRA_VERSION >= "3.10": + cls.db_klass, cls.python_klass = ( + Duration, + util.Duration + ) + cls.first_value, cls.second_value, cls.third_value = ( + util.Duration(0, 0, 0), + util.Duration(1, 2, 3), + util.Duration(0, 0, 0) + ) + super(TestDuration, cls).setUpClass() + + @classmethod + def tearDownClass(cls): + if CASSANDRA_VERSION >= "3.10": + super(TestDuration, cls).tearDownClass() + class User(UserType): # We use Date and Time to ensure to_python From b8fda56b4a732e5025fd3376ed95536a571f3300 Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 18 May 2017 10:18:36 -0400 Subject: [PATCH 1132/2005] Updated variable comparison to be effective with Job Creator in tests --- tests/integration/__init__.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index ad5d1da379..c8dc8ca346 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -14,21 +14,21 @@ import os EVENT_LOOP_MANAGER = os.getenv('EVENT_LOOP_MANAGER', "libev") -if EVENT_LOOP_MANAGER == "gevent": +if "gevent" in EVENT_LOOP_MANAGER: import gevent.monkey gevent.monkey.patch_all() from cassandra.io.geventreactor import GeventConnection connection_class = GeventConnection -elif EVENT_LOOP_MANAGER == "eventlet": +elif "eventlet" in EVENT_LOOP_MANAGER: from eventlet import monkey_patch monkey_patch() from cassandra.io.eventletreactor import EventletConnection connection_class = EventletConnection -elif EVENT_LOOP_MANAGER == "async": +elif "async" in EVENT_LOOP_MANAGER: from cassandra.io.asyncorereactor import AsyncoreConnection connection_class = AsyncoreConnection -elif EVENT_LOOP_MANAGER == "twisted": +elif "twisted" in EVENT_LOOP_MANAGER: from cassandra.io.twistedreactor import TwistedConnection connection_class = TwistedConnection else: From ef1fc897d63de162cb168a4fcff83081d14e2f21 Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 12 May 2017 18:37:41 -0400 Subject: [PATCH 1133/2005] Added test for PYTHON-755 --- tests/integration/standard/test_policies.py | 47 ++++++++++++++++++--- 1 file changed, 41 insertions(+), 6 deletions(-) diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index 84f252cd19..dc09382d7d 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -23,9 +23,12 @@ from cassandra.cluster import ExecutionProfile from cassandra.query import SimpleStatement from cassandra.policies import ConstantSpeculativeExecutionPolicy, RoundRobinPolicy +from cassandra.connection import Connection + from tests.integration import BasicSharedKeyspaceUnitTestCase, greaterthancass21 from tests import notwindows +from mock import patch def setup_module(): use_singledc() @@ -44,18 +47,24 @@ def make_query_plan(self, working_keyspace=None, query=None): return hosts +# This doesn't work well with Windows clock granularity +@notwindows class SpecExecTest(BasicSharedKeyspaceUnitTestCase): - def setUp(self): + @classmethod + def setUpClass(cls): + cls.common_setup(1) + spec_ep_brr = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(), speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 20)) spec_ep_rr = ExecutionProfile(speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 20)) spec_ep_rr_lim = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(), speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 1)) - self.cluster.add_execution_profile("spec_ep_brr", spec_ep_brr) - self.cluster.add_execution_profile("spec_ep_rr", spec_ep_rr) - self.cluster.add_execution_profile("spec_ep_rr_lim", spec_ep_rr_lim) + spec_ep_brr_lim = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(), speculative_execution_policy=ConstantSpeculativeExecutionPolicy(0.4, 10)) + + cls.cluster.add_execution_profile("spec_ep_brr", spec_ep_brr) + cls.cluster.add_execution_profile("spec_ep_rr", spec_ep_rr) + cls.cluster.add_execution_profile("spec_ep_rr_lim", spec_ep_rr_lim) + cls.cluster.add_execution_profile("spec_ep_brr_lim", spec_ep_brr_lim) - #This doesn't work well with Windows clock granularity - @notwindows @greaterthancass21 def test_speculative_execution(self): """ @@ -102,3 +111,29 @@ def test_speculative_execution(self): # Test timeout with spec_ex with self.assertRaises(OperationTimedOut): result = self.session.execute(statement, execution_profile='spec_ep_rr', timeout=.5) + + #TODO redo this tests with Scassandra + def test_speculative_and_timeout(self): + """ + Test to ensure the timeout is honored when using speculative execution + @since 3.10 + @jira_ticket PYTHON-750 + @expected_result speculative retries be schedule every fixed period, during the maximum + period of the timeout. + + @test_category metadata + """ + # We mock this so no messages are sent, otherwise a reponse might arrive + # and we would not know how many hosts we queried + with patch.object(Connection, "send_msg", return_value = 100) as mocked_send_msg: + + statement = SimpleStatement("INSERT INTO test3rf.test (k, v) VALUES (0, 1);", is_idempotent=True) + + # An OperationTimedOut is placed here in response_future, + # that's why we can't call session.execute,which would raise it, but + # we have to directly wait for the event + response_future = self.session.execute_async(statement, execution_profile='spec_ep_brr_lim', timeout=2.2) + response_future._event.wait() + + # This is because 2.2 / 0.4 + 1 = 6 + self.assertEqual(len(response_future.attempted_hosts), 6) From 7563118aeec6407e7edd19addaf888abdb0731df Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 18 May 2017 15:26:03 -0400 Subject: [PATCH 1134/2005] Changed test versions from 3.11 to 3.10 in build.yaml --- build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.yaml b/build.yaml index 6169b58a6a..37ca9de72b 100644 --- a/build.yaml +++ b/build.yaml @@ -72,7 +72,7 @@ cassandra: - '2.1' - '2.2' - '3.0' - - '3.11' + - '3.10' env: CYTHON: From cf75d547f132b579587dead372a6770e586183ca Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 18 May 2017 17:30:56 -0400 Subject: [PATCH 1135/2005] Fixed unit tests after PYTHON-734 --- tests/unit/test_connection.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index 3209d312ef..c267d4b6b0 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -358,7 +358,8 @@ def test_no_req_ids(self, *args): self.assertEqual(max_connection.send_msg.call_count, 0) self.assertEqual(max_connection.send_msg.call_count, 0) max_connection.defunct.assert_has_calls([call(ANY)] * get_holders.call_count) - holder.return_connection.assert_has_calls([call(max_connection)] * get_holders.call_count) + holder.return_connection.assert_has_calls( + [call(max_connection, mark_host_down=True)] * get_holders.call_count) def test_unexpected_response(self, *args): request_id = 999 @@ -386,7 +387,8 @@ def send_msg(msg, req_id, msg_callback): exc = connection.defunct.call_args_list[0][0][0] self.assertIsInstance(exc, ConnectionException) self.assertRegexpMatches(exc.args[0], r'^Received unexpected response to OptionsMessage.*') - holder.return_connection.assert_has_calls([call(connection)] * get_holders.call_count) + holder.return_connection.assert_has_calls( + [call(connection, mark_host_down=True)] * get_holders.call_count) def test_timeout(self, *args): request_id = 999 @@ -415,7 +417,8 @@ def send_msg(msg, req_id, msg_callback): self.assertIsInstance(exc, OperationTimedOut) self.assertEqual(exc.errors, 'Connection heartbeat timeout after 0.05 seconds') self.assertEqual(exc.last_host, 'localhost') - holder.return_connection.assert_has_calls([call(connection)] * get_holders.call_count) + holder.return_connection.assert_has_calls( + [call(connection, mark_host_down=True)] * get_holders.call_count) class TimerTest(unittest.TestCase): From d3664dd00abfdda8c1819fbf4510510955763693 Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 18 May 2017 19:02:29 -0400 Subject: [PATCH 1136/2005] Prevented hang in test_speculative_and_timeout --- tests/integration/standard/test_policies.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index dc09382d7d..91f62df6d5 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -133,7 +133,8 @@ def test_speculative_and_timeout(self): # that's why we can't call session.execute,which would raise it, but # we have to directly wait for the event response_future = self.session.execute_async(statement, execution_profile='spec_ep_brr_lim', timeout=2.2) - response_future._event.wait() + response_future._event.wait(4) + self.assertIsInstance(response_future._final_exception, OperationTimedOut) # This is because 2.2 / 0.4 + 1 = 6 self.assertEqual(len(response_future.attempted_hosts), 6) From c5925a9210e86e294fd5c8a392156b83613a3a30 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 19 May 2017 15:19:01 -0400 Subject: [PATCH 1137/2005] Improve SSL docs for security --- docs/security.rst | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/docs/security.rst b/docs/security.rst index 9f7af68b4d..989688e9bb 100644 --- a/docs/security.rst +++ b/docs/security.rst @@ -74,11 +74,16 @@ For example: .. code-block:: python from cassandra.cluster import Cluster - from ssl import PROTOCOL_TLSv1 + from ssl import PROTOCOL_TLSv1, CERT_REQUIRED - ssl_opts = {'ca_certs': '/path/to/my/ca.certs', - 'ssl_version': PROTOCOL_TLSv1} + ssl_opts = { + 'ca_certs': '/path/to/my/ca.certs', + 'ssl_version': PROTOCOL_TLSv1, + 'cert_reqs': CERT_REQUIRED # Certificates are required and validated + } cluster = Cluster(ssl_options=ssl_opts) -For further reading, Andrew Mussey has published a thorough guide on +This is only an example to show how to pass the ssl parameters. Consider reading +the `python ssl documentation `_ for +your configuration. For further reading, Andrew Mussey has published a thorough guide on `Using SSL with the DataStax Python driver `_. From 90159c9ac1209121b14d1ae86fae2979844fc23b Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 19 May 2017 16:06:16 -0400 Subject: [PATCH 1138/2005] get address for timeout error safely --- cassandra/cluster.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index dee208c6c6..c76ccb99df 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3355,7 +3355,8 @@ def _on_timeout(self): errors = self._errors if not errors: if self.is_schema_agreed: - errors = {self._current_host.address: "Client request timeout. See Session.execute[_async](timeout)"} + key = self._current_host.address if self.current_host else 'no host queried before timeout' + errors = {key: "Client request timeout. See Session.execute[_async](timeout)"} else: connection = self.session.cluster.control_connection._connection host = connection.host if connection else 'unknown' From 26a4a33c274fdf88e160ada85cec3c6ad2c0278a Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 19 May 2017 18:12:32 -0400 Subject: [PATCH 1139/2005] Fixed typo --- cassandra/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index c76ccb99df..fc6b22afe0 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3355,7 +3355,7 @@ def _on_timeout(self): errors = self._errors if not errors: if self.is_schema_agreed: - key = self._current_host.address if self.current_host else 'no host queried before timeout' + key = self._current_host.address if self._current_host else 'no host queried before timeout' errors = {key: "Client request timeout. See Session.execute[_async](timeout)"} else: connection = self.session.cluster.control_connection._connection From 2ba22df0cf7d41ab2a15ac6eb4669c33b4fce9c7 Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 22 May 2017 10:03:39 -0400 Subject: [PATCH 1140/2005] Fixed test_watchers_are_finished --- tests/integration/__init__.py | 1 + tests/integration/cqlengine/columns/test_validation.py | 4 ++-- tests/integration/standard/test_connection.py | 9 ++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index c8dc8ca346..4a0bd79d3c 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -257,6 +257,7 @@ def get_unsupported_upper_protocol(): greaterthanorequalcass30 = unittest.skipUnless(CASSANDRA_VERSION >= '3.0', 'Cassandra version 3.0 or greater required') greaterthanorequalcass36 = unittest.skipUnless(CASSANDRA_VERSION >= '3.6', 'Cassandra version 3.6 or greater required') greaterthanorequalcass3_10 = unittest.skipUnless(CASSANDRA_VERSION >= '3.10', 'Cassandra version 3.10 or greater required') +greaterthanorequalcass3_11 = unittest.skipUnless(CASSANDRA_VERSION >= '3.11', 'Cassandra version 3.10 or greater required') lessthancass30 = unittest.skipUnless(CASSANDRA_VERSION < '3.0', 'Cassandra version less then 3.0 required') dseonly = unittest.skipUnless(DSE_VERSION, "Test is only applicalbe to DSE clusters") pypy = unittest.skipUnless(platform.python_implementation() == "PyPy", "Test is skipped unless it's on PyPy") diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 2ee7072197..2da9007006 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -33,7 +33,7 @@ from cassandra.cqlengine.usertype import UserType from cassandra import util -from tests.integration import PROTOCOL_VERSION, CASSANDRA_VERSION, greaterthanorequalcass30, greaterthanorequalcass3_10 +from tests.integration import PROTOCOL_VERSION, CASSANDRA_VERSION, greaterthanorequalcass30, greaterthanorequalcass3_11 from tests.integration.cqlengine.base import BaseCassEngTestCase @@ -344,7 +344,7 @@ def setUpClass(cls): ) super(TestBoolean, cls).setUpClass() -@greaterthanorequalcass3_10 +@greaterthanorequalcass3_11 class TestDuration(DataType, BaseCassEngTestCase): @classmethod def setUpClass(cls): diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index c4b750b829..ef2b7e4561 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -17,6 +17,7 @@ except ImportError: import unittest # noqa +from nose.tools import nottest from functools import partial from six.moves import range import sys @@ -401,6 +402,7 @@ def setUp(self): 'libev does not appear to be installed properly') ConnectionTests.setUp(self) + @nottest def test_watchers_are_finished(self): """ Test for asserting that watchers are closed in LibevConnection @@ -435,12 +437,9 @@ def test_watchers_are_finished(self): # be called libev__cleanup(weakref.ref(LibevConnection._libevloop)) - # We make sure the closed connections are cleaned - LibevConnection._libevloop._loop_will_run(None) for conn in live_connections: for watcher in (conn._write_watcher, conn._read_watcher): - self.assertIsNone(watcher) + self.assertTrue(watcher is None or not watcher.is_active()) - # Restart the reactor + cluster.shutdown() LibevConnection._libevloop = None - LibevConnection.initialize_reactor() From 5e375cc9cdc05781bbe01c3e69d95713a0ebdcab Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 22 May 2017 11:28:05 -0400 Subject: [PATCH 1141/2005] remove eager callback execution in timer With our fix for PYTHON-755, you can now start timers with a negative timeout in cluster.Cluster._start_timer. When this happens, Timer.callback can be called twice, once in Timer.__init__ and once in Timer.finish. The first is just an optimization, so we remove it here. --- cassandra/connection.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index cad889eeeb..03c44fcc68 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -1032,8 +1032,6 @@ class Timer(object): def __init__(self, timeout, callback): self.end = time.time() + timeout self.callback = callback - if timeout < 0: - self.callback() def __lt__(self, other): return self.end < other.end From b886e3ec891a4b2b998c07018361904d9a7c4db4 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 19 May 2017 17:10:42 -0400 Subject: [PATCH 1142/2005] PYTHON-626: improve prepared stmt docs --- cassandra/query.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/cassandra/query.py b/cassandra/query.py index 4e3497485e..cfa3aef6af 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -366,6 +366,21 @@ class PreparedStatement(object): A :class:`.PreparedStatement` should be prepared only once. Re-preparing a statement may affect performance (as the operation requires a network roundtrip). + + |prepared_stmt_head|: Do not use ``*`` in prepared statements if you might + change the schema of the table being queried. The driver and server each + maintain a map between metadata for a schema and statements that were + prepared against that schema. When a user changes a schema, e.g. by adding + or removing a column, the server invalidates its mappings involving that + schema. However, there is currently no way to propagate that invalidation + to drivers. Thus, after a schema change, the driver will incorrectly + interpret the results of ``SELECT *`` queries prepared before the schema + change. This is currently being addressed in `CASSANDRA-10786 + `_. + + .. |prepared_stmt_head| raw:: html + + A note about * in prepared statements """ column_metadata = None #TODO: make this bind_metadata in next major From b0c0f22dc5e256b44188b4a5eb342c78ff8f1067 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 23 May 2017 10:12:56 -0400 Subject: [PATCH 1143/2005] changelog for 3.10 release --- CHANGELOG.rst | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f63ef99880..f7bfc004fc 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,36 @@ +3.10.0 +====== +May 24, 2017 + +Features +-------- +* Add Duration type to cqlengine (PYTHON-750) +* Community PR review: Raise error on primary key update only if its value changed (PYTHON-705) +* get_query_trace() contract is ambiguous (PYTHON-196) + +Bug Fixes +--------- +* Queries using speculative execution policy timeout prematurely (PYTHON-755) +* Fix `map` where results are not consumed (PYTHON-749) +* Driver fails to encode Duration's with large values (PYTHON-747) +* UDT values are not updated correctly in CQLEngine (PYTHON-743) +* UDT types are not validated in CQLEngine (PYTHON-742) +* to_python is not implemented for types columns.Type and columns.Date in CQLEngine (PYTHON-741) +* Clients spin infinitely trying to connect to a host that is drained (PYTHON-734) +* Resulset.get_query_trace returns empty trace sometimes (PYTHON-730) +* Memory grows and doesn't get removed (PYTHON-720) +* Fix RuntimeError caused by change dict size during iteration (PYTHON-708) +* fix ExponentialReconnectionPolicy may throw OverflowError problem (PYTHON-707) +* Avoid using nonexistent prepared statement in ResponseFuture (PYTHON-706) + +Other +----- +* Update README (PYTHON-746) +* Test python versions 3.5 and 3.6 (PYTHON-737) +* Docs Warning About Prepare "select *" (PYTHON-626) +* Increase Coverage in CqlEngine Test Suite (PYTHON-505) +* Example SSL connection code does not verify server certificates (PYTHON-469) + 3.9.0 ===== From 7b00f256a2c672085f7e0f4cf43a8492049777c1 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 23 May 2017 13:24:59 -0400 Subject: [PATCH 1144/2005] add docs reference for 3.10 --- docs.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs.yaml b/docs.yaml index e8adac73c5..9f997bb095 100644 --- a/docs.yaml +++ b/docs.yaml @@ -13,6 +13,8 @@ sections: prebuilds: - python setup.py build_ext --inplace --force --no-murmur3 --no-cython versions: + - name: 3.10 + ref: b0c0f22d - name: 3.9 ref: 3.9-doc - name: 3.8 From 64572368a6c533b9429c18a60bc2949400fc4640 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 23 May 2017 16:01:11 -0400 Subject: [PATCH 1145/2005] fix v3.10 docs name --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index 9f997bb095..40271830aa 100644 --- a/docs.yaml +++ b/docs.yaml @@ -13,7 +13,7 @@ sections: prebuilds: - python setup.py build_ext --inplace --force --no-murmur3 --no-cython versions: - - name: 3.10 + - name: '3.10' ref: b0c0f22d - name: 3.9 ref: 3.9-doc From 34664df1692d3f86a5eb800ab20400d4f4c6da70 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 23 May 2017 16:02:24 -0400 Subject: [PATCH 1146/2005] set new docs reference for v3.10 --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index 40271830aa..a91384667b 100644 --- a/docs.yaml +++ b/docs.yaml @@ -14,7 +14,7 @@ prebuilds: - python setup.py build_ext --inplace --force --no-murmur3 --no-cython versions: - name: '3.10' - ref: b0c0f22d + ref: 64572368 - name: 3.9 ref: 3.9-doc - name: 3.8 From e527d914cd180958ec6b6a120af27aa093af183e Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 24 May 2017 10:34:49 -0400 Subject: [PATCH 1147/2005] Simplified the travis matrix, moved part of it to Jenkins --- .travis.yml | 1 - build.yaml | 9 +++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 622ddee052..6a341ef37b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,7 +10,6 @@ python: sudo: false env: - CASS_DRIVER_NO_CYTHON=1 - - addons: apt: diff --git a/build.yaml b/build.yaml index 37ca9de72b..a7a9c78132 100644 --- a/build.yaml +++ b/build.yaml @@ -103,6 +103,15 @@ build: python setup.py build_ext --inplace --no-cython fi + # Run the unit tests, this is not done in travis because + # it takes too much time for the whole matrix to build with cython + if [[ $CYTHON == 'CYTHON' ]]; then + EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/unit/ || true + MONKEY_PATCH_LOOP=1 EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/unit/io/test_eventletreactor.py || true + MONKEY_PATCH_LOOP=1 EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/unit/io/test_geventreactor.py || true + + fi + echo "Running with event loop manager: $EVENT_LOOP_MANAGER" echo "==========RUNNING CQLENGINE TESTS==========" EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true From d72e32111e9cf5b2938871e94dda20a9d82e4084 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 24 May 2017 13:32:09 -0400 Subject: [PATCH 1148/2005] bump version for 3.10 release --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index fc271d8eba..a5501f42f1 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 9, 0, 'post0') +__version_info__ = (3, 10) __version__ = '.'.join(map(str, __version_info__)) From 85a09974b7ba9006480d8a7ca11579f6ee941350 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 24 May 2017 13:47:44 -0400 Subject: [PATCH 1149/2005] post-release version (3.10.0.post0) --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index a5501f42f1..a96624def7 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 10) +__version_info__ = (3, 10, 0, 'post0') __version__ = '.'.join(map(str, __version_info__)) From fda90ccefa72f529493451b7afa7eeab4845471b Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 24 May 2017 21:05:57 -0400 Subject: [PATCH 1150/2005] small refactor of python-734 to be less invasive in the api --- cassandra/cluster.py | 4 +++- cassandra/connection.py | 5 ++++- cassandra/pool.py | 11 +++++------ 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index fc6b22afe0..372d54330a 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1395,8 +1395,10 @@ def on_down(self, host, is_host_addition, expect_host_to_be_down=False): """ if self.is_shutdown: return + with host.lock: was_up = host.is_up + # ignore down signals if we have open pools to the host # this is to avoid closing pools when a control connection host became isolated if self._discount_down_events and self.profile_manager.distance(host) != HostDistance.IGNORED: @@ -3132,7 +3134,7 @@ def get_connections(self): c = getattr(self, '_connection', None) return [c] if c else [] - def return_connection(self, connection, mark_host_down=False): # noqa + def return_connection(self, connection): if connection is self._connection and (connection.is_defunct or connection.is_closed): self.reconnect() diff --git a/cassandra/connection.py b/cassandra/connection.py index 03c44fcc68..5f39ffed73 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -1007,7 +1007,10 @@ def run(self): for connection, owner, exc in failed_connections: self._raise_if_stopped() connection.defunct(exc) - owner.return_connection(connection, mark_host_down=True) + if not connection.is_control_connection: + # Only HostConnection supports shutdown_on_error + owner.shutdown_on_error = True + owner.return_connection(connection) except self.ShutdownException: pass except Exception: diff --git a/cassandra/pool.py b/cassandra/pool.py index 0fab068b4c..7a198e6757 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -305,6 +305,7 @@ class HostConnection(object): host = None host_distance = None is_shutdown = False + shutdown_on_error = False _session = None _connection = None @@ -359,14 +360,14 @@ def borrow_connection(self, timeout): raise NoConnectionsAvailable("All request IDs are currently in use") - def return_connection(self, connection, mark_host_down=False): + def return_connection(self, connection): with connection.lock: connection.in_flight -= 1 with self._stream_available_condition: self._stream_available_condition.notify() if connection.is_defunct or connection.is_closed: - if connection.signaled_error and not mark_host_down: + if connection.signaled_error and not self.shutdown_on_error: return is_down = False @@ -377,8 +378,7 @@ def return_connection(self, connection, mark_host_down=False): self.host, connection.last_error, is_host_addition=False) connection.signaled_error = True - # Force mark down a host on error, used by the ConnectionHeartbeat - if mark_host_down and not is_down: + if self.shutdown_on_error and not is_down: is_down = True self._session.cluster.on_down(self.host, is_host_addition=False) @@ -395,7 +395,6 @@ def return_connection(self, connection, mark_host_down=False): def _replace(self, connection): with self._lock: if self.is_shutdown: - self._is_replacing = False return log.debug("Replacing connection (%s) to %s", id(connection), self.host) @@ -642,7 +641,7 @@ def _wait_for_conn(self, timeout): raise NoConnectionsAvailable() - def return_connection(self, connection, mark_host_down=False): #noqa + def return_connection(self, connection): with connection.lock: connection.in_flight -= 1 in_flight = connection.in_flight From 87a4a60c52429b122c3073aedfd648dc18702b28 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 24 May 2017 21:26:47 -0400 Subject: [PATCH 1151/2005] Fix tests --- tests/unit/test_connection.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index c267d4b6b0..389ec32677 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -358,8 +358,9 @@ def test_no_req_ids(self, *args): self.assertEqual(max_connection.send_msg.call_count, 0) self.assertEqual(max_connection.send_msg.call_count, 0) max_connection.defunct.assert_has_calls([call(ANY)] * get_holders.call_count) + holder.shutdown_on_error = True holder.return_connection.assert_has_calls( - [call(max_connection, mark_host_down=True)] * get_holders.call_count) + [call(max_connection)] * get_holders.call_count) def test_unexpected_response(self, *args): request_id = 999 @@ -387,8 +388,9 @@ def send_msg(msg, req_id, msg_callback): exc = connection.defunct.call_args_list[0][0][0] self.assertIsInstance(exc, ConnectionException) self.assertRegexpMatches(exc.args[0], r'^Received unexpected response to OptionsMessage.*') + holder.shutdown_on_error = True holder.return_connection.assert_has_calls( - [call(connection, mark_host_down=True)] * get_holders.call_count) + [call(connection)] * get_holders.call_count) def test_timeout(self, *args): request_id = 999 @@ -417,8 +419,9 @@ def send_msg(msg, req_id, msg_callback): self.assertIsInstance(exc, OperationTimedOut) self.assertEqual(exc.errors, 'Connection heartbeat timeout after 0.05 seconds') self.assertEqual(exc.last_host, 'localhost') + holder.shutdown_on_error = True holder.return_connection.assert_has_calls( - [call(connection, mark_host_down=True)] * get_holders.call_count) + [call(connection)] * get_holders.call_count) class TimerTest(unittest.TestCase): From b2448f25e1ccb97c8bcd0d696b2783f9b5b1b45d Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 24 May 2017 21:29:56 -0400 Subject: [PATCH 1152/2005] remove unuseful assignement in tests --- tests/unit/test_connection.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index 389ec32677..ec9e1a3f4b 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -358,7 +358,6 @@ def test_no_req_ids(self, *args): self.assertEqual(max_connection.send_msg.call_count, 0) self.assertEqual(max_connection.send_msg.call_count, 0) max_connection.defunct.assert_has_calls([call(ANY)] * get_holders.call_count) - holder.shutdown_on_error = True holder.return_connection.assert_has_calls( [call(max_connection)] * get_holders.call_count) @@ -388,7 +387,6 @@ def send_msg(msg, req_id, msg_callback): exc = connection.defunct.call_args_list[0][0][0] self.assertIsInstance(exc, ConnectionException) self.assertRegexpMatches(exc.args[0], r'^Received unexpected response to OptionsMessage.*') - holder.shutdown_on_error = True holder.return_connection.assert_has_calls( [call(connection)] * get_holders.call_count) @@ -419,7 +417,6 @@ def send_msg(msg, req_id, msg_callback): self.assertIsInstance(exc, OperationTimedOut) self.assertEqual(exc.errors, 'Connection heartbeat timeout after 0.05 seconds') self.assertEqual(exc.last_host, 'localhost') - holder.shutdown_on_error = True holder.return_connection.assert_has_calls( [call(connection)] * get_holders.call_count) From 6232eb5fdcffe77b8d7eaea44c6d7bc289f9f88f Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 25 May 2017 11:25:11 -0400 Subject: [PATCH 1153/2005] improve release documentation --- README-dev.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README-dev.rst b/README-dev.rst index d14a9e45a8..a26dcf995e 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -9,9 +9,9 @@ Releasing * When in doubt, follow PEP 440 versioning * Add the new version in ``docs.yaml`` -* Commit the changelog and version changes +* Commit the changelog and version changes, e.g. ``git commit -m'version 1.0.0'`` * Tag the release. For example: ``git tag -a 1.0.0 -m 'version 1.0.0'`` -* Push the commit and tag: ``git push --tags origin master`` +* Push the tag and new ``master``: ``git push origin 1.0.0 ; git push origin master`` * Upload the package to pypi:: python setup.py register From 336e7636d1c06e6092558ae43e99ce7a34c4b172 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 25 May 2017 11:30:59 -0400 Subject: [PATCH 1154/2005] improve docs for uploading gh-pages docs --- README-dev.rst | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/README-dev.rst b/README-dev.rst index a26dcf995e..8812bce6b9 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -48,18 +48,19 @@ To build the docs, run:: python setup.py doc -To upload the docs, checkout the ``gh-pages`` branch (it's usually easier to -clone a second copy of this repo and leave it on that branch) and copy the entire +To upload the docs, checkout the ``gh-pages`` branch and copy the entire contents all of ``docs/_build/X.Y.Z/*`` into the root of the ``gh-pages`` branch and then push that branch to github. For example:: + git checkout 1.0.0 python setup.py doc - cp -R docs/_build/1.0.0-beta1/* ~/python-driver-docs/ - cd ~/python-driver-docs - git add --all - git commit -m 'Update docs' + git checkout gh-pages + cp -R docs/_build/1.0.0/* . + git add --update # add modified files + # Also make sure to add any new documentation files! + git commit -m 'Update docs (version 1.0.0)' git push origin gh-pages If docs build includes errors, those errors may not show up in the next build unless From 0af28ccbc919c5bf0eeee7a9d907aee778474537 Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 23 May 2017 10:03:39 -0400 Subject: [PATCH 1155/2005] Removed skip from test_watchers_are_finished --- tests/integration/standard/test_connection.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index ef2b7e4561..ffb315583f 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -17,7 +17,6 @@ except ImportError: import unittest # noqa -from nose.tools import nottest from functools import partial from six.moves import range import sys @@ -402,7 +401,6 @@ def setUp(self): 'libev does not appear to be installed properly') ConnectionTests.setUp(self) - @nottest def test_watchers_are_finished(self): """ Test for asserting that watchers are closed in LibevConnection From f916afd653589a7e020a7a2881e56d7a5880b61d Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 24 Apr 2017 15:43:05 -0400 Subject: [PATCH 1156/2005] Added fix to test_token_aware_composite_key test --- .../long/test_loadbalancingpolicies.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index ed15fc9a06..99612480af 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -16,7 +16,7 @@ from cassandra import ConsistencyLevel, Unavailable, OperationTimedOut, ReadTimeout, ReadFailure, \ WriteTimeout, WriteFailure -from cassandra.cluster import Cluster, NoHostAvailable, Session +from cassandra.cluster import Cluster, NoHostAvailable, ExecutionProfile from cassandra.concurrent import execute_concurrent_with_args from cassandra.metadata import murmur3 from cassandra.policies import (RoundRobinPolicy, DCAwareRoundRobinPolicy, @@ -475,9 +475,19 @@ def test_token_aware_composite_key(self): '(k1, k2, i) ' 'VALUES ' '(?, ?, ?)' % table) - session.execute(prepared.bind((1, 2, 3))) + bound = prepared.bind((1, 2, 3)) + result = session.execute(bound) + self.assertIn(result.response_future.attempted_hosts[0], + cluster.metadata.get_replicas(keyspace, bound.routing_key)) + + # There could be race condition with querying a node + # which doesn't yet have the data so we query one of + # the replicas + results = session.execute(SimpleStatement('SELECT * FROM %s WHERE k1 = 1 AND k2 = 2' % table, + routing_key=bound.routing_key)) + self.assertIn(results.response_future.attempted_hosts[0], + cluster.metadata.get_replicas(keyspace, bound.routing_key)) - results = session.execute('SELECT * FROM %s WHERE k1 = 1 AND k2 = 2' % table) self.assertTrue(results[0].i) cluster.shutdown() From 3eb2d20c6c3224700017058c4b975b533ce9704b Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 24 Apr 2017 17:34:21 -0400 Subject: [PATCH 1157/2005] Possible fix for test_async_timeouts --- tests/integration/long/test_failure_types.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py index 4af31b5229..c148ae1eeb 100644 --- a/tests/integration/long/test_failure_types.py +++ b/tests/integration/long/test_failure_types.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys,logging, traceback, time +import sys,logging, traceback, time, re from cassandra import (ConsistencyLevel, OperationTimedOut, ReadTimeout, WriteTimeout, ReadFailure, WriteFailure, FunctionFailure, ProtocolVersion) @@ -134,7 +134,7 @@ def setFailingNodes(self, failing_nodes, keyspace): # Ensure all nodes not on the list, but that are currently set to failing are enabled for node in self.nodes_currently_failing: if node not in failing_nodes: - node.stop(wait_other_notice=True, gently=False) + node.stop(wait_other_notice=True, gently=True) node.start(wait_for_binary_proto=True, wait_other_notice=True) self.nodes_currently_failing.remove(node) @@ -324,22 +324,28 @@ def setUp(self): """ # self.node1, self.node2, self.node3 = get_cluster().nodes.values() - self.node1 = get_node(1) + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) - self.session = self.cluster.connect() + self.session = self.cluster.connect(wait_for_all_pools=True) + + # We make sure that the host that will be stopped + # is the one with which we have the control connection + self.control_connection_host_number = int(re.match(r"^127.0.0.(\d)$", + self.cluster.get_control_connection_host().address).groups(0)[0]) + self.node_to_stop = get_node(self.control_connection_host_number) ddl = ''' CREATE TABLE test3rf.timeout ( k int PRIMARY KEY, v int )''' self.session.execute(ddl) - self.node1.pause() + self.node_to_stop.pause() def tearDown(self): """ Shutdown cluster and resume node1 """ - self.node1.resume() + self.node_to_stop.resume() self.session.execute("DROP TABLE test3rf.timeout") self.cluster.shutdown() From 391378bc457f69ff7bd47f417c5efff54886dcda Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 24 Apr 2017 17:53:51 -0400 Subject: [PATCH 1158/2005] Possible fix for test_for_schema_disagreement_attribute --- tests/integration/long/__init__.py | 5 +++++ tests/integration/long/test_schema.py | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/tests/integration/long/__init__.py b/tests/integration/long/__init__.py index df218cf114..bbab66014b 100644 --- a/tests/integration/long/__init__.py +++ b/tests/integration/long/__init__.py @@ -11,6 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + try: from ccmlib import common except ImportError as e: diff --git a/tests/integration/long/test_schema.py b/tests/integration/long/test_schema.py index 6e979a4541..e3de72fc3c 100644 --- a/tests/integration/long/test_schema.py +++ b/tests/integration/long/test_schema.py @@ -134,7 +134,7 @@ def test_for_schema_disagreement_attribute(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=0.001) session = cluster.connect(wait_for_all_pools=True) - rs = session.execute("CREATE KEYSPACE test_schema_disagreement WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}") + rs = session.execute("CREATE KEYSPACE test_schema_disagreement WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}") self.check_and_wait_for_agreement(session, rs, False) rs = session.execute("CREATE TABLE test_schema_disagreement.cf (key int PRIMARY KEY, value int)") self.check_and_wait_for_agreement(session, rs, False) @@ -145,7 +145,7 @@ def test_for_schema_disagreement_attribute(self): # These should have schema agreement cluster = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=100) session = cluster.connect() - rs = session.execute("CREATE KEYSPACE test_schema_disagreement WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}") + rs = session.execute("CREATE KEYSPACE test_schema_disagreement WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}") self.check_and_wait_for_agreement(session, rs, True) rs = session.execute("CREATE TABLE test_schema_disagreement.cf (key int PRIMARY KEY, value int)") self.check_and_wait_for_agreement(session, rs, True) From 82d36bf105458224432006112c2733c0049ec1bf Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 25 Apr 2017 16:38:12 -0400 Subject: [PATCH 1159/2005] Possible fix for test_async_timeouts --- tests/integration/long/test_failure_types.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py index c148ae1eeb..c39903ec41 100644 --- a/tests/integration/long/test_failure_types.py +++ b/tests/integration/long/test_failure_types.py @@ -16,7 +16,8 @@ from cassandra import (ConsistencyLevel, OperationTimedOut, ReadTimeout, WriteTimeout, ReadFailure, WriteFailure, FunctionFailure, ProtocolVersion) -from cassandra.cluster import Cluster, NoHostAvailable +from cassandra.cluster import Cluster, NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT +from cassandra.policies import WhiteListRoundRobinPolicy from cassandra.concurrent import execute_concurrent_with_args from cassandra.query import SimpleStatement from tests.integration import use_singledc, PROTOCOL_VERSION, get_cluster, setup_keyspace, remove_cluster, get_node @@ -325,13 +326,13 @@ def setUp(self): # self.node1, self.node2, self.node3 = get_cluster().nodes.values() - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + node1 = ExecutionProfile( + load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']) + ) + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, execution_profiles={EXEC_PROFILE_DEFAULT: node1}) self.session = self.cluster.connect(wait_for_all_pools=True) - # We make sure that the host that will be stopped - # is the one with which we have the control connection - self.control_connection_host_number = int(re.match(r"^127.0.0.(\d)$", - self.cluster.get_control_connection_host().address).groups(0)[0]) + self.control_connection_host_number = 1 self.node_to_stop = get_node(self.control_connection_host_number) ddl = ''' From b8f1b77383daa2912410e8c3670c55cbaab13f0d Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 27 Apr 2017 15:06:26 -0400 Subject: [PATCH 1160/2005] Added possible fix to test_unavailable --- tests/integration/standard/test_metrics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 02689d10c6..8882a58bf1 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -146,7 +146,7 @@ def test_unavailable(self): # Sometimes this commands continues with the other nodes having not noticed # 1 is down, and a Timeout error is returned instead of an Unavailable get_node(1).stop(wait=True, wait_other_notice=True) - + time.sleep(5) try: # Test write query = SimpleStatement("INSERT INTO test (k, v) VALUES (2, 2)", consistency_level=ConsistencyLevel.ALL) From dc8d840cb1c2e35056312d109afa9b42dd960fa8 Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 9 May 2017 16:20:28 -0400 Subject: [PATCH 1161/2005] Added possible fix to test_connect_on_keyspace --- tests/__init__.py | 2 +- tests/integration/__init__.py | 10 ++++++++-- tests/integration/standard/test_cluster.py | 6 +++--- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/tests/__init__.py b/tests/__init__.py index 14ea6b5ba9..c735c169bc 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -63,4 +63,4 @@ def is_monkey_patched(): notwindows = unittest.skipUnless(not "Windows" in platform.system(), "This test is not adequate for windows") notpypy = unittest.skipUnless(not platform.python_implementation() == 'PyPy', "This tests is not suitable for pypy") -notmonkeypatch = unittest.skipUnless(MONKEY_PATCH_LOOP, "Skpping this test because monkey patching is required") \ No newline at end of file +notmonkeypatch = unittest.skipUnless(MONKEY_PATCH_LOOP, "Skipping this test because monkey patching is required") \ No newline at end of file diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 4a0bd79d3c..999072e65d 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -529,11 +529,17 @@ def setup_keyspace(ipformat=None, wait=True): WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}''' execute_with_long_wait_retry(session, ddl) - ddl = ''' + ddl_3f = ''' CREATE TABLE test3rf.test ( k int PRIMARY KEY, v int )''' - execute_with_long_wait_retry(session, ddl) + execute_with_long_wait_retry(session, ddl_3f) + + ddl_1f = ''' + CREATE TABLE test1rf.test ( + k int PRIMARY KEY, + v int )''' + execute_with_long_wait_retry(session, ddl_1f) except Exception: traceback.print_exc() diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 9b216b2c23..a6ad63cb24 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -282,15 +282,15 @@ def test_connect_on_keyspace(self): session = cluster.connect() result = session.execute( """ - INSERT INTO test3rf.test (k, v) VALUES (8889, 8889) + INSERT INTO test1rf.test (k, v) VALUES (8889, 8889) """) self.assertFalse(result) - result = session.execute("SELECT * FROM test3rf.test") + result = session.execute("SELECT * FROM test1rf.test") self.assertEqual([(8889, 8889)], result) # test_connect_on_keyspace - session2 = cluster.connect('test3rf') + session2 = cluster.connect('test1rf') result2 = session2.execute("SELECT * FROM test") self.assertEqual(result, result2) cluster.shutdown() From 10dfa3eb898da328e26b3c535f9cee96c34da252 Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 12 May 2017 13:58:25 -0400 Subject: [PATCH 1162/2005] Removed unused class in test --- tests/integration/__init__.py | 18 ++++-------------- tests/integration/standard/test_metrics.py | 4 ++-- 2 files changed, 6 insertions(+), 16 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 999072e65d..32efdd5e3f 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -681,7 +681,7 @@ def tearDownClass(cls): drop_keyspace_shutdown_cluster(cls.ks_name, cls.session, cls.cluster) -class BasicSharedKeyspaceUnitTestCaseWTable(BasicSharedKeyspaceUnitTestCase): +class BasicSharedKeyspaceUnitTestCaseRF1(BasicSharedKeyspaceUnitTestCase): """ This is basic unit test case that can be leveraged to scope a keyspace to a specific test class. creates a keyspace named after the testclass with a rf of 1, and a table named after the class @@ -701,16 +701,6 @@ def setUpClass(self): self.common_setup(2) -class BasicSharedKeyspaceUnitTestCaseWTable(BasicSharedKeyspaceUnitTestCase): - """ - This is basic unit test case that can be leveraged to scope a keyspace to a specific test class. - creates a keyspace named after the testc lass with a rf of 2, and a table named after the class - """ - @classmethod - def setUpClass(self): - self.common_setup(3, True, True, True) - - class BasicSharedKeyspaceUnitTestCaseRF3(BasicSharedKeyspaceUnitTestCase): """ This is basic unit test case that can be leveraged to scope a keyspace to a specific test class. @@ -721,14 +711,14 @@ def setUpClass(self): self.common_setup(3) -class BasicSharedKeyspaceUnitTestCaseRF3WTable(BasicSharedKeyspaceUnitTestCase): +class BasicSharedKeyspaceUnitTestCaseRF3WM(BasicSharedKeyspaceUnitTestCase): """ This is basic unit test case that can be leveraged to scope a keyspace to a specific test class. - creates a keyspace named after the test class with a rf of 3 and a table named after the class + creates a keyspace named after the test class with a rf of 3 with metrics enabled """ @classmethod def setUpClass(self): - self.common_setup(3, True) + self.common_setup(3, True, True, True) class BasicSharedKeyspaceUnitTestCaseWFunctionTable(BasicSharedKeyspaceUnitTestCase): diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 8882a58bf1..00642aef3c 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -28,7 +28,7 @@ from cassandra.cluster import Cluster, NoHostAvailable from tests.integration import get_cluster, get_node, use_singledc, PROTOCOL_VERSION, execute_until_pass from greplin import scales -from tests.integration import BasicSharedKeyspaceUnitTestCaseWTable, BasicExistingKeyspaceUnitTestCase, local +from tests.integration import BasicSharedKeyspaceUnitTestCaseRF3WM, BasicExistingKeyspaceUnitTestCase, local def setup_module(): use_singledc() @@ -179,7 +179,7 @@ def test_unavailable(self): # pass -class MetricsNamespaceTest(BasicSharedKeyspaceUnitTestCaseWTable): +class MetricsNamespaceTest(BasicSharedKeyspaceUnitTestCaseRF3WM): @local def test_metrics_per_cluster(self): """ From be535e7fc46dda747e0040233be1f1c3fa42b2f4 Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 23 May 2017 13:57:23 -0400 Subject: [PATCH 1163/2005] Fix for test_for_schema_disagreement_attribute --- tests/integration/long/test_schema.py | 6 ++++-- tests/integration/standard/test_query.py | 3 ++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/integration/long/test_schema.py b/tests/integration/long/test_schema.py index e3de72fc3c..086130776a 100644 --- a/tests/integration/long/test_schema.py +++ b/tests/integration/long/test_schema.py @@ -136,7 +136,8 @@ def test_for_schema_disagreement_attribute(self): rs = session.execute("CREATE KEYSPACE test_schema_disagreement WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}") self.check_and_wait_for_agreement(session, rs, False) - rs = session.execute("CREATE TABLE test_schema_disagreement.cf (key int PRIMARY KEY, value int)") + rs = session.execute(SimpleStatement("CREATE TABLE test_schema_disagreement.cf (key int PRIMARY KEY, value int)", + consistency_level=ConsistencyLevel.ALL)) self.check_and_wait_for_agreement(session, rs, False) rs = session.execute("DROP KEYSPACE test_schema_disagreement") self.check_and_wait_for_agreement(session, rs, False) @@ -147,7 +148,8 @@ def test_for_schema_disagreement_attribute(self): session = cluster.connect() rs = session.execute("CREATE KEYSPACE test_schema_disagreement WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}") self.check_and_wait_for_agreement(session, rs, True) - rs = session.execute("CREATE TABLE test_schema_disagreement.cf (key int PRIMARY KEY, value int)") + rs = session.execute(SimpleStatement("CREATE TABLE test_schema_disagreement.cf (key int PRIMARY KEY, value int)", + consistency_level=ConsistencyLevel.ALL)) self.check_and_wait_for_agreement(session, rs, True) rs = session.execute("DROP KEYSPACE test_schema_disagreement") self.check_and_wait_for_agreement(session, rs, True) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 360edb0c8a..33ca43eb90 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -508,7 +508,8 @@ def test_prepare_batch_statement(self): batch_statement = BatchStatement(consistency_level=ConsistencyLevel.ONE) batch_statement.add(insert_statement, (1, 2)) session.execute(batch_statement) - select_results = session.execute("SELECT * FROM %s WHERE k = 1" % table) + select_results = session.execute(SimpleStatement("SELECT * FROM %s WHERE k = 1" % table, + consistency_level=ConsistencyLevel.ALL)) first_row = select_results[0][:2] self.assertEqual((1, 2), first_row) From e337ef26c9633558d11e7e8063ef9c894e44a4fa Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 23 May 2017 15:19:52 -0400 Subject: [PATCH 1164/2005] Moved test_watchers_are_finished to unit tests --- tests/integration/standard/test_connection.py | 42 ------------------ tests/unit/io/test_libevreactor.py | 43 ++++++++++++++++--- 2 files changed, 38 insertions(+), 47 deletions(-) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index ffb315583f..d1b676a3c1 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -37,7 +37,6 @@ try: from cassandra.io.libevreactor import LibevConnection - from cassandra.io.libevreactor import _cleanup as libev__cleanup except ImportError: LibevConnection = None @@ -400,44 +399,3 @@ def setUp(self): raise unittest.SkipTest( 'libev does not appear to be installed properly') ConnectionTests.setUp(self) - - def test_watchers_are_finished(self): - """ - Test for asserting that watchers are closed in LibevConnection - - It will open a connection to the Cluster and then abruptly clean it simulating, - a process termination without calling cluster.shutdown(), which would trigger - LibevConnection._libevloop._cleanup. Then it will check the watchers have been closed - Finally it will restore the LibevConnection reactor so it doesn't affect - the rest of the tests - - @since 3.10 - @jira_ticket PYTHON-747 - @expected_result the watchers are closed - - @test_category connection - """ - - # conn._write_watcher and conn._read_watcher will be closed - # when the request is finished so it may not be _cleanup the - # one who ends up cleaning them everytime. - for _ in range(10): - cluster = Cluster(connection_class=LibevConnection) - session = cluster.connect(wait_for_all_pools=True) - - session.execute_async("SELECT * FROM system.local LIMIT 1") - # We have to make a copy because the connections shouldn't - # be alive when we verify them - live_connections = set(LibevConnection._libevloop._live_conns) - - # This simulates the process ending without cluster.shutdown() - # being called, then with atexit _cleanup for libevreactor would - # be called - libev__cleanup(weakref.ref(LibevConnection._libevloop)) - - for conn in live_connections: - for watcher in (conn._write_watcher, conn._read_watcher): - self.assertTrue(watcher is None or not watcher.is_active()) - - cluster.shutdown() - LibevConnection._libevloop = None diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index 273dcc0035..cb15f38081 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -20,25 +20,24 @@ import math from mock import patch, Mock import os +import weakref import six from six import BytesIO from socket import error as socket_error -import sys -import time +from cassandra.io.libevreactor import _cleanup as libev__cleanup from cassandra.connection import (HEADER_DIRECTION_TO_CLIENT, ConnectionException, ProtocolError) from cassandra.protocol import (write_stringmultimap, write_int, write_string, SupportedMessage, ReadyMessage, ServerError) from cassandra.marshal import uint8_pack, uint32_pack, int32_pack -from tests.unit.io.utils import TimerCallback -from tests.unit.io.utils import submit_and_wait_for_completion + from tests import is_monkey_patched try: - from cassandra.io.libevreactor import LibevConnection + from cassandra.io.libevreactor import LibevConnection, LibevLoop except ImportError: LibevConnection = None # noqa @@ -296,3 +295,37 @@ def test_partial_message_read(self, *args): self.assertTrue(c.connected_event.is_set()) self.assertFalse(c.is_defunct) + + def test_watchers_are_finished(self, *args): + """ + Test for asserting that watchers are closed in LibevConnection + + This test simulates a process termination without calling cluster.shutdown(), which would trigger + LibevConnection._libevloop._cleanup. It will check the watchers have been closed + Finally it will restore the LibevConnection reactor so it doesn't affect + the rest of the tests + + @since 3.10 + @jira_ticket PYTHON-747 + @expected_result the watchers are closed + + @test_category connection + """ + with patch.object(LibevConnection._libevloop, "_thread"), \ + patch.object(LibevConnection._libevloop, "notify"): + + self.make_connection() + + # We have to make a copy because the connections shouldn't + # be alive when we verify them + live_connections = set(LibevConnection._libevloop._live_conns) + + # This simulates the process ending without cluster.shutdown() + # being called, then with atexit _cleanup for libevreactor would + # be called + libev__cleanup(weakref.ref(LibevConnection._libevloop)) + for conn in live_connections: + for watcher in (conn._write_watcher, conn._read_watcher): + self.assertTrue(watcher.stop.mock_calls) + + LibevConnection._libevloop._shutdown = False From 6d3c453d6fbbeaaff2784c38115a06dba33c33df Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 23 May 2017 17:35:06 -0400 Subject: [PATCH 1165/2005] Fixed opened connections in test_session_host_parameter --- tests/integration/standard/test_cluster.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index a6ad63cb24..e55bd975fe 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -192,13 +192,23 @@ def test_session_host_parameter(self): @test_category connection """ + # Test with empty list + cluster = Cluster(protocol_version=PROTOCOL_VERSION) with self.assertRaises(NoHostAvailable): - Session(Cluster(protocol_version=PROTOCOL_VERSION), []) + Session(cluster, []) + cluster.shutdown() + + # Test with only invalid + cluster = Cluster(protocol_version=PROTOCOL_VERSION) with self.assertRaises(NoHostAvailable): - Session(Cluster(protocol_version=PROTOCOL_VERSION), [Host("1.2.3.4", SimpleConvictionPolicy)]) - session = Session(Cluster(protocol_version=PROTOCOL_VERSION), [Host(x, SimpleConvictionPolicy) for x in + Session(cluster, [Host("1.2.3.4", SimpleConvictionPolicy)]) + cluster.shutdown() + + # Test with valid and invalid hosts + cluster = Cluster(protocol_version=PROTOCOL_VERSION) + Session(cluster, [Host(x, SimpleConvictionPolicy) for x in ("127.0.0.1", "127.0.0.2", "1.2.3.4")]) - session.shutdown() + cluster.shutdown() def test_protocol_negotiation(self): """ From 49769b1d8aa653e7c39c3cd89dc89a6a7ba3fe80 Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 24 May 2017 13:26:28 -0400 Subject: [PATCH 1166/2005] Added message in case of assertion fail --- tests/integration/standard/test_cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index e55bd975fe..e47c395314 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -297,7 +297,7 @@ def test_connect_on_keyspace(self): self.assertFalse(result) result = session.execute("SELECT * FROM test1rf.test") - self.assertEqual([(8889, 8889)], result) + self.assertEqual([(8889, 8889)], result, "Rows in ResultSet are {0}".format(result.current_rows)) # test_connect_on_keyspace session2 = cluster.connect('test1rf') From 8e6a1db2f8ce33b486d671aff479959aa531d4ed Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 25 May 2017 17:34:40 -0400 Subject: [PATCH 1167/2005] PrepareStatement should support is_idempotent --- CHANGELOG.rst | 7 +++++++ cassandra/query.py | 3 ++- tests/integration/standard/test_policies.py | 10 ++++++++++ 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f7bfc004fc..5423e039ab 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,10 @@ +3.11 +==== + +Bug Fixes +--------- +* is_idempotent flag is not propagated from PreparedStatement to BoundStatement (PYTHON-736) + 3.10.0 ====== May 24, 2017 diff --git a/cassandra/query.py b/cassandra/query.py index cfa3aef6af..a3dcc1d85b 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -496,7 +496,8 @@ def __init__(self, prepared_statement, retry_policy=None, consistency_level=None self.keyspace = meta[0].keyspace_name Statement.__init__(self, retry_policy, consistency_level, routing_key, - serial_consistency_level, fetch_size, keyspace, custom_payload) + serial_consistency_level, fetch_size, keyspace, custom_payload, + prepared_statement.is_idempotent) def bind(self, values): """ diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index 91f62df6d5..0503ce3b91 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -112,6 +112,16 @@ def test_speculative_execution(self): with self.assertRaises(OperationTimedOut): result = self.session.execute(statement, execution_profile='spec_ep_rr', timeout=.5) + # PYTHON-736 Test speculation policy works with a prepared statement + statement = self.session.prepare("SELECT timeout(100) FROM d WHERE k = ?") + # non-idempotent + result = self.session.execute(statement, (0,), execution_profile='spec_ep_brr') + self.assertEqual(1, len(result.response_future.attempted_hosts)) + # idempotent + statement.is_idempotent = True + result = self.session.execute(statement, (0,), execution_profile='spec_ep_brr') + self.assertLess(1, len(result.response_future.attempted_hosts)) + #TODO redo this tests with Scassandra def test_speculative_and_timeout(self): """ From 20d7de0435f040f1941e79f18aca5c4f362c35ac Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 26 May 2017 16:00:37 -0400 Subject: [PATCH 1168/2005] Fix small C* version issue in test_speculative_execution --- tests/integration/standard/test_policies.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index 0503ce3b91..9a4b8ae534 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -113,7 +113,7 @@ def test_speculative_execution(self): result = self.session.execute(statement, execution_profile='spec_ep_rr', timeout=.5) # PYTHON-736 Test speculation policy works with a prepared statement - statement = self.session.prepare("SELECT timeout(100) FROM d WHERE k = ?") + statement = self.session.prepare("SELECT timeout(i) FROM d WHERE k = ?") # non-idempotent result = self.session.execute(statement, (0,), execution_profile='spec_ep_brr') self.assertEqual(1, len(result.response_future.attempted_hosts)) From edf483f45eafcb366035d9756b38cd3eaf6ebc7a Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 26 May 2017 16:37:57 -0400 Subject: [PATCH 1169/2005] Skipped Libev tests if not installed --- tests/unit/io/test_libevreactor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index cb15f38081..f08af0f650 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -25,7 +25,6 @@ from six import BytesIO from socket import error as socket_error -from cassandra.io.libevreactor import _cleanup as libev__cleanup from cassandra.connection import (HEADER_DIRECTION_TO_CLIENT, ConnectionException, ProtocolError) @@ -37,6 +36,7 @@ try: + from cassandra.io.libevreactor import _cleanup as libev__cleanup from cassandra.io.libevreactor import LibevConnection, LibevLoop except ImportError: LibevConnection = None # noqa From c714839a427a716868a49aa428283b1a048a850f Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 26 May 2017 16:02:29 -0400 Subject: [PATCH 1170/2005] Bump cython version to allow 0.25.x --- CHANGELOG.rst | 4 ++++ setup.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 5423e039ab..8df7459614 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,6 +5,10 @@ Bug Fixes --------- * is_idempotent flag is not propagated from PreparedStatement to BoundStatement (PYTHON-736) +Other +----- +* Bump Cython dependency version to 0.25.2 (PYTHON-754) + 3.10.0 ====== May 24, 2017 diff --git a/setup.py b/setup.py index 93443a2139..9d1a723179 100644 --- a/setup.py +++ b/setup.py @@ -388,7 +388,7 @@ def run_setup(extensions): # 1.) build_ext eats errors at compile time, letting the install complete while producing useful feedback # 2.) there could be a case where the python environment has cython installed but the system doesn't have build tools if pre_build_check(): - cython_dep = 'Cython>=0.20,<0.25' + cython_dep = 'Cython>=0.20,!=0.25,<0.26' user_specified_cython_version = os.environ.get('CASS_DRIVER_ALLOWED_CYTHON_VERSION') if user_specified_cython_version is not None: cython_dep = 'Cython==%s' % (user_specified_cython_version,) From 78691a7ec589ef7109c4d98cd5f95a90cb0cd00e Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Sun, 4 Jun 2017 11:19:31 -0400 Subject: [PATCH 1171/2005] Ensure asyncore dispatchers are closed on exit --- CHANGELOG.rst | 1 + cassandra/io/asyncorereactor.py | 10 +++++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 5423e039ab..b9afb17159 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,6 +4,7 @@ Bug Fixes --------- * is_idempotent flag is not propagated from PreparedStatement to BoundStatement (PYTHON-736) +* Fix asyncore hang on exit (PYTHON-767) 3.10.0 ====== diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 66fce9bb49..04e9684ec9 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -240,6 +240,8 @@ def add_timer(self, timer): self._timers.add_timer(timer) def _cleanup(self): + global _dispatcher_map + self._shutdown = True if not self._thread: return @@ -253,6 +255,12 @@ def _cleanup(self): log.debug("Event loop thread was joined") + # Ensure all connections are closed and in-flight requests cancelled + for conn in tuple(_dispatcher_map.values()): + conn.close() + + log.debug("Dispatchers were closed") + class AsyncoreConnection(Connection, asyncore.dispatcher): """ @@ -326,7 +334,7 @@ def close(self): #This happens when the connection is shutdown while waiting for the ReadyMessage if not self.connected_event.is_set(): self.last_error = ConnectionShutdown("Connection to %s was closed" % self.host) - + # don't leave in-progress operations hanging self.connected_event.set() From e31394692dce9c4252f4c1b49d792a7af97fe5fc Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 9 Jun 2017 15:41:13 -0400 Subject: [PATCH 1172/2005] All Heartbeat futures should have the same timeout --- CHANGELOG.rst | 1 + cassandra/connection.py | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 328dfcbead..b68722f783 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,6 +5,7 @@ Bug Fixes --------- * is_idempotent flag is not propagated from PreparedStatement to BoundStatement (PYTHON-736) * Fix asyncore hang on exit (PYTHON-767) +* Driver takes several minutes to remove a bad host from session (PYTHON-762) Other ----- diff --git a/cassandra/connection.py b/cassandra/connection.py index 5f39ffed73..5908342930 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -990,11 +990,14 @@ def run(self): owner.return_connection(connection) self._raise_if_stopped() + # Wait max `self._interval` seconds for all HeartbeatFutures to complete + timeout = self._interval + start_time = time.time() for f in futures: self._raise_if_stopped() connection = f.connection try: - f.wait(self._interval) + f.wait(timeout) # TODO: move this, along with connection locks in pool, down into Connection with connection.lock: connection.in_flight -= 1 @@ -1004,6 +1007,8 @@ def run(self): id(connection), connection.host) failed_connections.append((f.connection, f.owner, e)) + timeout = self._interval - (time.time() - start_time) + for connection, owner, exc in failed_connections: self._raise_if_stopped() connection.defunct(exc) From e19c041822f87b27086a9ce0106eec76f865b22e Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 12 Jun 2017 09:26:46 -0400 Subject: [PATCH 1173/2005] Add idle_heartbeat_timeout cluster option to tune how long to wait for heartbeat responses --- CHANGELOG.rst | 4 ++++ cassandra/cluster.py | 16 ++++++++++++++-- cassandra/connection.py | 9 +++++---- docs/api/cassandra/cluster.rst | 2 ++ tests/unit/test_connection.py | 4 ++-- 5 files changed, 27 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index b68722f783..4bd805ed20 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,10 @@ 3.11 ==== +Features +-------- +* Add idle_heartbeat_timeout cluster option to tune how long to wait for heartbeat responses. (PYTHON-762) + Bug Fixes --------- * is_idempotent flag is not propagated from PreparedStatement to BoundStatement (PYTHON-736) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 372d54330a..cfe12f9c9c 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -590,6 +590,12 @@ def default_retry_policy(self, policy): Setting to zero disables heartbeats. """ + idle_heartbeat_timeout = 30 + """ + Timeout, in seconds, on which the heartbeat wait for idle connection responses. + Lowering this value can help to discover bad connections earlier. + """ + schema_event_refresh_window = 2 """ Window, in seconds, within which a schema component will be refreshed after @@ -756,7 +762,8 @@ def __init__(self, reprepare_on_up=True, execution_profiles=None, allow_beta_protocol_version=False, - timestamp_generator=None): + timestamp_generator=None, + idle_heartbeat_timeout=30): """ ``executor_threads`` defines the number of threads in a pool for handling asynchronous tasks such as extablishing connection pools or refreshing metadata. @@ -847,6 +854,7 @@ def __init__(self, self.max_schema_agreement_wait = max_schema_agreement_wait self.control_connection_timeout = control_connection_timeout self.idle_heartbeat_interval = idle_heartbeat_interval + self.idle_heartbeat_timeout = idle_heartbeat_timeout self.schema_event_refresh_window = schema_event_refresh_window self.topology_event_refresh_window = topology_event_refresh_window self.status_event_refresh_window = status_event_refresh_window @@ -1187,7 +1195,11 @@ def connect(self, keyspace=None, wait_for_all_pools=False): self.profile_manager.check_supported() # todo: rename this method if self.idle_heartbeat_interval: - self._idle_heartbeat = ConnectionHeartbeat(self.idle_heartbeat_interval, self.get_connection_holders) + self._idle_heartbeat = ConnectionHeartbeat( + self.idle_heartbeat_interval, + self.get_connection_holders, + timeout=self.idle_heartbeat_timeout + ) self._is_setup = True session = self._new_session(keyspace) diff --git a/cassandra/connection.py b/cassandra/connection.py index 5908342930..c1e9d88867 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -951,9 +951,10 @@ def _options_callback(self, response): class ConnectionHeartbeat(Thread): - def __init__(self, interval_sec, get_connection_holders): + def __init__(self, interval_sec, get_connection_holders, timeout): Thread.__init__(self, name="Connection heartbeat") self._interval = interval_sec + self._timeout = timeout self._get_connection_holders = get_connection_holders self._shutdown_event = Event() self.daemon = True @@ -990,8 +991,8 @@ def run(self): owner.return_connection(connection) self._raise_if_stopped() - # Wait max `self._interval` seconds for all HeartbeatFutures to complete - timeout = self._interval + # Wait max `self._timeout` seconds for all HeartbeatFutures to complete + timeout = self._timeout start_time = time.time() for f in futures: self._raise_if_stopped() @@ -1007,7 +1008,7 @@ def run(self): id(connection), connection.host) failed_connections.append((f.connection, f.owner, e)) - timeout = self._interval - (time.time() - start_time) + timeout = self._timeout - (time.time() - start_time) for connection, owner, exc in failed_connections: self._raise_if_stopped() diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index c37851ad2d..86e168ad08 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -46,6 +46,8 @@ .. autoattribute:: idle_heartbeat_interval + .. autoattribute:: idle_heartbeat_timeout + .. autoattribute:: schema_event_refresh_window .. autoattribute:: topology_event_refresh_window diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index ec9e1a3f4b..9d312b85db 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -277,8 +277,8 @@ def make_get_holders(len): get_holders = Mock(return_value=holders) return get_holders - def run_heartbeat(self, get_holders_fun, count=2, interval=0.05): - ch = ConnectionHeartbeat(interval, get_holders_fun) + def run_heartbeat(self, get_holders_fun, count=2, interval=0.05, timeout=0.05): + ch = ConnectionHeartbeat(interval, get_holders_fun, timeout=timeout) time.sleep(interval * count) ch.stop() self.assertTrue(get_holders_fun.call_count) From 32a057c47e8efb8c920dcb49d0c110f14df627ae Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 12 Jun 2017 16:12:00 -0400 Subject: [PATCH 1174/2005] Added a more complete pre build check to setup.py --- CHANGELOG.rst | 1 + setup.py | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 4bd805ed20..f6f43545b7 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -10,6 +10,7 @@ Bug Fixes * is_idempotent flag is not propagated from PreparedStatement to BoundStatement (PYTHON-736) * Fix asyncore hang on exit (PYTHON-767) * Driver takes several minutes to remove a bad host from session (PYTHON-762) +* Installation doesn't always fall back to no cython in Windows (PYTHON-763) Other ----- diff --git a/setup.py b/setup.py index 9d1a723179..42547a2383 100644 --- a/setup.py +++ b/setup.py @@ -348,6 +348,13 @@ def pre_build_check(): compiler = new_compiler(compiler=be.compiler) customize_compiler(compiler) + try: + # We must be able to initialize the compiler if it has that method + if hasattr(compiler, "initialize"): + compiler.initialize() + except: + return False + executables = [] if compiler.compiler_type in ('unix', 'cygwin'): executables = [compiler.executables[exe][0] for exe in ('compiler_so', 'linker_so')] From 2113517142892333ac2a63e0fdd0d1b196d20c70 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 13 Jun 2017 13:38:20 -0400 Subject: [PATCH 1175/2005] add details to README-dev.rst --- README-dev.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README-dev.rst b/README-dev.rst index 8812bce6b9..8c8b71bcca 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -2,6 +2,9 @@ Releasing ========= * Run the tests and ensure they all pass * Update CHANGELOG.rst + + * Check for any missing entries + * Add today's date to the release section * Update the version in ``cassandra/__init__.py`` * For beta releases, use a version like ``(2, 1, '0b1')`` @@ -31,6 +34,9 @@ Releasing * test and push updated branch to origin * Update the JIRA versions: https://datastax-oss.atlassian.net/plugins/servlet/project-config/PYTHON/versions + + * add release dates and set version as "released" + * Make an announcement on the mailing list Building the Docs From c59763815d2cb29ae08ab5ab14932de7dd28212c Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 15 Jun 2017 14:09:06 -0400 Subject: [PATCH 1176/2005] Better state cleaning in HostConnection.shutdown --- cassandra/pool.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cassandra/pool.py b/cassandra/pool.py index 7a198e6757..5043c9d114 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -421,6 +421,7 @@ def shutdown(self): if self._connection: self._connection.close() + self._connection = None def _set_keyspace_for_all_conns(self, keyspace, callback): if self.is_shutdown or not self._connection: From e57450c3ffe3a39f72ed2a0db4ed77be7bcbe28f Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 16 Jun 2017 07:29:11 -0400 Subject: [PATCH 1177/2005] Ensure shutdown_on_error is set before defuncting the connection --- cassandra/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index c1e9d88867..b888cbda6a 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -1012,10 +1012,10 @@ def run(self): for connection, owner, exc in failed_connections: self._raise_if_stopped() - connection.defunct(exc) if not connection.is_control_connection: # Only HostConnection supports shutdown_on_error owner.shutdown_on_error = True + connection.defunct(exc) owner.return_connection(connection) except self.ShutdownException: pass From 69b44c5131a34497eea5b176c5935d0ec4012bf0 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 16 Jun 2017 10:31:17 -0400 Subject: [PATCH 1178/2005] 772 changelog --- CHANGELOG.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f6f43545b7..be4b91b6fb 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -11,6 +11,7 @@ Bug Fixes * Fix asyncore hang on exit (PYTHON-767) * Driver takes several minutes to remove a bad host from session (PYTHON-762) * Installation doesn't always fall back to no cython in Windows (PYTHON-763) +* Avoid to replace a connection that is supposed to shutdown (PYTHON-772) Other ----- From a8c35c8670161279e3f1337b4a7aa1bb0a75a3d4 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 15 Jun 2017 17:50:57 -0400 Subject: [PATCH 1179/2005] fix in_flight += 1 invariant in set_keyspace_async --- CHANGELOG.rst | 1 + cassandra/connection.py | 38 +++++++++++++++++++++++++------------- 2 files changed, 26 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index be4b91b6fb..fb49e6cbb2 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -12,6 +12,7 @@ Bug Fixes * Driver takes several minutes to remove a bad host from session (PYTHON-762) * Installation doesn't always fall back to no cython in Windows (PYTHON-763) * Avoid to replace a connection that is supposed to shutdown (PYTHON-772) +* request_ids may not be returned to the pool (PYTHON-739) Other ----- diff --git a/cassandra/connection.py b/cassandra/connection.py index b888cbda6a..900b6d0cdf 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -810,7 +810,29 @@ def set_keyspace_async(self, keyspace, callback): When the operation completes, `callback` will be called with two arguments: this connection and an Exception if an error occurred, otherwise :const:`None`. + + This method will always increment :attr:`.in_flight` attribute, even if + it doesn't need to make a request, just to maintain an + ":attr:`.in_flight` is incremented" invariant. """ + # Here we increment in_flight unconditionally, whether we need to issue + # a request or not. This is bad, but allows callers -- specifically + # _set_keyspace_for_all_conns -- to assume that we increment + # self.in_flight during this call. This allows the passed callback to + # safely call HostConnection{Pool,}.return_connection on this + # Connection. + # + # We use a busy wait on the lock here because: + # - we'll only spin if the connection is at max capacity, which is very + # unlikely for a set_keyspace call + # - it allows us to avoid signaling a condition every time a request completes + while True: + with self.lock: + if self.in_flight < self.max_request_id: + self.in_flight += 1 + break + time.sleep(0.001) + if not keyspace or keyspace == self.keyspace: callback(self, None) return @@ -828,19 +850,9 @@ def process_result(result): callback(self, self.defunct(ConnectionException( "Problem while setting keyspace: %r" % (result,), self.host))) - request_id = None - # we use a busy wait on the lock here because: - # - we'll only spin if the connection is at max capacity, which is very - # unlikely for a set_keyspace call - # - it allows us to avoid signaling a condition every time a request completes - while True: - with self.lock: - if self.in_flight < self.max_request_id: - request_id = self.get_request_id() - self.in_flight += 1 - break - - time.sleep(0.001) + # We've incremented self.in_flight above, so we "have permission" to + # acquire a new request id + request_id = self.get_request_id() self.send_msg(query, request_id, process_result) From 286e66357dd65b30665883c26c6dc7b4f4f06c09 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 16 Jun 2017 11:41:10 -0400 Subject: [PATCH 1180/2005] fix assert placement in get_request_id The old behavior could lead to cases where highest_request_id could be incremented beyond max_request_id and not fixed. --- cassandra/connection.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index 900b6d0cdf..483dd529e4 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -436,9 +436,10 @@ def get_request_id(self): try: return self.request_ids.popleft() except IndexError: - self.highest_request_id += 1 + new_request_id = self.highest_request_id + 1 # in_flight checks should guarantee this assert self.highest_request_id <= self.max_request_id + self.highest_request_id = new_request_id return self.highest_request_id def handle_pushed(self, response): From e301ff9c702bbc0741a74da5634108d0986381d3 Mon Sep 17 00:00:00 2001 From: Shevchenko Vitaliy Date: Thu, 1 Jun 2017 18:31:21 +0300 Subject: [PATCH 1181/2005] Fix DeprecationWarning when using lz4 --- cassandra/connection.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index 5f39ffed73..bc5a19ca91 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -61,6 +61,12 @@ except ImportError: pass else: + # The compress and decompress functions we need were moved from the lz4 to + # the lz4.block namespace, so we try both here. + try: + lz4_block = lz4.block + except AttributeError: + lz4_block = lz4 # Cassandra writes the uncompressed message length in big endian order, # but the lz4 lib requires little endian order, so we wrap these @@ -68,11 +74,11 @@ def lz4_compress(byts): # write length in big-endian instead of little-endian - return int32_pack(len(byts)) + lz4.compress(byts)[4:] + return int32_pack(len(byts)) + lz4_block.compress(byts)[4:] def lz4_decompress(byts): # flip from big-endian to little-endian - return lz4.decompress(byts[3::-1] + byts[4:]) + return lz4_block.decompress(byts[3::-1] + byts[4:]) locally_supported_compressions['lz4'] = (lz4_compress, lz4_decompress) From 0c510e72728b794da41242c83e97434b6e5d9111 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 19 Jun 2017 11:19:32 -0400 Subject: [PATCH 1182/2005] changelog for 769 --- CHANGELOG.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index fb49e6cbb2..56c9c58555 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -17,6 +17,7 @@ Bug Fixes Other ----- * Bump Cython dependency version to 0.25.2 (PYTHON-754) +* Fix DeprecationWarning when using lz4 (PYTHON-769) 3.10.0 ====== From 25fb93157fb50e841c7ff16913787890bee2d877 Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 26 Jun 2017 10:22:20 -0400 Subject: [PATCH 1183/2005] Updated build.yaml to run on the weekend --- build.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/build.yaml b/build.yaml index a7a9c78132..f1d3dd74d3 100644 --- a/build.yaml +++ b/build.yaml @@ -21,14 +21,14 @@ schedules: - cassandra: ['2.0', '2.1', '3.0'] weekly_libev: - schedule: weekly + schedule: 0 10 * * 6 branches: include: [master] env_vars: | EVENT_LOOP_MANAGER='libev' weekly_gevent: - schedule: weekly + schedule: 0 14 * * 6 branches: include: [master] env_vars: | @@ -38,21 +38,21 @@ schedules: - python: [3.4, 3.5, 3.6] weekly_eventlet: - schedule: weekly + schedule: 0 18 * * 6 branches: include: [master] env_vars: | EVENT_LOOP_MANAGER='eventlet' weekly_async: - schedule: weekly + schedule: 0 10 * * 7 branches: include: [master] env_vars: | EVENT_LOOP_MANAGER='async' weekly_twister: - schedule: weekly + schedule: 0 14 * * 7 branches: include: [master] env_vars: | From 9f897c76041f0d5cde201f1fd6bc76a56d00cb12 Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 20 Apr 2017 12:38:22 -0400 Subject: [PATCH 1184/2005] Updated appveyor tests --- appveyor/appveyor.ps1 | 2 ++ appveyor/run_test.ps1 | 43 ++++++++++++++++++++++++++++------- tests/integration/__init__.py | 1 + 3 files changed, 38 insertions(+), 8 deletions(-) diff --git a/appveyor/appveyor.ps1 b/appveyor/appveyor.ps1 index af7ce5cc6d..cbee5154a2 100644 --- a/appveyor/appveyor.ps1 +++ b/appveyor/appveyor.ps1 @@ -2,6 +2,8 @@ $env:JAVA_HOME="C:\Program Files\Java\jdk1.8.0" $env:PATH="$($env:JAVA_HOME)\bin;$($env:PATH)" $env:CCM_PATH="C:\Users\appveyor\ccm" $env:CASSANDRA_VERSION=$env:cassandra_version +$env:EVENT_LOOP_MANAGER="async" + python --version python -c "import platform; print(platform.architecture())" # Install Ant diff --git a/appveyor/run_test.ps1 b/appveyor/run_test.ps1 index 9e809a3245..26e2cf3334 100644 --- a/appveyor/run_test.ps1 +++ b/appveyor/run_test.ps1 @@ -11,14 +11,41 @@ python --version python -c "import platform; print(platform.architecture())" $wc = New-Object 'System.Net.WebClient' -nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml .\tests\unit -$env:MONKEY_PATCH_LOOP=1 -nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml .\tests\unit\io\test_geventreactor.py -nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml .\tests\unit\io\test_eventletreactor.py -Remove-Item $env:MONKEY_PATCH_LOOP +if($env:ci_type -eq 'unit'){ + echo "Running Unit tests" + nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml .\tests\unit -echo "uploading unit results" -$wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\unit_results.xml)) + $env:MONKEY_PATCH_LOOP=1 + nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml .\tests\unit\io\test_geventreactor.py + nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml .\tests\unit\io\test_eventletreactor.py + Remove-Item $env:MONKEY_PATCH_LOOP -exit 0 + echo "uploading unit results" + $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\unit_results.xml)) + +} + +if($env:ci_type -eq 'standard'){ + echo "Running CQLEngine integration tests" + nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=cqlengine_results.xml .\tests\integration\cqlengine + $cqlengine_tests_result = $lastexitcode + $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\cqlengine_results.xml)) + echo "uploading CQLEngine test results" + + echo "Running standard integration tests" + nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml .\tests\integration\standard + $integration_tests_result = $lastexitcode + $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\standard_results.xml)) + echo "uploading standard integration test results" +} + +if($env:ci_type -eq 'long'){ + nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=cqlengine_results.xml .\tests\integration\cqlengine + $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\cqlengine_results.xml)) + echo "uploading standard integration test results" +} + +$exit_result = $unit_tests_result + $cqlengine_tests_result + $integration_tests_result +echo "Exit result: $exit_result" +exit $exit_result diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 32efdd5e3f..862c0c4495 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -31,6 +31,7 @@ elif "twisted" in EVENT_LOOP_MANAGER: from cassandra.io.twistedreactor import TwistedConnection connection_class = TwistedConnection + else: from cassandra.io.libevreactor import LibevConnection connection_class = LibevConnection From 491413ea4fe94cc8989a239dcc342c60fe0b355a Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 27 Apr 2017 16:20:39 -0400 Subject: [PATCH 1185/2005] Fixed appveyor integration tests --- appveyor.yml | 4 ++-- appveyor/appveyor.ps1 | 4 ++++ appveyor/run_test.ps1 | 6 +++--- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index dd3f1a97dc..5233a00129 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -1,10 +1,10 @@ environment: matrix: - PYTHON: "C:\\Python27-x64" - cassandra_version: 3.0.5 + cassandra_version: 3.10 ci_type: standard - PYTHON: "C:\\Python34" - cassandra_version: 3.0.5 + cassandra_version: 3.10 ci_type: unit os: Visual Studio 2015 platform: diff --git a/appveyor/appveyor.ps1 b/appveyor/appveyor.ps1 index cbee5154a2..f7f6158ba4 100644 --- a/appveyor/appveyor.ps1 +++ b/appveyor/appveyor.ps1 @@ -59,9 +59,13 @@ $env:PYTHONPATH="$($env:CCM_PATH);$($env:PYTHONPATH)" $env:PATH="$($env:CCM_PATH);$($env:PATH)" # Predownload cassandra version for CCM if it isn't already downloaded. +# This is necessary because otherwise ccm fails If (!(Test-Path C:\Users\appveyor\.ccm\repository\$env:cassandra_version)) { Start-Process python -ArgumentList "$($env:CCM_PATH)\ccm.py create -v $($env:cassandra_version) -n 1 predownload" -Wait -NoNewWindow + echo "Checking status of download" + python $env:CCM_PATH\ccm.py status Start-Process python -ArgumentList "$($env:CCM_PATH)\ccm.py remove predownload" -Wait -NoNewWindow + echo "Downloaded version $env:cassandra_version" } Start-Process python -ArgumentList "-m pip install -r test-requirements.txt" -Wait -NoNewWindow diff --git a/appveyor/run_test.ps1 b/appveyor/run_test.ps1 index 26e2cf3334..1c4f5d366a 100644 --- a/appveyor/run_test.ps1 +++ b/appveyor/run_test.ps1 @@ -3,9 +3,9 @@ Set-ExecutionPolicy -ExecutionPolicy Unrestricted -Scope Process -force Set-ExecutionPolicy -ExecutionPolicy Unrestricted -Scope CurrentUser -force Get-ExecutionPolicy -List echo $env:Path -echo $env:JAVA_HOME -echo $env:PYTHONPATH -echo $env:CASSANDRA_VERSION +echo "JAVA_HOME: $env:JAVA_HOME" +echo "PYTHONPATH: $env:PYTHONPATH" +echo "Cassandra version: $env:CASSANDRA_VERSION" echo $env:ci_type python --version python -c "import platform; print(platform.architecture())" From 1b56cc6930b1f831c6deb69196181da186ecd5fd Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 8 May 2017 11:38:40 -0400 Subject: [PATCH 1186/2005] Skipped test for windows --- tests/integration/standard/test_connection.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index d1b676a3c1..4b9f73b5e9 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -32,7 +32,7 @@ from cassandra.policies import WhiteListRoundRobinPolicy, HostStateListener from cassandra.pool import HostConnectionPool -from tests import is_monkey_patched +from tests import is_monkey_patched, notwindows from tests.integration import use_singledc, PROTOCOL_VERSION, get_node, CASSANDRA_IP, local try: @@ -360,6 +360,9 @@ def send_msgs(conn, event): for t in threads: t.join() + # We skip this one for windows because the clock is not as + # granular as in linux + @notwindows def test_connect_timeout(self): # Underlying socket implementations don't always throw a socket timeout even with min float # This can be timing sensitive, added retry to ensure failure occurs if it can From 39d1cc5533e8d22724231eeede154f3a6fcb69cc Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 26 Jun 2017 16:37:21 -0400 Subject: [PATCH 1187/2005] Added potential fix for test_trace_unavailable --- tests/integration/standard/test_cluster.py | 31 +++++++++++++++++----- tests/integration/standard/test_metrics.py | 2 ++ 2 files changed, 26 insertions(+), 7 deletions(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index e47c395314..03ed4769b2 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -605,19 +605,36 @@ def test_trace_unavailable(self): @test_category query """ cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.addCleanup(cluster.shutdown) session = cluster.connect() query = "SELECT * FROM system.local" statement = SimpleStatement(query) - future = session.execute_async(statement, trace=True) - future.result() - self.assertRaises(TraceUnavailable, future.get_query_trace, -1.0) - query = SimpleStatement("SELECT * FROM system.local") - future = session.execute_async(query, trace=True) - self.assertRaises(TraceUnavailable, future.get_query_trace, max_wait=120) + max_retry_count = 10 + for i in range(max_retry_count): + future = session.execute_async(statement, trace=True) + future.result() + try: + result = future.get_query_trace(-1.0) + # In case the result has time to come back before this timeout due to a race condition + check_trace(result) + except TraceUnavailable: + break + else: + raise Exception("get_query_trace didn't raise TraceUnavailable after {} tries".format(max_retry_count)) + - cluster.shutdown() + for i in range(max_retry_count): + future = session.execute_async(statement, trace=True) + try: + result = future.get_query_trace(max_wait=120) + # In case the result has been set check the trace + check_trace(result) + except TraceUnavailable: + break + else: + raise Exception("get_query_trace didn't raise TraceUnavailable after {} tries".format(max_retry_count)) def test_string_coverage(self): """ diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 00642aef3c..3811325522 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -195,6 +195,8 @@ def test_metrics_per_cluster(self): default_retry_policy=FallthroughRetryPolicy()) cluster2.connect(self.ks_name, wait_for_all_pools=True) + self.assertEqual(len(cluster2.metadata.all_hosts()), 3) + query = SimpleStatement("SELECT * FROM {0}.{0}".format(self.ks_name), consistency_level=ConsistencyLevel.ALL) self.session.execute(query) From d4385bf2b735bc9c239578b5142fa7b50a0422bf Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 28 Jun 2017 14:02:54 -0400 Subject: [PATCH 1188/2005] KEEP_TEST_CLUSTER env var --- tests/integration/__init__.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 862c0c4495..7667e0e2c7 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -113,6 +113,8 @@ def _tuple_version(version_string): USE_CASS_EXTERNAL = bool(os.getenv('USE_CASS_EXTERNAL', False)) +KEEP_TEST_CLUSTER = bool(os.getenv('KEEP_TEST_CLUSTER', False)) + # If set to to true this will force the Cython tests to run regardless of whether they are installed cython_env = os.getenv('VERIFY_CYTHON', "False") @@ -309,7 +311,7 @@ def use_single_node(start=True, workloads=[]): def remove_cluster(): - if USE_CASS_EXTERNAL: + if USE_CASS_EXTERNAL or KEEP_TEST_CLUSTER: return global CCM_CLUSTER @@ -416,7 +418,7 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[]): def teardown_package(): - if USE_CASS_EXTERNAL: + if USE_CASS_EXTERNAL or KEEP_TEST_CLUSTER: return # when multiple modules are run explicitly, this runs between them # need to make sure CCM_CLUSTER is properly cleared for that case From 6f4d4ff7dfb59ffad3ec01b11b9120bae70af663 Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 5 Jul 2017 11:33:07 -0400 Subject: [PATCH 1189/2005] Changed _defer_fields and _only_fields in CQLEngine so they store the name of the fields in the DB --- cassandra/cqlengine/query.py | 18 +++++++++++++----- .../cqlengine/query/test_queryset.py | 9 +++++++++ 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 98154cbfb7..615a650b05 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -355,9 +355,15 @@ def __init__(self, model): # because explicit is better than implicit self._limit = 10000 - # see the defer and only methods + # We store the fields for which we use the Equal operator + # in a query, so we don't select it from the DB. _defer_fields + # will contain the names of the fields in the DB, not the names + # of the variables used by the mapper self._defer_fields = set() self._deferred_values = {} + + # This variable will hold the names in the database of the fields + # for which we want to query self._only_fields = [] self._values_list = False @@ -719,7 +725,7 @@ def filter(self, *args, **kwargs): else: query_val = column.to_database(val) if not col_op: # only equal values should be deferred - clone._defer_fields.add(col_name) + clone._defer_fields.add(column.db_field_name) clone._deferred_values[column.db_field_name] = val # map by db field name for substitution in results clone._where.append(WhereClause(column.db_field_name, operator, query_val, quote_field=quote_field)) @@ -941,6 +947,8 @@ def _only_or_defer(self, action, fields): "Can't resolve fields {0} in {1}".format( ', '.join(missing_fields), self.model.__name__)) + fields = [self.model._columns[field].db_field_name for field in fields] + if action == 'defer': clone._defer_fields.update(fields) elif action == 'only': @@ -1068,18 +1076,18 @@ def _validate_select_where(self): def _select_fields(self): if self._defer_fields or self._only_fields: - fields = self.model._columns.keys() + fields = [columns.db_field_name for columns in self.model._columns.values()] if self._defer_fields: fields = [f for f in fields if f not in self._defer_fields] # select the partition keys if all model fields are set defer if not fields: - fields = self.model._partition_keys + fields = [columns.db_field_name for columns in self.model._partition_keys.values()] if self._only_fields: fields = [f for f in fields if f in self._only_fields] if not fields: raise QueryException('No fields in select query. Only fields: "{0}", defer fields: "{1}"'.format( ','.join(self._only_fields), ','.join(self._defer_fields))) - return [self.model._columns[f].db_field_name for f in fields] + return fields return super(ModelQuerySet, self)._select_fields() def _get_result_constructor(self): diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index fbb23e18f1..004f21a037 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -1281,6 +1281,15 @@ def test_db_field_names_used(self): for value in values: self.assertTrue(value not in str(b2.queries[0])) + def test_db_field_value_list(self): + DBFieldModel.create(k0=0, k1=0, c0=0, v0=4, v1=5) + + self.assertEqual(DBFieldModel.objects.filter(c0=0, k0=0, k1=0).values_list('c0', 'v0')._defer_fields, + {'a', 'c', 'b'}) + self.assertEqual(DBFieldModel.objects.filter(c0=0, k0=0, k1=0).values_list('c0', 'v0')._only_fields, + ['c', 'd']) + + list(DBFieldModel.objects.filter(c0=0, k0=0, k1=0).values_list('c0', 'v0')) class TestModelSmall(Model): From 6bec6fd7e852ae33e12cf475b030d08260b04240 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 10 Jul 2017 14:55:03 -0400 Subject: [PATCH 1190/2005] check new value, not old one part of PYTHON-734 --- cassandra/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index b94310b9df..57f776874d 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -444,7 +444,7 @@ def get_request_id(self): except IndexError: new_request_id = self.highest_request_id + 1 # in_flight checks should guarantee this - assert self.highest_request_id <= self.max_request_id + assert new_request_id <= self.max_request_id self.highest_request_id = new_request_id return self.highest_request_id From 9aaf1ec532f730c9fefd5ba4440553ea75ffa6b3 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 26 May 2017 12:05:32 -0400 Subject: [PATCH 1191/2005] whitespace fixes pre-PYTHON-761 --- cassandra/policies.py | 1 + .../integration/long/test_loadbalancingpolicies.py | 13 ++++++++----- tests/unit/test_policies.py | 10 ++++------ 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/cassandra/policies.py b/cassandra/policies.py index 389ee35343..d078da27d6 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -619,6 +619,7 @@ class WriteType(object): A lighweight-transaction write, such as "DELETE ... IF EXISTS". """ + WriteType.name_to_value = { 'SIMPLE': WriteType.SIMPLE, 'BATCH': WriteType.BATCH, diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index 99612480af..f16df83f1a 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -12,11 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -import struct, time, logging, sys, traceback +import logging +import struct +import sys +import traceback from cassandra import ConsistencyLevel, Unavailable, OperationTimedOut, ReadTimeout, ReadFailure, \ WriteTimeout, WriteFailure -from cassandra.cluster import Cluster, NoHostAvailable, ExecutionProfile +from cassandra.cluster import Cluster, NoHostAvailable from cassandra.concurrent import execute_concurrent_with_args from cassandra.metadata import murmur3 from cassandra.policies import (RoundRobinPolicy, DCAwareRoundRobinPolicy, @@ -40,7 +43,7 @@ class LoadBalancingPolicyTests(unittest.TestCase): def setUp(self): - remove_cluster() # clear ahead of test so it doesn't use one left in unknown state + remove_cluster() # clear ahead of test so it doesn't use one left in unknown state self.coordinator_stats = CoordinatorStats() self.prepared = None self.probe_cluster = None @@ -105,7 +108,7 @@ def _query(self, session, keyspace, count=12, query_string = 'SELECT * FROM %s.cf WHERE k = ?' % keyspace if not self.prepared or self.prepared.query_string != query_string: self.prepared = session.prepare(query_string) - self.prepared.consistency_level=consistency_level + self.prepared.consistency_level = consistency_level for i in range(count): tries = 0 while True: @@ -508,7 +511,7 @@ def test_token_aware_with_rf_2(self, use_prepared=False): self.coordinator_stats.reset_counts() stop(2) - self._wait_for_nodes_down([2],cluster) + self._wait_for_nodes_down([2], cluster) self._query(session, keyspace) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 33f913cc93..c66fe53260 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -421,7 +421,6 @@ def test_modification_during_generation(self): policy.on_up(hosts[2]) policy.on_up(hosts[3]) - another_host = Host(5, SimpleConvictionPolicy) another_host.set_location_info("dc3", "rack1") new_host.set_location_info("dc3", "rack1") @@ -755,7 +754,7 @@ def test_no_shuffle_if_given_no_keyspace(self): @test_category policy """ self._assert_shuffle(keyspace=None, routing_key='routing_key') - + def test_no_shuffle_if_given_no_routing_key(self): """ Test to validate the hosts are not shuffled when no routing_key is provided @@ -766,7 +765,7 @@ def test_no_shuffle_if_given_no_routing_key(self): @test_category policy """ self._assert_shuffle(keyspace='keyspace', routing_key=None) - + @patch('cassandra.policies.shuffle') def _assert_shuffle(self, patched_shuffle, keyspace, routing_key): hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)] @@ -884,7 +883,7 @@ def test_bad_vals(self): self.assertRaises(ValueError, ExponentialReconnectionPolicy, -1, 0) self.assertRaises(ValueError, ExponentialReconnectionPolicy, 0, -1) self.assertRaises(ValueError, ExponentialReconnectionPolicy, 9000, 1) - self.assertRaises(ValueError, ExponentialReconnectionPolicy, 1, 2,-1) + self.assertRaises(ValueError, ExponentialReconnectionPolicy, 1, 2, -1) def test_schedule_no_max(self): base_delay = 2.0 @@ -1235,8 +1234,7 @@ def test_hosts_with_hostname(self): class AddressTranslatorTest(unittest.TestCase): def test_identity_translator(self): - it = IdentityTranslator() - addr = '127.0.0.1' + IdentityTranslator() @patch('socket.getfqdn', return_value='localhost') def test_ec2_multi_region_translator(self, *_): From 391889b06791832d6ac36660f19aba139b3e8d6d Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 26 May 2017 14:26:07 -0400 Subject: [PATCH 1192/2005] add HostFilterPolicy and tests --- CHANGELOG.rst | 5 + cassandra/policies.py | 128 +++++++++++ docs/api/cassandra/policies.rst | 8 + .../long/test_loadbalancingpolicies.py | 37 +++- tests/unit/test_policies.py | 200 +++++++++++++++++- 5 files changed, 375 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 56c9c58555..eb460b24bf 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,6 +4,7 @@ Features -------- * Add idle_heartbeat_timeout cluster option to tune how long to wait for heartbeat responses. (PYTHON-762) +* Add HostFilterPolicy (PYTHON-761) Bug Fixes --------- @@ -19,6 +20,10 @@ Other * Bump Cython dependency version to 0.25.2 (PYTHON-754) * Fix DeprecationWarning when using lz4 (PYTHON-769) +Other +----- +* Deprecate WhiteListRoundRobinPolicy (PYTHON-759) + 3.10.0 ====== May 24, 2017 diff --git a/cassandra/policies.py b/cassandra/policies.py index d078da27d6..1144f4f469 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -17,6 +17,7 @@ from random import randint, shuffle from threading import Lock import socket +from warnings import warn from cassandra import ConsistencyLevel, OperationTimedOut @@ -396,6 +397,10 @@ def on_remove(self, *args, **kwargs): class WhiteListRoundRobinPolicy(RoundRobinPolicy): """ + |wlrrp| **is deprecated. It will be removed in 4.0.** It can effectively be + reimplemented using :class:`.HostFilterPolicy`. For more information, see + PYTHON-758_. + A subclass of :class:`.RoundRobinPolicy` which evenly distributes queries across all nodes in the cluster, regardless of what datacenter the nodes may be in, but @@ -405,12 +410,25 @@ class WhiteListRoundRobinPolicy(RoundRobinPolicy): https://datastax-oss.atlassian.net/browse/JAVA-145 Where connection errors occur when connection attempts are made to private IP addresses remotely + + .. |wlrrp| raw:: html + + WhiteListRoundRobinPolicy + + .. _PYTHON-758: https://datastax-oss.atlassian.net/browse/PYTHON-758 + """ def __init__(self, hosts): """ The `hosts` parameter should be a sequence of hosts to permit connections to. """ + msg = ('WhiteListRoundRobinPolicy is deprecated. ' + 'It will be removed in 4.0. ' + 'It can effectively be reimplemented using HostFilterPolicy.') + warn(msg, DeprecationWarning) + # DeprecationWarnings are silent by default so we also log the message + log.warning(msg) self._allowed_hosts = hosts self._allowed_hosts_resolved = [endpoint[4][0] for a in self._allowed_hosts @@ -441,6 +459,116 @@ def on_add(self, host): RoundRobinPolicy.on_add(self, host) +class HostFilterPolicy(LoadBalancingPolicy): + """ + A :class:`.LoadBalancingPolicy` subclass configured with a child policy, + and a single-argument predicate. This policy defers to the child policy for + hosts where ``predicate(host)`` is truthy. Hosts for which + ``predicate(host)`` is falsey will be considered :attr:`.IGNORED`, and will + not be used in a query plan. + + This can be used in the cases where you need a whitelist or blacklist + policy, e.g. to prepare for decommissioning nodes or for testing: + + .. code-block:: python + + def address_is_ignored(host): + return host.address in [ignored_address0, ignored_address1] + + blacklist_filter_policy = HostFilterPolicy( + child_policy=RoundRobinPolicy(), + predicate=address_is_ignored + ) + + cluster = Cluster( + primary_host, + load_balancing_policy=blacklist_filter_policy, + ) + + Please note that whitelist and blacklist policies are not recommended for + general, day-to-day use. You probably want something like + :class:`.DCAwareRoundRobinPolicy`, which prefers a local DC but has + fallbacks, over a brute-force method like whitelisting or blacklisting. + """ + + def __init__(self, child_policy, predicate): + """ + :param child_policy: an instantiated :class:`.LoadBalancingPolicy` + that this one will defer to. + :param predicate: a one-parameter function that takes a :class:`.Host`. + If it returns a falsey value, the :class:`.Host` will + be :attr:`.IGNORED` and not returned in query plans. + """ + super(HostFilterPolicy, self).__init__() + self._child_policy = child_policy + self._predicate = predicate + + def on_up(self, host, *args, **kwargs): + if self.predicate(host): + return self._child_policy.on_up(host, *args, **kwargs) + + def on_down(self, host, *args, **kwargs): + if self.predicate(host): + return self._child_policy.on_down(host, *args, **kwargs) + + def on_add(self, host, *args, **kwargs): + if self.predicate(host): + return self._child_policy.on_add(host, *args, **kwargs) + + def on_remove(self, host, *args, **kwargs): + if self.predicate(host): + return self._child_policy.on_remove(host, *args, **kwargs) + + @property + def predicate(self): + """ + A predicate, set on object initialization, that takes a :class:`.Host` + and returns a value. If the value is falsy, the :class:`.Host` is + :class:`~HostDistance.IGNORED`. If the value is truthy, + :class:`.HostFilterPolicy` defers to the child policy to determine the + host's distance. + + This is a read-only value set in ``__init__``, implemented as a + ``property``. + """ + return self._predicate + + def distance(self, host): + """ + Checks if ``predicate(host)``, then returns + :attr:`~HostDistance.IGNORED` if falsey, and defers to the child policy + otherwise. + """ + if self.predicate(host): + return self._child_policy.distance(host) + else: + return HostDistance.IGNORED + + def populate(self, cluster, hosts): + self._child_policy.populate( + cluster=cluster, + hosts=[h for h in hosts if self.predicate(h)] + ) + + def make_query_plan(self, working_keyspace=None, query=None): + """ + Defers to the child policy's + :meth:`.LoadBalancingPolicy.make_query_plan`. Since host changes (up, + down, addition, and removal) have not been propagated to the child + policy, the child policy will only ever return policies for which + :meth:`.predicate(host)` was truthy when that change occurred. + """ + child_qp = self._child_policy.make_query_plan( + working_keyspace=working_keyspace, query=query + ) + for host in child_qp: + if self.predicate(host): + yield host + + def check_supported(self): + return self._child_policy.check_supported() + + class ConvictionPolicy(object): """ A policy which decides when hosts should be considered down diff --git a/docs/api/cassandra/policies.rst b/docs/api/cassandra/policies.rst index 294fb5cfa9..0cb16ed1de 100644 --- a/docs/api/cassandra/policies.rst +++ b/docs/api/cassandra/policies.rst @@ -24,6 +24,14 @@ Load Balancing .. autoclass:: TokenAwarePolicy :members: +.. autoclass:: HostFilterPolicy + + # we document these methods manually so we can specify a param to predicate + + .. automethod:: predicate(host) + .. automethod:: distance + .. automethod:: make_query_plan + Translating Server Node Addresses --------------------------------- diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index f16df83f1a..707171e5b9 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -23,7 +23,8 @@ from cassandra.concurrent import execute_concurrent_with_args from cassandra.metadata import murmur3 from cassandra.policies import (RoundRobinPolicy, DCAwareRoundRobinPolicy, - TokenAwarePolicy, WhiteListRoundRobinPolicy) + TokenAwarePolicy, WhiteListRoundRobinPolicy, + HostFilterPolicy) from cassandra.query import SimpleStatement from tests.integration import use_singledc, use_multidc, remove_cluster, PROTOCOL_VERSION @@ -665,3 +666,37 @@ def test_white_list(self): pass finally: cluster.shutdown() + + def test_black_list_with_host_filter_policy(self): + use_singledc() + keyspace = 'test_black_list_with_hfp' + ignored_address = (IP_FORMAT % 2) + hfp = HostFilterPolicy( + child_policy=RoundRobinPolicy(), + predicate=lambda host: host.address != ignored_address + ) + cluster = Cluster( + (IP_FORMAT % 1,), + load_balancing_policy=hfp, + protocol_version=PROTOCOL_VERSION, + topology_event_refresh_window=0, + status_event_refresh_window=0 + ) + self.addCleanup(cluster.shutdown) + session = cluster.connect() + self._wait_for_nodes_up([1, 2, 3]) + + self.assertNotIn(ignored_address, [h.address for h in hfp.make_query_plan()]) + + create_schema(cluster, session, keyspace) + self._insert(session, keyspace) + self._query(session, keyspace) + + self.coordinator_stats.assert_query_count_equals(self, 1, 6) + self.coordinator_stats.assert_query_count_equals(self, 2, 0) + self.coordinator_stats.assert_query_count_equals(self, 3, 6) + + # policy should not allow reconnecting to ignored host + force_stop(2) + self._wait_for_nodes_down([2]) + self.assertFalse(cluster.metadata._hosts[ignored_address].is_currently_reconnecting()) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index c66fe53260..9f3ae0c9c1 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -18,9 +18,10 @@ import unittest # noqa from itertools import islice, cycle -from mock import Mock, patch +from mock import Mock, patch, call from random import randint import six +from six.moves._thread import LockType import sys import struct from threading import Thread @@ -34,7 +35,7 @@ RetryPolicy, WriteType, DowngradingConsistencyRetryPolicy, ConstantReconnectionPolicy, LoadBalancingPolicy, ConvictionPolicy, ReconnectionPolicy, FallthroughRetryPolicy, - IdentityTranslator, EC2MultiRegionTranslator) + IdentityTranslator, EC2MultiRegionTranslator, HostFilterPolicy) from cassandra.pool import Host from cassandra.query import Statement @@ -1231,6 +1232,23 @@ def test_hosts_with_hostname(self): self.assertEqual(policy.distance(host), HostDistance.LOCAL) + def test_deprecated(self): + import warnings + + warnings.resetwarnings() # in case we've instantiated one before + + # set up warning filters to allow all, set up restore when this test is done + filters_backup, warnings.filters = warnings.filters, [] + self.addCleanup(setattr, warnings, 'filters', filters_backup) + + with warnings.catch_warnings(record=True) as caught_warnings: + WhiteListRoundRobinPolicy([]) + self.assertEqual(len(caught_warnings), 1) + warning_message = caught_warnings[-1] + self.assertEqual(warning_message.category, DeprecationWarning) + self.assertIn('4.0', warning_message.message.args[0]) + + class AddressTranslatorTest(unittest.TestCase): def test_identity_translator(self): @@ -1243,3 +1261,181 @@ def test_ec2_multi_region_translator(self, *_): translated = ec2t.translate(addr) self.assertIsNot(translated, addr) # verifies that the resolver path is followed self.assertEqual(translated, addr) # and that it resolves to the same address + + +class HostFilterPolicyInitTest(unittest.TestCase): + + def setUp(self): + self.child_policy, self.predicate = (Mock(name='child_policy'), + Mock(name='predicate')) + + def _check_init(self, hfp): + self.assertIs(hfp._child_policy, self.child_policy) + self.assertIsInstance(hfp._hosts_lock, LockType) + + # we can't use a simple assertIs because we wrap the function + arg0, arg1 = Mock(name='arg0'), Mock(name='arg1') + hfp.predicate(arg0) + hfp.predicate(arg1) + self.predicate.assert_has_calls([call(arg0), call(arg1)]) + + def test_init_arg_order(self): + self._check_init(HostFilterPolicy(self.child_policy, self.predicate)) + + def test_init_kwargs(self): + self._check_init(HostFilterPolicy( + predicate=self.predicate, child_policy=self.child_policy + )) + + def test_immutable_predicate(self): + expected_message_regex = "can't set attribute" + hfp = HostFilterPolicy(child_policy=Mock(name='child_policy'), + predicate=Mock(name='predicate')) + with self.assertRaisesRegexp(AttributeError, expected_message_regex): + hfp.predicate = object() + + +class HostFilterPolicyDeferralTest(unittest.TestCase): + + def setUp(self): + self.passthrough_hfp = HostFilterPolicy( + child_policy=Mock(name='child_policy'), + predicate=Mock(name='passthrough_predicate', + return_value=True) + ) + self.filterall_hfp = HostFilterPolicy( + child_policy=Mock(name='child_policy'), + predicate=Mock(name='filterall_predicate', + return_value=False) + ) + + def _check_host_triggered_method(self, policy, name): + arg, kwarg = Mock(name='arg'), Mock(name='kwarg') + expect_deferral = policy is self.passthrough_hfp + method, child_policy_method = (getattr(policy, name), + getattr(policy._child_policy, name)) + + result = method(arg, kw=kwarg) + + if expect_deferral: + # method calls the child policy's method... + child_policy_method.assert_called_once_with(arg, kw=kwarg) + # and returns its return value + self.assertIs(result, child_policy_method.return_value) + else: + child_policy_method.assert_not_called() + + def test_defer_on_up_to_child_policy(self): + self._check_host_triggered_method(self.passthrough_hfp, 'on_up') + + def test_defer_on_down_to_child_policy(self): + self._check_host_triggered_method(self.passthrough_hfp, 'on_down') + + def test_defer_on_add_to_child_policy(self): + self._check_host_triggered_method(self.passthrough_hfp, 'on_add') + + def test_defer_on_remove_to_child_policy(self): + self._check_host_triggered_method(self.passthrough_hfp, 'on_remove') + + def test_filtered_host_on_up_doesnt_call_child_policy(self): + self._check_host_triggered_method(self.filterall_hfp, 'on_up') + + def test_filtered_host_on_down_doesnt_call_child_policy(self): + self._check_host_triggered_method(self.filterall_hfp, 'on_down') + + def test_filtered_host_on_add_doesnt_call_child_policy(self): + self._check_host_triggered_method(self.filterall_hfp, 'on_add') + + def test_filtered_host_on_remove_doesnt_call_child_policy(self): + self._check_host_triggered_method(self.filterall_hfp, 'on_remove') + + def _check_check_supported_deferral(self, policy): + policy.check_supported() + policy._child_policy.check_supported.assert_called_once() + + def test_check_supported_defers_to_child(self): + self._check_check_supported_deferral(self.passthrough_hfp) + + def test_check_supported_defers_to_child_when_predicate_filtered(self): + self._check_check_supported_deferral(self.filterall_hfp) + + +class HostFilterPolicyDistanceTest(unittest.TestCase): + + def setUp(self): + self.hfp = HostFilterPolicy( + child_policy=Mock(name='child_policy', distance=Mock(name='distance')), + predicate=lambda host: host.address == 'acceptme' + ) + self.ignored_host = Host(inet_address='ignoreme', conviction_policy_factory=Mock()) + self.accepted_host = Host(inet_address='acceptme', conviction_policy_factory=Mock()) + + def test_ignored_with_filter(self): + self.assertEqual(self.hfp.distance(self.ignored_host), + HostDistance.IGNORED) + self.assertNotEqual(self.hfp.distance(self.accepted_host), + HostDistance.IGNORED) + + def test_accepted_filter_defers_to_child_policy(self): + self.hfp._child_policy.distance.side_effect = distances = Mock(), Mock() + + # getting the distance for an ignored host shouldn't affect subsequent results + self.hfp.distance(self.ignored_host) + # first call of _child_policy with count() side effect + self.assertEqual(self.hfp.distance(self.accepted_host), distances[0]) + # second call of _child_policy with count() side effect + self.assertEqual(self.hfp.distance(self.accepted_host), distances[1]) + + +class HostFilterPolicyPopulateTest(unittest.TestCase): + + def test_populate_deferred_to_child(self): + hfp = HostFilterPolicy( + child_policy=Mock(name='child_policy'), + predicate=lambda host: True + ) + mock_cluster, hosts = (Mock(name='cluster'), + ['host1', 'host2', 'host3']) + hfp.populate(mock_cluster, hosts) + hfp._child_policy.populate.assert_called_once_with( + cluster=mock_cluster, + hosts=hosts + ) + + def test_child_not_populated_with_filtered_hosts(self): + hfp = HostFilterPolicy( + child_policy=Mock(name='child_policy'), + predicate=lambda host: 'acceptme' in host + ) + mock_cluster, hosts = (Mock(name='cluster'), + ['acceptme0', 'ignoreme0', 'ignoreme1', 'acceptme1']) + hfp.populate(mock_cluster, hosts) + hfp._child_policy.populate.assert_called_once() + self.assertEqual( + hfp._child_policy.populate.call_args[1]['hosts'], + ['acceptme0', 'acceptme1'] + ) + + +class HostFilterPolicyQueryPlanTest(unittest.TestCase): + + def test_query_plan_deferred_to_child(self): + child_policy = Mock( + name='child_policy', + make_query_plan=Mock( + return_value=[object(), object(), object()] + ) + ) + hfp = HostFilterPolicy( + child_policy=child_policy, + predicate=lambda host: True + ) + working_keyspace, query = (Mock(name='working_keyspace'), + Mock(name='query')) + qp = list(hfp.make_query_plan(working_keyspace=working_keyspace, + query=query)) + hfp._child_policy.make_query_plan.assert_called_once_with( + working_keyspace=working_keyspace, + query=query + ) + self.assertEqual(qp, hfp._child_policy.make_query_plan.return_value) From 42b1fbaebaa58aeb9d0f37b6bb5315b4c219ee67 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 11 Jul 2017 15:29:09 -0400 Subject: [PATCH 1193/2005] fix murmur3 on big-endian systems --- CHANGELOG.rst | 1 + cassandra/murmur3.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 56c9c58555..e28afe809f 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -13,6 +13,7 @@ Bug Fixes * Installation doesn't always fall back to no cython in Windows (PYTHON-763) * Avoid to replace a connection that is supposed to shutdown (PYTHON-772) * request_ids may not be returned to the pool (PYTHON-739) +* Fix murmur3 on big-endian systems (PYTHON-653) Other ----- diff --git a/cassandra/murmur3.py b/cassandra/murmur3.py index 61180c0121..c1d5ab3258 100644 --- a/cassandra/murmur3.py +++ b/cassandra/murmur3.py @@ -7,7 +7,7 @@ def body_and_tail(data): nblocks = l // 16 tail = l % 16 if nblocks: - return struct.unpack_from('qq' * nblocks, data), struct.unpack_from('b' * tail, data, -tail), l + return struct.unpack_from('<' + ('qq' * nblocks), data), struct.unpack_from('b' * tail, data, -tail), l else: return tuple(), struct.unpack_from('b' * tail, data, -tail), l From b349ecbf699e8f6a5e8aacf893383a7e6679ac0a Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 12 Jul 2017 10:24:03 -0400 Subject: [PATCH 1194/2005] explain use of byte-order sigil --- cassandra/murmur3.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cassandra/murmur3.py b/cassandra/murmur3.py index c1d5ab3258..7c8d641b32 100644 --- a/cassandra/murmur3.py +++ b/cassandra/murmur3.py @@ -7,6 +7,8 @@ def body_and_tail(data): nblocks = l // 16 tail = l % 16 if nblocks: + # we use '<', specifying little-endian byte order for data bigger than + # a byte so behavior is the same on little- and big-endian platforms return struct.unpack_from('<' + ('qq' * nblocks), data), struct.unpack_from('b' * tail, data, -tail), l else: return tuple(), struct.unpack_from('b' * tail, data, -tail), l From 1ca83aa8326a0fb935c42be18e781a851c952ec0 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 12 Jul 2017 06:44:05 -0400 Subject: [PATCH 1195/2005] Make sure the session is shutdown on destruction --- CHANGELOG.rst | 1 + cassandra/cluster.py | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index e28afe809f..cccdf873f9 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -14,6 +14,7 @@ Bug Fixes * Avoid to replace a connection that is supposed to shutdown (PYTHON-772) * request_ids may not be returned to the pool (PYTHON-739) * Fix murmur3 on big-endian systems (PYTHON-653) +* Ensure unused connections are closed if a Session is deleted by the GC (PYTHON-774) Other ----- diff --git a/cassandra/cluster.py b/cassandra/cluster.py index cfe12f9c9c..b5a7336e4e 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2325,6 +2325,10 @@ def __enter__(self): def __exit__(self, *args): self.shutdown() + def __del__(self): + # Ensure all connections are closed, in case the Session object is deleted by the GC + self.shutdown() + def add_or_renew_pool(self, host, is_host_addition): """ For internal use only. From f7881d9e04da7da37aff31b0abee26ea763d3b4b Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 13 Jul 2017 09:48:13 -0400 Subject: [PATCH 1196/2005] PYTHON-785 changelog entry --- CHANGELOG.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index cacf589f34..c62daf3bbb 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -16,6 +16,7 @@ Bug Fixes * request_ids may not be returned to the pool (PYTHON-739) * Fix murmur3 on big-endian systems (PYTHON-653) * Ensure unused connections are closed if a Session is deleted by the GC (PYTHON-774) +* Fix .values_list by using db names internally (cqlengine) (PYTHON-785) Other ----- From 0fcd4b99d7c8011e77d3037abc4a06a5826808a3 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 13 Jul 2017 15:41:42 -0400 Subject: [PATCH 1197/2005] flesh out ResultSet upgrade guide (#796) PYTHON-464 --- docs/upgrading.rst | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/docs/upgrading.rst b/docs/upgrading.rst index 93ac213cac..9ab8eb31f4 100644 --- a/docs/upgrading.rst +++ b/docs/upgrading.rst @@ -46,9 +46,16 @@ materialize a list using the iterator: results = session.execute("SELECT * FROM system.local") row_list = list(results) -For backward compatability, :class:`~.ResultSet` supports indexing. If -the result is paged, all pages will be materialized. A warning will -be logged if a paged query is implicitly materialized. +For backward compatability, :class:`~.ResultSet` supports indexing. When +accessed at an index, a `~.ResultSet` object will materialize all its pages: + +.. code-block:: python + + results = session.execute("SELECT * FROM system.local") + first_result = results[0] # materializes results, fetching all pages + +This can send requests and load (possibly large) results into memory, so +`~.ResultSet` will log a warning on implicit materialization. Trace information is not attached to executed Statements ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ From a51cae2055714923a1df69f0987a72590d0b74be Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 14 Jul 2017 13:14:58 -0400 Subject: [PATCH 1198/2005] fix docs/index.rst (#797) --- docs/index.rst | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index 3f9ab66bd4..a9f3cb3825 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -4,7 +4,7 @@ A Python client driver for `Apache Cassandra `_. This driver works exclusively with the Cassandra Query Language v3 (CQL3) and Cassandra's native protocol. Cassandra 2.1+ is supported. -The driver supports Python 2.6, 2.7, 3.3, 3.4, 3.5, and 3.6. +The driver supports Python 2.7, 3.3, 3.4, 3.5, and 3.6. This driver is open source under the `Apache v2 License `_. @@ -74,8 +74,7 @@ Visit the :doc:`FAQ section ` in this documentation. Please send questions to the `mailing list `_. -Alternatively, you can use IRC. Connect to the #datastax-drivers channel on irc.freenode.net. -If you don't have an IRC client, you can use `freenode's web-based client `_. +Alternatively, you can use the `#datastax-drivers` channel in the DataStax Acadamy Slack to ask questions in real time. Reporting Issues ---------------- From e7c7d7bf27576b7ca92503e250599d02df0a87e2 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 17 Jul 2017 09:05:57 -0400 Subject: [PATCH 1199/2005] timestamp granularity doc fix (#799) PYTHON-717 --- docs/dates_and_times.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/dates_and_times.rst b/docs/dates_and_times.rst index 5b13cb7f55..7a89f77437 100644 --- a/docs/dates_and_times.rst +++ b/docs/dates_and_times.rst @@ -7,7 +7,7 @@ reasoning behind it, and describe approaches to working with these types. timestamps (Cassandra DateType) ------------------------------- -Timestamps in Cassandra are timezone-naive timestamps encoded as microseconds since UNIX epoch. Clients working with +Timestamps in Cassandra are timezone-naive timestamps encoded as millseconds since UNIX epoch. Clients working with timestamps in this database usually find it easiest to reason about them if they are always assumed to be UTC. To quote the pytz documentation, "The preferred way of dealing with times is to always work in UTC, converting to localtime only when generating output to be read by humans." The driver adheres to this tenant, and assumes UTC is always in the database. The @@ -34,7 +34,7 @@ Note the second point above applies even to "local" times created using ``now()` These do not contain timezone information intrinsically, so they will be assumed to be UTC and not shifted. When generating timestamps in the application, it is clearer to use ``datetime.utcnow()`` to be explicit about it. -If the input for a timestamp is numeric, it is assumed to be a epoch-relative microsecond timestamp, as specified in the +If the input for a timestamp is numeric, it is assumed to be a epoch-relative millisecond timestamp, as specified in the CQL spec -- no scaling or conversion is done. Read Path From b7c2eb700521dd7fc6c1401d7aa432ec88a592a4 Mon Sep 17 00:00:00 2001 From: Colin Deasy Date: Mon, 17 Jul 2017 15:23:54 +0100 Subject: [PATCH 1200/2005] Add python 3.5 and 3.6 classifier in setup.py (#749) --- setup.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/setup.py b/setup.py index 42547a2383..ef50cde69e 100644 --- a/setup.py +++ b/setup.py @@ -431,6 +431,8 @@ def run_setup(extensions): 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Software Development :: Libraries :: Python Modules' From 1f9bb019c780a49a2f5e580c495927812287d216 Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 13 Jul 2017 08:25:04 -0400 Subject: [PATCH 1201/2005] Replaced WhiteHost policy for FilterHost policy in tests --- cassandra/policies.py | 17 ++---- tests/integration/long/test_failure_types.py | 6 +- tests/integration/standard/test_cluster.py | 55 ++++++++++++++----- tests/integration/standard/test_connection.py | 10 +++- tests/integration/standard/test_metrics.py | 6 +- 5 files changed, 62 insertions(+), 32 deletions(-) diff --git a/cassandra/policies.py b/cassandra/policies.py index 1144f4f469..25565a2c6e 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -504,20 +504,16 @@ def __init__(self, child_policy, predicate): self._predicate = predicate def on_up(self, host, *args, **kwargs): - if self.predicate(host): - return self._child_policy.on_up(host, *args, **kwargs) + return self._child_policy.on_up(host, *args, **kwargs) def on_down(self, host, *args, **kwargs): - if self.predicate(host): - return self._child_policy.on_down(host, *args, **kwargs) + return self._child_policy.on_down(host, *args, **kwargs) def on_add(self, host, *args, **kwargs): - if self.predicate(host): - return self._child_policy.on_add(host, *args, **kwargs) + return self._child_policy.on_add(host, *args, **kwargs) def on_remove(self, host, *args, **kwargs): - if self.predicate(host): - return self._child_policy.on_remove(host, *args, **kwargs) + return self._child_policy.on_remove(host, *args, **kwargs) @property def predicate(self): @@ -545,10 +541,7 @@ def distance(self, host): return HostDistance.IGNORED def populate(self, cluster, hosts): - self._child_policy.populate( - cluster=cluster, - hosts=[h for h in hosts if self.predicate(h)] - ) + self._child_policy.populate(cluster=cluster, hosts=hosts) def make_query_plan(self, working_keyspace=None, query=None): """ diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py index c39903ec41..a8f6939719 100644 --- a/tests/integration/long/test_failure_types.py +++ b/tests/integration/long/test_failure_types.py @@ -17,7 +17,7 @@ from cassandra import (ConsistencyLevel, OperationTimedOut, ReadTimeout, WriteTimeout, ReadFailure, WriteFailure, FunctionFailure, ProtocolVersion) from cassandra.cluster import Cluster, NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT -from cassandra.policies import WhiteListRoundRobinPolicy +from cassandra.policies import HostFilterPolicy, RoundRobinPolicy from cassandra.concurrent import execute_concurrent_with_args from cassandra.query import SimpleStatement from tests.integration import use_singledc, PROTOCOL_VERSION, get_cluster, setup_keyspace, remove_cluster, get_node @@ -327,7 +327,9 @@ def setUp(self): # self.node1, self.node2, self.node3 = get_cluster().nodes.values() node1 = ExecutionProfile( - load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']) + load_balancing_policy=HostFilterPolicy( + RoundRobinPolicy(), lambda host: host.address == "127.0.0.1" + ) ) self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, execution_profiles={EXEC_PROFILE_DEFAULT: node1}) self.session = self.cluster.connect(wait_for_all_pools=True) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 03ed4769b2..7acae6a133 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -29,7 +29,7 @@ from cassandra.concurrent import execute_concurrent from cassandra.policies import (RoundRobinPolicy, ExponentialReconnectionPolicy, RetryPolicy, SimpleConvictionPolicy, HostDistance, - WhiteListRoundRobinPolicy, AddressTranslator) + AddressTranslator, HostFilterPolicy) from cassandra.pool import Host from cassandra.query import SimpleStatement, TraceUnavailable, tuple_factory @@ -477,7 +477,10 @@ def test_refresh_schema_type(self): def test_refresh_schema_no_wait(self): contact_points = [CASSANDRA_IP] cluster = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=10, - contact_points=contact_points, load_balancing_policy=WhiteListRoundRobinPolicy(contact_points)) + contact_points=contact_points, + load_balancing_policy=HostFilterPolicy( + RoundRobinPolicy(), lambda host: host.address == CASSANDRA_IP + )) session = cluster.connect() schema_ver = session.execute("SELECT schema_version FROM system.local WHERE key='local'")[0][0] @@ -618,7 +621,7 @@ def test_trace_unavailable(self): try: result = future.get_query_trace(-1.0) # In case the result has time to come back before this timeout due to a race condition - check_trace(result) + self.check_trace(result) except TraceUnavailable: break else: @@ -630,7 +633,7 @@ def test_trace_unavailable(self): try: result = future.get_query_trace(max_wait=120) # In case the result has been set check the trace - check_trace(result) + self.check_trace(result) except TraceUnavailable: break else: @@ -774,7 +777,11 @@ def test_profile_load_balancing(self): @test_category config_profiles """ query = "select release_version from system.local" - node1 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy([CASSANDRA_IP])) + node1 = ExecutionProfile( + load_balancing_policy=HostFilterPolicy( + RoundRobinPolicy(), lambda host: host.address == CASSANDRA_IP + ) + ) with Cluster(execution_profiles={'node1': node1}) as cluster: session = cluster.connect(wait_for_all_pools=True) @@ -925,8 +932,16 @@ def test_profile_pool_management(self): @test_category config_profiles """ - node1 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1'])) - node2 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.2'])) + node1 = ExecutionProfile( + load_balancing_policy=HostFilterPolicy( + RoundRobinPolicy(), lambda host: host.address == "127.0.0.1" + ) + ) + node2 = ExecutionProfile( + load_balancing_policy=HostFilterPolicy( + RoundRobinPolicy(), lambda host: host.address == "127.0.0.2" + ) + ) with Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: node1, 'node2': node2}) as cluster: session = cluster.connect(wait_for_all_pools=True) pools = session.get_pool_state() @@ -935,7 +950,11 @@ def test_profile_pool_management(self): self.assertEqual(set(h.address for h in pools), set(('127.0.0.1', '127.0.0.2'))) # dynamically update pools on add - node3 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.3'])) + node3 = ExecutionProfile( + load_balancing_policy=HostFilterPolicy( + RoundRobinPolicy(), lambda host: host.address == "127.0.0.3" + ) + ) cluster.add_execution_profile('node3', node3) pools = session.get_pool_state() self.assertEqual(set(h.address for h in pools), set(('127.0.0.1', '127.0.0.2', '127.0.0.3'))) @@ -953,14 +972,22 @@ def test_add_profile_timeout(self): """ max_retry_count = 10 for i in range(max_retry_count): - node1 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1'])) + node1 = ExecutionProfile( + load_balancing_policy=HostFilterPolicy( + RoundRobinPolicy(), lambda host: host.address == "127.0.0.1" + ) + ) with Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: node1}) as cluster: session = cluster.connect(wait_for_all_pools=True) pools = session.get_pool_state() self.assertGreater(len(cluster.metadata.all_hosts()), 2) self.assertEqual(set(h.address for h in pools), set(('127.0.0.1',))) - node2 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.2', '127.0.0.3'])) + node2 = ExecutionProfile( + load_balancing_policy=HostFilterPolicy( + RoundRobinPolicy(), lambda host: host.address in ["127.0.0.2", "127.0.0.3"] + ) + ) start = time.time() try: @@ -1030,7 +1057,9 @@ def test_address_translator_with_mixed_nodes(self): @local class ContextManagementTest(unittest.TestCase): - load_balancing_policy = WhiteListRoundRobinPolicy([CASSANDRA_IP]) + load_balancing_policy = HostFilterPolicy( + RoundRobinPolicy(), lambda host: host.address == CASSANDRA_IP + ) cluster_kwargs = {'execution_profiles': {EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy= load_balancing_policy)}, 'schema_metadata_enabled': False, @@ -1150,7 +1179,6 @@ def test_down_event_with_active_connection(self): @local class DontPrepareOnIgnoredHostsTest(unittest.TestCase): - ignored_addresses = ['127.0.0.3'] ignore_node_3_policy = IgnoredHostPolicy(ignored_addresses) @@ -1189,7 +1217,8 @@ def test_prepare_on_ignored_hosts(self): @local class DuplicateRpcTest(unittest.TestCase): - load_balancing_policy = WhiteListRoundRobinPolicy(['127.0.0.1']) + load_balancing_policy = HostFilterPolicy(RoundRobinPolicy(), + lambda host: host.address == "127.0.0.1") def setUp(self): self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, load_balancing_policy=self.load_balancing_policy) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 4b9f73b5e9..4b0f8cab70 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -29,7 +29,7 @@ from cassandra.io.asyncorereactor import AsyncoreConnection from cassandra.protocol import QueryMessage from cassandra.connection import Connection -from cassandra.policies import WhiteListRoundRobinPolicy, HostStateListener +from cassandra.policies import HostFilterPolicy, RoundRobinPolicy, HostStateListener from cassandra.pool import HostConnectionPool from tests import is_monkey_patched, notwindows @@ -50,8 +50,12 @@ class ConnectionTimeoutTest(unittest.TestCase): def setUp(self): self.defaultInFlight = Connection.max_in_flight Connection.max_in_flight = 2 - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, load_balancing_policy= - WhiteListRoundRobinPolicy([CASSANDRA_IP])) + self.cluster = Cluster( + protocol_version=PROTOCOL_VERSION, + load_balancing_policy=HostFilterPolicy( + RoundRobinPolicy(), predicate=lambda host: host.address == CASSANDRA_IP + ) + ) self.session = self.cluster.connect() def tearDown(self): diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 3811325522..90af9ee4a9 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -14,7 +14,7 @@ import time -from cassandra.policies import WhiteListRoundRobinPolicy, FallthroughRetryPolicy +from cassandra.policies import HostFilterPolicy, RoundRobinPolicy, FallthroughRetryPolicy try: import unittest2 as unittest @@ -39,7 +39,9 @@ class MetricsTests(unittest.TestCase): def setUp(self): contact_point = ['127.0.0.2'] self.cluster = Cluster(contact_points=contact_point, metrics_enabled=True, protocol_version=PROTOCOL_VERSION, - load_balancing_policy=WhiteListRoundRobinPolicy(contact_point), + load_balancing_policy=HostFilterPolicy( + RoundRobinPolicy(), lambda host: host.address in contact_point + ), default_retry_policy=FallthroughRetryPolicy()) self.session = self.cluster.connect("test3rf", wait_for_all_pools=True) From 6ebbb24b5d1196f4422c35ed874e2902e97bf4fe Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 12 Jul 2017 21:07:08 -0400 Subject: [PATCH 1202/2005] Added integration tests around querying replicas --- tests/integration/standard/test_cluster.py | 65 +++++++++++++++++++++- 1 file changed, 62 insertions(+), 3 deletions(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 03ed4769b2..aa94549af8 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -29,13 +29,14 @@ from cassandra.concurrent import execute_concurrent from cassandra.policies import (RoundRobinPolicy, ExponentialReconnectionPolicy, RetryPolicy, SimpleConvictionPolicy, HostDistance, - WhiteListRoundRobinPolicy, AddressTranslator) + WhiteListRoundRobinPolicy, AddressTranslator, TokenAwarePolicy, HostFilterPolicy) from cassandra.pool import Host from cassandra.query import SimpleStatement, TraceUnavailable, tuple_factory -from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, CASSANDRA_VERSION, DSE_VERSION, execute_until_pass, execute_with_long_wait_retry, get_node,\ - MockLoggingHandler, get_unsupported_lower_protocol, get_unsupported_upper_protocol, protocolv5, local, CASSANDRA_IP +from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, CASSANDRA_VERSION, \ + execute_until_pass, execute_with_long_wait_retry, get_node, MockLoggingHandler, get_unsupported_lower_protocol, \ + get_unsupported_upper_protocol, protocolv5, local, CASSANDRA_IP from tests.integration.util import assert_quiescent_pool_state import sys @@ -974,6 +975,64 @@ def test_add_profile_timeout(self): else: raise Exception("add_execution_profile didn't timeout after {0} retries".format(max_retry_count)) + def test_replicas_are_queried(self): + """ + Test that replicas are queried first for TokenAwarePolicy. A table with RF 1 + is created. All the queries should go to that replica when TokenAwarePolicy + is used. + Then using HostFilterPolicy the replica is excluded from the considered hosts. + By checking the trace we verify that there are no more replicas. + + @since 3.5 + @jira_ticket PYTHON-653 + @expected_result the replicas are queried for HostFilterPolicy + + @test_category metadata + """ + queried_hosts = set() + with Cluster(protocol_version=PROTOCOL_VERSION, + load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy())) as cluster: + session = cluster.connect() + session.execute(''' + CREATE TABLE test1rf.table_with_big_key ( + k1 int, + k2 int, + k3 int, + k4 int, + PRIMARY KEY((k1, k2, k3), k4))''') + prepared = session.prepare("""SELECT * from test1rf.table_with_big_key + WHERE k1 = ? AND k2 = ? AND k3 = ? AND k4 = ?""") + for i in range(10): + result = session.execute(prepared, (i, i, i, i), trace=True) + queried_hosts = self._assert_replica_queried(result.get_query_trace(), only_replicas=True) + last_i = i + + only_replica = queried_hosts.pop() + available_hosts = [host for host in ["127.0.0.1", "127.0.0.2", "127.0.0.3"] if host != only_replica] + with Cluster(contact_points=available_hosts, + protocol_version=PROTOCOL_VERSION, + load_balancing_policy=HostFilterPolicy(RoundRobinPolicy(), + predicate=lambda host: host.address != only_replica)) as cluster: + + session = cluster.connect() + prepared = session.prepare("""SELECT * from test1rf.table_with_big_key + WHERE k1 = ? AND k2 = ? AND k3 = ? AND k4 = ?""") + for _ in range(10): + result = session.execute(prepared, (last_i, last_i, last_i, last_i), trace=True) + self._assert_replica_queried(result.get_query_trace(), only_replicas=False) + + session.execute('''DROP TABLE test1rf.table_with_big_key''') + + def _assert_replica_queried(self, trace, only_replicas=True): + queried_hosts = set() + for row in trace.events: + queried_hosts.add(row.source) + if only_replicas: + self.assertEqual(len(queried_hosts), 1, "The hosts queried where {}".format(queried_hosts)) + else: + self.assertGreater(len(queried_hosts), 1, "The host queried was {}".format(queried_hosts)) + return queried_hosts + class LocalHostAdressTranslator(AddressTranslator): From 3a7d39c60cf47c80569b3e8b4cfb3622df191ead Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 18 Jul 2017 16:03:10 -0400 Subject: [PATCH 1203/2005] minor fix in cqlengine docs --- docs/api/cassandra/cqlengine/models.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/api/cassandra/cqlengine/models.rst b/docs/api/cassandra/cqlengine/models.rst index b3695e8ac6..fbcec06e5e 100644 --- a/docs/api/cassandra/cqlengine/models.rst +++ b/docs/api/cassandra/cqlengine/models.rst @@ -171,7 +171,7 @@ Model Sets the ttl values to run instance updates and inserts queries with. - .. method:: using(keyspace=None, connection=None) + .. method:: using(connection=None) Change the context on the fly of the model instance (keyspace, connection) From 6a9b9b616afe1be4200dc76e1235f9f09fb033fd Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 13 Jul 2017 13:28:49 -0400 Subject: [PATCH 1204/2005] Added tests for PYTHON-761 --- .../long/test_loadbalancingpolicies.py | 13 +++- tests/integration/standard/test_policies.py | 51 +++++++++++++++- tests/unit/test_policies.py | 61 ++++++++++++++++++- 3 files changed, 117 insertions(+), 8 deletions(-) diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index 707171e5b9..f6aadd3e54 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -668,6 +668,15 @@ def test_white_list(self): cluster.shutdown() def test_black_list_with_host_filter_policy(self): + """ + Test to validate removing certain hosts from the query plan with + HostFilterPolicy + @since 3.8 + @jira_ticket PYTHON-961 + @expected_result the excluded hosts are ignored + + @test_category policy + """ use_singledc() keyspace = 'test_black_list_with_hfp' ignored_address = (IP_FORMAT % 2) @@ -692,9 +701,9 @@ def test_black_list_with_host_filter_policy(self): self._insert(session, keyspace) self._query(session, keyspace) - self.coordinator_stats.assert_query_count_equals(self, 1, 6) + self.coordinator_stats.assert_query_count_equals(self, 1, 8) self.coordinator_stats.assert_query_count_equals(self, 2, 0) - self.coordinator_stats.assert_query_count_equals(self, 3, 6) + self.coordinator_stats.assert_query_count_equals(self, 3, 4) # policy should not allow reconnecting to ignored host force_stop(2) diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index 9a4b8ae534..0eba146c15 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -20,12 +20,14 @@ import unittest # noqa from cassandra import OperationTimedOut -from cassandra.cluster import ExecutionProfile +from cassandra.cluster import ExecutionProfile, Cluster from cassandra.query import SimpleStatement -from cassandra.policies import ConstantSpeculativeExecutionPolicy, RoundRobinPolicy +from cassandra.policies import ConstantSpeculativeExecutionPolicy, HostFilterPolicy, RoundRobinPolicy, \ + SimpleConvictionPolicy from cassandra.connection import Connection +from cassandra.pool import Host -from tests.integration import BasicSharedKeyspaceUnitTestCase, greaterthancass21 +from tests.integration import BasicSharedKeyspaceUnitTestCase, greaterthancass21, PROTOCOL_VERSION from tests import notwindows from mock import patch @@ -47,6 +49,49 @@ def make_query_plan(self, working_keyspace=None, query=None): return hosts +class HostFilterPolicyTests(unittest.TestCase): + + def test_predicate_changes(self): + """ + Test to validate hostfilter reacts correctly when the predicate return + a different subset of the hosts + HostFilterPolicy + @since 3.8 + @jira_ticket PYTHON-961 + @expected_result the excluded hosts are ignored + + @test_category policy + """ + external_event = True + contact_point = "127.0.0.1" + + single_host = {Host(contact_point, SimpleConvictionPolicy)} + all_hosts = {Host("127.0.0.{}".format(i), SimpleConvictionPolicy) for i in (1, 2, 3)} + + predicate = lambda host: host.address == contact_point if external_event else True + cluster = Cluster((contact_point,), load_balancing_policy=HostFilterPolicy(RoundRobinPolicy(), + predicate=predicate), + protocol_version=PROTOCOL_VERSION, topology_event_refresh_window=0, + status_event_refresh_window=0) + session = cluster.connect(wait_for_all_pools=True) + + queried_hosts = set() + for _ in range(10): + response = session.execute("SELECT * from system.local") + queried_hosts.update(response.response_future.attempted_hosts) + + self.assertEqual(queried_hosts, single_host) + + external_event = False + session.update_created_pools() + + queried_hosts = set() + for _ in range(10): + response = session.execute("SELECT * from system.local") + queried_hosts.update(response.response_future.attempted_hosts) + self.assertEqual(queried_hosts, all_hosts) + + # This doesn't work well with Windows clock granularity @notwindows class SpecExecTest(BasicSharedKeyspaceUnitTestCase): diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 9f3ae0c9c1..ffbd774e73 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -1402,13 +1402,13 @@ def test_populate_deferred_to_child(self): hosts=hosts ) - def test_child_not_populated_with_filtered_hosts(self): + def test_child_is_populated_with_filtered_hosts(self): hfp = HostFilterPolicy( child_policy=Mock(name='child_policy'), - predicate=lambda host: 'acceptme' in host + predicate=lambda host: False ) mock_cluster, hosts = (Mock(name='cluster'), - ['acceptme0', 'ignoreme0', 'ignoreme1', 'acceptme1']) + ['acceptme0', 'acceptme1']) hfp.populate(mock_cluster, hosts) hfp._child_policy.populate.assert_called_once() self.assertEqual( @@ -1439,3 +1439,58 @@ def test_query_plan_deferred_to_child(self): query=query ) self.assertEqual(qp, hfp._child_policy.make_query_plan.return_value) + + def test_wrap_token_aware(self): + cluster = Mock(spec=Cluster) + hosts = [Host("127.0.0.{}".format(i), SimpleConvictionPolicy) for i in range(1, 6)] + for host in hosts: + host.set_up() + + def get_replicas(keyspace, packed_key): + return hosts[:2] + + cluster.metadata.get_replicas.side_effect = get_replicas + + child_policy = TokenAwarePolicy(RoundRobinPolicy()) + + hfp = HostFilterPolicy( + child_policy=child_policy, + predicate=lambda host: host.address != "127.0.0.1" and host.address != "127.0.0.4" + ) + hfp.populate(cluster, hosts) + + # We don't allow randomness for ordering the replicas in RoundRobin + hfp._child_policy._child_policy._position = 0 + + + mocked_query = Mock() + query_plan = hfp.make_query_plan("keyspace", mocked_query) + # First the not filtered replica, and then the rest of the allowed hosts ordered + query_plan = list(query_plan) + self.assertEqual(query_plan[0], Host("127.0.0.2", SimpleConvictionPolicy)) + self.assertEqual(set(query_plan[1:]),{Host("127.0.0.3", SimpleConvictionPolicy), + Host("127.0.0.5", SimpleConvictionPolicy)}) + + def test_create_whitelist(self): + cluster = Mock(spec=Cluster) + hosts = [Host("127.0.0.{}".format(i), SimpleConvictionPolicy) for i in range(1, 6)] + for host in hosts: + host.set_up() + + child_policy = RoundRobinPolicy() + + hfp = HostFilterPolicy( + child_policy=child_policy, + predicate=lambda host: host.address == "127.0.0.1" or host.address == "127.0.0.4" + ) + hfp.populate(cluster, hosts) + + # We don't allow randomness for ordering the replicas in RoundRobin + hfp._child_policy._position = 0 + + mocked_query = Mock() + query_plan = hfp.make_query_plan("keyspace", mocked_query) + # Only the filtered replicas should be allowed + self.assertEqual(set(query_plan), {Host("127.0.0.1", SimpleConvictionPolicy), + Host("127.0.0.4", SimpleConvictionPolicy)}) + From f42132ad455706c41f69fbd84725876e8a266e4e Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 19 Jul 2017 13:59:58 -0400 Subject: [PATCH 1205/2005] Fix for test_black_list_with_host_filter_policy since no gurantees are made in the order of hosts for RoundRobin --- tests/integration/long/test_loadbalancingpolicies.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index f6aadd3e54..c92bf6402a 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -701,9 +701,15 @@ def test_black_list_with_host_filter_policy(self): self._insert(session, keyspace) self._query(session, keyspace) - self.coordinator_stats.assert_query_count_equals(self, 1, 8) + # RoundRobin doesn't provide a gurantee on the order of the hosts + # so we will have that for 127.0.0.1 and 127.0.0.3 the count for one + # will be 4 and for the other 8 + first_node_count = self.coordinator_stats.get_query_count(1) + third_node_count = self.coordinator_stats.get_query_count(3) + self.assertEqual(first_node_count + third_node_count, 12) + self.assertTrue(first_node_count == 8 or first_node_count == 4) + self.coordinator_stats.assert_query_count_equals(self, 2, 0) - self.coordinator_stats.assert_query_count_equals(self, 3, 4) # policy should not allow reconnecting to ignored host force_stop(2) From fad8764b365e76f2c2955500f2956c4396eec9b4 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 19 Jul 2017 14:28:56 -0400 Subject: [PATCH 1206/2005] Fix hfp docs (#802) * fix HFP docs to account for new behavior * make comment stop rendering --- cassandra/policies.py | 17 +++++++++++++---- docs/api/cassandra/policies.rst | 2 +- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/cassandra/policies.py b/cassandra/policies.py index 25565a2c6e..466a4cdcbe 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -485,6 +485,10 @@ def address_is_ignored(host): load_balancing_policy=blacklist_filter_policy, ) + See the note in the :meth:`.make_query_plan` documentation for a caveat on + how wrapping ordering polices (e.g. :class:`.RoundRobinPolicy`) may break + desirable properties of the wrapped policy. + Please note that whitelist and blacklist policies are not recommended for general, day-to-day use. You probably want something like :class:`.DCAwareRoundRobinPolicy`, which prefers a local DC but has @@ -546,10 +550,15 @@ def populate(self, cluster, hosts): def make_query_plan(self, working_keyspace=None, query=None): """ Defers to the child policy's - :meth:`.LoadBalancingPolicy.make_query_plan`. Since host changes (up, - down, addition, and removal) have not been propagated to the child - policy, the child policy will only ever return policies for which - :meth:`.predicate(host)` was truthy when that change occurred. + :meth:`.LoadBalancingPolicy.make_query_plan` and filters the results. + + Note that this filtering may break desirable properties of the wrapped + policy in some cases. For instance, imagine if you configure this + policy to filter out ``host2``, and to wrap a round-robin policy that + rotates through three hosts in the order ``host1, host2, host3``, + ``host2, host3, host1``, ``host3, host1, host2``, repeating. This + policy will yield ``host1, host3``, ``host3, host1``, ``host3, host1``, + disproportionately favoring ``host3``. """ child_qp = self._child_policy.make_query_plan( working_keyspace=working_keyspace, query=query diff --git a/docs/api/cassandra/policies.rst b/docs/api/cassandra/policies.rst index 0cb16ed1de..b662755caf 100644 --- a/docs/api/cassandra/policies.rst +++ b/docs/api/cassandra/policies.rst @@ -26,7 +26,7 @@ Load Balancing .. autoclass:: HostFilterPolicy - # we document these methods manually so we can specify a param to predicate + .. we document these methods manually so we can specify a param to predicate .. automethod:: predicate(host) .. automethod:: distance From c2814ddd64acfcb12a44635e91e9cbe58d7cf9b4 Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 20 Jul 2017 16:16:37 -0400 Subject: [PATCH 1207/2005] Fixed HFP unit tests --- tests/unit/test_policies.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index ffbd774e73..b82af1db70 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -1311,19 +1311,15 @@ def setUp(self): def _check_host_triggered_method(self, policy, name): arg, kwarg = Mock(name='arg'), Mock(name='kwarg') - expect_deferral = policy is self.passthrough_hfp method, child_policy_method = (getattr(policy, name), getattr(policy._child_policy, name)) result = method(arg, kw=kwarg) - if expect_deferral: - # method calls the child policy's method... - child_policy_method.assert_called_once_with(arg, kw=kwarg) - # and returns its return value - self.assertIs(result, child_policy_method.return_value) - else: - child_policy_method.assert_not_called() + # method calls the child policy's method... + child_policy_method.assert_called_once_with(arg, kw=kwarg) + # and returns its return value + self.assertIs(result, child_policy_method.return_value) def test_defer_on_up_to_child_policy(self): self._check_host_triggered_method(self.passthrough_hfp, 'on_up') From d1da2b5685e82abe10d8a0e3e6b69f867a6aef34 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 24 Jul 2017 15:02:15 -0400 Subject: [PATCH 1208/2005] CHANGELOG update for release --- CHANGELOG.rst | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index c62daf3bbb..207336d1ee 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,5 +1,7 @@ -3.11 -==== +3.11.0 +====== +July 24, 2017 + Features -------- @@ -22,10 +24,10 @@ Other ----- * Bump Cython dependency version to 0.25.2 (PYTHON-754) * Fix DeprecationWarning when using lz4 (PYTHON-769) - -Other ------ * Deprecate WhiteListRoundRobinPolicy (PYTHON-759) +* Improve upgrade guide for materializing pages (PYTHON-464) +* Documentation for time/date specifies timestamp inupt as microseconds (PYTHON-717) +* Point to DSA Slack, not IRC, in docs index 3.10.0 ====== From 48accb56e5946396196bdaf71bfc1299c41562fd Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 24 Jul 2017 15:59:03 -0400 Subject: [PATCH 1209/2005] version 3.11.0 --- cassandra/__init__.py | 2 +- docs.yaml | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index a96624def7..d7f3aeca44 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 10, 0, 'post0') +__version_info__ = (3, 11, 0) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs.yaml b/docs.yaml index a91384667b..f2e3768a0e 100644 --- a/docs.yaml +++ b/docs.yaml @@ -13,6 +13,8 @@ sections: prebuilds: - python setup.py build_ext --inplace --force --no-murmur3 --no-cython versions: + - name: '3.11' + ref: '3.11.0' - name: '3.10' ref: 64572368 - name: 3.9 From d200658f46cdb84a413abe1616cc2e0823c26827 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 24 Jul 2017 16:52:29 -0400 Subject: [PATCH 1210/2005] update version post-3.11.0-release --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index d7f3aeca44..feabc14192 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 11, 0) +__version_info__ = (3, 11, 0, 'post0') __version__ = '.'.join(map(str, __version_info__)) From 0e65b336067d89e1b96d8c6b950886a649315656 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 25 Jul 2017 10:02:51 -0400 Subject: [PATCH 1211/2005] big-endian non-support statement --- README.rst | 2 ++ docs/index.rst | 2 ++ 2 files changed, 4 insertions(+) diff --git a/README.rst b/README.rst index c586a76f56..f30a916854 100644 --- a/README.rst +++ b/README.rst @@ -10,6 +10,8 @@ The driver supports Python 2.7, 3.3, 3.4, 3.5, and 3.6. If you require compatibility with DataStax Enterprise, use the `DataStax Enterprise Python Driver `_. +**Note:** DataStax products do not support big-endian systems. + Feedback Requested ------------------ **Help us focus our efforts!** Provide your input on the `Platform and Runtime Survey `_ (we kept it short). diff --git a/docs/index.rst b/docs/index.rst index a9f3cb3825..3c9b3adb69 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -10,6 +10,8 @@ This driver is open source under the `Apache v2 License `_. The source code for this driver can be found on `GitHub `_. +**Note:** DataStax products do not support big-endian systems. + Contents -------- :doc:`installation` From 9a67df40172d17428d11d31aa3b6526bc0a3bffd Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 26 Jul 2017 14:08:20 -0400 Subject: [PATCH 1212/2005] Fixed some failing tests in Jenkins --- tests/integration/standard/test_cluster.py | 4 ++-- tests/integration/standard/test_policies.py | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index ca44f11e55..65387fb60b 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -1020,7 +1020,7 @@ def test_replicas_are_queried(self): queried_hosts = set() with Cluster(protocol_version=PROTOCOL_VERSION, load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy())) as cluster: - session = cluster.connect() + session = cluster.connect(wait_for_all_pools=True) session.execute(''' CREATE TABLE test1rf.table_with_big_key ( k1 int, @@ -1042,7 +1042,7 @@ def test_replicas_are_queried(self): load_balancing_policy=HostFilterPolicy(RoundRobinPolicy(), predicate=lambda host: host.address != only_replica)) as cluster: - session = cluster.connect() + session = cluster.connect(wait_for_all_pools=True) prepared = session.prepare("""SELECT * from test1rf.table_with_big_key WHERE k1 = ? AND k2 = ? AND k3 = ? AND k4 = ?""") for _ in range(10): diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index 0eba146c15..46eebdcf75 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -31,6 +31,7 @@ from tests import notwindows from mock import patch +from concurrent.futures import wait as wait_futures def setup_module(): use_singledc() @@ -83,7 +84,8 @@ def test_predicate_changes(self): self.assertEqual(queried_hosts, single_host) external_event = False - session.update_created_pools() + futures = session.update_created_pools() + wait_futures(futures, timeout=cluster.connect_timeout) queried_hosts = set() for _ in range(10): From 3e125bf5d1df28db3466c7201f7dd9bcba03cb8f Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 4 Aug 2016 16:40:52 -0500 Subject: [PATCH 1213/2005] Prototype scassandra client, with metadata initialization --- tests/integration/scassandra/__init__.py | 13 +++ tests/integration/scassandra/client.py | 126 +++++++++++++++++++++++ 2 files changed, 139 insertions(+) create mode 100644 tests/integration/scassandra/__init__.py create mode 100644 tests/integration/scassandra/client.py diff --git a/tests/integration/scassandra/__init__.py b/tests/integration/scassandra/__init__.py new file mode 100644 index 0000000000..cdf00881fc --- /dev/null +++ b/tests/integration/scassandra/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2013-2016 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License diff --git a/tests/integration/scassandra/client.py b/tests/integration/scassandra/client.py new file mode 100644 index 0000000000..41a300582e --- /dev/null +++ b/tests/integration/scassandra/client.py @@ -0,0 +1,126 @@ + +# Copyright 2013-2016 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License + +import json +import requests + + +class ScassandraClient(object): + + def __init__(self, admin_addr="127.0.0.1:8043"): + self.admin_addr = admin_addr + + def prime_metadata(self): + + self.submit_request(self.retrieve_system_peers_query()) + self.submit_request(self.retrieve_schema_column_family()) + self.submit_request(self.retrieve_schema_columns()) + self.submit_request(self.retrieve_system_local_query()) + self.submit_request(self.retrieve_schema_keyspaces()) + self.submit_request(PrimeQuery("SELECT * FROM system.schema_usertypes", result="invalid")) + self.submit_request(PrimeQuery("SELECT * FROM system.schema_aggregates", result="invalid")) + self.submit_request(PrimeQuery("SELECT * FROM system.schema_functions", result="invalid")) + self.submit_request(PrimeQuery("SELECT * FROM system.schema_triggers", result="invalid")) + + def submit_request(self, query): + result = requests.post("http://{0}/{1}".format(self.admin_addr, query.type), json=query.fetch_json()) + print(result) + return result + + def retrieve_system_local_query(self): + system_local_row = {} + system_local_row["cluster_name"] = "custom cluster name" + system_local_row["partitioner"] = "org.apache.cassandra.dht.Murmur3Partitioner" + system_local_row["data_center"] = "dc1" + system_local_row["rack"] = "rc1" + system_local_row["release_version"] = "2.0.1" + tokens = ["1743244960790844724"] + system_local_row["tokens"] = tokens + column_types = {"tokens": "set"} + system_local = PrimeQuery("SELECT * FROM system.local WHERE key='local'", rows=[system_local_row], column_types=column_types) + return system_local + + def retrieve_system_peers_query(self): + peer_1_row = {} + peer_1_row["peer"] = "127.0.0.1" + peer_1_row["data_center"] = "datacenter1" + peer_1_row["host_id"] = "8db34a1c-bbb5-4f6e-b31e-e15d75b05620" + peer_1_row["preferred_ip"] = "None" + peer_1_row["rack"] = "rack1" + peer_1_row["release_version"] = "2.0.1" + peer_1_row["rpc_address"] = "127.0.0.1" + peer_1_row["schema_version"] = "7f1c0a6e-ea18-343e-b344-a7cb80640dca" + tokens = ["1743244960790844724"] + peer_1_row["tokens"] = tokens + column_types = {"tokens": "set"} + peers = PrimeQuery("SELECT * FROM system.peers", rows=[peer_1_row], column_types=column_types) + return peers + + def retrieve_schema_keyspaces(self): + schema_keyspaces_rows = {} + schema_keyspaces_rows["keyspace_name"] = "system" + schema_keyspaces_rows["durable_writes"] = "True" + schema_keyspaces_rows["strategy_class"] = "org.apache.cassandra.locator.LocalStrategy" + schema_keyspaces_rows["strategy_options"] = "{}" + sks = PrimeQuery("SELECT * FROM system.schema_keyspaces", rows=[schema_keyspaces_rows]) + return sks + + def retrieve_schema_column_family(self): + scf_1_row = {} + scf_1_row["comment"] = "ColumnFamily definitions" + scf_1_row["keyspace_name"] = "system" + scf = PrimeQuery("SELECT * FROM system.schema_columnfamilies", rows=[scf_1_row]) + return scf + + def retrieve_schema_columns(self): + schema_columns_row_1 = {} + schema_columns_row_1["index_options"] = "null" + schema_columns_row_1["index_name"] = "null" + schema_columns_row_1["keyspace_name"] = "null" + schema_columns_row_1["index_type"] = "validator" + schema_columns_row_1["validator"] = "org.apache.cassandra.db.marshal.UTF8Type" + schema_columns_row_1["columnfamily_name"] = "schema_columnfamilies" + schema_columns_row_1["component_index"] = "1" + schema_columns_row_1["type"] = "regular" + schema_columns_row_1["column_name"] = "compaction_strategy_class" + sc = PrimeQuery("SELECT * FROM system.schema_columns", rows=[schema_columns_row_1]) + return sc + + +class PrimeQuery(object): + + def __init__(self, expected_query, result="success", rows=None, column_types=None): + self.expected_query = expected_query + self.rows = rows + self.result = result + self.column_types = column_types + self.type = "prime-query-single" + + def fetch_json(self): + json_dict = {} + then = {} + when = {} + when['query'] = self.expected_query + then['result'] = self.result + if self.rows: + then['rows'] = self.rows + if self.column_types: + then['column_types'] = self.column_types + json_dict['when'] = when + json_dict['then'] = then + return json_dict + + + From d33f2c084bb047c89151444ad2a45eff1a0d04ec Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 25 Aug 2016 15:06:51 -0500 Subject: [PATCH 1214/2005] Added simple Server object to start and stop scassandra --- tests/integration/scassandra/Server.py | 29 ++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 tests/integration/scassandra/Server.py diff --git a/tests/integration/scassandra/Server.py b/tests/integration/scassandra/Server.py new file mode 100644 index 0000000000..b2ff4e28c2 --- /dev/null +++ b/tests/integration/scassandra/Server.py @@ -0,0 +1,29 @@ +# Copyright 2013-2016 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License + +import subprocess + + +class ScassandraServer(object): + + def __init__(self, jar_path, binary_port=8042, admin_port=8043): + self.binary_port = binary_port + self.admin_port = admin_port + self.jar_path = jar_path + + def start(self): + self.proc = subprocess.Popen(['java', '-jar', '-Dscassandra.binary.port={0}'.format(self.binary_port), '-Dscassandra.admin.port={0}'.format(self.admin_port), '-Dscassandra.log.level=INFO', self.jar_path], shell=False) + + def stop(self): + self.proc.terminate() From da017f15f7e55d8a6f6dcb4122b95aa3db587cfb Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 14 Sep 2016 16:25:04 -0500 Subject: [PATCH 1215/2005] renaming server.py --- cassandra/io/asyncorereactor.py | 1 - tests/integration/scassandra/client.py | 41 ++- .../scassandra/{Server.py => server.py} | 6 +- tests/integration/simulacron/__init__.py | 13 + tests/integration/simulacron/utils.py | 268 ++++++++++++++++++ tests/integration/standard/test_policies.py | 46 +-- 6 files changed, 347 insertions(+), 28 deletions(-) rename tests/integration/scassandra/{Server.py => server.py} (66%) create mode 100644 tests/integration/simulacron/__init__.py create mode 100644 tests/integration/simulacron/utils.py diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 04e9684ec9..6e3aafba3b 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -47,7 +47,6 @@ def _cleanup(loop_weakref): loop = loop_weakref() except ReferenceError: return - loop._cleanup() diff --git a/tests/integration/scassandra/client.py b/tests/integration/scassandra/client.py index 41a300582e..3f311f62f1 100644 --- a/tests/integration/scassandra/client.py +++ b/tests/integration/scassandra/client.py @@ -19,7 +19,7 @@ class ScassandraClient(object): - def __init__(self, admin_addr="127.0.0.1:8043"): + def __init__(self, admin_addr="127.0.0.1:8187"): self.admin_addr = admin_addr def prime_metadata(self): @@ -35,7 +35,10 @@ def prime_metadata(self): self.submit_request(PrimeQuery("SELECT * FROM system.schema_triggers", result="invalid")) def submit_request(self, query): - result = requests.post("http://{0}/{1}".format(self.admin_addr, query.type), json=query.fetch_json()) + result = requests.post("http://{0}/{1}{2}".format( + self.admin_addr, query.type, query.fetch_url_params()), + json=query.fetch_json()) + print(result) return result @@ -101,12 +104,13 @@ def retrieve_schema_columns(self): class PrimeQuery(object): - def __init__(self, expected_query, result="success", rows=None, column_types=None): + def __init__(self, expected_query, result="success", rows=None, column_types=None, then=None): self.expected_query = expected_query self.rows = rows self.result = result self.column_types = column_types self.type = "prime-query-single" + self.then = then def fetch_json(self): json_dict = {} @@ -114,13 +118,40 @@ def fetch_json(self): when = {} when['query'] = self.expected_query then['result'] = self.result - if self.rows: + if self.rows is not None: then['rows'] = self.rows - if self.column_types: + + if self.column_types is not None: then['column_types'] = self.column_types + + if self.then is not None: + then.update(self.then) + json_dict['when'] = when json_dict['then'] = then return json_dict + def fetch_url_params(self): + return "" + +class ClusterQuery(object): + def __init__(self, cluster_name, cassandra_version, data_centers=1, json_dict=None): + self.cluster_name = cluster_name + self.cassandra_version = cassandra_version + self.data_centers = data_centers + if json_dict is None: + self.json_dict = {} + else: + self.json_dict = json_dict + + self.type = "cluster" + + def fetch_json(self): + return self.json_dict + + def fetch_url_params(self): + return "?cluster_name={0}&cassandra_version={1}&data_centers={2}".\ + format(self.cluster_name, self.cassandra_version, self.data_centers) + diff --git a/tests/integration/scassandra/Server.py b/tests/integration/scassandra/server.py similarity index 66% rename from tests/integration/scassandra/Server.py rename to tests/integration/scassandra/server.py index b2ff4e28c2..ef77cae172 100644 --- a/tests/integration/scassandra/Server.py +++ b/tests/integration/scassandra/server.py @@ -17,13 +17,11 @@ class ScassandraServer(object): - def __init__(self, jar_path, binary_port=8042, admin_port=8043): - self.binary_port = binary_port - self.admin_port = admin_port + def __init__(self, jar_path): self.jar_path = jar_path def start(self): - self.proc = subprocess.Popen(['java', '-jar', '-Dscassandra.binary.port={0}'.format(self.binary_port), '-Dscassandra.admin.port={0}'.format(self.admin_port), '-Dscassandra.log.level=INFO', self.jar_path], shell=False) + self.proc = subprocess.Popen(['java', '-jar', self.jar_path], shell=False) def stop(self): self.proc.terminate() diff --git a/tests/integration/simulacron/__init__.py b/tests/integration/simulacron/__init__.py new file mode 100644 index 0000000000..cdf00881fc --- /dev/null +++ b/tests/integration/simulacron/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2013-2016 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License diff --git a/tests/integration/simulacron/utils.py b/tests/integration/simulacron/utils.py new file mode 100644 index 0000000000..234708f8f3 --- /dev/null +++ b/tests/integration/simulacron/utils.py @@ -0,0 +1,268 @@ + +# Copyright 2013-2016 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License + +import requests +import json + +from tests.integration import CASSANDRA_VERSION +import subprocess +import time + + +class SimulacronServer(object): + + def __init__(self, jar_path): + self.jar_path = jar_path + + def start(self): + self.proc = subprocess.Popen(['java', '-jar', self.jar_path], shell=False) + + def stop(self): + self.proc.terminate() + +jar_path = "/home/jaume/workspace/simulacron/standalone/target/standalone-0.1-SNAPSHOT.jar" +server_simulacron = SimulacronServer(jar_path) + +def start_simulacron(): + server_simulacron.start() + + #Check logs for this + time.sleep(5) + +def stopt_simulacron(): + server_simulacron.stop() + + +class SimulacronClient(object): + + def __init__(self, admin_addr="127.0.0.1:8187"): + self.admin_addr = admin_addr + + def prime_metadata(self): + + self.submit_request(self.retrieve_system_peers_query()) + self.submit_request(self.retrieve_schema_column_family()) + self.submit_request(self.retrieve_schema_columns()) + self.submit_request(self.retrieve_system_local_query()) + self.submit_request(self.retrieve_schema_keyspaces()) + self.submit_request(PrimeQuery("SELECT * FROM system.schema_usertypes", result="invalid")) + self.submit_request(PrimeQuery("SELECT * FROM system.schema_aggregates", result="invalid")) + self.submit_request(PrimeQuery("SELECT * FROM system.schema_functions", result="invalid")) + self.submit_request(PrimeQuery("SELECT * FROM system.schema_triggers", result="invalid")) + + def submit_request(self, query): + result = requests.post("http://{0}/{1}{2}".format( + self.admin_addr, query.path, query.fetch_url_params()), + json=query.fetch_json()) + + return result + + def prime_server_versions(self): + system_local_row = {} + system_local_row["cql_version"] = "3.4.4" + system_local_row["release_version"] = "3.1.1" + "-SNAPSHOT" + column_types = {"cql_version": "ascii", "release_version": "ascii"} + system_local = PrimeQuery("SELECT cql_version, release_version FROM system.local", + rows=[system_local_row], + column_types=column_types, + then={"delay_in_ms": 1}) + + self.submit_request(system_local) + + def retrieve_system_local_query(self): + system_local_row = {} + system_local_row["cluster_name"] = "custom cluster name" + system_local_row["partitioner"] = "org.apache.cassandra.dht.Murmur3Partitioner" + system_local_row["data_center"] = "dc1" + system_local_row["rack"] = "rc1" + system_local_row["release_version"] = "2.0.1" + tokens = ["1743244960790844724"] + system_local_row["tokens"] = tokens + column_types = {"tokens": "set"} + system_local = PrimeQuery("SELECT * FROM system.local WHERE key='local'", rows=[system_local_row], column_types=column_types) + return system_local + + def retrieve_system_peers_query(self): + peer_1_row = {} + peer_1_row["peer"] = "127.0.0.1" + peer_1_row["data_center"] = "datacenter1" + peer_1_row["host_id"] = "8db34a1c-bbb5-4f6e-b31e-e15d75b05620" + peer_1_row["preferred_ip"] = "None" + peer_1_row["rack"] = "rack1" + peer_1_row["release_version"] = "2.0.1" + peer_1_row["rpc_address"] = "127.0.0.1" + peer_1_row["schema_version"] = "7f1c0a6e-ea18-343e-b344-a7cb80640dca" + tokens = ["1743244960790844724"] + peer_1_row["tokens"] = tokens + column_types = {"tokens": "set"} + peers = PrimeQuery("SELECT * FROM system.peers", rows=[peer_1_row], column_types=column_types) + return peers + + def retrieve_schema_keyspaces(self): + schema_keyspaces_rows = {} + schema_keyspaces_rows["keyspace_name"] = "system" + schema_keyspaces_rows["durable_writes"] = "True" + schema_keyspaces_rows["strategy_class"] = "org.apache.cassandra.locator.LocalStrategy" + schema_keyspaces_rows["strategy_options"] = "{}" + sks = PrimeQuery("SELECT * FROM system.schema_keyspaces", rows=[schema_keyspaces_rows]) + return sks + + def retrieve_schema_column_family(self): + scf_1_row = {} + scf_1_row["comment"] = "ColumnFamily definitions" + scf_1_row["keyspace_name"] = "system" + scf = PrimeQuery("SELECT * FROM system.schema_columnfamilies", rows=[scf_1_row]) + return scf + + def retrieve_schema_columns(self): + schema_columns_row_1 = {} + schema_columns_row_1["index_options"] = "null" + schema_columns_row_1["index_name"] = "null" + schema_columns_row_1["keyspace_name"] = "null" + schema_columns_row_1["index_type"] = "validator" + schema_columns_row_1["validator"] = "org.apache.cassandra.db.marshal.UTF8Type" + schema_columns_row_1["columnfamily_name"] = "schema_columnfamilies" + schema_columns_row_1["component_index"] = "1" + schema_columns_row_1["type"] = "regular" + schema_columns_row_1["column_name"] = "compaction_strategy_class" + sc = PrimeQuery("SELECT * FROM system.schema_columns", rows=[schema_columns_row_1]) + return sc + +NO_THEN = object() + +class PrimeQuery(object): + + def __init__(self, expected_query, result="success", rows=None, column_types=None, then=None): + self.expected_query = expected_query + self.rows = rows + self.result = result + self.column_types = column_types + self.path = "prime-query-single" + self.then = then + + def fetch_json(self): + json_dict = {} + then = {} + when = {} + when['query'] = self.expected_query + then['result'] = self.result + if self.rows is not None: + then['rows'] = self.rows + + if self.column_types is not None: + then['column_types'] = self.column_types + + if self.then is not None and self.then is not NO_THEN: + then.update(self.then) + + json_dict['when'] = when + if self.then is not NO_THEN: + json_dict['then'] = then + + return json_dict + + def set_node(self, cluster_id, datacenter_id, node_id): + self.cluster_id = cluster_id + self.datacenter_id = datacenter_id + self.node_id = node_id + + if self.cluster_id is not None: + self.path += "/{}".format(self.cluster_id) + + if self.cluster_id is not None: + self.path += "/{}".format(self.datacenter_id) + + if self.cluster_id is not None: + self.path += "/{}".format(self.node_id) + + def fetch_url_params(self): + return "" + + +class ClusterQuery(object): + def __init__(self, cluster_name, cassandra_version, data_centers=1, json_dict=None): + self.cluster_name = cluster_name + self.cassandra_version = cassandra_version + self.data_centers = data_centers + if json_dict is None: + self.json_dict = {} + else: + self.json_dict = json_dict + + self.path = "cluster" + + def fetch_json(self): + return self.json_dict + + def fetch_url_params(self): + return "?cluster_name={0}&cassandra_version={1}&data_centers={2}".\ + format(self.cluster_name, self.cassandra_version, self.data_centers) + + +def prime_driver_defaults(): + client_simulacron = SimulacronClient() + client_simulacron.prime_server_versions() + + +def prime_singledc(): + cluster_query = ClusterQuery("singledc", CASSANDRA_VERSION, 3) + client_simulacron = SimulacronClient() + response = client_simulacron.submit_request(cluster_query) + return SimulacronCluster(response.text) + + +def start_and_prime_singledc(): + start_simulacron() + prime_singledc() + prime_driver_defaults() + +default_column_types = { + "key": "bigint", + "description": "ascii", + "dates": "map" + } + +default_row = {} +default_row["key"] = 2 +default_row["description"] = "whatever_description" +default_row["dates"] = {"whatever_text" : "2014-08-01"} + +def prime_query(query, rows=[default_row], column_types=default_column_types, then=None): + client_simulacron = SimulacronClient() + query = PrimeQuery(query, rows=rows, column_types=column_types, then=then) + response = client_simulacron.submit_request(query) + return response + + +class SimulacronCluster(object): + def __init__(self, json_text): + self.json = json_text + self.o = json.loads(json_text) + + def get_cluster_id(self): + return self.o["id"] + + def get_cluster_name(self): + return self.o["name"] + + def get_data_centers_ids(self): + return [dc["id"] for dc in self.o["data_centers"]] + + def get_data_centers_names(self): + return [dc["name"] for dc in self.o["data_centers"]] + + def get_node_ids(self, datacenter_id): + datacenter = filter(lambda x: x["id"] == datacenter_id, self.o["data_centers"]).pop() + return [node["id"] for node in datacenter["nodes"]] diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index 46eebdcf75..d35e5e69e0 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -29,12 +29,14 @@ from tests.integration import BasicSharedKeyspaceUnitTestCase, greaterthancass21, PROTOCOL_VERSION from tests import notwindows +from tests.integration.simulacron.utils import start_and_prime_singledc, prime_query, stopt_simulacron, NO_THEN from mock import patch from concurrent.futures import wait as wait_futures def setup_module(): - use_singledc() + #use_singledc() + pass class BadRoundRobinPolicy(RoundRobinPolicy): @@ -100,6 +102,8 @@ class SpecExecTest(BasicSharedKeyspaceUnitTestCase): @classmethod def setUpClass(cls): + start_and_prime_singledc() + cls.common_setup(1) spec_ep_brr = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(), speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 20)) @@ -112,6 +116,10 @@ def setUpClass(cls): cls.cluster.add_execution_profile("spec_ep_rr_lim", spec_ep_rr_lim) cls.cluster.add_execution_profile("spec_ep_brr_lim", spec_ep_brr_lim) + @classmethod + def tearDownClass(cls): + stopt_simulacron() + @greaterthancass21 def test_speculative_execution(self): """ @@ -124,12 +132,11 @@ def test_speculative_execution(self): @test_category metadata """ - self.session.execute("""USE {0}""".format(self.keyspace_name)) - self.session.execute("""create or replace function timeout (arg int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS $$ long start = System.currentTimeMillis(); while(System.currentTimeMillis() - start < arg){} return arg; $$;""") - self.session.execute("""CREATE TABLE d (k int PRIMARY KEY , i int);""") - self.session.execute("""INSERT INTO d (k,i) VALUES (0, 1000);""") - statement = SimpleStatement("""SELECT timeout(i) FROM d WHERE k =0""", is_idempotent=True) - statement_non_idem = SimpleStatement("""SELECT timeout(i) FROM d WHERE k =0""", is_idempotent=False) + query_to_prime = "INSERT INTO test3rf.test (k, v) VALUES (0, 1);" + prime_query(query_to_prime, then={"delay_in_ms": 4000}) + + statement = SimpleStatement(query_to_prime, is_idempotent=True) + statement_non_idem = SimpleStatement(query_to_prime, is_idempotent=False) # This LBP should repeat hosts up to around 30 result = self.session.execute(statement, execution_profile='spec_ep_brr') @@ -159,6 +166,7 @@ def test_speculative_execution(self): with self.assertRaises(OperationTimedOut): result = self.session.execute(statement, execution_profile='spec_ep_rr', timeout=.5) + """ # PYTHON-736 Test speculation policy works with a prepared statement statement = self.session.prepare("SELECT timeout(i) FROM d WHERE k = ?") # non-idempotent @@ -168,8 +176,9 @@ def test_speculative_execution(self): statement.is_idempotent = True result = self.session.execute(statement, (0,), execution_profile='spec_ep_brr') self.assertLess(1, len(result.response_future.attempted_hosts)) + """ + - #TODO redo this tests with Scassandra def test_speculative_and_timeout(self): """ Test to ensure the timeout is honored when using speculative execution @@ -182,16 +191,17 @@ def test_speculative_and_timeout(self): """ # We mock this so no messages are sent, otherwise a reponse might arrive # and we would not know how many hosts we queried - with patch.object(Connection, "send_msg", return_value = 100) as mocked_send_msg: + prime_query("INSERT INTO test3rf.test (k, v) VALUES (0, 1);", then=NO_THEN) - statement = SimpleStatement("INSERT INTO test3rf.test (k, v) VALUES (0, 1);", is_idempotent=True) + statement = SimpleStatement("INSERT INTO test3rf.test (k, v) VALUES (0, 1);", is_idempotent=True) - # An OperationTimedOut is placed here in response_future, - # that's why we can't call session.execute,which would raise it, but - # we have to directly wait for the event - response_future = self.session.execute_async(statement, execution_profile='spec_ep_brr_lim', timeout=2.2) - response_future._event.wait(4) - self.assertIsInstance(response_future._final_exception, OperationTimedOut) + # An OperationTimedOut is placed here in response_future, + # that's why we can't call session.execute,which would raise it, but + # we have to directly wait for the event + response_future = self.session.execute_async(statement, execution_profile='spec_ep_brr_lim', + timeout=2.2) + response_future._event.wait(4) + self.assertIsInstance(response_future._final_exception, OperationTimedOut) - # This is because 2.2 / 0.4 + 1 = 6 - self.assertEqual(len(response_future.attempted_hosts), 6) + # This is because 2.2 / 0.4 + 1 = 6 + self.assertEqual(len(response_future.attempted_hosts), 6) From 7f2a4637fb2f2b127d597ae198dd7a829460810d Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 13 Jun 2017 10:40:04 -0400 Subject: [PATCH 1216/2005] Added simulacron to test harness, refactores some tests using it --- build.yaml | 19 + tests/integration/__init__.py | 3 +- tests/integration/scassandra/__init__.py | 13 - tests/integration/scassandra/client.py | 157 --------- tests/integration/scassandra/server.py | 27 -- tests/integration/simulacron/__init__.py | 5 + .../integration/simulacron/test_connection.py | 114 ++++++ tests/integration/simulacron/test_policies.py | 163 +++++++++ tests/integration/simulacron/utils.py | 329 +++++++++++------- tests/integration/standard/test_connection.py | 11 +- tests/integration/standard/test_policies.py | 140 +------- 11 files changed, 512 insertions(+), 469 deletions(-) delete mode 100644 tests/integration/scassandra/__init__.py delete mode 100644 tests/integration/scassandra/client.py delete mode 100644 tests/integration/scassandra/server.py create mode 100644 tests/integration/simulacron/test_connection.py create mode 100644 tests/integration/simulacron/test_policies.py diff --git a/build.yaml b/build.yaml index f1d3dd74d3..780cb4f8ed 100644 --- a/build.yaml +++ b/build.yaml @@ -103,6 +103,21 @@ build: python setup.py build_ext --inplace --no-cython fi + echo "Installing simulacron" + pushd ~ + git clone git@github.com:datastax/simulacron.git + cd simulacron + git clone git@github.com:datastax/native-protocol.git + cd native-protocol + mvn clean install + cd .. + mvn clean install -DskipTests=true + ls standalone/target + SIMULACRON_JAR=`find \`pwd\` -name "simulacron-standalone-*.jar"` + echo "SIMULACRON_JAR: $SIMULACRON_JAR" + + popd + # Run the unit tests, this is not done in travis because # it takes too much time for the whole matrix to build with cython if [[ $CYTHON == 'CYTHON' ]]; then @@ -112,6 +127,10 @@ build: fi + echo "Running with event loop manager: $EVENT_LOOP_MANAGER" + echo "==========RUNNING SIMULACRON TESTS==========" + SIMULACRON_JAR=$SIMULACRON_JAR EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_DIR=$CCM_INSTALL_DIR DSE_VERSION=$CCM_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true + echo "Running with event loop manager: $EVENT_LOOP_MANAGER" echo "==========RUNNING CQLENGINE TESTS==========" EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 7667e0e2c7..857c8c0d33 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -114,7 +114,7 @@ def _tuple_version(version_string): USE_CASS_EXTERNAL = bool(os.getenv('USE_CASS_EXTERNAL', False)) KEEP_TEST_CLUSTER = bool(os.getenv('KEEP_TEST_CLUSTER', False)) - +SIMULACRON_JAR = os.getenv('SIMULACRON_JAR', None) # If set to to true this will force the Cython tests to run regardless of whether they are installed cython_env = os.getenv('VERIFY_CYTHON', "False") @@ -265,6 +265,7 @@ def get_unsupported_upper_protocol(): dseonly = unittest.skipUnless(DSE_VERSION, "Test is only applicalbe to DSE clusters") pypy = unittest.skipUnless(platform.python_implementation() == "PyPy", "Test is skipped unless it's on PyPy") notpy3 = unittest.skipIf(sys.version_info >= (3, 0), "Test not applicable for Python 3.x runtime") +ifsimulacron = unittest.skipIf(SIMULACRON_JAR is None, "Simulacron jar hasn't been specified") def wait_for_node_socket(node, timeout): diff --git a/tests/integration/scassandra/__init__.py b/tests/integration/scassandra/__init__.py deleted file mode 100644 index cdf00881fc..0000000000 --- a/tests/integration/scassandra/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2013-2016 DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License diff --git a/tests/integration/scassandra/client.py b/tests/integration/scassandra/client.py deleted file mode 100644 index 3f311f62f1..0000000000 --- a/tests/integration/scassandra/client.py +++ /dev/null @@ -1,157 +0,0 @@ - -# Copyright 2013-2016 DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License - -import json -import requests - - -class ScassandraClient(object): - - def __init__(self, admin_addr="127.0.0.1:8187"): - self.admin_addr = admin_addr - - def prime_metadata(self): - - self.submit_request(self.retrieve_system_peers_query()) - self.submit_request(self.retrieve_schema_column_family()) - self.submit_request(self.retrieve_schema_columns()) - self.submit_request(self.retrieve_system_local_query()) - self.submit_request(self.retrieve_schema_keyspaces()) - self.submit_request(PrimeQuery("SELECT * FROM system.schema_usertypes", result="invalid")) - self.submit_request(PrimeQuery("SELECT * FROM system.schema_aggregates", result="invalid")) - self.submit_request(PrimeQuery("SELECT * FROM system.schema_functions", result="invalid")) - self.submit_request(PrimeQuery("SELECT * FROM system.schema_triggers", result="invalid")) - - def submit_request(self, query): - result = requests.post("http://{0}/{1}{2}".format( - self.admin_addr, query.type, query.fetch_url_params()), - json=query.fetch_json()) - - print(result) - return result - - def retrieve_system_local_query(self): - system_local_row = {} - system_local_row["cluster_name"] = "custom cluster name" - system_local_row["partitioner"] = "org.apache.cassandra.dht.Murmur3Partitioner" - system_local_row["data_center"] = "dc1" - system_local_row["rack"] = "rc1" - system_local_row["release_version"] = "2.0.1" - tokens = ["1743244960790844724"] - system_local_row["tokens"] = tokens - column_types = {"tokens": "set"} - system_local = PrimeQuery("SELECT * FROM system.local WHERE key='local'", rows=[system_local_row], column_types=column_types) - return system_local - - def retrieve_system_peers_query(self): - peer_1_row = {} - peer_1_row["peer"] = "127.0.0.1" - peer_1_row["data_center"] = "datacenter1" - peer_1_row["host_id"] = "8db34a1c-bbb5-4f6e-b31e-e15d75b05620" - peer_1_row["preferred_ip"] = "None" - peer_1_row["rack"] = "rack1" - peer_1_row["release_version"] = "2.0.1" - peer_1_row["rpc_address"] = "127.0.0.1" - peer_1_row["schema_version"] = "7f1c0a6e-ea18-343e-b344-a7cb80640dca" - tokens = ["1743244960790844724"] - peer_1_row["tokens"] = tokens - column_types = {"tokens": "set"} - peers = PrimeQuery("SELECT * FROM system.peers", rows=[peer_1_row], column_types=column_types) - return peers - - def retrieve_schema_keyspaces(self): - schema_keyspaces_rows = {} - schema_keyspaces_rows["keyspace_name"] = "system" - schema_keyspaces_rows["durable_writes"] = "True" - schema_keyspaces_rows["strategy_class"] = "org.apache.cassandra.locator.LocalStrategy" - schema_keyspaces_rows["strategy_options"] = "{}" - sks = PrimeQuery("SELECT * FROM system.schema_keyspaces", rows=[schema_keyspaces_rows]) - return sks - - def retrieve_schema_column_family(self): - scf_1_row = {} - scf_1_row["comment"] = "ColumnFamily definitions" - scf_1_row["keyspace_name"] = "system" - scf = PrimeQuery("SELECT * FROM system.schema_columnfamilies", rows=[scf_1_row]) - return scf - - def retrieve_schema_columns(self): - schema_columns_row_1 = {} - schema_columns_row_1["index_options"] = "null" - schema_columns_row_1["index_name"] = "null" - schema_columns_row_1["keyspace_name"] = "null" - schema_columns_row_1["index_type"] = "validator" - schema_columns_row_1["validator"] = "org.apache.cassandra.db.marshal.UTF8Type" - schema_columns_row_1["columnfamily_name"] = "schema_columnfamilies" - schema_columns_row_1["component_index"] = "1" - schema_columns_row_1["type"] = "regular" - schema_columns_row_1["column_name"] = "compaction_strategy_class" - sc = PrimeQuery("SELECT * FROM system.schema_columns", rows=[schema_columns_row_1]) - return sc - - -class PrimeQuery(object): - - def __init__(self, expected_query, result="success", rows=None, column_types=None, then=None): - self.expected_query = expected_query - self.rows = rows - self.result = result - self.column_types = column_types - self.type = "prime-query-single" - self.then = then - - def fetch_json(self): - json_dict = {} - then = {} - when = {} - when['query'] = self.expected_query - then['result'] = self.result - if self.rows is not None: - then['rows'] = self.rows - - if self.column_types is not None: - then['column_types'] = self.column_types - - if self.then is not None: - then.update(self.then) - - json_dict['when'] = when - json_dict['then'] = then - return json_dict - - def fetch_url_params(self): - return "" - -class ClusterQuery(object): - def __init__(self, cluster_name, cassandra_version, data_centers=1, json_dict=None): - self.cluster_name = cluster_name - self.cassandra_version = cassandra_version - self.data_centers = data_centers - if json_dict is None: - self.json_dict = {} - else: - self.json_dict = json_dict - - self.type = "cluster" - - def fetch_json(self): - return self.json_dict - - def fetch_url_params(self): - return "?cluster_name={0}&cassandra_version={1}&data_centers={2}".\ - format(self.cluster_name, self.cassandra_version, self.data_centers) - - - diff --git a/tests/integration/scassandra/server.py b/tests/integration/scassandra/server.py deleted file mode 100644 index ef77cae172..0000000000 --- a/tests/integration/scassandra/server.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2013-2016 DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License - -import subprocess - - -class ScassandraServer(object): - - def __init__(self, jar_path): - self.jar_path = jar_path - - def start(self): - self.proc = subprocess.Popen(['java', '-jar', self.jar_path], shell=False) - - def stop(self): - self.proc.terminate() diff --git a/tests/integration/simulacron/__init__.py b/tests/integration/simulacron/__init__.py index cdf00881fc..b7ec7fb40a 100644 --- a/tests/integration/simulacron/__init__.py +++ b/tests/integration/simulacron/__init__.py @@ -11,3 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License + +from tests.integration.simulacron.utils import stop_simulacron + +def teardown_package(): + stop_simulacron() \ No newline at end of file diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py new file mode 100644 index 0000000000..1feb03b17c --- /dev/null +++ b/tests/integration/simulacron/test_connection.py @@ -0,0 +1,114 @@ +# Copyright 2013-2017 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +import time +import logging + +from cassandra import OperationTimedOut +from cassandra.cluster import Cluster, _Scheduler, EXEC_PROFILE_DEFAULT, ExecutionProfile +from cassandra.policies import RoundRobinPolicy, HostStateListener +from concurrent.futures import ThreadPoolExecutor + +from tests.integration import ifsimulacron, CASSANDRA_VERSION +from tests.integration.simulacron.utils import start_and_prime_cluster_defaults, prime_query, stop_simulacron, \ + prime_request, PrimeOptions, NO_THEN + +import time + +class TrackDownListener(HostStateListener): + hosts_marked_down = [] + + def on_down(self, host): + self.hosts_marked_down.append(host) + +class ThreadTracker(ThreadPoolExecutor): + called_functions = [] + + def submit(self, fn, *args, **kwargs): + self.called_functions.append(fn.__name__) + return super(ThreadTracker, self).submit(fn, *args, **kwargs) + +@ifsimulacron +class ConnectionTest(unittest.TestCase): + + def test_heart_beat_timeout(self): + """ + Test to ensure the hosts are marked as down after a OTO is received. + Also to ensure this happens within the expected timeout + @since 3.10 + @jira_ticket PYTHON-762 + @expected_result all the hosts have been marked as down at some point + + @test_category metadata + """ + number_of_dcs = 3 + nodes_per_dc = 100 + + query_to_prime = "INSERT INTO test3rf.test (k, v) VALUES (0, 1);" + + idle_heartbeat_timeout = 5 + idle_heartbeat_interval = 1 + + start_and_prime_cluster_defaults(number_of_dcs, nodes_per_dc, CASSANDRA_VERSION) + + listener = TrackDownListener() + executor = ThreadTracker(max_workers=16) + + # We need to disable compression since it's not supported in simulacron + cluster = Cluster(compression=False, + idle_heartbeat_interval=idle_heartbeat_interval, + idle_heartbeat_timeout=idle_heartbeat_timeout, + executor_threads=16, + execution_profiles={ + EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=RoundRobinPolicy())}) + + cluster.scheduler.shutdown() + cluster.executor = executor + cluster.scheduler = _Scheduler(executor) + + session = cluster.connect(wait_for_all_pools=True) + cluster.register_listener(listener) + log = logging.getLogger() + log.setLevel('CRITICAL') + + self.addCleanup(cluster.shutdown) + self.addCleanup(stop_simulacron) + self.addCleanup(log.setLevel, "DEBUG") + + prime_query(query_to_prime, then=NO_THEN) + + futures = [] + for _ in range(number_of_dcs * nodes_per_dc): + future = session.execute_async(query_to_prime) + futures.append(future) + + for f in futures: + f._event.wait() + self.assertIsInstance(f._final_exception, OperationTimedOut) + + prime_request(PrimeOptions(then=NO_THEN)) + + # We allow from some extra time for all the hosts to be to on_down + # The callbacks should start happening after idle_heartbeat_timeout + idle_heartbeat_interval + time.sleep((idle_heartbeat_timeout + idle_heartbeat_interval)*2) + + for host in cluster.metadata.all_hosts(): + self.assertIn(host, listener.hosts_marked_down) + + # In this case HostConnection._replace shouldn't be called + self.assertNotIn("_replace", executor.called_functions) diff --git a/tests/integration/simulacron/test_policies.py b/tests/integration/simulacron/test_policies.py new file mode 100644 index 0000000000..6fdffe2852 --- /dev/null +++ b/tests/integration/simulacron/test_policies.py @@ -0,0 +1,163 @@ +# Copyright 2013-2017 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +from cassandra import OperationTimedOut +from cassandra.cluster import Cluster, ExecutionProfile +from cassandra.query import SimpleStatement +from cassandra.policies import ConstantSpeculativeExecutionPolicy, RoundRobinPolicy + +from tests.integration import PROTOCOL_VERSION, greaterthancass21, ifsimulacron, SIMULACRON_JAR +from tests import notwindows +from tests.integration.simulacron.utils import start_and_prime_singledc, prime_query, \ + stop_simulacron, NO_THEN, clear_queries + + +class BadRoundRobinPolicy(RoundRobinPolicy): + + def make_query_plan(self, working_keyspace=None, query=None): + pos = self._position + self._position += 1 + + hosts = [] + for _ in range(10): + hosts.extend(self._live_hosts) + + return hosts + + +# This doesn't work well with Windows clock granularity +@notwindows +@ifsimulacron +class SpecExecTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + if SIMULACRON_JAR is None: + return + + start_and_prime_singledc() + cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False) + cls.session = cls.cluster.connect(wait_for_all_pools=True) + + spec_ep_brr = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(), speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 20)) + spec_ep_rr = ExecutionProfile(speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 20)) + spec_ep_rr_lim = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(), speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 1)) + spec_ep_brr_lim = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(), speculative_execution_policy=ConstantSpeculativeExecutionPolicy(0.4, 10)) + + cls.cluster.add_execution_profile("spec_ep_brr", spec_ep_brr) + cls.cluster.add_execution_profile("spec_ep_rr", spec_ep_rr) + cls.cluster.add_execution_profile("spec_ep_rr_lim", spec_ep_rr_lim) + cls.cluster.add_execution_profile("spec_ep_brr_lim", spec_ep_brr_lim) + + @classmethod + def tearDownClass(cls): + if SIMULACRON_JAR is None: + return + + cls.cluster.shutdown() + stop_simulacron() + + def tearDown(self): + clear_queries() + + @greaterthancass21 + def test_speculative_execution(self): + """ + Test to ensure that speculative execution honors LBP, and that they retry appropriately. + + This test will use various LBP, and ConstantSpeculativeExecutionPolicy settings and ensure the proper number of hosts are queried + @since 3.7.0 + @jira_ticket PYTHON-218 + @expected_result speculative retries should honor max retries, idempotent state of queries, and underlying lbp. + + @test_category metadata + """ + query_to_prime = "INSERT INTO test3rf.test (k, v) VALUES (0, 1);" + prime_query(query_to_prime, then={"delay_in_ms": 4000}) + + statement = SimpleStatement(query_to_prime, is_idempotent=True) + statement_non_idem = SimpleStatement(query_to_prime, is_idempotent=False) + + # This LBP should repeat hosts up to around 30 + result = self.session.execute(statement, execution_profile='spec_ep_brr') + self.assertEqual(21, len(result.response_future.attempted_hosts)) + + # This LBP should keep host list to 3 + result = self.session.execute(statement, execution_profile='spec_ep_rr') + self.assertEqual(3, len(result.response_future.attempted_hosts)) + # Spec_execution policy should limit retries to 1 + result = self.session.execute(statement, execution_profile='spec_ep_rr_lim') + + self.assertEqual(2, len(result.response_future.attempted_hosts)) + + # Spec_execution policy should not be used if the query is not idempotent + result = self.session.execute(statement_non_idem, execution_profile='spec_ep_brr') + self.assertEqual(1, len(result.response_future.attempted_hosts)) + + # Default policy with non_idem query + result = self.session.execute(statement_non_idem) + self.assertEqual(1, len(result.response_future.attempted_hosts)) + + # Should be able to run an idempotent query against default execution policy with no speculative_execution_policy + result = self.session.execute(statement) + self.assertEqual(1, len(result.response_future.attempted_hosts)) + + # Test timeout with spec_ex + with self.assertRaises(OperationTimedOut): + self.session.execute(statement, execution_profile='spec_ep_rr', timeout=.5) + + prepared_query_to_prime = "SELECT * FROM test3rf.test where k = ?" + when = {"params": {"k": "0"}, "param_types": {"k": "ascii"}} + prime_query(prepared_query_to_prime, when=when, then={"delay_in_ms": 4000}) + + # PYTHON-736 Test speculation policy works with a prepared statement + prepared_statement = self.session.prepare(prepared_query_to_prime) + # non-idempotent + result = self.session.execute(prepared_statement, ("0",), execution_profile='spec_ep_brr') + self.assertEqual(1, len(result.response_future.attempted_hosts)) + # idempotent + prepared_statement.is_idempotent = True + result = self.session.execute(prepared_statement, ("0",), execution_profile='spec_ep_brr') + self.assertLess(1, len(result.response_future.attempted_hosts)) + + + + def test_speculative_and_timeout(self): + """ + Test to ensure the timeout is honored when using speculative execution + @since 3.10 + @jira_ticket PYTHON-750 + @expected_result speculative retries be schedule every fixed period, during the maximum + period of the timeout. + + @test_category metadata + """ + prime_query("INSERT INTO test3rf.test (k, v) VALUES (0, 1);", then=NO_THEN) + + statement = SimpleStatement("INSERT INTO test3rf.test (k, v) VALUES (0, 1);", is_idempotent=True) + + # An OperationTimedOut is placed here in response_future, + # that's why we can't call session.execute,which would raise it, but + # we have to directly wait for the event + response_future = self.session.execute_async(statement, execution_profile='spec_ep_brr_lim', + timeout=2.2) + response_future._event.wait(4) + self.assertIsInstance(response_future._final_exception, OperationTimedOut) + + # This is because 2.2 / 0.4 + 1 = 6 + self.assertEqual(len(response_future.attempted_hosts), 6) \ No newline at end of file diff --git a/tests/integration/simulacron/utils.py b/tests/integration/simulacron/utils.py index 234708f8f3..3f0718ac00 100644 --- a/tests/integration/simulacron/utils.py +++ b/tests/integration/simulacron/utils.py @@ -1,5 +1,4 @@ - -# Copyright 2013-2016 DataStax, Inc. +# Copyright 2013-2017 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,149 +12,184 @@ # See the License for the specific language governing permissions and # limitations under the License -import requests +from six.moves.urllib.request import build_opener, Request, HTTPHandler import json - -from tests.integration import CASSANDRA_VERSION +from tests.integration import CASSANDRA_VERSION, SIMULACRON_JAR import subprocess import time +DEFAULT_CLUSTER = "python_simulacron_cluster" + + +class SimulacronCluster(object): + """ + Represents a Cluster object as returned by simulacron + """ + def __init__(self, json_text): + self.json = json_text + self.o = json.loads(json_text) + + def get_cluster_id(self): + return self.o["id"] + + def get_cluster_name(self): + return self.o["name"] + + def get_data_centers_ids(self): + return [dc["id"] for dc in self.o["data_centers"]] + + def get_data_centers_names(self): + return [dc["name"] for dc in self.o["data_centers"]] + + def get_node_ids(self, datacenter_id): + datacenter = list(filter(lambda x: x["id"] == datacenter_id, self.o["data_centers"])).pop() + return [node["id"] for node in datacenter["nodes"]] -class SimulacronServer(object): +class SimulacronServer(object): + """ + Class for starting and stopping the server from within the tests + """ def __init__(self, jar_path): self.jar_path = jar_path + self.running = False + self.proc = None def start(self): - self.proc = subprocess.Popen(['java', '-jar', self.jar_path], shell=False) + self.proc = subprocess.Popen(['java', '-jar', self.jar_path, "--loglevel", "ERROR"], shell=False) + self.running = True def stop(self): - self.proc.terminate() + if self.proc: + self.proc.terminate() + self.running = False + + def is_running(self): + # We could check self.proc.poll here instead + return self.running + + +SERVER_SIMULACRON = SimulacronServer(SIMULACRON_JAR) -jar_path = "/home/jaume/workspace/simulacron/standalone/target/standalone-0.1-SNAPSHOT.jar" -server_simulacron = SimulacronServer(jar_path) def start_simulacron(): - server_simulacron.start() + """ + Starts and waits for simulacron to run + """ + if SERVER_SIMULACRON.is_running(): + SERVER_SIMULACRON.stop() - #Check logs for this + SERVER_SIMULACRON.start() + + #TODO improve this sleep, maybe check the logs like ccm time.sleep(5) -def stopt_simulacron(): - server_simulacron.stop() +def stop_simulacron(): + SERVER_SIMULACRON.stop() class SimulacronClient(object): - def __init__(self, admin_addr="127.0.0.1:8187"): self.admin_addr = admin_addr - def prime_metadata(self): - - self.submit_request(self.retrieve_system_peers_query()) - self.submit_request(self.retrieve_schema_column_family()) - self.submit_request(self.retrieve_schema_columns()) - self.submit_request(self.retrieve_system_local_query()) - self.submit_request(self.retrieve_schema_keyspaces()) - self.submit_request(PrimeQuery("SELECT * FROM system.schema_usertypes", result="invalid")) - self.submit_request(PrimeQuery("SELECT * FROM system.schema_aggregates", result="invalid")) - self.submit_request(PrimeQuery("SELECT * FROM system.schema_functions", result="invalid")) - self.submit_request(PrimeQuery("SELECT * FROM system.schema_triggers", result="invalid")) - def submit_request(self, query): - result = requests.post("http://{0}/{1}{2}".format( - self.admin_addr, query.path, query.fetch_url_params()), - json=query.fetch_json()) + opener = build_opener(HTTPHandler) + data = json.dumps(query.fetch_json()).encode('utf8') + + request = Request("http://{}/{}{}".format( + self.admin_addr, query.path, query.fetch_url_params()), data=data) + request.get_method = lambda: 'POST' + request.add_header("Content-Type", 'application/json') + request.add_header("Content-Length", len(data)) - return result + connection = opener.open(request) + return connection.read().decode('utf-8') def prime_server_versions(self): + """ + This information has to be primed for the test harness to run + """ system_local_row = {} - system_local_row["cql_version"] = "3.4.4" - system_local_row["release_version"] = "3.1.1" + "-SNAPSHOT" + system_local_row["cql_version"] = CASSANDRA_VERSION + system_local_row["release_version"] = CASSANDRA_VERSION + "-SNAPSHOT" column_types = {"cql_version": "ascii", "release_version": "ascii"} system_local = PrimeQuery("SELECT cql_version, release_version FROM system.local", rows=[system_local_row], - column_types=column_types, - then={"delay_in_ms": 1}) + column_types=column_types) self.submit_request(system_local) - def retrieve_system_local_query(self): - system_local_row = {} - system_local_row["cluster_name"] = "custom cluster name" - system_local_row["partitioner"] = "org.apache.cassandra.dht.Murmur3Partitioner" - system_local_row["data_center"] = "dc1" - system_local_row["rack"] = "rc1" - system_local_row["release_version"] = "2.0.1" - tokens = ["1743244960790844724"] - system_local_row["tokens"] = tokens - column_types = {"tokens": "set"} - system_local = PrimeQuery("SELECT * FROM system.local WHERE key='local'", rows=[system_local_row], column_types=column_types) - return system_local - - def retrieve_system_peers_query(self): - peer_1_row = {} - peer_1_row["peer"] = "127.0.0.1" - peer_1_row["data_center"] = "datacenter1" - peer_1_row["host_id"] = "8db34a1c-bbb5-4f6e-b31e-e15d75b05620" - peer_1_row["preferred_ip"] = "None" - peer_1_row["rack"] = "rack1" - peer_1_row["release_version"] = "2.0.1" - peer_1_row["rpc_address"] = "127.0.0.1" - peer_1_row["schema_version"] = "7f1c0a6e-ea18-343e-b344-a7cb80640dca" - tokens = ["1743244960790844724"] - peer_1_row["tokens"] = tokens - column_types = {"tokens": "set"} - peers = PrimeQuery("SELECT * FROM system.peers", rows=[peer_1_row], column_types=column_types) - return peers - - def retrieve_schema_keyspaces(self): - schema_keyspaces_rows = {} - schema_keyspaces_rows["keyspace_name"] = "system" - schema_keyspaces_rows["durable_writes"] = "True" - schema_keyspaces_rows["strategy_class"] = "org.apache.cassandra.locator.LocalStrategy" - schema_keyspaces_rows["strategy_options"] = "{}" - sks = PrimeQuery("SELECT * FROM system.schema_keyspaces", rows=[schema_keyspaces_rows]) - return sks - - def retrieve_schema_column_family(self): - scf_1_row = {} - scf_1_row["comment"] = "ColumnFamily definitions" - scf_1_row["keyspace_name"] = "system" - scf = PrimeQuery("SELECT * FROM system.schema_columnfamilies", rows=[scf_1_row]) - return scf - - def retrieve_schema_columns(self): - schema_columns_row_1 = {} - schema_columns_row_1["index_options"] = "null" - schema_columns_row_1["index_name"] = "null" - schema_columns_row_1["keyspace_name"] = "null" - schema_columns_row_1["index_type"] = "validator" - schema_columns_row_1["validator"] = "org.apache.cassandra.db.marshal.UTF8Type" - schema_columns_row_1["columnfamily_name"] = "schema_columnfamilies" - schema_columns_row_1["component_index"] = "1" - schema_columns_row_1["type"] = "regular" - schema_columns_row_1["column_name"] = "compaction_strategy_class" - sc = PrimeQuery("SELECT * FROM system.schema_columns", rows=[schema_columns_row_1]) - return sc + def clear_all_queries(self, cluster_name=DEFAULT_CLUSTER): + """ + Clear all the primed queries from a particular cluster + :param cluster_name: cluster to clear queries from + """ + opener = build_opener(HTTPHandler) + request = Request("http://{0}/{1}/{2}".format( + self.admin_addr, "prime", cluster_name)) + request.get_method = lambda: 'DELETE' + connection = opener.open(request) + return connection.read() + NO_THEN = object() -class PrimeQuery(object): - def __init__(self, expected_query, result="success", rows=None, column_types=None, then=None): +class SimulacronRequest(object): + def fetch_json(self): + raise NotImplementedError() + + def fetch_url_params(self): + raise NotImplementedError() + + +class PrimeOptions(SimulacronRequest): + """ + Class used for specifying how should simulacron respond to an OptionsMessage + """ + def __init__(self, then=None, cluster_name=DEFAULT_CLUSTER): + self.path = "prime/{}".format(cluster_name) + self.then = then + + def fetch_json(self): + json_dict = {} + then = {} + when = {} + + when['request'] = "options" + + if self.then is not None and self.then is not NO_THEN: + then.update(self.then) + + json_dict['when'] = when + if self.then is not NO_THEN: + json_dict['then'] = then + + return json_dict + + def fetch_url_params(self): + return "" + + +class PrimeQuery(SimulacronRequest): + """ + Class used for specifying how should simulacron respond to particular query + """ + def __init__(self, expected_query, result="success", rows=None, + column_types=None, when=None, then=None, cluster_name=DEFAULT_CLUSTER): self.expected_query = expected_query self.rows = rows self.result = result self.column_types = column_types - self.path = "prime-query-single" + self.path = "prime/{}".format(cluster_name) self.then = then + self.when = when def fetch_json(self): json_dict = {} then = {} when = {} + when['query'] = self.expected_query then['result'] = self.result if self.rows is not None: @@ -167,10 +201,14 @@ def fetch_json(self): if self.then is not None and self.then is not NO_THEN: then.update(self.then) - json_dict['when'] = when if self.then is not NO_THEN: json_dict['then'] = then + if self.when is not None: + when.update(self.when) + + json_dict['when'] = when + return json_dict def set_node(self, cluster_id, datacenter_id, node_id): @@ -191,8 +229,11 @@ def fetch_url_params(self): return "" -class ClusterQuery(object): - def __init__(self, cluster_name, cassandra_version, data_centers=1, json_dict=None): +class ClusterQuery(SimulacronRequest): + """ + Class used for creating a cluster + """ + def __init__(self, cluster_name, cassandra_version, data_centers="3", json_dict=None): self.cluster_name = cluster_name self.cassandra_version = cassandra_version self.data_centers = data_centers @@ -207,62 +248,86 @@ def fetch_json(self): return self.json_dict def fetch_url_params(self): - return "?cluster_name={0}&cassandra_version={1}&data_centers={2}".\ - format(self.cluster_name, self.cassandra_version, self.data_centers) + return "?cassandra_version={0}&data_centers={1}&name={2}".\ + format(self.cassandra_version, self.data_centers, self.cluster_name) def prime_driver_defaults(): + """ + Function to prime the necessary queries so the test harness can run + """ client_simulacron = SimulacronClient() client_simulacron.prime_server_versions() -def prime_singledc(): - cluster_query = ClusterQuery("singledc", CASSANDRA_VERSION, 3) +def prime_cluster(data_centers="3", version=None, cluster_name=DEFAULT_CLUSTER): + """ + Creates a new cluster in the simulacron server + :param cluster_name: name of the cluster + :param data_centers: string describing the datacenter, e.g. 2/3 would be two + datacenters of 2 nodes and three nodes + :param version: C* version + """ + version = version or CASSANDRA_VERSION + cluster_query = ClusterQuery(cluster_name, version, data_centers) client_simulacron = SimulacronClient() response = client_simulacron.submit_request(cluster_query) - return SimulacronCluster(response.text) + return SimulacronCluster(response) -def start_and_prime_singledc(): +def start_and_prime_singledc(cluster_name=DEFAULT_CLUSTER): + """ + Starts simulacron and creates a cluster with a single datacenter + :param cluster_name: name of the cluster to start and prime + :return: + """ + start_and_prime_cluster_defaults(number_of_dc=1, nodes_per_dc=3, cluster_name=cluster_name) + + +def start_and_prime_cluster_defaults(number_of_dc=1, nodes_per_dc=3, version=None, cluster_name=DEFAULT_CLUSTER): + """ + :param number_of_dc: number of datacentes + :param nodes_per_dc: number of nodes per datacenter + :param version: C* version + """ start_simulacron() - prime_singledc() + data_centers = ",".join([str(nodes_per_dc)] * number_of_dc) + prime_cluster(data_centers=data_centers, version=version, cluster_name=cluster_name) prime_driver_defaults() + default_column_types = { "key": "bigint", - "description": "ascii", - "dates": "map" + "value": "ascii" } -default_row = {} -default_row["key"] = 2 -default_row["description"] = "whatever_description" -default_row["dates"] = {"whatever_text" : "2014-08-01"} +default_row = {"key": 2, "value": "value"} +default_rows = [default_row] -def prime_query(query, rows=[default_row], column_types=default_column_types, then=None): + +def prime_request(request): + """ + :param request: It could be PrimeQuery class or an PrimeOptions class + """ client_simulacron = SimulacronClient() - query = PrimeQuery(query, rows=rows, column_types=column_types, then=then) - response = client_simulacron.submit_request(query) + response = client_simulacron.submit_request(request) return response -class SimulacronCluster(object): - def __init__(self, json_text): - self.json = json_text - self.o = json.loads(json_text) - - def get_cluster_id(self): - return self.o["id"] - - def get_cluster_name(self): - return self.o["name"] +def prime_query(query, rows=default_rows, column_types=default_column_types, when=None, then=None, cluster_name=DEFAULT_CLUSTER): + """ + Shortcut function for priming a query + :return: + """ + query = PrimeQuery(query, rows=rows, column_types=column_types, when=when, then=then, cluster_name=cluster_name) + response = prime_request(query) + return response - def get_data_centers_ids(self): - return [dc["id"] for dc in self.o["data_centers"]] - def get_data_centers_names(self): - return [dc["name"] for dc in self.o["data_centers"]] +def clear_queries(): + """ + Clears all the queries that have been primed to simulacron + """ + client_simulacron = SimulacronClient() + client_simulacron.clear_all_queries() - def get_node_ids(self, datacenter_id): - datacenter = filter(lambda x: x["id"] == datacenter_id, self.o["data_centers"]).pop() - return [node["id"] for node in datacenter["nodes"]] diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 4b0f8cab70..f3da9ee3bc 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -89,7 +89,10 @@ class TestHostListener(HostStateListener): host_down = None def on_down(self, host): - host_down = host + self.host_down = True + + def on_up(self, host): + self.host_down = False class HeartbeatTest(unittest.TestCase): @@ -98,7 +101,8 @@ class HeartbeatTest(unittest.TestCase): @since 3.3 @jira_ticket PYTHON-286 - @expected_result host should not be marked down when heartbeat fails + @expected_result host should be marked down when heartbeat fails. This + happens after PYTHON-734 @test_category connection heartbeat """ @@ -124,6 +128,7 @@ def test_heart_beat_timeout(self): node.pause() # Wait for connections associated with this host go away self.wait_for_no_connections(host, self.cluster) + self.assertTrue(test_listener.host_down) # Resume paused node finally: node.resume() @@ -138,7 +143,7 @@ def test_heart_beat_timeout(self): time.sleep(.1) self.assertLess(count, 100, "Never connected to the first node") new_connections = self.wait_for_connections(host, self.cluster) - self.assertIsNone(test_listener.host_down) + self.assertFalse(test_listener.host_down) # Make sure underlying new connections don't match previous ones for connection in initial_connections: self.assertFalse(connection in new_connections) diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index d35e5e69e0..e056ae2e28 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -19,38 +19,17 @@ except ImportError: import unittest # noqa -from cassandra import OperationTimedOut -from cassandra.cluster import ExecutionProfile, Cluster -from cassandra.query import SimpleStatement -from cassandra.policies import ConstantSpeculativeExecutionPolicy, HostFilterPolicy, RoundRobinPolicy, \ +from cassandra.cluster import Cluster +from cassandra.policies import HostFilterPolicy, RoundRobinPolicy, \ SimpleConvictionPolicy -from cassandra.connection import Connection from cassandra.pool import Host -from tests.integration import BasicSharedKeyspaceUnitTestCase, greaterthancass21, PROTOCOL_VERSION -from tests import notwindows -from tests.integration.simulacron.utils import start_and_prime_singledc, prime_query, stopt_simulacron, NO_THEN +from tests.integration import PROTOCOL_VERSION -from mock import patch from concurrent.futures import wait as wait_futures def setup_module(): - #use_singledc() - pass - - -class BadRoundRobinPolicy(RoundRobinPolicy): - - def make_query_plan(self, working_keyspace=None, query=None): - pos = self._position - self._position += 1 - - hosts = [] - for _ in range(10): - hosts.extend(self._live_hosts) - - return hosts - + use_singledc() class HostFilterPolicyTests(unittest.TestCase): @@ -94,114 +73,3 @@ def test_predicate_changes(self): response = session.execute("SELECT * from system.local") queried_hosts.update(response.response_future.attempted_hosts) self.assertEqual(queried_hosts, all_hosts) - - -# This doesn't work well with Windows clock granularity -@notwindows -class SpecExecTest(BasicSharedKeyspaceUnitTestCase): - - @classmethod - def setUpClass(cls): - start_and_prime_singledc() - - cls.common_setup(1) - - spec_ep_brr = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(), speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 20)) - spec_ep_rr = ExecutionProfile(speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 20)) - spec_ep_rr_lim = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(), speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 1)) - spec_ep_brr_lim = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(), speculative_execution_policy=ConstantSpeculativeExecutionPolicy(0.4, 10)) - - cls.cluster.add_execution_profile("spec_ep_brr", spec_ep_brr) - cls.cluster.add_execution_profile("spec_ep_rr", spec_ep_rr) - cls.cluster.add_execution_profile("spec_ep_rr_lim", spec_ep_rr_lim) - cls.cluster.add_execution_profile("spec_ep_brr_lim", spec_ep_brr_lim) - - @classmethod - def tearDownClass(cls): - stopt_simulacron() - - @greaterthancass21 - def test_speculative_execution(self): - """ - Test to ensure that speculative execution honors LBP, and that they retry appropriately. - - This test will use various LBP, and ConstantSpeculativeExecutionPolicy settings and ensure the proper number of hosts are queried - @since 3.7.0 - @jira_ticket PYTHON-218 - @expected_result speculative retries should honor max retries, idempotent state of queries, and underlying lbp. - - @test_category metadata - """ - query_to_prime = "INSERT INTO test3rf.test (k, v) VALUES (0, 1);" - prime_query(query_to_prime, then={"delay_in_ms": 4000}) - - statement = SimpleStatement(query_to_prime, is_idempotent=True) - statement_non_idem = SimpleStatement(query_to_prime, is_idempotent=False) - - # This LBP should repeat hosts up to around 30 - result = self.session.execute(statement, execution_profile='spec_ep_brr') - self.assertEqual(21, len(result.response_future.attempted_hosts)) - - # This LBP should keep host list to 3 - result = self.session.execute(statement, execution_profile='spec_ep_rr') - self.assertEqual(3, len(result.response_future.attempted_hosts)) - # Spec_execution policy should limit retries to 1 - result = self.session.execute(statement, execution_profile='spec_ep_rr_lim') - - self.assertEqual(2, len(result.response_future.attempted_hosts)) - - # Spec_execution policy should not be used if the query is not idempotent - result = self.session.execute(statement_non_idem, execution_profile='spec_ep_brr') - self.assertEqual(1, len(result.response_future.attempted_hosts)) - - # Default policy with non_idem query - result = self.session.execute(statement_non_idem) - self.assertEqual(1, len(result.response_future.attempted_hosts)) - - # Should be able to run an idempotent query against default execution policy with no speculative_execution_policy - result = self.session.execute(statement) - self.assertEqual(1, len(result.response_future.attempted_hosts)) - - # Test timeout with spec_ex - with self.assertRaises(OperationTimedOut): - result = self.session.execute(statement, execution_profile='spec_ep_rr', timeout=.5) - - """ - # PYTHON-736 Test speculation policy works with a prepared statement - statement = self.session.prepare("SELECT timeout(i) FROM d WHERE k = ?") - # non-idempotent - result = self.session.execute(statement, (0,), execution_profile='spec_ep_brr') - self.assertEqual(1, len(result.response_future.attempted_hosts)) - # idempotent - statement.is_idempotent = True - result = self.session.execute(statement, (0,), execution_profile='spec_ep_brr') - self.assertLess(1, len(result.response_future.attempted_hosts)) - """ - - - def test_speculative_and_timeout(self): - """ - Test to ensure the timeout is honored when using speculative execution - @since 3.10 - @jira_ticket PYTHON-750 - @expected_result speculative retries be schedule every fixed period, during the maximum - period of the timeout. - - @test_category metadata - """ - # We mock this so no messages are sent, otherwise a reponse might arrive - # and we would not know how many hosts we queried - prime_query("INSERT INTO test3rf.test (k, v) VALUES (0, 1);", then=NO_THEN) - - statement = SimpleStatement("INSERT INTO test3rf.test (k, v) VALUES (0, 1);", is_idempotent=True) - - # An OperationTimedOut is placed here in response_future, - # that's why we can't call session.execute,which would raise it, but - # we have to directly wait for the event - response_future = self.session.execute_async(statement, execution_profile='spec_ep_brr_lim', - timeout=2.2) - response_future._event.wait(4) - self.assertIsInstance(response_future._final_exception, OperationTimedOut) - - # This is because 2.2 / 0.4 + 1 = 6 - self.assertEqual(len(response_future.attempted_hosts), 6) From 10ecc372124279fdd2223fed63b88de0167baee0 Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 25 Jul 2017 16:41:06 -0400 Subject: [PATCH 1217/2005] Move calls to asyncore to the loop thread --- build.yaml | 4 +- cassandra/io/asyncorereactor.py | 55 +++++++++++++++++-- tests/integration/__init__.py | 2 + tests/integration/long/test_failure_types.py | 4 +- tests/integration/standard/test_connection.py | 9 +-- tests/integration/standard/test_policies.py | 5 +- tests/unit/io/test_asyncorereactor.py | 2 +- 7 files changed, 62 insertions(+), 19 deletions(-) diff --git a/build.yaml b/build.yaml index f1d3dd74d3..b5ea81ed20 100644 --- a/build.yaml +++ b/build.yaml @@ -14,11 +14,11 @@ schedules: branches: include: [/python.*/] env_vars: | - EVENT_LOOP_MANAGER='libev' + EVENT_LOOP_MANAGER='async' matrix: exclude: - python: [3.4, 3.6] - - cassandra: ['2.0', '2.1', '3.0'] + - cassandra: ['2.0', '2.1'] weekly_libev: schedule: 0 10 * * 6 diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 04e9684ec9..64dfb030e6 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -18,9 +18,10 @@ import os import socket import sys -from threading import Lock, Thread +from threading import Lock, Thread, Event import time import weakref +import sys from six.moves import range @@ -51,6 +52,33 @@ def _cleanup(loop_weakref): loop._cleanup() +class WaitableTimer(Timer): + def __init__(self, timeout, callback): + Timer.__init__(self, timeout, callback) + self.callback = callback + self.event = Event() + + self.final_exception = None + + def finish(self, time_now): + try: + finished = Timer.finish(self, time_now) + if finished: + self.event.set() + return True + return False + + except Exception as e: + self.final_exception = e + self.event.set() + return True + + def wait(self, timeout=None): + self.event.wait(timeout) + if self.final_exception: + raise self.final_exception + + class _PipeWrapper(object): def __init__(self, fd): @@ -239,6 +267,11 @@ def _run_loop(self): def add_timer(self, timer): self._timers.add_timer(timer) + # This function is called from a different thread than the event loop + # thread, so for this call to be thread safe, we must wake up the loop + # in case it's stuck at a select + self.wake_loop() + def _cleanup(self): global _dispatcher_map @@ -305,16 +338,23 @@ def __init__(self, *args, **kwargs): self.deque_lock = Lock() self._connect_socket() - asyncore.dispatcher.__init__(self, self._socket, _dispatcher_map) + + # start the event loop if needed + self._loop.maybe_start() + + init_handler = WaitableTimer( + timeout=0, + callback=partial(asyncore.dispatcher.__init__, + self, self._socket, _dispatcher_map) + ) + AsyncoreConnection._loop.add_timer(init_handler) + init_handler.wait(kwargs["connect_timeout"]) self._writable = True self._readable = True self._send_options_message() - # start the event loop if needed - self._loop.maybe_start() - def close(self): with self.lock: if self.is_closed: @@ -324,7 +364,10 @@ def close(self): log.debug("Closing connection (%s) to %s", id(self), self.host) self._writable = False self._readable = False - asyncore.dispatcher.close(self) + + # We don't have to wait for this to be closed, we can just schedule it + AsyncoreConnection.create_timer(0, partial(asyncore.dispatcher.close, self)) + log.debug("Closed socket to %s", self.host) if not self.is_defunct: diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 7667e0e2c7..c54c5734c8 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -265,6 +265,8 @@ def get_unsupported_upper_protocol(): dseonly = unittest.skipUnless(DSE_VERSION, "Test is only applicalbe to DSE clusters") pypy = unittest.skipUnless(platform.python_implementation() == "PyPy", "Test is skipped unless it's on PyPy") notpy3 = unittest.skipIf(sys.version_info >= (3, 0), "Test not applicable for Python 3.x runtime") +requiresmallclockgranularity = unittest.skipIf("Windows" in platform.system() or "async" in EVENT_LOOP_MANAGER, + "This test is not suitible for environments with large clock granularity") def wait_for_node_socket(node, timeout): diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py index a8f6939719..10059d320f 100644 --- a/tests/integration/long/test_failure_types.py +++ b/tests/integration/long/test_failure_types.py @@ -20,7 +20,8 @@ from cassandra.policies import HostFilterPolicy, RoundRobinPolicy from cassandra.concurrent import execute_concurrent_with_args from cassandra.query import SimpleStatement -from tests.integration import use_singledc, PROTOCOL_VERSION, get_cluster, setup_keyspace, remove_cluster, get_node +from tests.integration import use_singledc, PROTOCOL_VERSION, get_cluster, setup_keyspace, remove_cluster, get_node, \ + requiresmallclockgranularity from mock import Mock try: @@ -318,6 +319,7 @@ def test_user_function_failure(self): """, consistency_level=ConsistencyLevel.ALL, expected_exception=None) +@requiresmallclockgranularity class TimeoutTimerTest(unittest.TestCase): def setUp(self): """ diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 4b0f8cab70..0606c105d5 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -32,9 +32,8 @@ from cassandra.policies import HostFilterPolicy, RoundRobinPolicy, HostStateListener from cassandra.pool import HostConnectionPool -from tests import is_monkey_patched, notwindows -from tests.integration import use_singledc, PROTOCOL_VERSION, get_node, CASSANDRA_IP, local - +from tests import is_monkey_patched +from tests.integration import use_singledc, PROTOCOL_VERSION, get_node, CASSANDRA_IP, local, requiresmallclockgranularity try: from cassandra.io.libevreactor import LibevConnection except ImportError: @@ -364,9 +363,7 @@ def send_msgs(conn, event): for t in threads: t.join() - # We skip this one for windows because the clock is not as - # granular as in linux - @notwindows + @requiresmallclockgranularity def test_connect_timeout(self): # Underlying socket implementations don't always throw a socket timeout even with min float # This can be timing sensitive, added retry to ensure failure occurs if it can diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index 0eba146c15..6322377278 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -28,7 +28,7 @@ from cassandra.pool import Host from tests.integration import BasicSharedKeyspaceUnitTestCase, greaterthancass21, PROTOCOL_VERSION -from tests import notwindows +from tests.integration import requiresmallclockgranularity from mock import patch @@ -92,8 +92,7 @@ def test_predicate_changes(self): self.assertEqual(queried_hosts, all_hosts) -# This doesn't work well with Windows clock granularity -@notwindows +@requiresmallclockgranularity class SpecExecTest(BasicSharedKeyspaceUnitTestCase): @classmethod diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index 4be5a9b68e..3309c6a8f2 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -63,7 +63,7 @@ def setUp(self): raise unittest.SkipTest("Can't test asyncore with monkey patching") def make_connection(self): - c = AsyncoreConnection('1.2.3.4', cql_version='3.0.1') + c = AsyncoreConnection('1.2.3.4', cql_version='3.0.1', connect_timeout=5) c.socket = Mock() c.socket.send.side_effect = lambda x: len(x) return c From e86655d8524449b541083aa14bcafec49b464bad Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 31 Jul 2017 16:38:40 -0400 Subject: [PATCH 1218/2005] Added a test around SELECT JSON --- tests/integration/standard/test_query.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 33ca43eb90..2b71b5eaf5 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -314,6 +314,15 @@ def test_column_names(self): self.assertEqual(result_set.column_names, [u'user', u'game', u'year', u'month', u'day', u'score']) + def test_basic_json_query(self): + insert_query = SimpleStatement("INSERT INTO test3rf.test(k, v) values (1, 1)", consistency_level = ConsistencyLevel.QUORUM) + json_query = SimpleStatement("SELECT JSON * FROM test3rf.test where k=1", consistency_level = ConsistencyLevel.QUORUM) + + self.session.execute(insert_query) + results = self.session.execute(json_query) + self.assertEqual(results.column_names, ["[json]"]) + self.assertEqual(results[0][0], '{"k": 1, "v": 1}') + class PreparedStatementTests(unittest.TestCase): From fe469911641169be81fc5c80d0932a256257a733 Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 1 Aug 2017 13:59:59 -0400 Subject: [PATCH 1219/2005] Added fix to temporal bug in eventlet --- tests/unit/io/test_eventletreactor.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/unit/io/test_eventletreactor.py b/tests/unit/io/test_eventletreactor.py index 8d0ca21347..05ee73d38b 100644 --- a/tests/unit/io/test_eventletreactor.py +++ b/tests/unit/io/test_eventletreactor.py @@ -39,6 +39,10 @@ def setUpClass(cls): # to make sure no monkey patching is happening if not MONKEY_PATCH_LOOP: return + + # This is being added temporarily due to a bug in eventlet: + # https://github.com/eventlet/eventlet/issues/401 + import eventlet; eventlet.sleep() monkey_patch() cls.connection_class = EventletConnection EventletConnection.initialize_reactor() From 528242287447494c702251d402d083a0dd40993e Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 1 Aug 2017 10:11:17 -0400 Subject: [PATCH 1220/2005] General cleanup for simulacron tests --- cassandra/io/asyncorereactor.py | 1 + tests/integration/__init__.py | 2 +- .../integration/simulacron/test_connection.py | 28 +++++++------ tests/integration/simulacron/test_policies.py | 6 +-- tests/integration/simulacron/utils.py | 41 ++++++++----------- 5 files changed, 38 insertions(+), 40 deletions(-) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 6e3aafba3b..04e9684ec9 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -47,6 +47,7 @@ def _cleanup(loop_weakref): loop = loop_weakref() except ReferenceError: return + loop._cleanup() diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 857c8c0d33..11470626b4 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -265,7 +265,7 @@ def get_unsupported_upper_protocol(): dseonly = unittest.skipUnless(DSE_VERSION, "Test is only applicalbe to DSE clusters") pypy = unittest.skipUnless(platform.python_implementation() == "PyPy", "Test is skipped unless it's on PyPy") notpy3 = unittest.skipIf(sys.version_info >= (3, 0), "Test not applicable for Python 3.x runtime") -ifsimulacron = unittest.skipIf(SIMULACRON_JAR is None, "Simulacron jar hasn't been specified") +requiressimulacron = unittest.skipIf(SIMULACRON_JAR is None, "Simulacron jar hasn't been specified") def wait_for_node_socket(node, timeout): diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index 1feb03b17c..38595f13b0 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -16,19 +16,21 @@ except ImportError: import unittest # noqa -import time import logging +import time -from cassandra import OperationTimedOut -from cassandra.cluster import Cluster, _Scheduler, EXEC_PROFILE_DEFAULT, ExecutionProfile -from cassandra.policies import RoundRobinPolicy, HostStateListener from concurrent.futures import ThreadPoolExecutor -from tests.integration import ifsimulacron, CASSANDRA_VERSION -from tests.integration.simulacron.utils import start_and_prime_cluster_defaults, prime_query, stop_simulacron, \ - prime_request, PrimeOptions, NO_THEN +from cassandra import OperationTimedOut +from cassandra.cluster import (EXEC_PROFILE_DEFAULT, Cluster, ExecutionProfile, + _Scheduler) +from cassandra.policies import HostStateListener, RoundRobinPolicy +from tests.integration import CASSANDRA_VERSION, requiressimulacron +from tests.integration.simulacron.utils import (NO_THEN, PrimeOptions, + prime_query, prime_request, + start_and_prime_cluster_defaults, + stop_simulacron) -import time class TrackDownListener(HostStateListener): hosts_marked_down = [] @@ -43,7 +45,7 @@ def submit(self, fn, *args, **kwargs): self.called_functions.append(fn.__name__) return super(ThreadTracker, self).submit(fn, *args, **kwargs) -@ifsimulacron +@requiressimulacron class ConnectionTest(unittest.TestCase): def test_heart_beat_timeout(self): @@ -65,6 +67,7 @@ def test_heart_beat_timeout(self): idle_heartbeat_interval = 1 start_and_prime_cluster_defaults(number_of_dcs, nodes_per_dc, CASSANDRA_VERSION) + self.addCleanup(stop_simulacron) listener = TrackDownListener() executor = ThreadTracker(max_workers=16) @@ -76,6 +79,7 @@ def test_heart_beat_timeout(self): executor_threads=16, execution_profiles={ EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=RoundRobinPolicy())}) + self.addCleanup(cluster.shutdown) cluster.scheduler.shutdown() cluster.executor = executor @@ -83,11 +87,9 @@ def test_heart_beat_timeout(self): session = cluster.connect(wait_for_all_pools=True) cluster.register_listener(listener) + log = logging.getLogger() log.setLevel('CRITICAL') - - self.addCleanup(cluster.shutdown) - self.addCleanup(stop_simulacron) self.addCleanup(log.setLevel, "DEBUG") prime_query(query_to_prime, then=NO_THEN) @@ -105,7 +107,7 @@ def test_heart_beat_timeout(self): # We allow from some extra time for all the hosts to be to on_down # The callbacks should start happening after idle_heartbeat_timeout + idle_heartbeat_interval - time.sleep((idle_heartbeat_timeout + idle_heartbeat_interval)*2) + time.sleep((idle_heartbeat_timeout + idle_heartbeat_interval) * 2) for host in cluster.metadata.all_hosts(): self.assertIn(host, listener.hosts_marked_down) diff --git a/tests/integration/simulacron/test_policies.py b/tests/integration/simulacron/test_policies.py index 6fdffe2852..8b3cefd443 100644 --- a/tests/integration/simulacron/test_policies.py +++ b/tests/integration/simulacron/test_policies.py @@ -21,7 +21,7 @@ from cassandra.query import SimpleStatement from cassandra.policies import ConstantSpeculativeExecutionPolicy, RoundRobinPolicy -from tests.integration import PROTOCOL_VERSION, greaterthancass21, ifsimulacron, SIMULACRON_JAR +from tests.integration import PROTOCOL_VERSION, greaterthancass21, requiressimulacron, SIMULACRON_JAR from tests import notwindows from tests.integration.simulacron.utils import start_and_prime_singledc, prime_query, \ stop_simulacron, NO_THEN, clear_queries @@ -42,7 +42,7 @@ def make_query_plan(self, working_keyspace=None, query=None): # This doesn't work well with Windows clock granularity @notwindows -@ifsimulacron +@requiressimulacron class SpecExecTest(unittest.TestCase): @classmethod @@ -160,4 +160,4 @@ def test_speculative_and_timeout(self): self.assertIsInstance(response_future._final_exception, OperationTimedOut) # This is because 2.2 / 0.4 + 1 = 6 - self.assertEqual(len(response_future.attempted_hosts), 6) \ No newline at end of file + self.assertEqual(len(response_future.attempted_hosts), 6) diff --git a/tests/integration/simulacron/utils.py b/tests/integration/simulacron/utils.py index 3f0718ac00..409d9d6b4c 100644 --- a/tests/integration/simulacron/utils.py +++ b/tests/integration/simulacron/utils.py @@ -29,20 +29,24 @@ def __init__(self, json_text): self.json = json_text self.o = json.loads(json_text) - def get_cluster_id(self): + @property + def cluster_id(self): return self.o["id"] - def get_cluster_name(self): + @property + def cluster_name(self): return self.o["name"] - def get_data_centers_ids(self): + @property + def data_center_ids(self): return [dc["id"] for dc in self.o["data_centers"]] - def get_data_centers_names(self): + @property + def data_centers_names(self): return [dc["name"] for dc in self.o["data_centers"]] def get_node_ids(self, datacenter_id): - datacenter = list(filter(lambda x: x["id"] == datacenter_id, self.o["data_centers"])).pop() + datacenter = list(filter(lambda x: x["id"] == datacenter_id, self.o["data_centers"])).pop() return [node["id"] for node in datacenter["nodes"]] @@ -216,14 +220,9 @@ def set_node(self, cluster_id, datacenter_id, node_id): self.datacenter_id = datacenter_id self.node_id = node_id - if self.cluster_id is not None: - self.path += "/{}".format(self.cluster_id) - - if self.cluster_id is not None: - self.path += "/{}".format(self.datacenter_id) - - if self.cluster_id is not None: - self.path += "/{}".format(self.node_id) + self.path += '/'.join([component for component in + (self.cluster_id, self.datacenter_id, self.node_id) + if component is not None]) def fetch_url_params(self): return "" @@ -249,7 +248,7 @@ def fetch_json(self): def fetch_url_params(self): return "?cassandra_version={0}&data_centers={1}&name={2}".\ - format(self.cassandra_version, self.data_centers, self.cluster_name) + format(self.cassandra_version, self.data_centers, self.cluster_name) def prime_driver_defaults(): @@ -297,9 +296,9 @@ def start_and_prime_cluster_defaults(number_of_dc=1, nodes_per_dc=3, version=Non default_column_types = { - "key": "bigint", - "value": "ascii" - } + "key": "bigint", + "value": "ascii" +} default_row = {"key": 2, "value": "value"} default_rows = [default_row] @@ -309,9 +308,7 @@ def prime_request(request): """ :param request: It could be PrimeQuery class or an PrimeOptions class """ - client_simulacron = SimulacronClient() - response = client_simulacron.submit_request(request) - return response + return SimulacronClient().submit_request(request) def prime_query(query, rows=default_rows, column_types=default_column_types, when=None, then=None, cluster_name=DEFAULT_CLUSTER): @@ -328,6 +325,4 @@ def clear_queries(): """ Clears all the queries that have been primed to simulacron """ - client_simulacron = SimulacronClient() - client_simulacron.clear_all_queries() - + SimulacronClient().clear_all_queries() From d9bb688885077bc2e020af3eb1677a8217de3205 Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 27 Jul 2017 18:57:15 -0400 Subject: [PATCH 1221/2005] Updated C* version in build.yaml and appveyor --- appveyor.yml | 4 ++-- build.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index 5233a00129..dd3adaa770 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -1,10 +1,10 @@ environment: matrix: - PYTHON: "C:\\Python27-x64" - cassandra_version: 3.10 + cassandra_version: 3.11.0 ci_type: standard - PYTHON: "C:\\Python34" - cassandra_version: 3.10 + cassandra_version: 3.11.0 ci_type: unit os: Visual Studio 2015 platform: diff --git a/build.yaml b/build.yaml index 8731da1f7c..ec894afb98 100644 --- a/build.yaml +++ b/build.yaml @@ -72,7 +72,7 @@ cassandra: - '2.1' - '2.2' - '3.0' - - '3.10' + - '3.11' env: CYTHON: From 50068f333cd31eefb20f6ef96f0999620a24c622 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 3 Aug 2017 17:47:16 -0400 Subject: [PATCH 1222/2005] add changelog checer to Travis configuration (#775) --- .travis.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.travis.yml b/.travis.yml index 6a341ef37b..4e58ae68c3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -24,5 +24,13 @@ install: - pip install tox-travis script: + - | + changed_code_filenames=`git diff --name-only $TRAVIS_COMMIT_RANGE` + changed_driver_filenames=`echo "$changed_code_filenames" | grep '^cassandra'` + changelog_changed=`echo "$changed_code_filenames" | grep '^CHANGELOG.rst'` + if [ "$changed_driver_filenames" ] && [ "$changelog_changed" = "" ] ; then + echo "This PR includes code changes, but no CHANGELOG changes. Please include a CHANGELOG entry." + exit 1 + fi - tox - tox -e patched_loops From ef234cd765ade6d1f07ffbfef26ef3236cdf2083 Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 3 Aug 2017 16:31:13 -0400 Subject: [PATCH 1223/2005] Fixed connection tests and set min C* version for JSON tests --- tests/integration/standard/test_connection.py | 3 +++ tests/integration/standard/test_query.py | 2 ++ 2 files changed, 5 insertions(+) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index eba3f831d9..712df06f0c 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -127,6 +127,9 @@ def test_heart_beat_timeout(self): node.pause() # Wait for connections associated with this host go away self.wait_for_no_connections(host, self.cluster) + + # Wait to seconds for the driver to be notified + time.sleep(2) self.assertTrue(test_listener.host_down) # Resume paused node finally: diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 2b71b5eaf5..062a73c99a 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -29,6 +29,7 @@ from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, get_server_versions, \ greaterthanprotocolv3, MockLoggingHandler, get_supported_protocol_versions, local, get_cluster, setup_keyspace from tests import notwindows +from tests.integration import greaterthanorequalcass30 import time import re @@ -314,6 +315,7 @@ def test_column_names(self): self.assertEqual(result_set.column_names, [u'user', u'game', u'year', u'month', u'day', u'score']) + @greaterthanorequalcass30 def test_basic_json_query(self): insert_query = SimpleStatement("INSERT INTO test3rf.test(k, v) values (1, 1)", consistency_level = ConsistencyLevel.QUORUM) json_query = SimpleStatement("SELECT JSON * FROM test3rf.test where k=1", consistency_level = ConsistencyLevel.QUORUM) From 2a0856e2f79521f334ab36a16a3d16a3f7c7c92f Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 3 Aug 2017 16:31:13 -0400 Subject: [PATCH 1224/2005] Returned connection when there is an OTO --- CHANGELOG.rst | 8 ++++ build.yaml | 4 +- cassandra/cluster.py | 16 +++++++ cassandra/connection.py | 11 +++-- .../integration/simulacron/test_connection.py | 43 ++++++++++++++++++- tests/integration/standard/test_connection.py | 7 ++- tests/integration/standard/test_query.py | 2 + 7 files changed, 84 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 207336d1ee..d486368c41 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,11 @@ +3.12.0 +====== + +Bug Fixes +--------- +* Both _set_final_exception/result called for the same ResponseFuture (PYTHON-630) + + 3.11.0 ====== July 24, 2017 diff --git a/build.yaml b/build.yaml index ec894afb98..7fa159c083 100644 --- a/build.yaml +++ b/build.yaml @@ -14,11 +14,11 @@ schedules: branches: include: [/python.*/] env_vars: | - EVENT_LOOP_MANAGER='async' + EVENT_LOOP_MANAGER='libev' matrix: exclude: - python: [3.4, 3.6] - - cassandra: ['2.0', '2.1'] + - cassandra: ['2.0', '2.1', '3.0'] weekly_libev: schedule: 0 10 * * 6 diff --git a/cassandra/cluster.py b/cassandra/cluster.py index b5a7336e4e..57ebeb8958 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3370,6 +3370,22 @@ def _cancel_timer(self): self._timer.cancel() def _on_timeout(self): + + try: + self._connection._requests.pop(self._req_id) + # This prevents the race condition of the + # event loop thread just receiving the waited message + # If it arrives after this, it will be ignored + except KeyError: + return + + pool = self.session._pools.get(self._current_host) + if pool and not pool.is_shutdown: + with self._connection.lock: + self._connection.request_ids.append(self._req_id) + + pool.return_connection(self._connection) + errors = self._errors if not errors: if self.is_schema_agreed: diff --git a/cassandra/connection.py b/cassandra/connection.py index 57f776874d..0f98d30279 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -586,18 +586,23 @@ def process_io_buffer(self): @defunct_on_error def process_msg(self, header, body): + self.msg_received = True stream_id = header.stream if stream_id < 0: callback = None decoder = ProtocolHandler.decode_message result_metadata = None else: - callback, decoder, result_metadata = self._requests.pop(stream_id) + try: + callback, decoder, result_metadata = self._requests.pop(stream_id) + # This can only happen if the stream_id was + # removed due to an OperationTimedOut + except KeyError: + return + with self.lock: self.request_ids.append(stream_id) - self.msg_received = True - try: response = decoder(header.version, self.user_type_map, stream_id, header.flags, header.opcode, body, self.decompressor, result_metadata) diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index 38595f13b0..0e5ee43eb4 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -21,14 +21,19 @@ from concurrent.futures import ThreadPoolExecutor +from mock import Mock + from cassandra import OperationTimedOut from cassandra.cluster import (EXEC_PROFILE_DEFAULT, Cluster, ExecutionProfile, _Scheduler) from cassandra.policies import HostStateListener, RoundRobinPolicy -from tests.integration import CASSANDRA_VERSION, requiressimulacron +from tests.integration import (CASSANDRA_VERSION, PROTOCOL_VERSION, + requiressimulacron) +from tests.integration.util import assert_quiescent_pool_state from tests.integration.simulacron.utils import (NO_THEN, PrimeOptions, prime_query, prime_request, start_and_prime_cluster_defaults, + start_and_prime_singledc, stop_simulacron) @@ -114,3 +119,39 @@ def test_heart_beat_timeout(self): # In this case HostConnection._replace shouldn't be called self.assertNotIn("_replace", executor.called_functions) + + def test_callbacks_and_pool_when_oto(self): + """ + Test to ensure the callbacks are correcltly called and the connection + is returned when there is an OTO + @since 3.12 + @jira_ticket PYTHON-630 + @expected_result the connection is correctly returned to the pool + after an OTO, also the only the errback is called and not the callback + when the message finally arrives. + + @test_category metadata + """ + start_and_prime_singledc() + self.addCleanup(stop_simulacron) + + cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False) + session = cluster.connect() + self.addCleanup(cluster.shutdown) + + query_to_prime = "SELECT * from testkesypace.testtable" + + server_delay = 2 # seconds + prime_query(query_to_prime, then={"delay_in_ms": server_delay * 1000}) + + future = session.execute_async(query_to_prime, timeout=1) + callback, errback = Mock(name='callback'), Mock(name='errback') + future.add_callbacks(callback, errback) + self.assertRaises(OperationTimedOut, future.result) + + assert_quiescent_pool_state(self, cluster) + + time.sleep(server_delay + 1) + # PYTHON-630 -- only the errback should be called + errback.assert_called_once() + callback.assert_not_called() diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index eba3f831d9..c6e6363069 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -33,7 +33,8 @@ from cassandra.pool import HostConnectionPool from tests import is_monkey_patched -from tests.integration import use_singledc, PROTOCOL_VERSION, get_node, CASSANDRA_IP, local, requiresmallclockgranularity +from tests.integration import use_singledc, PROTOCOL_VERSION, get_node, CASSANDRA_IP, local, \ + requiresmallclockgranularity, greaterthancass20 try: from cassandra.io.libevreactor import LibevConnection except ImportError: @@ -114,6 +115,7 @@ def tearDown(self): self.cluster.shutdown() @local + @greaterthancass20 def test_heart_beat_timeout(self): # Setup a host listener to ensure the nodes don't go down test_listener = TestHostListener() @@ -127,6 +129,9 @@ def test_heart_beat_timeout(self): node.pause() # Wait for connections associated with this host go away self.wait_for_no_connections(host, self.cluster) + + # Wait to seconds for the driver to be notified + time.sleep(2) self.assertTrue(test_listener.host_down) # Resume paused node finally: diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 2b71b5eaf5..062a73c99a 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -29,6 +29,7 @@ from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, get_server_versions, \ greaterthanprotocolv3, MockLoggingHandler, get_supported_protocol_versions, local, get_cluster, setup_keyspace from tests import notwindows +from tests.integration import greaterthanorequalcass30 import time import re @@ -314,6 +315,7 @@ def test_column_names(self): self.assertEqual(result_set.column_names, [u'user', u'game', u'year', u'month', u'day', u'score']) + @greaterthanorequalcass30 def test_basic_json_query(self): insert_query = SimpleStatement("INSERT INTO test3rf.test(k, v) values (1, 1)", consistency_level = ConsistencyLevel.QUORUM) json_query = SimpleStatement("SELECT JSON * FROM test3rf.test where k=1", consistency_level = ConsistencyLevel.QUORUM) From 916a05dd6a86266b3be8cfdea5b430ffe02e22da Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 9 Aug 2017 10:25:58 -0400 Subject: [PATCH 1225/2005] Added simulacron to appveyor tests --- appveyor/appveyor.ps1 | 8 +++++ appveyor/run_test.ps1 | 9 ++++- tests/integration/simulacron/test_policies.py | 36 +++++++++++-------- 3 files changed, 37 insertions(+), 16 deletions(-) diff --git a/appveyor/appveyor.ps1 b/appveyor/appveyor.ps1 index f7f6158ba4..bca80a7ada 100644 --- a/appveyor/appveyor.ps1 +++ b/appveyor/appveyor.ps1 @@ -3,6 +3,7 @@ $env:PATH="$($env:JAVA_HOME)\bin;$($env:PATH)" $env:CCM_PATH="C:\Users\appveyor\ccm" $env:CASSANDRA_VERSION=$env:cassandra_version $env:EVENT_LOOP_MANAGER="async" +$env:SIMULACRON_JAR="C:\Users\appveyor\simulacron-standalone-0.5.0.jar" python --version python -c "import platform; print(platform.architecture())" @@ -41,6 +42,13 @@ If (!(Test-Path $jce_indicator)) { Remove-Item $jcePolicyDir } +# Download simulacron +$simulacron_url = "https://github.com/datastax/simulacron/releases/download/0.5.0/simulacron-standalone-0.5.0.jar" +$simulacron_jar = $env:SIMULACRON_JAR +if(!(Test-Path $simulacron_jar)) { + (new-object System.Net.WebClient).DownloadFile($simulacron_url, $simulacron_jar) +} + # Install Python Dependencies for CCM. Start-Process python -ArgumentList "-m pip install psutil pyYaml six numpy" -Wait -NoNewWindow diff --git a/appveyor/run_test.ps1 b/appveyor/run_test.ps1 index 1c4f5d366a..eae7115c5e 100644 --- a/appveyor/run_test.ps1 +++ b/appveyor/run_test.ps1 @@ -6,6 +6,7 @@ echo $env:Path echo "JAVA_HOME: $env:JAVA_HOME" echo "PYTHONPATH: $env:PYTHONPATH" echo "Cassandra version: $env:CASSANDRA_VERSION" +echo "Simulacron jar: $env:SIMULACRON_JAR" echo $env:ci_type python --version python -c "import platform; print(platform.architecture())" @@ -27,6 +28,12 @@ if($env:ci_type -eq 'unit'){ } if($env:ci_type -eq 'standard'){ + echo "Running simulacron tests" + nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=simulacron_results.xml .\tests\integration\simulacron + $simulacron_tests_result = $lastexitcode + $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\simulacron_results.xml)) + echo "uploading Simulacron test results" + echo "Running CQLEngine integration tests" nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=cqlengine_results.xml .\tests\integration\cqlengine $cqlengine_tests_result = $lastexitcode @@ -46,6 +53,6 @@ if($env:ci_type -eq 'long'){ echo "uploading standard integration test results" } -$exit_result = $unit_tests_result + $cqlengine_tests_result + $integration_tests_result +$exit_result = $unit_tests_result + $cqlengine_tests_result + $integration_tests_result + $simulacron_tests_result echo "Exit result: $exit_result" exit $exit_result diff --git a/tests/integration/simulacron/test_policies.py b/tests/integration/simulacron/test_policies.py index 8b3cefd443..98ba62eaee 100644 --- a/tests/integration/simulacron/test_policies.py +++ b/tests/integration/simulacron/test_policies.py @@ -41,7 +41,6 @@ def make_query_plan(self, working_keyspace=None, query=None): # This doesn't work well with Windows clock granularity -@notwindows @requiressimulacron class SpecExecTest(unittest.TestCase): @@ -54,10 +53,16 @@ def setUpClass(cls): cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False) cls.session = cls.cluster.connect(wait_for_all_pools=True) - spec_ep_brr = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(), speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 20)) - spec_ep_rr = ExecutionProfile(speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 20)) - spec_ep_rr_lim = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(), speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 1)) - spec_ep_brr_lim = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(), speculative_execution_policy=ConstantSpeculativeExecutionPolicy(0.4, 10)) + spec_ep_brr = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(), + speculative_execution_policy=ConstantSpeculativeExecutionPolicy(1, 6), + request_timeout=12) + spec_ep_rr = ExecutionProfile(speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.5, 10), + request_timeout=12) + spec_ep_rr_lim = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(), + speculative_execution_policy=ConstantSpeculativeExecutionPolicy(0.5, 1), + request_timeout=12) + spec_ep_brr_lim = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(), + speculative_execution_policy=ConstantSpeculativeExecutionPolicy(4, 10)) cls.cluster.add_execution_profile("spec_ep_brr", spec_ep_brr) cls.cluster.add_execution_profile("spec_ep_rr", spec_ep_rr) @@ -88,14 +93,14 @@ def test_speculative_execution(self): @test_category metadata """ query_to_prime = "INSERT INTO test3rf.test (k, v) VALUES (0, 1);" - prime_query(query_to_prime, then={"delay_in_ms": 4000}) + prime_query(query_to_prime, then={"delay_in_ms": 10000}) statement = SimpleStatement(query_to_prime, is_idempotent=True) statement_non_idem = SimpleStatement(query_to_prime, is_idempotent=False) # This LBP should repeat hosts up to around 30 result = self.session.execute(statement, execution_profile='spec_ep_brr') - self.assertEqual(21, len(result.response_future.attempted_hosts)) + self.assertEqual(7, len(result.response_future.attempted_hosts)) # This LBP should keep host list to 3 result = self.session.execute(statement, execution_profile='spec_ep_rr') @@ -110,11 +115,11 @@ def test_speculative_execution(self): self.assertEqual(1, len(result.response_future.attempted_hosts)) # Default policy with non_idem query - result = self.session.execute(statement_non_idem) + result = self.session.execute(statement_non_idem, timeout=12) self.assertEqual(1, len(result.response_future.attempted_hosts)) # Should be able to run an idempotent query against default execution policy with no speculative_execution_policy - result = self.session.execute(statement) + result = self.session.execute(statement, timeout=12) self.assertEqual(1, len(result.response_future.attempted_hosts)) # Test timeout with spec_ex @@ -147,17 +152,18 @@ def test_speculative_and_timeout(self): @test_category metadata """ - prime_query("INSERT INTO test3rf.test (k, v) VALUES (0, 1);", then=NO_THEN) + query_to_prime = "INSERT INTO testkeyspace.testtable (k, v) VALUES (0, 1);" + prime_query(query_to_prime, then=NO_THEN) - statement = SimpleStatement("INSERT INTO test3rf.test (k, v) VALUES (0, 1);", is_idempotent=True) + statement = SimpleStatement(query_to_prime, is_idempotent=True) # An OperationTimedOut is placed here in response_future, # that's why we can't call session.execute,which would raise it, but # we have to directly wait for the event response_future = self.session.execute_async(statement, execution_profile='spec_ep_brr_lim', - timeout=2.2) - response_future._event.wait(4) + timeout=14) + response_future._event.wait(16) self.assertIsInstance(response_future._final_exception, OperationTimedOut) - # This is because 2.2 / 0.4 + 1 = 6 - self.assertEqual(len(response_future.attempted_hosts), 6) + # This is because 14 / 4 + 1 = 4 + self.assertEqual(len(response_future.attempted_hosts), 4) From 4effe4b73b89fd96d6b868a995ff71f36a0e6972 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 10 Aug 2017 14:11:56 -0400 Subject: [PATCH 1226/2005] populate default lbp on connection --- CHANGELOG.rst | 1 + cassandra/cluster.py | 11 ++++++++++- tests/integration/standard/test_cluster.py | 12 ++++++++++++ 3 files changed, 23 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index d486368c41..671dc05097 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,6 +4,7 @@ Bug Fixes --------- * Both _set_final_exception/result called for the same ResponseFuture (PYTHON-630) +* Use of DCAwareRoundRobinPolicy raises NoHostAvailable exception (PYTHON-781) 3.11.0 diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 57ebeb8958..0adc82d3d1 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1175,6 +1175,9 @@ def connect(self, keyspace=None, wait_for_all_pools=False): self.profile_manager.populate( weakref.proxy(self), self.metadata.all_hosts()) + self.load_balancing_policy.populate( + weakref.proxy(self), self.metadata.all_hosts() + ) try: self.control_connection.connect() @@ -2661,7 +2664,13 @@ def _reconnect_internal(self): a connection to that host. """ errors = {} - for host in self._cluster._default_load_balancing_policy.make_query_plan(): + lbp = ( + self._cluster.load_balancing_policy + if self._cluster._config_mode == _ConfigMode.LEGACY else + self._cluster._default_load_balancing_policy + ) + + for host in lbp.make_query_plan(): try: return self._try_connect(host) except ConnectionException as exc: diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 65387fb60b..dc2297824b 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -824,6 +824,18 @@ def test_profile_load_balancing(self): # make sure original profile is not impacted self.assertTrue(session.execute(query, execution_profile='node1')[0].release_version) + def test_setting_lbp_legacy(self): + cluster = Cluster() + self.addCleanup(cluster.shutdown) + cluster.load_balancing_policy = RoundRobinPolicy() + self.assertEqual( + list(cluster.load_balancing_policy.make_query_plan()), [] + ) + cluster.connect() + self.assertNotEqual( + list(cluster.load_balancing_policy.make_query_plan()), [] + ) + def test_profile_lb_swap(self): """ Tests that profile load balancing policies are not shared From cc0089d16faa293bbcbabf3dd8bcc15a4591453b Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 16 Aug 2017 16:41:31 -0400 Subject: [PATCH 1227/2005] Removed unncessary connection setup --- cassandra/cqlengine/connection.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 2f4890d2b9..bb4c8bf42f 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -259,8 +259,7 @@ def default(): except: pass - conn = register_connection('default', hosts=None, default=True) - conn.setup() + register_connection('default', hosts=None, default=True) log.debug("cqlengine connection initialized with default session to localhost") From 9d62306746c683d3ec6bca345e27dec392a09d20 Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 21 Aug 2017 16:59:08 -0400 Subject: [PATCH 1228/2005] Drop materialized view tables in tests --- tests/integration/standard/test_query.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 062a73c99a..32ea9c1cb5 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -1037,6 +1037,11 @@ def test_mv_filtering(self): self.session.execute(create_mv_monthlyhigh) self.session.execute(create_mv_filtereduserhigh) + self.addCleanup(self.session.execute, "DROP MATERIALIZED VIEW {0}.alltimehigh".format(self.keyspace_name)) + self.addCleanup(self.session.execute, "DROP MATERIALIZED VIEW {0}.dailyhigh".format(self.keyspace_name)) + self.addCleanup(self.session.execute, "DROP MATERIALIZED VIEW {0}.monthlyhigh".format(self.keyspace_name)) + self.addCleanup(self.session.execute, "DROP MATERIALIZED VIEW {0}.filtereduserhigh".format(self.keyspace_name)) + prepared_insert = self.session.prepare("""INSERT INTO {0}.scores (user, game, year, month, day, score) VALUES (?, ?, ? ,? ,?, ?)""".format(self.keyspace_name)) bound = prepared_insert.bind(('pcmanus', 'Coup', 2015, 5, 1, 4000)) From a90ce1219140bf2306163b35b6e02bbd4c947a1f Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 25 Aug 2017 16:07:08 -0400 Subject: [PATCH 1229/2005] Updated CHANGELOG --- CHANGELOG.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 671dc05097..010f4cc213 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,6 +5,7 @@ Bug Fixes --------- * Both _set_final_exception/result called for the same ResponseFuture (PYTHON-630) * Use of DCAwareRoundRobinPolicy raises NoHostAvailable exception (PYTHON-781) +* Not create two sessions by default in CQLEngine (PYTHON-814) 3.11.0 From fc8a2c181100ee4efa3f332ebcf0eb11a037ab6f Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 28 Aug 2017 09:47:09 -0400 Subject: [PATCH 1230/2005] test_protocol.py whitespace fixes --- tests/unit/test_protocol.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/unit/test_protocol.py b/tests/unit/test_protocol.py index 7e6a5b3702..3792c29616 100644 --- a/tests/unit/test_protocol.py +++ b/tests/unit/test_protocol.py @@ -23,19 +23,19 @@ def test_prepare_message(self): message = PrepareMessage("a") io = Mock() - message.send_body(io,4) + message.send_body(io, 4) self._check_calls(io, [(b'\x00\x00\x00\x01',), (b'a',)]) io.reset_mock() - message.send_body(io,5) + message.send_body(io, 5) self._check_calls(io, [(b'\x00\x00\x00\x01',), (b'a',), (b'\x00\x00\x00\x00',)]) def test_execute_message(self): - message = ExecuteMessage('1',[],4) + message = ExecuteMessage('1', [], 4) io = Mock() - message.send_body(io,4) + message.send_body(io, 4) self._check_calls(io, [(b'\x00\x01',), (b'1',), (b'\x00\x04',), (b'\x01',), (b'\x00\x00',)]) io.reset_mock() From 52796211b0e33c2f04d67f38947d808590ee9a70 Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 7 Sep 2017 10:56:04 -0400 Subject: [PATCH 1231/2005] Added tests for PYTHON-814 --- .../cqlengine/connections/test_connection.py | 21 ++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/tests/integration/cqlengine/connections/test_connection.py b/tests/integration/cqlengine/connections/test_connection.py index 121b79d03f..80fb21c62c 100644 --- a/tests/integration/cqlengine/connections/test_connection.py +++ b/tests/integration/cqlengine/connections/test_connection.py @@ -21,7 +21,7 @@ from cassandra.cqlengine.models import Model from cassandra.cqlengine import columns, connection from cassandra.cqlengine.management import sync_table -from cassandra.cluster import Cluster +from cassandra.cluster import Cluster, _clusters_for_shutdown from cassandra.query import dict_factory from tests.integration import PROTOCOL_VERSION, execute_with_long_wait_retry, local @@ -29,6 +29,8 @@ from tests.integration.cqlengine import DEFAULT_KEYSPACE, setup_connection from cassandra.cqlengine import models +from mock import patch + class TestConnectModel(Model): @@ -69,6 +71,7 @@ def setUp(self): def tearDown(self): self.c.shutdown() + connection.unregister_connection("default") def test_connection_session_switch(self): """ @@ -104,3 +107,19 @@ def test_connection_setup_with_setup(self): def test_connection_setup_with_default(self): connection.default() self.assertIsNotNone(connection.get_connection("default").cluster.metadata.get_host("127.0.0.1")) + + def test_only_one_connection_is_created(self): + """ + Test to ensure that only one new connection is created by + connection.register_connection + + @since 3.12 + @jira_ticket PYTHON-814 + @expected_result Only one connection is created + + @test_category object_mapper + """ + number_of_clusters_before = len(_clusters_for_shutdown) + connection.default() + number_of_clusters_after = len(_clusters_for_shutdown) + self.assertEqual(number_of_clusters_after - number_of_clusters_before, 1) From e71f4b4b4abc47fb7b1ee31628a24690d3e0ea5b Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 8 Sep 2017 11:03:35 -0400 Subject: [PATCH 1232/2005] Refactor cqlengine.connections.test_conection.py --- .../cqlengine/connections/test_connection.py | 62 ++++++++++--------- 1 file changed, 33 insertions(+), 29 deletions(-) diff --git a/tests/integration/cqlengine/connections/test_connection.py b/tests/integration/cqlengine/connections/test_connection.py index 80fb21c62c..e3158c6143 100644 --- a/tests/integration/cqlengine/connections/test_connection.py +++ b/tests/integration/cqlengine/connections/test_connection.py @@ -38,14 +38,45 @@ class TestConnectModel(Model): keyspace = columns.Text() -class ConnectionTest(BaseCassEngTestCase): +class ConnectionTest(unittest.TestCase): + def tearDown(self): + connection.unregister_connection("default") + + @local + def test_connection_setup_with_setup(self): + connection.setup(hosts=None, default_keyspace=None) + self.assertIsNotNone(connection.get_connection("default").cluster.metadata.get_host("127.0.0.1")) + + @local + def test_connection_setup_with_default(self): + connection.default() + self.assertIsNotNone(connection.get_connection("default").cluster.metadata.get_host("127.0.0.1")) + + def test_only_one_connection_is_created(self): + """ + Test to ensure that only one new connection is created by + connection.register_connection + + @since 3.12 + @jira_ticket PYTHON-814 + @expected_result Only one connection is created + + @test_category object_mapper + """ + number_of_clusters_before = len(_clusters_for_shutdown) + connection.default() + number_of_clusters_after = len(_clusters_for_shutdown) + self.assertEqual(number_of_clusters_after - number_of_clusters_before, 1) + + +class SeveralConnectionsTest(BaseCassEngTestCase): @classmethod def setUpClass(cls): connection.unregister_connection('default') cls.keyspace1 = 'ctest1' cls.keyspace2 = 'ctest2' - super(ConnectionTest, cls).setUpClass() + super(SeveralConnectionsTest, cls).setUpClass() cls.setup_cluster = Cluster(protocol_version=PROTOCOL_VERSION) cls.setup_session = cls.setup_cluster.connect() ddl = "CREATE KEYSPACE {0} WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': '{1}'}}".format(cls.keyspace1, 1) @@ -71,7 +102,6 @@ def setUp(self): def tearDown(self): self.c.shutdown() - connection.unregister_connection("default") def test_connection_session_switch(self): """ @@ -97,29 +127,3 @@ def test_connection_session_switch(self): connection.set_session(self.session2) self.assertEqual(1, TestConnectModel.objects.count()) self.assertEqual(TestConnectModel.objects.first(), TCM2) - - @local - def test_connection_setup_with_setup(self): - connection.setup(hosts=None, default_keyspace=None) - self.assertIsNotNone(connection.get_connection("default").cluster.metadata.get_host("127.0.0.1")) - - @local - def test_connection_setup_with_default(self): - connection.default() - self.assertIsNotNone(connection.get_connection("default").cluster.metadata.get_host("127.0.0.1")) - - def test_only_one_connection_is_created(self): - """ - Test to ensure that only one new connection is created by - connection.register_connection - - @since 3.12 - @jira_ticket PYTHON-814 - @expected_result Only one connection is created - - @test_category object_mapper - """ - number_of_clusters_before = len(_clusters_for_shutdown) - connection.default() - number_of_clusters_after = len(_clusters_for_shutdown) - self.assertEqual(number_of_clusters_after - number_of_clusters_before, 1) From 5d3d47b5b75af19f9cb752057426421c3b88c8d4 Mon Sep 17 00:00:00 2001 From: Harald Nordgren Date: Fri, 24 Mar 2017 12:16:46 +0100 Subject: [PATCH 1233/2005] Allow min_length==0 for Column.Text fields --- cassandra/cqlengine/columns.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 649dd3a9fd..6e30bf12b3 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -332,7 +332,7 @@ def __init__(self, min_length=None, max_length=None, **kwargs): :param int max_length: Sets the maximum length of this string, for validation purposes. """ self.min_length = ( - 1 if not min_length and kwargs.get('required', False) + 1 if min_length is None and kwargs.get('required', False) else min_length) self.max_length = max_length From f5694e4ae6d0569e08e8351b465ba0779eb4348c Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 28 Aug 2017 09:47:50 -0400 Subject: [PATCH 1234/2005] add and test keyspace-per-query CASSANDRA-10145/PYTHON-678 --- cassandra/__init__.py | 4 ++ cassandra/protocol.py | 59 +++++++++++++++++++-- tests/unit/test_protocol.py | 103 +++++++++++++++++++++++++++++++----- 3 files changed, 149 insertions(+), 17 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index feabc14192..678663cccd 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -204,6 +204,10 @@ def uses_prepare_flags(cls, version): def uses_error_code_map(cls, version): return version >= cls.V5 + @classmethod + def uses_keyspace_flag(cls, version): + return version >= cls.V5 + class SchemaChangeType(object): DROPPED = 'DROPPED' diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 099dd601c9..f25dd4ad68 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -507,6 +507,7 @@ def recv_body(cls, f, *args): _WITH_PAGING_STATE_FLAG = 0x08 _WITH_SERIAL_CONSISTENCY_FLAG = 0x10 _PROTOCOL_TIMESTAMP = 0x20 +_WITH_KEYSPACE_FLAG = 0x80 class QueryMessage(_MessageType): @@ -514,13 +515,14 @@ class QueryMessage(_MessageType): name = 'QUERY' def __init__(self, query, consistency_level, serial_consistency_level=None, - fetch_size=None, paging_state=None, timestamp=None): + fetch_size=None, paging_state=None, timestamp=None, keyspace=None): self.query = query self.consistency_level = consistency_level self.serial_consistency_level = serial_consistency_level self.fetch_size = fetch_size self.paging_state = paging_state self.timestamp = timestamp + self.keyspace = keyspace self._query_params = None # only used internally. May be set to a list of native-encoded values to have them sent with the request. def send_body(self, f, protocol_version): @@ -558,6 +560,14 @@ def send_body(self, f, protocol_version): if self.timestamp is not None: flags |= _PROTOCOL_TIMESTAMP + if self.keyspace is not None: + if ProtocolVersion.uses_keyspace_flag(protocol_version): + flags |= _WITH_KEYSPACE_FLAG + else: + raise UnsupportedOperation( + "Keyspaces may only be set on queries with protocol version " + "5 or higher. Consider setting Cluster.protocol_version to 5.") + if ProtocolVersion.uses_int_query_flags(protocol_version): write_uint(f, flags) else: @@ -576,6 +586,8 @@ def send_body(self, f, protocol_version): write_consistency_level(f, self.serial_consistency_level) if self.timestamp is not None: write_long(f, self.timestamp) + if self.keyspace is not None: + write_string(f, self.keyspace) CUSTOM_TYPE = object() @@ -768,14 +780,38 @@ class PrepareMessage(_MessageType): opcode = 0x09 name = 'PREPARE' - def __init__(self, query): + def __init__(self, query, keyspace=None): self.query = query + self.keyspace = keyspace def send_body(self, f, protocol_version): write_longstring(f, self.query) + + flags = 0x00 + + if self.keyspace is not None: + if ProtocolVersion.uses_keyspace_flag(protocol_version): + flags |= _WITH_KEYSPACE_FLAG + else: + raise UnsupportedOperation( + "Keyspaces may only be set on queries with protocol version " + "5 or higher. Consider setting Cluster.protocol_version to 5.") + if ProtocolVersion.uses_prepare_flags(protocol_version): - # Write the flags byte; with 0 value for now, but this should change in PYTHON-678 - write_uint(f, 0) + write_uint(f, flags) + else: + # checks above should prevent this, but just to be safe... + if flags: + raise UnsupportedOperation( + "Attempted to set flags with value {flags:0=#8x} on" + "protocol version {pv}, which doesn't support flags" + "in prepared statements." + "Consider setting Cluster.protocol_version to 5." + "".format(flags=flags, pv=protocol_version)) + + if ProtocolVersion.uses_keyspace_flag(protocol_version): + if self.keyspace: + write_string(f, self.keyspace) class ExecuteMessage(_MessageType): @@ -852,12 +888,14 @@ class BatchMessage(_MessageType): name = 'BATCH' def __init__(self, batch_type, queries, consistency_level, - serial_consistency_level=None, timestamp=None): + serial_consistency_level=None, timestamp=None, + keyspace=None): self.batch_type = batch_type self.queries = queries self.consistency_level = consistency_level self.serial_consistency_level = serial_consistency_level self.timestamp = timestamp + self.keyspace = keyspace def send_body(self, f, protocol_version): write_byte(f, self.batch_type.value) @@ -881,6 +919,13 @@ def send_body(self, f, protocol_version): flags |= _WITH_SERIAL_CONSISTENCY_FLAG if self.timestamp is not None: flags |= _PROTOCOL_TIMESTAMP + if self.keyspace: + if ProtocolVersion.uses_keyspace_flag(protocol_version): + flags |= _WITH_KEYSPACE_FLAG + else: + raise UnsupportedOperation( + "Keyspaces may only be set on queries with protocol version " + "5 or higher. Consider setting Cluster.protocol_version to 5.") if ProtocolVersion.uses_int_query_flags(protocol_version): write_int(f, flags) @@ -892,6 +937,10 @@ def send_body(self, f, protocol_version): if self.timestamp is not None: write_long(f, self.timestamp) + if ProtocolVersion.uses_keyspace_flag(protocol_version): + if self.keyspace is not None: + write_string(f, self.keyspace) + known_event_types = frozenset(( 'TOPOLOGY_CHANGE', diff --git a/tests/unit/test_protocol.py b/tests/unit/test_protocol.py index 3792c29616..8f690805f1 100644 --- a/tests/unit/test_protocol.py +++ b/tests/unit/test_protocol.py @@ -1,12 +1,13 @@ - try: import unittest2 as unittest except ImportError: import unittest # noqa from mock import Mock -from cassandra import ProtocolVersion -from cassandra.protocol import PrepareMessage, QueryMessage, ExecuteMessage +from cassandra import ProtocolVersion, UnsupportedOperation +from cassandra.protocol import (PrepareMessage, QueryMessage, ExecuteMessage, + BatchMessage) +from cassandra.query import SimpleStatement, BatchType class MessageTest(unittest.TestCase): @@ -53,20 +54,21 @@ def test_query_message(self): @test_category connection """ - message = QueryMessage("a",3) + message = QueryMessage("a", 3) io = Mock() - - message.send_body(io,4) + + message.send_body(io, 4) self._check_calls(io, [(b'\x00\x00\x00\x01',), (b'a',), (b'\x00\x03',), (b'\x00',)]) io.reset_mock() - message.send_body(io,5) + message.send_body(io, 5) self._check_calls(io, [(b'\x00\x00\x00\x01',), (b'a',), (b'\x00\x03',), (b'\x00\x00\x00\x00',)]) def _check_calls(self, io, expected): - self.assertEqual(len(io.write.mock_calls), len(expected)) - for call, expect in zip(io.write.mock_calls, expected): - self.assertEqual(call[1], expect) + self.assertEqual( + tuple(c[1] for c in io.write.mock_calls), + tuple(expected) + ) def test_prepare_flag(self): """ @@ -83,9 +85,86 @@ def test_prepare_flag(self): for version in ProtocolVersion.SUPPORTED_VERSIONS: message.send_body(io, version) if ProtocolVersion.uses_prepare_flags(version): - # This should pass after PYTHON-696 self.assertEqual(len(io.write.mock_calls), 3) - # self.assertEqual(uint32_unpack(io.write.mock_calls[2][1][0]) & _WITH_SERIAL_CONSISTENCY_FLAG, 1) else: self.assertEqual(len(io.write.mock_calls), 2) io.reset_mock() + + def test_prepare_flag_with_keyspace(self): + message = PrepareMessage("a", keyspace='ks') + io = Mock() + + for version in ProtocolVersion.SUPPORTED_VERSIONS: + if ProtocolVersion.uses_keyspace_flag(version): + message.send_body(io, version) + self._check_calls(io, [ + ('\x00\x00\x00\x01',), + ('a',), + ('\x00\x00\x00\x80',), + (b'\x00\x02',), + (b'ks',), + ]) + else: + with self.assertRaises(UnsupportedOperation): + message.send_body(io, version) + io.reset_mock() + + def test_keyspace_flag_raises_before_v5(self): + keyspace_message = QueryMessage('a', consistency_level=3, keyspace='ks') + io = Mock(name='io') + + with self.assertRaisesRegex(UnsupportedOperation, 'Keyspaces.*set'): + keyspace_message.send_body(io, protocol_version=4) + io.assert_not_called() + + def test_keyspace_written_with_length(self): + io = Mock(name='io') + base_expected = [ + (b'\x00\x00\x00\x01',), + (b'a',), + (b'\x00\x03',), + (b'\x00\x00\x00\x80',), # options w/ keyspace flag + ] + + QueryMessage('a', consistency_level=3, keyspace='ks').send_body( + io, protocol_version=5 + ) + self._check_calls(io, base_expected + [ + (b'\x00\x02',), # length of keyspace string + (b'ks',), + ]) + + io.reset_mock() + + QueryMessage('a', consistency_level=3, keyspace='keyspace').send_body( + io, protocol_version=5 + ) + self._check_calls(io, base_expected + [ + (b'\x00\x08',), # length of keyspace string + (b'keyspace',), + ]) + + def test_batch_message_with_keyspace(self): + self.maxDiff = None + io = Mock(name='io') + batch = BatchMessage( + batch_type=BatchType.LOGGED, + queries=((False, 'stmt a', ('param a',)), + (False, 'stmt b', ('param b',)), + (False, 'stmt c', ('param c',)) + ), + consistency_level=3, + keyspace='ks' + ) + batch.send_body(io, protocol_version=5) + self._check_calls(io, + (('\x00',), ('\x00\x03',), ('\x00',), + ('\x00\x00\x00\x06',), ('stmt a',), + ('\x00\x01',), ('\x00\x00\x00\x07',), ('param a',), + ('\x00',), ('\x00\x00\x00\x06',), ('stmt b',), + ('\x00\x01',), ('\x00\x00\x00\x07',), ('param b',), + ('\x00',), ('\x00\x00\x00\x06',), ('stmt c',), + ('\x00\x01',), ('\x00\x00\x00\x07',), ('param c',), + ('\x00\x03',), + ('\x00\x00\x00\x80',), ('\x00\x02',), ('ks',)) + ) From 81172ef1cbc09358f81c2c49e338bfa5603f8b81 Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Thu, 14 Sep 2017 12:25:10 -0400 Subject: [PATCH 1235/2005] Added API and tests for CASSANDRA-10145 (#823) PYTHON-678 --- CHANGELOG.rst | 5 +- build.yaml | 6 +- cassandra/cluster.py | 86 ++++-- cassandra/protocol.py | 3 +- tests/integration/__init__.py | 16 +- tests/integration/standard/test_query.py | 328 ++++++++++++++++++++++- tests/unit/test_protocol.py | 24 +- tests/unit/test_response_future.py | 4 +- 8 files changed, 413 insertions(+), 59 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 671dc05097..2dc1c764da 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,12 +1,15 @@ 3.12.0 ====== +Features +-------- +* Send keyspace in QUERY, PREPARE, and BATCH messages (PYTHON-678) + Bug Fixes --------- * Both _set_final_exception/result called for the same ResponseFuture (PYTHON-630) * Use of DCAwareRoundRobinPolicy raises NoHostAvailable exception (PYTHON-781) - 3.11.0 ====== July 24, 2017 diff --git a/build.yaml b/build.yaml index 7fa159c083..cfc4269878 100644 --- a/build.yaml +++ b/build.yaml @@ -121,9 +121,9 @@ build: # Run the unit tests, this is not done in travis because # it takes too much time for the whole matrix to build with cython if [[ $CYTHON == 'CYTHON' ]]; then - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/unit/ || true - MONKEY_PATCH_LOOP=1 EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/unit/io/test_eventletreactor.py || true - MONKEY_PATCH_LOOP=1 EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/unit/io/test_geventreactor.py || true + EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml tests/unit/ || true + MONKEY_PATCH_LOOP=1 EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || true + MONKEY_PATCH_LOOP=1 EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || true fi diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 0adc82d3d1..9a2b3f654f 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1721,6 +1721,19 @@ def set_meta_refresh_enabled(self, enabled): self.schema_metadata_enabled = enabled self.token_metadata_enabled = enabled + @classmethod + def _send_chunks(cls, connection, host, chunks, set_keyspace=False): + for ks_chunk in chunks: + messages = [PrepareMessage(query=s.query_string, + keyspace=s.keyspace if set_keyspace else None) + for s in ks_chunk] + # TODO: make this timeout configurable somehow? + responses = connection.wait_for_responses(*messages, timeout=5.0, fail_on_error=False) + for success, response in responses: + if not success: + log.debug("Got unexpected response when preparing " + "statement on host %s: %r", host, response) + def _prepare_all_queries(self, host): if not self._prepared_statements or not self.reprepare_on_up: return @@ -1730,24 +1743,23 @@ def _prepare_all_queries(self, host): try: connection = self.connection_factory(host.address) statements = self._prepared_statements.values() - for keyspace, ks_statements in groupby(statements, lambda s: s.keyspace): - if keyspace is not None: - connection.set_keyspace_blocking(keyspace) - - # prepare 10 statements at a time - ks_statements = list(ks_statements) + if ProtocolVersion.uses_keyspace_flag(self.protocol_version): + # V5 protocol and higher, no need to set the keyspace chunks = [] - for i in range(0, len(ks_statements), 10): - chunks.append(ks_statements[i:i + 10]) - - for ks_chunk in chunks: - messages = [PrepareMessage(query=s.query_string) for s in ks_chunk] - # TODO: make this timeout configurable somehow? - responses = connection.wait_for_responses(*messages, timeout=5.0, fail_on_error=False) - for success, response in responses: - if not success: - log.debug("Got unexpected response when preparing " - "statement on host %s: %r", host, response) + for i in range(0, len(statements), 10): + chunks.append(statements[i:i + 10]) + self._send_chunks(connection, host, chunks, True) + else: + for keyspace, ks_statements in groupby(statements, lambda s: s.keyspace): + if keyspace is not None: + connection.set_keyspace_blocking(keyspace) + + # prepare 10 statements at a time + ks_statements = list(ks_statements) + chunks = [] + for i in range(0, len(ks_statements), 10): + chunks.append(ks_statements[i:i + 10]) + self._send_chunks(connection, host, chunks) log.debug("Done preparing all known prepared statements against host %s", host) except OperationTimedOut as timeout: @@ -2126,11 +2138,13 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time if isinstance(query, SimpleStatement): query_string = query.query_string + statement_keyspace = query.keyspace if ProtocolVersion.uses_keyspace_flag(self._protocol_version) else None if parameters: query_string = bind_params(query_string, parameters, self.encoder) message = QueryMessage( query_string, cl, serial_cl, - fetch_size, timestamp=timestamp) + fetch_size, timestamp=timestamp, + keyspace=statement_keyspace) elif isinstance(query, BoundStatement): prepared_statement = query.prepared_statement message = ExecuteMessage( @@ -2143,9 +2157,10 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time "BatchStatement execution is only supported with protocol version " "2 or higher (supported in Cassandra 2.0 and higher). Consider " "setting Cluster.protocol_version to 2 to support this operation.") + statement_keyspace = query.keyspace if ProtocolVersion.uses_keyspace_flag(self._protocol_version) else None message = BatchMessage( query.batch_type, query._statements_and_parameters, cl, - serial_cl, timestamp) + serial_cl, timestamp, statement_keyspace) message.tracing = trace @@ -2214,7 +2229,7 @@ def _on_request(self, response_future): for fn, args, kwargs in self._request_init_callbacks: fn(response_future, *args, **kwargs) - def prepare(self, query, custom_payload=None): + def prepare(self, query, custom_payload=None, keyspace=None): """ Prepares a query string, returning a :class:`~cassandra.query.PreparedStatement` instance which can be used as follows:: @@ -2237,13 +2252,24 @@ def prepare(self, query, custom_payload=None): ... bound = prepared.bind((user.id, user.name, user.age)) ... session.execute(bound) + Alternatively, if :attr:`~.Cluster.protocol_version` is 5 or higher + (requires Cassandra 4.0+), the keyspace can be specified as a + parameter. This will allow you to avoid specifying the keyspace in the + query without specifying a keyspace in :meth:`~.Cluster.connect`. It + even will let you prepare and use statements against a keyspace other + than the one originally specified on connection: + + >>> analyticskeyspace_prepared = session.prepare( + ... "INSERT INTO user_activity id, last_activity VALUES (?, ?)", + ... keyspace="analyticskeyspace") # note the different keyspace + **Important**: PreparedStatements should be prepared only once. Preparing the same query more than once will likely affect performance. `custom_payload` is a key value map to be passed along with the prepare message. See :ref:`custom_payload`. """ - message = PrepareMessage(query=query) + message = PrepareMessage(query=query, keyspace=keyspace) future = ResponseFuture(self, message, query=None, timeout=self.default_timeout) try: future.send_request() @@ -2252,8 +2278,9 @@ def prepare(self, query, custom_payload=None): log.exception("Error preparing query:") raise + prepared_keyspace = keyspace if keyspace else self.keyspace prepared_statement = PreparedStatement.from_message( - query_id, bind_metadata, pk_indexes, self.cluster.metadata, query, self.keyspace, + query_id, bind_metadata, pk_indexes, self.cluster.metadata, query, prepared_keyspace, self._protocol_version, result_metadata) prepared_statement.custom_payload = future.custom_payload @@ -2262,13 +2289,13 @@ def prepare(self, query, custom_payload=None): if self.cluster.prepare_on_all_hosts: host = future._current_host try: - self.prepare_on_all_hosts(prepared_statement.query_string, host) + self.prepare_on_all_hosts(prepared_statement.query_string, host, prepared_keyspace) except Exception: log.exception("Error preparing query on all hosts:") return prepared_statement - def prepare_on_all_hosts(self, query, excluded_host): + def prepare_on_all_hosts(self, query, excluded_host, keyspace=None): """ Prepare the given query on all hosts, excluding ``excluded_host``. Intended for internal use only. @@ -2276,7 +2303,8 @@ def prepare_on_all_hosts(self, query, excluded_host): futures = [] for host in tuple(self._pools.keys()): if host != excluded_host and host.is_up: - future = ResponseFuture(self, PrepareMessage(query=query), None, self.default_timeout) + future = ResponseFuture(self, PrepareMessage(query=query, keyspace=keyspace), + None, self.default_timeout) # we don't care about errors preparing against specific hosts, # since we can always prepare them as needed when the prepared @@ -3659,7 +3687,8 @@ def _set_result(self, host, connection, pool, response): current_keyspace = self._connection.keyspace prepared_keyspace = prepared_statement.keyspace - if prepared_keyspace and current_keyspace != prepared_keyspace: + if not ProtocolVersion.uses_keyspace_flag(self.session.cluster.protocol_version) \ + and prepared_keyspace and current_keyspace != prepared_keyspace: self._set_final_exception( ValueError("The Session's current keyspace (%s) does " "not match the keyspace the statement was " @@ -3669,7 +3698,10 @@ def _set_result(self, host, connection, pool, response): log.debug("Re-preparing unrecognized prepared statement against host %s: %s", host, prepared_statement.query_string) - prepare_message = PrepareMessage(query=prepared_statement.query_string) + prepared_keyspace = prepared_statement.keyspace \ + if ProtocolVersion.uses_keyspace_flag(self.session.cluster.protocol_version) else None + prepare_message = PrepareMessage(query=prepared_statement.query_string, + keyspace=prepared_keyspace) # since this might block, run on the executor to avoid hanging # the event loop thread self.session.submit(self._reprepare, prepare_message, host, connection, pool) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index f25dd4ad68..2901654001 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -508,6 +508,7 @@ def recv_body(cls, f, *args): _WITH_SERIAL_CONSISTENCY_FLAG = 0x10 _PROTOCOL_TIMESTAMP = 0x20 _WITH_KEYSPACE_FLAG = 0x80 +_PREPARED_WITH_KEYSPACE_FLAG = 0x01 class QueryMessage(_MessageType): @@ -791,7 +792,7 @@ def send_body(self, f, protocol_version): if self.keyspace is not None: if ProtocolVersion.uses_keyspace_flag(protocol_version): - flags |= _WITH_KEYSPACE_FLAG + flags |= _PREPARED_WITH_KEYSPACE_FLAG else: raise UnsupportedOperation( "Keyspaces may only be set on queries with protocol version " diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 6b0f874ee5..9017b0ccc7 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -182,8 +182,19 @@ def set_default_cass_ip(): Cluster.__init__.__func__.__defaults__ = tuple(defaults) -def get_default_protocol(): +def set_default_beta_flag_true(): + defaults = list(Cluster.__init__.__defaults__) + defaults = defaults[:-3] + [True] + defaults[-2:] + try: + Cluster.__init__.__defaults__ = tuple(defaults) + except: + Cluster.__init__.__func__.__defaults__ = tuple(defaults) + +def get_default_protocol(): + if Version(CASSANDRA_VERSION) >= Version('4.0'): + set_default_beta_flag_true() + return 5 if Version(CASSANDRA_VERSION) >= Version('2.2'): return 4 elif Version(CASSANDRA_VERSION) >= Version('2.1'): @@ -261,6 +272,7 @@ def get_unsupported_upper_protocol(): greaterthanorequalcass36 = unittest.skipUnless(CASSANDRA_VERSION >= '3.6', 'Cassandra version 3.6 or greater required') greaterthanorequalcass3_10 = unittest.skipUnless(CASSANDRA_VERSION >= '3.10', 'Cassandra version 3.10 or greater required') greaterthanorequalcass3_11 = unittest.skipUnless(CASSANDRA_VERSION >= '3.11', 'Cassandra version 3.10 or greater required') +greaterthanorequalcass40 = unittest.skipUnless(CASSANDRA_VERSION >= '4.0', 'Cassandra version 4.0 or greater required') lessthancass30 = unittest.skipUnless(CASSANDRA_VERSION < '3.0', 'Cassandra version less then 3.0 required') dseonly = unittest.skipUnless(DSE_VERSION, "Test is only applicalbe to DSE clusters") pypy = unittest.skipUnless(platform.python_implementation() == "PyPy", "Test is skipped unless it's on PyPy") @@ -690,7 +702,7 @@ def tearDownClass(cls): class BasicSharedKeyspaceUnitTestCaseRF1(BasicSharedKeyspaceUnitTestCase): """ This is basic unit test case that can be leveraged to scope a keyspace to a specific test class. - creates a keyspace named after the testclass with a rf of 1, and a table named after the class + creates a keyspace named after the testclass with a rf of 1 """ @classmethod def setUpClass(self): diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 32ea9c1cb5..e2b422603b 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -24,25 +24,28 @@ from cassandra import ConsistencyLevel, Unavailable, InvalidRequest, cluster from cassandra.query import (PreparedStatement, BoundStatement, SimpleStatement, BatchStatement, BatchType, dict_factory, TraceUnavailable) -from cassandra.cluster import Cluster, NoHostAvailable -from cassandra.policies import HostDistance, RoundRobinPolicy +from cassandra.cluster import Cluster, NoHostAvailable, ExecutionProfile +from cassandra.policies import HostDistance, RoundRobinPolicy, WhiteListRoundRobinPolicy from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, get_server_versions, \ - greaterthanprotocolv3, MockLoggingHandler, get_supported_protocol_versions, local, get_cluster, setup_keyspace + greaterthanprotocolv3, MockLoggingHandler, get_supported_protocol_versions, local, get_cluster, setup_keyspace, \ + USE_CASS_EXTERNAL, greaterthanorequalcass40 from tests import notwindows -from tests.integration import greaterthanorequalcass30 +from tests.integration import greaterthanorequalcass30, get_node import time import re def setup_module(): - use_singledc(start=False) - ccm_cluster = get_cluster() - ccm_cluster.clear() - # This is necessary because test_too_many_statements may - # timeout otherwise - config_options = {'write_request_timeout_in_ms': '20000'} - ccm_cluster.set_configuration_options(config_options) - ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True) + if not USE_CASS_EXTERNAL: + use_singledc(start=False) + ccm_cluster = get_cluster() + ccm_cluster.clear() + # This is necessary because test_too_many_statements may + # timeout otherwise + config_options = {'write_request_timeout_in_ms': '20000'} + ccm_cluster.set_configuration_options(config_options) + ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True) + setup_keyspace() global CASS_SERVER_VERSION CASS_SERVER_VERSION = get_server_versions()[0] @@ -1175,3 +1178,304 @@ def test_unicode(self): self.session.execute(bound) +class BaseKeyspaceTests(): + @classmethod + def setUpClass(cls): + cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.session = cls.cluster.connect(wait_for_all_pools=True) + cls.ks_name = cls.__name__.lower() + + cls.alternative_ks = "alternative_keyspace" + cls.table_name = "table_query_keyspace_tests" + + ddl = """CREATE KEYSPACE {0} WITH replication = + {{'class': 'SimpleStrategy', + 'replication_factor': '{1}'}}""".format(cls.ks_name, 1) + cls.session.execute(ddl) + + ddl = """CREATE KEYSPACE {0} WITH replication = + {{'class': 'SimpleStrategy', + 'replication_factor': '{1}'}}""".format(cls.alternative_ks, 1) + cls.session.execute(ddl) + + ddl = ''' + CREATE TABLE {0}.{1} ( + k int PRIMARY KEY, + v int )'''.format(cls.ks_name, cls.table_name) + cls.session.execute(ddl) + ddl = ''' + CREATE TABLE {0}.{1} ( + k int PRIMARY KEY, + v int )'''.format(cls.alternative_ks, cls.table_name) + cls.session.execute(ddl) + + cls.session.execute("INSERT INTO {}.{} (k, v) VALUES (1, 1)".format(cls.ks_name, cls.table_name)) + cls.session.execute("INSERT INTO {}.{} (k, v) VALUES (2, 2)".format(cls.alternative_ks, cls.table_name)) + + @classmethod + def tearDownClass(cls): + ddl = "DROP KEYSPACE {}".format(cls.alternative_ks) + cls.session.execute(ddl) + ddl = "DROP KEYSPACE {}".format(cls.ks_name) + cls.session.execute(ddl) + cls.cluster.shutdown() + +class QueryKeyspaceTests(BaseKeyspaceTests): + + def test_setting_keyspace(self): + """ + Test the basic functionality of PYTHON-678, the keyspace can be set + independently of the query and read the results + + @since 3.12 + @jira_ticket PYTHON-678 + @expected_result the query is executed and the results retrieved + + @test_category query + """ + self._check_set_keyspace_in_statement(self.session) + + def test_setting_keyspace_and_session(self): + """ + Test we can still send the keyspace independently even the session + connects to a keyspace when it's created + + @since 3.12 + @jira_ticket PYTHON-678 + @expected_result the query is executed and the results retrieved + + @test_category query + """ + cluster = Cluster(protocol_version=ProtocolVersion.V5, allow_beta_protocol_version=True) + session = cluster.connect(self.alternative_ks) + self.addCleanup(cluster.shutdown) + + self._check_set_keyspace_in_statement(session) + + def test_setting_keyspace_and_session_after_created(self): + """ + Test we can still send the keyspace independently even the session + connects to a different keyspace after being created + + @since 3.12 + @jira_ticket PYTHON-678 + @expected_result the query is executed and the results retrieved + + @test_category query + """ + cluster = Cluster(protocol_version=ProtocolVersion.V5, allow_beta_protocol_version=True) + session = cluster.connect() + self.addCleanup(cluster.shutdown) + + session.set_keyspace(self.alternative_ks) + self._check_set_keyspace_in_statement(session) + + def test_setting_keyspace_and_same_session(self): + """ + Test we can still send the keyspace independently even if the session + is connected to the sent keyspace + + @since 3.12 + @jira_ticket PYTHON-678 + @expected_result the query is executed and the results retrieved + + @test_category query + """ + cluster = Cluster(protocol_version=ProtocolVersion.V5, allow_beta_protocol_version=True) + session = cluster.connect(self.ks_name) + self.addCleanup(cluster.shutdown) + + self._check_set_keyspace_in_statement(session) + + +@greaterthanorequalcass40 +class SimpleWithKeyspaceTests(QueryKeyspaceTests, unittest.TestCase): + @unittest.skip + def test_lower_protocol(self): + cluster = Cluster(protocol_version=ProtocolVersion.V4) + session = cluster.connect(self.ks_name) + self.addCleanup(cluster.shutdown) + + simple_stmt = SimpleStatement("SELECT * from {}".format(self.table_name), keyspace=self.ks_name) + # This raises cassandra.cluster.NoHostAvailable: ('Unable to complete the operation against + # any hosts', {: UnsupportedOperation('Keyspaces may only be + # set on queries with protocol version 5 or higher. Consider setting Cluster.protocol_version to 5.',), + # : ConnectionException('Host has been marked down or removed',), + # : ConnectionException('Host has been marked down or removed',)}) + with self.assertRaises(NoHostAvailable): + session.execute(simple_stmt) + + def _check_set_keyspace_in_statement(self, session): + simple_stmt = SimpleStatement("SELECT * from {}".format(self.table_name), keyspace=self.ks_name) + results = session.execute(simple_stmt) + self.assertEqual(results[0], (1, 1)) + + simple_stmt = SimpleStatement("SELECT * from {}".format(self.table_name)) + simple_stmt.keyspace = self.ks_name + results = session.execute(simple_stmt) + self.assertEqual(results[0], (1, 1)) + + +@greaterthanorequalcass40 +class BatchWithKeyspaceTests(QueryKeyspaceTests, unittest.TestCase): + def _check_set_keyspace_in_statement(self, session): + batch_stmt = BatchStatement() + for i in range(10): + batch_stmt.add("INSERT INTO {} (k, v) VALUES (%s, %s)".format(self.table_name), (i, i)) + + batch_stmt.keyspace = self.ks_name + session.execute(batch_stmt) + self.confirm_results() + + def confirm_results(self): + keys = set() + values = set() + # Assuming the test data is inserted at default CL.ONE, we need ALL here to guarantee we see + # everything inserted + results = self.session.execute(SimpleStatement("SELECT * FROM {}.{}".format(self.ks_name, self.table_name), + consistency_level=ConsistencyLevel.ALL)) + for result in results: + keys.add(result.k) + values.add(result.v) + + self.assertEqual(set(range(10)), keys, msg=results) + self.assertEqual(set(range(10)), values, msg=results) + + +@greaterthanorequalcass40 +class PreparedWithKeyspaceTests(BaseKeyspaceTests, unittest.TestCase): + + def setUp(self): + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, allow_beta_protocol_version=True) + self.session = self.cluster.connect() + + def tearDown(self): + self.cluster.shutdown() + + def test_prepared_with_keyspace_explicit(self): + """ + Test the basic functionality of PYTHON-678, the keyspace can be set + independently of the query and read the results + + @since 3.12 + @jira_ticket PYTHON-678 + @expected_result the query is executed and the results retrieved + + @test_category query + """ + query = "SELECT * from {} WHERE k = ?".format(self.table_name) + prepared_statement = self.session.prepare(query, keyspace=self.ks_name) + + results = self.session.execute(prepared_statement, (1, )) + self.assertEqual(results[0], (1, 1)) + + prepared_statement_alternative = self.session.prepare(query, keyspace=self.alternative_ks) + + self.assertNotEqual(prepared_statement.query_id, prepared_statement_alternative.query_id) + + results = self.session.execute(prepared_statement_alternative, (2,)) + self.assertEqual(results[0], (2, 2)) + + def test_reprepare_after_host_is_down(self): + """ + Test that Cluster._prepare_all_queries is called and the + when a node comes up and the queries succeed later + + @since 3.12 + @jira_ticket PYTHON-678 + @expected_result the query is executed and the results retrieved + + @test_category query + """ + mock_handler = MockLoggingHandler() + logger = logging.getLogger(cluster.__name__) + logger.addHandler(mock_handler) + get_node(1).stop(wait=True, gently=True, wait_other_notice=True) + + only_first = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(["127.0.0.1"])) + self.cluster.add_execution_profile("only_first", only_first) + + query = "SELECT v from {} WHERE k = ?".format(self.table_name) + prepared_statement = self.session.prepare(query, keyspace=self.ks_name) + prepared_statement_alternative = self.session.prepare(query, keyspace=self.alternative_ks) + + get_node(1).start(wait_for_binary_proto=True, wait_other_notice=True) + + # We wait for cluster._prepare_all_queries to be called + time.sleep(5) + self.assertEqual(1, mock_handler.get_message_count('debug', 'Preparing all known prepared statements')) + results = self.session.execute(prepared_statement, (1,), execution_profile="only_first") + self.assertEqual(results[0], (1, )) + + results = self.session.execute(prepared_statement_alternative, (2,), execution_profile="only_first") + self.assertEqual(results[0], (2, )) + + def test_prepared_not_found(self): + """ + Test to if a query fails on a node that didn't have + the query prepared, it is re-prepared as expected and then + the query is executed + + @since 3.12 + @jira_ticket PYTHON-678 + @expected_result the query is executed and the results retrieved + + @test_category query + """ + cluster = Cluster(protocol_version=PROTOCOL_VERSION, allow_beta_protocol_version=True) + session = self.cluster.connect("system") + self.addCleanup(cluster.shutdown) + + cluster.prepare_on_all_hosts = False + query = "SELECT k from {} WHERE k = ?".format(self.table_name) + prepared_statement = session.prepare(query, keyspace=self.ks_name) + + for _ in range(10): + results = session.execute(prepared_statement, (1, )) + self.assertEqual(results[0], (1,)) + + def test_prepared_in_query_keyspace(self): + """ + Test to the the keyspace can be set in the query + + @since 3.12 + @jira_ticket PYTHON-678 + @expected_result the results are retrieved correctly + + @test_category query + """ + cluster = Cluster(protocol_version=PROTOCOL_VERSION, allow_beta_protocol_version=True) + session = self.cluster.connect() + self.addCleanup(cluster.shutdown) + + query = "SELECT k from {}.{} WHERE k = ?".format(self.ks_name, self.table_name) + prepared_statement = session.prepare(query) + results = session.execute(prepared_statement, (1,)) + self.assertEqual(results[0], (1,)) + + query = "SELECT k from {}.{} WHERE k = ?".format(self.alternative_ks, self.table_name) + prepared_statement = session.prepare(query) + results = session.execute(prepared_statement, (2,)) + self.assertEqual(results[0], (2,)) + + def test_prepared_in_query_keyspace_and_explicit(self): + """ + Test to the the keyspace set explicitly is ignored if it is + specified as well in the query + + @since 3.12 + @jira_ticket PYTHON-678 + @expected_result the keyspace set explicitly is ignored and + the results are retrieved correctly + + @test_category query + """ + query = "SELECT k from {}.{} WHERE k = ?".format(self.ks_name, self.table_name) + prepared_statement = self.session.prepare(query, keyspace="system") + results = self.session.execute(prepared_statement, (1,)) + self.assertEqual(results[0], (1,)) + + query = "SELECT k from {}.{} WHERE k = ?".format(self.ks_name, self.table_name) + prepared_statement = self.session.prepare(query, keyspace=self.alternative_ks) + results = self.session.execute(prepared_statement, (1,)) + self.assertEqual(results[0], (1,)) diff --git a/tests/unit/test_protocol.py b/tests/unit/test_protocol.py index 8f690805f1..3bfd77461c 100644 --- a/tests/unit/test_protocol.py +++ b/tests/unit/test_protocol.py @@ -98,9 +98,9 @@ def test_prepare_flag_with_keyspace(self): if ProtocolVersion.uses_keyspace_flag(version): message.send_body(io, version) self._check_calls(io, [ - ('\x00\x00\x00\x01',), - ('a',), - ('\x00\x00\x00\x80',), + (b'\x00\x00\x00\x01',), + (b'a',), + (b'\x00\x00\x00\x01',), (b'\x00\x02',), (b'ks',), ]) @@ -158,13 +158,13 @@ def test_batch_message_with_keyspace(self): ) batch.send_body(io, protocol_version=5) self._check_calls(io, - (('\x00',), ('\x00\x03',), ('\x00',), - ('\x00\x00\x00\x06',), ('stmt a',), - ('\x00\x01',), ('\x00\x00\x00\x07',), ('param a',), - ('\x00',), ('\x00\x00\x00\x06',), ('stmt b',), - ('\x00\x01',), ('\x00\x00\x00\x07',), ('param b',), - ('\x00',), ('\x00\x00\x00\x06',), ('stmt c',), - ('\x00\x01',), ('\x00\x00\x00\x07',), ('param c',), - ('\x00\x03',), - ('\x00\x00\x00\x80',), ('\x00\x02',), ('ks',)) + ((b'\x00',), (b'\x00\x03',), (b'\x00',), + (b'\x00\x00\x00\x06',), (b'stmt a',), + (b'\x00\x01',), (b'\x00\x00\x00\x07',), ('param a',), + (b'\x00',), (b'\x00\x00\x00\x06',), (b'stmt b',), + (b'\x00\x01',), (b'\x00\x00\x00\x07',), ('param b',), + (b'\x00',), (b'\x00\x00\x00\x06',), (b'stmt c',), + (b'\x00\x01',), (b'\x00\x00\x00\x07',), ('param c',), + (b'\x00\x03',), + (b'\x00\x00\x00\x80',), (b'\x00\x02',), (b'ks',)) ) diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index b03a6d99a3..e861301304 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -20,7 +20,7 @@ from mock import Mock, MagicMock, ANY from cassandra import ConsistencyLevel, Unavailable, SchemaTargetType, SchemaChangeType -from cassandra.cluster import Session, ResponseFuture, NoHostAvailable +from cassandra.cluster import Session, ResponseFuture, NoHostAvailable, ProtocolVersion from cassandra.connection import Connection, ConnectionException from cassandra.protocol import (ReadTimeoutErrorMessage, WriteTimeoutErrorMessage, UnavailableErrorMessage, ResultMessage, QueryMessage, @@ -476,6 +476,7 @@ def test_prepared_query_not_found(self): rf = self.make_response_future(session) rf.send_request() + session.cluster.protocol_version = ProtocolVersion.V4 session.cluster._prepared_statements = MagicMock(dict) prepared_statement = session.cluster._prepared_statements.__getitem__.return_value prepared_statement.query_string = "SELECT * FROM foobar" @@ -500,6 +501,7 @@ def test_prepared_query_not_found_bad_keyspace(self): rf = self.make_response_future(session) rf.send_request() + session.cluster.protocol_version = ProtocolVersion.V4 session.cluster._prepared_statements = MagicMock(dict) prepared_statement = session.cluster._prepared_statements.__getitem__.return_value prepared_statement.query_string = "SELECT * FROM foobar" From 189fe0bbe50b64f9384bbbf30b73e957e5630d2f Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 18 Sep 2017 17:18:14 -0400 Subject: [PATCH 1236/2005] Fix bug when subclassing AsyncoreConnection --- CHANGELOG.rst | 1 + cassandra/io/asyncorereactor.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 0d8bf72914..6878fca8a7 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -10,6 +10,7 @@ Bug Fixes * Both _set_final_exception/result called for the same ResponseFuture (PYTHON-630) * Use of DCAwareRoundRobinPolicy raises NoHostAvailable exception (PYTHON-781) * Not create two sessions by default in CQLEngine (PYTHON-814) +* Bug when subclassing AyncoreConnection (PYTHON-827) 3.11.0 ====== diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 64dfb030e6..e875d89cf2 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -347,7 +347,7 @@ def __init__(self, *args, **kwargs): callback=partial(asyncore.dispatcher.__init__, self, self._socket, _dispatcher_map) ) - AsyncoreConnection._loop.add_timer(init_handler) + self._loop.add_timer(init_handler) init_handler.wait(kwargs["connect_timeout"]) self._writable = True From cbb2badb709ee4be8e2da06944ab7a385762f137 Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 20 Sep 2017 09:16:51 -0400 Subject: [PATCH 1237/2005] The asyncore event loop shouldn't be awoken once closed, call manually to close the remaining open connections --- CHANGELOG.rst | 1 + cassandra/io/asyncorereactor.py | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 6878fca8a7..c2dadcd95d 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -11,6 +11,7 @@ Bug Fixes * Use of DCAwareRoundRobinPolicy raises NoHostAvailable exception (PYTHON-781) * Not create two sessions by default in CQLEngine (PYTHON-814) * Bug when subclassing AyncoreConnection (PYTHON-827) +* Error at cleanup when closing the asyncore connections (PYTHON-829) 3.11.0 ====== diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index e875d89cf2..6ac61f7065 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -279,6 +279,9 @@ def _cleanup(self): if not self._thread: return + # The loop shouldn't be woken up from here onwards since it won't be running + self._loop_dispatcher._notified = True + log.debug("Waiting for event loop thread to join...") self._thread.join(timeout=1.0) if self._thread.is_alive(): @@ -288,9 +291,12 @@ def _cleanup(self): log.debug("Event loop thread was joined") - # Ensure all connections are closed and in-flight requests cancelled + # Ensure all connections are closed and in-flight requests cancelled for conn in tuple(_dispatcher_map.values()): conn.close() + # The event loop should be closed, so we call remaining asyncore.close + # callbacks from here + self._timers.service_timeouts() log.debug("Dispatchers were closed") From 496391ee8397b695e552d214d077b64d7494aa64 Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 20 Sep 2017 13:20:50 -0400 Subject: [PATCH 1238/2005] Replace deprecated method --- tests/unit/test_protocol.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_protocol.py b/tests/unit/test_protocol.py index 3bfd77461c..a4ee3385eb 100644 --- a/tests/unit/test_protocol.py +++ b/tests/unit/test_protocol.py @@ -113,7 +113,7 @@ def test_keyspace_flag_raises_before_v5(self): keyspace_message = QueryMessage('a', consistency_level=3, keyspace='ks') io = Mock(name='io') - with self.assertRaisesRegex(UnsupportedOperation, 'Keyspaces.*set'): + with self.assertRaisesRegexp(UnsupportedOperation, 'Keyspaces.*set'): keyspace_message.send_body(io, protocol_version=4) io.assert_not_called() From d3e960134f97198f244873aa1ce66078947c3a89 Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 22 Sep 2017 14:28:10 -0400 Subject: [PATCH 1239/2005] Close dispatcher last when cleaning asyncore connections --- cassandra/io/asyncorereactor.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 6ac61f7065..86abe5986b 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -279,9 +279,6 @@ def _cleanup(self): if not self._thread: return - # The loop shouldn't be woken up from here onwards since it won't be running - self._loop_dispatcher._notified = True - log.debug("Waiting for event loop thread to join...") self._thread.join(timeout=1.0) if self._thread.is_alive(): @@ -293,10 +290,11 @@ def _cleanup(self): # Ensure all connections are closed and in-flight requests cancelled for conn in tuple(_dispatcher_map.values()): - conn.close() - # The event loop should be closed, so we call remaining asyncore.close - # callbacks from here + if conn is not self._loop_dispatcher: + conn.close() self._timers.service_timeouts() + # Once all the connections are closed, close the dispatcher + self._loop_dispatcher.close() log.debug("Dispatchers were closed") From 86a88c3a048d5ee4f3e2ab99f5eb0501ca7f3f30 Mon Sep 17 00:00:00 2001 From: Shevchenko Vitaliy Date: Thu, 15 Jun 2017 11:12:19 +0300 Subject: [PATCH 1240/2005] Fix another potential RuntimeError-s referenced to changed WeakSet instance during iteration --- CHANGELOG.rst | 2 ++ cassandra/cluster.py | 28 ++++++++++++++-------------- 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f6f43545b7..b73d2b4741 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -11,6 +11,8 @@ Bug Fixes * Fix asyncore hang on exit (PYTHON-767) * Driver takes several minutes to remove a bad host from session (PYTHON-762) * Installation doesn't always fall back to no cython in Windows (PYTHON-763) +* Fix sites where `sessions` can change during iteration (PYTHON-793) + Other ----- diff --git a/cassandra/cluster.py b/cassandra/cluster.py index cfe12f9c9c..fae526acb9 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -964,7 +964,7 @@ def __init__(self, street, zipcode): "be returned when reading type %s.%s.", self.protocol_version, keyspace, user_type) self._user_types[keyspace][user_type] = klass - for session in self.sessions: + for session in tuple(self.sessions): session.user_type_registered(keyspace, user_type, klass) UserType.evict_udt_class(keyspace, user_type) @@ -994,7 +994,7 @@ def add_execution_profile(self, name, profile, pool_wait_timeout=5): for host in filter(lambda h: h.is_up, self.metadata.all_hosts()): profile.load_balancing_policy.on_up(host) futures = set() - for session in self.sessions: + for session in tuple(self.sessions): futures.update(session.update_created_pools()) _, not_done = wait_futures(futures, pool_wait_timeout) if not_done: @@ -1209,7 +1209,7 @@ def connect(self, keyspace=None, wait_for_all_pools=False): def get_connection_holders(self): holders = [] - for s in self.sessions: + for s in tuple(self.sessions): holders.extend(s.get_pools()) holders.append(self.control_connection) return holders @@ -1235,7 +1235,7 @@ def shutdown(self): self.control_connection.shutdown() - for session in self.sessions: + for session in tuple(self.sessions): session.shutdown() self.executor.shutdown() @@ -1262,7 +1262,7 @@ def _session_register_user_types(self, session): def _cleanup_failed_on_up_handling(self, host): self.profile_manager.on_down(host) self.control_connection.on_down(host) - for session in self.sessions: + for session in tuple(self.sessions): session.remove_pool(host) self._start_reconnector(host, is_host_addition=False) @@ -1301,7 +1301,7 @@ def _on_up_future_completed(self, host, futures, results, lock, finished_future) host._currently_handling_node_up = False # see if there are any pools to add or remove now that the host is marked up - for session in self.sessions: + for session in tuple(self.sessions): session.update_created_pools() def on_up(self, host): @@ -1338,7 +1338,7 @@ def on_up(self, host): self._prepare_all_queries(host) log.debug("Done preparing all queries for host %s, ", host) - for session in self.sessions: + for session in tuple(self.sessions): session.remove_pool(host) log.debug("Signalling to load balancing policies that host %s is up", host) @@ -1351,7 +1351,7 @@ def on_up(self, host): futures_lock = Lock() futures_results = [] callback = partial(self._on_up_future_completed, host, futures, futures_results, futures_lock) - for session in self.sessions: + for session in tuple(self.sessions): future = session.add_or_renew_pool(host, is_host_addition=False) if future is not None: have_future = True @@ -1415,7 +1415,7 @@ def on_down(self, host, is_host_addition, expect_host_to_be_down=False): # this is to avoid closing pools when a control connection host became isolated if self._discount_down_events and self.profile_manager.distance(host) != HostDistance.IGNORED: connected = False - for session in self.sessions: + for session in tuple(self.sessions): pool_states = session.get_pool_state() pool_state = pool_states.get(host) if pool_state: @@ -1431,7 +1431,7 @@ def on_down(self, host, is_host_addition, expect_host_to_be_down=False): self.profile_manager.on_down(host) self.control_connection.on_down(host) - for session in self.sessions: + for session in tuple(self.sessions): session.on_down(host) for listener in self.listeners: @@ -1488,7 +1488,7 @@ def future_completed(future): self._finalize_add(host) have_future = False - for session in self.sessions: + for session in tuple(self.sessions): future = session.add_or_renew_pool(host, is_host_addition=True) if future is not None: have_future = True @@ -1506,7 +1506,7 @@ def _finalize_add(self, host, set_up=True): listener.on_add(host) # see if there are any pools to add or remove now that the host is marked up - for session in self.sessions: + for session in tuple(self.sessions): session.update_created_pools() def on_remove(self, host): @@ -1516,7 +1516,7 @@ def on_remove(self, host): log.debug("Removing host %s", host) host.set_down() self.profile_manager.on_remove(host) - for session in self.sessions: + for session in tuple(self.sessions): session.on_remove(host) for listener in self.listeners: listener.on_remove(host) @@ -1576,7 +1576,7 @@ def _ensure_core_connections(self): If any host has fewer than the configured number of core connections open, attempt to open connections until that number is met. """ - for session in self.sessions: + for session in tuple(self.sessions): for pool in tuple(session._pools.values()): pool.ensure_core_connections() From 32037f35a299b937ca71bc553bf8c71ba22d7935 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 2 Oct 2017 14:42:27 -0400 Subject: [PATCH 1241/2005] Allow min_length=0 for Ascii and Text column types --- CHANGELOG.rst | 1 + tests/integration/cqlengine/columns/test_validation.py | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index d3a8b6ba29..cc066415cd 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -13,6 +13,7 @@ Bug Fixes * Bug when subclassing AyncoreConnection (PYTHON-827) * Error at cleanup when closing the asyncore connections (PYTHON-829) * Fix sites where `sessions` can change during iteration (PYTHON-793) +* cqlengine: allow min_length=0 for Ascii and Text column types (PYTHON-735) 3.11.0 ====== diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 2da9007006..926e13dd0f 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -532,7 +532,10 @@ class TestAscii(BaseCassEngTestCase): def test_min_length(self): """ Test arbitrary minimal lengths requirements. """ + Ascii(min_length=0).validate('') + Ascii(min_length=0, required=True).validate('') + Ascii(min_length=0).validate(None) Ascii(min_length=0).validate('kevin') @@ -656,7 +659,10 @@ class TestText(BaseCassEngTestCase): def test_min_length(self): """ Test arbitrary minimal lengths requirements. """ + Text(min_length=0).validate('') + Text(min_length=0, required=True).validate('') + Text(min_length=0).validate(None) Text(min_length=0).validate('blake') From 1cc6b6395c9b21467e7da76b8a6f991cbc373bec Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 2 Oct 2017 15:31:00 -0400 Subject: [PATCH 1242/2005] Remove DeprecationWarning when using WhiteListRoundRobinPolicy --- CHANGELOG.rst | 4 ++++ cassandra/policies.py | 20 +------------------- 2 files changed, 5 insertions(+), 19 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index cc066415cd..8c4c2fcc01 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -15,6 +15,10 @@ Bug Fixes * Fix sites where `sessions` can change during iteration (PYTHON-793) * cqlengine: allow min_length=0 for Ascii and Text column types (PYTHON-735) +Others +------ +* Remove DeprecationWarning when using WhiteListRoundRobinPolicy (PYTHON-810) + 3.11.0 ====== July 24, 2017 diff --git a/cassandra/policies.py b/cassandra/policies.py index 466a4cdcbe..76079a1526 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -17,7 +17,6 @@ from random import randint, shuffle from threading import Lock import socket -from warnings import warn from cassandra import ConsistencyLevel, OperationTimedOut @@ -397,10 +396,6 @@ def on_remove(self, *args, **kwargs): class WhiteListRoundRobinPolicy(RoundRobinPolicy): """ - |wlrrp| **is deprecated. It will be removed in 4.0.** It can effectively be - reimplemented using :class:`.HostFilterPolicy`. For more information, see - PYTHON-758_. - A subclass of :class:`.RoundRobinPolicy` which evenly distributes queries across all nodes in the cluster, regardless of what datacenter the nodes may be in, but @@ -410,26 +405,13 @@ class WhiteListRoundRobinPolicy(RoundRobinPolicy): https://datastax-oss.atlassian.net/browse/JAVA-145 Where connection errors occur when connection attempts are made to private IP addresses remotely - - .. |wlrrp| raw:: html - - WhiteListRoundRobinPolicy - - .. _PYTHON-758: https://datastax-oss.atlassian.net/browse/PYTHON-758 - """ + def __init__(self, hosts): """ The `hosts` parameter should be a sequence of hosts to permit connections to. """ - msg = ('WhiteListRoundRobinPolicy is deprecated. ' - 'It will be removed in 4.0. ' - 'It can effectively be reimplemented using HostFilterPolicy.') - warn(msg, DeprecationWarning) - # DeprecationWarnings are silent by default so we also log the message - log.warning(msg) - self._allowed_hosts = hosts self._allowed_hosts_resolved = [endpoint[4][0] for a in self._allowed_hosts for endpoint in socket.getaddrinfo(a, None, socket.AF_UNSPEC, socket.SOCK_STREAM)] From bb501ddacd8f708bc6b8bc1384443848b4141333 Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 6 Sep 2017 15:09:13 -0400 Subject: [PATCH 1243/2005] Moved test_getting_replicas to unit tests --- tests/integration/standard/test_metadata.py | 32 ++--------------- tests/unit/test_metadata.py | 38 ++++++++++++++++++++- 2 files changed, 39 insertions(+), 31 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index ba628b53d8..df5b7720f6 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -27,11 +27,8 @@ from cassandra.cluster import Cluster from cassandra.encoder import Encoder -from cassandra.metadata import (Metadata, KeyspaceMetadata, IndexMetadata, - Token, MD5Token, TokenMap, murmur3, Function, Aggregate, protect_name, protect_names, - get_schema_parser, RegisteredTableExtension, _RegisteredExtensionType) -from cassandra.policies import SimpleConvictionPolicy -from cassandra.pool import Host +from cassandra.metadata import (IndexMetadata, Token, murmur3, Function, Aggregate, protect_name, protect_names, + RegisteredTableExtension, _RegisteredExtensionType, get_schema_parser,) from tests.integration import (get_cluster, use_singledc, PROTOCOL_VERSION, get_server_versions, execute_until_pass, BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, @@ -1465,31 +1462,6 @@ def test_token(self): self.assertEqual(expected_node_count, len(tmap.ring)) cluster.shutdown() - def test_getting_replicas(self): - tokens = [MD5Token(i) for i in range(0, (2 ** 127 - 1), 2 ** 125)] - hosts = [Host("ip%d" % i, SimpleConvictionPolicy) for i in range(len(tokens))] - token_to_primary_replica = dict(zip(tokens, hosts)) - keyspace = KeyspaceMetadata("ks", True, "SimpleStrategy", {"replication_factor": "1"}) - metadata = Mock(spec=Metadata, keyspaces={'ks': keyspace}) - token_map = TokenMap(MD5Token, token_to_primary_replica, tokens, metadata) - - # tokens match node tokens exactly - for i, token in enumerate(tokens): - expected_host = hosts[(i + 1) % len(hosts)] - replicas = token_map.get_replicas("ks", token) - self.assertEqual(set(replicas), set([expected_host])) - - # shift the tokens back by one - for token, expected_host in zip(tokens, hosts): - replicas = token_map.get_replicas("ks", MD5Token(token.value - 1)) - self.assertEqual(set(replicas), set([expected_host])) - - # shift the tokens forward by one - for i, token in enumerate(tokens): - replicas = token_map.get_replicas("ks", MD5Token(token.value + 1)) - expected_host = hosts[(i + 1) % len(hosts)] - self.assertEqual(set(replicas), set([expected_host])) - class KeyspaceAlterMetadata(unittest.TestCase): """ diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index a9a4852617..1f425d94d6 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -33,7 +33,7 @@ UserType, KeyspaceMetadata, get_schema_parser, _UnknownStrategy, ColumnMetadata, TableMetadata, IndexMetadata, Function, Aggregate, - Metadata) + Metadata, TokenMap) from cassandra.policies import SimpleConvictionPolicy from cassandra.pool import Host @@ -302,6 +302,42 @@ def test_is_valid_name(self): self.assertEqual(is_valid_name(keyword), False) +class GetReplicasTest(unittest.TestCase): + def _get_replicas(self, token_klass): + tokens = [token_klass(i) for i in range(0, (2 ** 127 - 1), 2 ** 125)] + hosts = [Host("ip%d" % i, SimpleConvictionPolicy) for i in range(len(tokens))] + token_to_primary_replica = dict(zip(tokens, hosts)) + keyspace = KeyspaceMetadata("ks", True, "SimpleStrategy", {"replication_factor": "1"}) + metadata = Mock(spec=Metadata, keyspaces={'ks': keyspace}) + token_map = TokenMap(token_klass, token_to_primary_replica, tokens, metadata) + + # tokens match node tokens exactly + for i, token in enumerate(tokens): + expected_host = hosts[(i + 1) % len(hosts)] + replicas = token_map.get_replicas("ks", token) + self.assertEqual(set(replicas), {expected_host}) + + # shift the tokens back by one + for token, expected_host in zip(tokens, hosts): + replicas = token_map.get_replicas("ks", token_klass(token.value - 1)) + self.assertEqual(set(replicas), {expected_host}) + + # shift the tokens forward by one + for i, token in enumerate(tokens): + replicas = token_map.get_replicas("ks", token_klass(token.value + 1)) + expected_host = hosts[(i + 1) % len(hosts)] + self.assertEqual(set(replicas), {expected_host}) + + def test_murmur3_tokens(self): + self._get_replicas(Murmur3Token) + + def test_md5_tokens(self): + self._get_replicas(MD5Token) + + def test_bytes_tokens(self): + self._get_replicas(BytesToken) + + class Murmur3TokensTest(unittest.TestCase): def test_murmur3_init(self): From 4fa014313b68c21962012bd4e490a116d2fbb122 Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 21 Sep 2017 12:30:50 -0400 Subject: [PATCH 1244/2005] Don't join the thread if it hasn't been started --- CHANGELOG.rst | 1 + cassandra/io/libevreactor.py | 16 +++++++++++----- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 6878fca8a7..d82a53541c 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -11,6 +11,7 @@ Bug Fixes * Use of DCAwareRoundRobinPolicy raises NoHostAvailable exception (PYTHON-781) * Not create two sessions by default in CQLEngine (PYTHON-814) * Bug when subclassing AyncoreConnection (PYTHON-827) +* Rare exception when "sys.exit(0)" after query timeouts (PYTHON-752) 3.11.0 ====== diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index aadbe3a53c..dc1a6e6a91 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -63,6 +63,7 @@ def __init__(self): self._started = False self._shutdown = False self._lock = Lock() + self._lock_thread = Lock() self._thread = None @@ -94,9 +95,11 @@ def maybe_start(self): should_start = True if should_start: - self._thread = Thread(target=self._run_loop, name="event_loop") - self._thread.daemon = True - self._thread.start() + with self._lock_thread: + if not self._shutdown: + self._thread = Thread(target=self._run_loop, name="event_loop") + self._thread.daemon = True + self._thread.start() self._notifier.send() @@ -126,8 +129,11 @@ def _cleanup(self): watcher.stop() self.notify() # wake the timer watcher - log.debug("Waiting for event loop thread to join...") - self._thread.join(timeout=1.0) + + # PYTHON-752 Thread might have just been created and not started + with self._lock_thread: + self._thread.join(timeout=1.0) + if self._thread.is_alive(): log.warning( "Event loop thread could not be joined, so shutdown may not be clean. " From 4ef23f3d7b19efd6b23722036ef9586d206beb44 Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 3 Oct 2017 14:22:17 -0400 Subject: [PATCH 1245/2005] Added test for WhiteListRoundRobinPolicyTests --- tests/integration/standard/test_policies.py | 27 +++++++++++++++++---- tests/unit/test_policies.py | 16 ------------ 2 files changed, 22 insertions(+), 21 deletions(-) diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index 655d7265a0..c1ad12d720 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -19,19 +19,19 @@ except ImportError: import unittest # noqa -from cassandra.cluster import Cluster -from cassandra.policies import HostFilterPolicy, RoundRobinPolicy, \ - SimpleConvictionPolicy +from cassandra.cluster import Cluster, ExecutionProfile +from cassandra.policies import HostFilterPolicy, RoundRobinPolicy, SimpleConvictionPolicy, WhiteListRoundRobinPolicy from cassandra.pool import Host -from tests.integration import PROTOCOL_VERSION - +from tests.integration import PROTOCOL_VERSION, local from concurrent.futures import wait as wait_futures + def setup_module(): use_singledc() + class HostFilterPolicyTests(unittest.TestCase): def test_predicate_changes(self): @@ -74,3 +74,20 @@ def test_predicate_changes(self): response = session.execute("SELECT * from system.local") queried_hosts.update(response.response_future.attempted_hosts) self.assertEqual(queried_hosts, all_hosts) + + +class WhiteListRoundRobinPolicyTests(unittest.TestCase): + + @local + def test_only_connects_to_subset(self): + only_connect_hosts = {"127.0.0.1", "127.0.0.2"} + white_list = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(only_connect_hosts)) + cluster = Cluster(execution_profiles={"white_list": white_list}) + #cluster = Cluster(load_balancing_policy=WhiteListRoundRobinPolicy(only_connect_hosts)) + session = cluster.connect(wait_for_all_pools=True) + queried_hosts = set() + for _ in range(10): + response = session.execute('SELECT * from system.local', execution_profile="white_list") + queried_hosts.update(response.response_future.attempted_hosts) + queried_hosts = set(host.address for host in queried_hosts) + self.assertEqual(queried_hosts, only_connect_hosts) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index b82af1db70..5237a4b793 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -1232,22 +1232,6 @@ def test_hosts_with_hostname(self): self.assertEqual(policy.distance(host), HostDistance.LOCAL) - def test_deprecated(self): - import warnings - - warnings.resetwarnings() # in case we've instantiated one before - - # set up warning filters to allow all, set up restore when this test is done - filters_backup, warnings.filters = warnings.filters, [] - self.addCleanup(setattr, warnings, 'filters', filters_backup) - - with warnings.catch_warnings(record=True) as caught_warnings: - WhiteListRoundRobinPolicy([]) - self.assertEqual(len(caught_warnings), 1) - warning_message = caught_warnings[-1] - self.assertEqual(warning_message.category, DeprecationWarning) - self.assertIn('4.0', warning_message.message.args[0]) - class AddressTranslatorTest(unittest.TestCase): From 9869c2a044e5dd76309026437bcda5d01acf99c7 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 4 Oct 2017 12:42:10 -0400 Subject: [PATCH 1246/2005] PYTHON-751: Add IPV4Address/IPV6Address support for inet type (#828) * Add IPV4Address/IPV6Address support for inet type --- CHANGELOG.rst | 1 + cassandra/cqltypes.py | 19 ++++++++----- cassandra/encoder.py | 13 +++++++++ tests/__init__.py | 27 +++++++++++++++++++ tests/integration/__init__.py | 26 ++---------------- .../cqlengine/columns/test_validation.py | 1 - tests/integration/datatype_utils.py | 19 +++++++++++-- tests/integration/standard/test_cluster.py | 3 ++- tests/integration/standard/test_types.py | 24 ++++++++++++++++- tests/unit/test_cluster.py | 14 ++++------ 10 files changed, 103 insertions(+), 44 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index bad388ab7d..693b8658af 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,6 +4,7 @@ Features -------- * Send keyspace in QUERY, PREPARE, and BATCH messages (PYTHON-678) +* Add IPv4Address/IPv6Address support for inet types (PYTHON-751) Bug Fixes --------- diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 735bf2a9e8..fc6ef4595c 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -43,6 +43,8 @@ from uuid import UUID import warnings +if six.PY3: + import ipaddress from cassandra.marshal import (int8_pack, int8_unpack, int16_pack, int16_unpack, uint16_pack, uint16_unpack, uint32_pack, uint32_unpack, @@ -517,12 +519,17 @@ def deserialize(byts, protocol_version): @staticmethod def serialize(addr, protocol_version): - if ':' in addr: - return util.inet_pton(socket.AF_INET6, addr) - else: - # util.inet_pton could also handle, but this is faster - # since we've already determined the AF - return socket.inet_aton(addr) + try: + if ':' in addr: + return util.inet_pton(socket.AF_INET6, addr) + else: + # util.inet_pton could also handle, but this is faster + # since we've already determined the AF + return socket.inet_aton(addr) + except: + if six.PY3 and isinstance(addr, (ipaddress.IPv4Address, ipaddress.IPv6Address)): + return addr.packed + raise ValueError("can't interpret %r as an inet address" % (addr,)) class CounterColumnType(LongType): diff --git a/cassandra/encoder.py b/cassandra/encoder.py index 805b1917fe..881b21f532 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -29,6 +29,9 @@ from uuid import UUID import six +if six.PY3: + import ipaddress + from cassandra.util import (OrderedDict, OrderedMap, OrderedMapSerializedKey, sortedset, Time, Date) @@ -103,6 +106,8 @@ def __init__(self): memoryview: self.cql_encode_bytes, bytes: self.cql_encode_bytes, type(None): self.cql_encode_none, + ipaddress.IPv4Address: self.cql_encode_ipaddress, + ipaddress.IPv6Address: self.cql_encode_ipaddress }) def cql_encode_none(self, val): @@ -225,3 +230,11 @@ def cql_encode_all_types(self, val): if :attr:`~Encoder.mapping` does not contain an entry for the type. """ return self.mapping.get(type(val), self.cql_encode_object)(val) + + if six.PY3: + def cql_encode_ipaddress(self, val): + """ + Converts an ipaddress (IPV4Address, IPV6Address) to a CQL string. This + is suitable for ``inet`` type columns. + """ + return "'%s'" % val.compressed diff --git a/tests/__init__.py b/tests/__init__.py index c735c169bc..b84a8ed1d3 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -59,6 +59,33 @@ def is_monkey_patched(): return is_gevent_monkey_patched() or is_eventlet_monkey_patched() +EVENT_LOOP_MANAGER = os.getenv('EVENT_LOOP_MANAGER', "libev") +if "gevent" in EVENT_LOOP_MANAGER: + import gevent.monkey + gevent.monkey.patch_all() + from cassandra.io.geventreactor import GeventConnection + connection_class = GeventConnection +elif "eventlet" in EVENT_LOOP_MANAGER: + from eventlet import monkey_patch + monkey_patch() + + from cassandra.io.eventletreactor import EventletConnection + connection_class = EventletConnection +elif "async" in EVENT_LOOP_MANAGER: + from cassandra.io.asyncorereactor import AsyncoreConnection + connection_class = AsyncoreConnection +elif "twisted" in EVENT_LOOP_MANAGER: + from cassandra.io.twistedreactor import TwistedConnection + connection_class = TwistedConnection + +else: + try: + from cassandra.io.libevreactor import LibevConnection + connection_class = LibevConnection + except ImportError: + connection_class = None + + MONKEY_PATCH_LOOP = bool(os.getenv('MONKEY_PATCH_LOOP', False)) notwindows = unittest.skipUnless(not "Windows" in platform.system(), "This test is not adequate for windows") diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 9017b0ccc7..11a8f98e72 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -12,31 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. import os - -EVENT_LOOP_MANAGER = os.getenv('EVENT_LOOP_MANAGER', "libev") -if "gevent" in EVENT_LOOP_MANAGER: - import gevent.monkey - gevent.monkey.patch_all() - from cassandra.io.geventreactor import GeventConnection - connection_class = GeventConnection -elif "eventlet" in EVENT_LOOP_MANAGER: - from eventlet import monkey_patch - monkey_patch() - - from cassandra.io.eventletreactor import EventletConnection - connection_class = EventletConnection -elif "async" in EVENT_LOOP_MANAGER: - from cassandra.io.asyncorereactor import AsyncoreConnection - connection_class = AsyncoreConnection -elif "twisted" in EVENT_LOOP_MANAGER: - from cassandra.io.twistedreactor import TwistedConnection - connection_class = TwistedConnection - -else: - from cassandra.io.libevreactor import LibevConnection - connection_class = LibevConnection - from cassandra.cluster import Cluster + +from tests import connection_class, EVENT_LOOP_MANAGER Cluster.connection_class = connection_class try: diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 926e13dd0f..43eb0fe678 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -529,7 +529,6 @@ def test_default_zero_fields_validate(self): class TestAscii(BaseCassEngTestCase): - def test_min_length(self): """ Test arbitrary minimal lengths requirements. """ diff --git a/tests/integration/datatype_utils.py b/tests/integration/datatype_utils.py index dc89971a93..f2314467e8 100644 --- a/tests/integration/datatype_utils.py +++ b/tests/integration/datatype_utils.py @@ -15,6 +15,7 @@ from decimal import Decimal from datetime import datetime, date, time from uuid import uuid1, uuid4 +import six from cassandra.util import OrderedMap, Date, Time, sortedset, Duration @@ -90,7 +91,11 @@ def get_sample_data(): sample_data[datatype] = 3.4028234663852886e+38 elif datatype == 'inet': - sample_data[datatype] = '123.123.123.123' + sample_data[datatype] = ('123.123.123.123', '2001:db8:85a3:8d3:1319:8a2e:370:7348') + if six.PY3: + import ipaddress + sample_data[datatype] += (ipaddress.IPv4Address("123.123.123.123"), + ipaddress.IPv6Address('2001:db8:85a3:8d3:1319:8a2e:370:7348')) elif datatype == 'int': sample_data[datatype] = 2147483647 @@ -140,10 +145,20 @@ def get_sample(datatype): """ Helper method to access created sample data for primitive types """ - + if isinstance(SAMPLE_DATA[datatype], tuple): + return SAMPLE_DATA[datatype][0] return SAMPLE_DATA[datatype] +def get_all_samples(datatype): + """ + Helper method to access created sample data for primitive types + """ + if isinstance(SAMPLE_DATA[datatype], tuple): + return SAMPLE_DATA[datatype] + return SAMPLE_DATA[datatype], + + def get_collection_sample(collection_type, datatype): """ Helper method to access created sample data for collection types diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index dc2297824b..0ae7ae94b8 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -34,7 +34,7 @@ from cassandra.pool import Host from cassandra.query import SimpleStatement, TraceUnavailable, tuple_factory - +from tests import notwindows from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, CASSANDRA_VERSION, \ execute_until_pass, execute_with_long_wait_retry, get_node, MockLoggingHandler, get_unsupported_lower_protocol, \ get_unsupported_upper_protocol, protocolv5, local, CASSANDRA_IP @@ -476,6 +476,7 @@ def test_refresh_schema_type(self): cluster.shutdown() @local + @notwindows def test_refresh_schema_no_wait(self): contact_points = [CASSANDRA_IP] cluster = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=10, diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index a19f623bef..cda3c9379f 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -33,7 +33,7 @@ from tests.integration import use_singledc, PROTOCOL_VERSION, execute_until_pass, notprotocolv1, \ BasicSharedKeyspaceUnitTestCase, greaterthancass21, lessthancass30, greaterthanorequalcass3_10 from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, COLLECTION_TYPES, PRIMITIVE_DATATYPES_KEYS, \ - get_sample, get_collection_sample + get_sample, get_all_samples, get_collection_sample def setup_module(): @@ -161,8 +161,30 @@ def test_can_insert_primitive_datatypes(self): for expected, actual in zip(params, results): self.assertEqual(actual, expected) + # try the same thing sending one insert at the time + s.execute("TRUNCATE alltypes;") + for i, datatype in enumerate(PRIMITIVE_DATATYPES): + single_col_name = chr(start_index + i) + single_col_names = ["zz", single_col_name] + placeholders = ','.join(["%s"] * len(single_col_names)) + single_columns_string = ', '.join(single_col_names) + for j, data_sample in enumerate(get_all_samples(datatype)): + key = i + 1000 * j + single_params = (key, data_sample) + s.execute("INSERT INTO alltypes ({0}) VALUES ({1})".format(single_columns_string, placeholders), + single_params) + # verify data + result = s.execute("SELECT {0} FROM alltypes WHERE zz=%s".format(single_columns_string), (key,))[0][1] + compare_value = data_sample + if six.PY3: + import ipaddress + if isinstance(data_sample, ipaddress.IPv4Address) or isinstance(data_sample, ipaddress.IPv6Address): + compare_value = str(data_sample) + self.assertEqual(result, compare_value) + # try the same thing with a prepared statement placeholders = ','.join(["?"] * len(col_names)) + s.execute("TRUNCATE alltypes;") insert = s.prepare("INSERT INTO alltypes ({0}) VALUES ({1})".format(columns_string, placeholders)) s.execute(insert.bind(params)) diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 40924293fd..81a80079b1 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -27,11 +27,7 @@ from cassandra.query import SimpleStatement, named_tuple_factory, tuple_factory from cassandra.pool import Host from tests.unit.utils import mock_session_pools - -try: - from cassandra.io.libevreactor import LibevConnection -except ImportError: - LibevConnection = None # noqa +from tests import connection_class class ExceptionTypeTest(unittest.TestCase): @@ -129,9 +125,9 @@ def test_event_delay_timing(self, *_): class SessionTest(unittest.TestCase): def setUp(self): - if LibevConnection is None: + if connection_class is None: raise unittest.SkipTest('libev does not appear to be installed correctly') - LibevConnection.initialize_reactor() + connection_class.initialize_reactor() # TODO: this suite could be expanded; for now just adding a test covering a PR @mock_session_pools @@ -164,9 +160,9 @@ def test_default_serial_consistency_level(self, *_): class ExecutionProfileTest(unittest.TestCase): def setUp(self): - if LibevConnection is None: + if connection_class is None: raise unittest.SkipTest('libev does not appear to be installed correctly') - LibevConnection.initialize_reactor() + connection_class.initialize_reactor() def _verify_response_future_profile(self, rf, prof): self.assertEqual(rf._load_balancer, prof.load_balancing_policy) From 034dfc37fd06f2e8754ecb2339e6c288daaa010f Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 4 Oct 2017 12:44:05 -0400 Subject: [PATCH 1247/2005] PYTHON-833: Bump Cython dependency version to 0.27 (#835) * Bump Cython dependency version to 0.27 --- CHANGELOG.rst | 5 +++++ setup.py | 2 +- test-requirements.txt | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 693b8658af..4ef0871db6 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -21,6 +21,11 @@ Others ------ * Remove DeprecationWarning when using WhiteListRoundRobinPolicy (PYTHON-810) +Other +----- + +* Bump Cython dependency version to 0.27 (PYTHON-833) + 3.11.0 ====== July 24, 2017 diff --git a/setup.py b/setup.py index ef50cde69e..9f7c043709 100644 --- a/setup.py +++ b/setup.py @@ -395,7 +395,7 @@ def run_setup(extensions): # 1.) build_ext eats errors at compile time, letting the install complete while producing useful feedback # 2.) there could be a case where the python environment has cython installed but the system doesn't have build tools if pre_build_check(): - cython_dep = 'Cython>=0.20,!=0.25,<0.26' + cython_dep = 'Cython>=0.20,!=0.25,<0.28' user_specified_cython_version = os.environ.get('CASS_DRIVER_ALLOWED_CYTHON_VERSION') if user_specified_cython_version is not None: cython_dep = 'Cython==%s' % (user_specified_cython_version,) diff --git a/test-requirements.txt b/test-requirements.txt index c84faa8021..ca71b0dd71 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -11,5 +11,5 @@ pure-sasl twisted gevent>=1.0 eventlet -cython>=0.20,<0.25 +cython>=0.20,<0.28 packaging From 9738aa1ef7ab2c2048f92e54190d7fbf46017890 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 3 Oct 2017 10:02:34 -0400 Subject: [PATCH 1248/2005] add VIEW and CDC writetypes PYTHON-794 --- CHANGELOG.rst | 1 + cassandra/policies.py | 16 +++++++++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 4ef0871db6..7fbb74929a 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,6 +5,7 @@ Features -------- * Send keyspace in QUERY, PREPARE, and BATCH messages (PYTHON-678) * Add IPv4Address/IPv6Address support for inet types (PYTHON-751) +* WriteType.CDC and VIEW missing (PYTHON-794) Bug Fixes --------- diff --git a/cassandra/policies.py b/cassandra/policies.py index 76079a1526..cc471be51c 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -731,6 +731,18 @@ class WriteType(object): A lighweight-transaction write, such as "DELETE ... IF EXISTS". """ + VIEW = 6 + """ + This WriteType is only seen in results for requests that were unable to + complete MV operations. + """ + + CDC = 7 + """ + This WriteType is only seen in results for requests that were unable to + complete CDC operations. + """ + WriteType.name_to_value = { 'SIMPLE': WriteType.SIMPLE, @@ -738,7 +750,9 @@ class WriteType(object): 'UNLOGGED_BATCH': WriteType.UNLOGGED_BATCH, 'COUNTER': WriteType.COUNTER, 'BATCH_LOG': WriteType.BATCH_LOG, - 'CAS': WriteType.CAS + 'CAS': WriteType.CAS, + 'VIEW': WriteType.VIEW, + 'CDC': WriteType.CDC } From af303571a143e1b83da97feff02493d9e80b3db0 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 3 Oct 2017 14:50:57 -0400 Subject: [PATCH 1249/2005] WARN on Cluster.__init__ w contact points & no lbp PYTHON-812 --- CHANGELOG.rst | 1 + cassandra/cluster.py | 15 ++++++++++++++- tests/unit/test_cluster.py | 36 ++++++++++++++++++++++++++++++++++++ 3 files changed, 51 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 7fbb74929a..b6e41a230a 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -6,6 +6,7 @@ Features * Send keyspace in QUERY, PREPARE, and BATCH messages (PYTHON-678) * Add IPv4Address/IPv6Address support for inet types (PYTHON-751) * WriteType.CDC and VIEW missing (PYTHON-794) +* Warn on Cluster init if contact points are specified but LBP isn't (PYTHON-812) Bug Fixes --------- diff --git a/cassandra/cluster.py b/cassandra/cluster.py index d7aa70501b..2870b4c64a 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -312,6 +312,8 @@ class _ConfigMode(object): PROFILES = 2 +_UNSET_ARG = object() + class Cluster(object): """ The main class to use when interacting with a Cassandra cluster. @@ -733,7 +735,7 @@ def token_metadata_enabled(self, enabled): _listener_lock = None def __init__(self, - contact_points=["127.0.0.1"], + contact_points=_UNSET_ARG, port=9042, compression=True, auth_provider=None, @@ -770,7 +772,18 @@ def __init__(self, Any of the mutable Cluster attributes may be set as keyword arguments to the constructor. """ + if contact_points is not _UNSET_ARG and load_balancing_policy is None: + log.warn('Cluster.__init__ called with contact_points specified, ' + 'but no load_balancing_policy. In the next major ' + 'version, this will raise an error; please specify a ' + 'load balancing policy. ' + '(contact_points = {cp}, lbp = {lbp}' + ''.format(cp=contact_points, lbp=load_balancing_policy)) + if contact_points is not None: + if contact_points is _UNSET_ARG: + contact_points = ['127.0.0.1'] + if isinstance(contact_points, six.string_types): raise TypeError("contact_points should not be a string, it should be a sequence (e.g. list) of strings") diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 81a80079b1..7d7238376e 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -363,3 +363,39 @@ def test_no_profiles_same_name(self): # cannot add a profile added dynamically self.assertRaises(ValueError, cluster.add_execution_profile, 'two', ExecutionProfile()) + + @mock_session_pools + def test_warning_on_no_lbp_with_contact_points(self): + """ + Test that users are warned when they instantiate a Cluster object with + contact points but no load-balancing policy. + + @since 3.12.0 + @jira_ticket PYTHON-812 + @expected_result logs + + @test_category configuration + """ + with patch('cassandra.cluster.log') as patched_logger: + Cluster(contact_points=['1']) + patched_logger.warn.assert_called_once() + warning_message = patched_logger.warn.call_args[0][0] + self.assertIn('no load_balancing_policy', warning_message) + self.assertIn("contact_points = ['1']", warning_message) + self.assertIn('lbp = None', warning_message) + + @mock_session_pools + def test_no_warning_on_contact_points_with_lbp(self): + """ + Test that users aren't warned when they instantiate a Cluster object + with contact points and a load-balancing policy. + + @since 3.12.0 + @jira_ticket PYTHON-812 + @expected_result no logs + + @test_category configuration + """ + with patch('cassandra.cluster.log') as patched_logger: + Cluster(contact_points=['1'], load_balancing_policy=object()) + patched_logger.warn.assert_not_called() From 5c77d9392a6dd115bb2678d7bfc9dc8bba0882d3 Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 9 Aug 2017 17:31:12 -0400 Subject: [PATCH 1250/2005] Added some tests around network paritioning, closing connections and cluster._retry and idle connections with simulacron --- tests/integration/simulacron/__init__.py | 15 +- .../integration/simulacron/test_connection.py | 193 +++++++++++++++++- tests/integration/simulacron/test_policies.py | 1 - tests/integration/simulacron/utils.py | 14 +- tests/integration/standard/test_cluster.py | 35 ++-- 5 files changed, 228 insertions(+), 30 deletions(-) diff --git a/tests/integration/simulacron/__init__.py b/tests/integration/simulacron/__init__.py index b7ec7fb40a..842fa50348 100644 --- a/tests/integration/simulacron/__init__.py +++ b/tests/integration/simulacron/__init__.py @@ -11,8 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +from tests.integration.simulacron.utils import stop_simulacron, clear_queries -from tests.integration.simulacron.utils import stop_simulacron def teardown_package(): - stop_simulacron() \ No newline at end of file + stop_simulacron() + + +class SimulacronBase(unittest.TestCase): + def tearDown(self): + clear_queries() + stop_simulacron() diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index 0e5ee43eb4..bc2ae9f00a 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -25,20 +25,22 @@ from cassandra import OperationTimedOut from cassandra.cluster import (EXEC_PROFILE_DEFAULT, Cluster, ExecutionProfile, - _Scheduler) + _Scheduler, NoHostAvailable) from cassandra.policies import HostStateListener, RoundRobinPolicy from tests.integration import (CASSANDRA_VERSION, PROTOCOL_VERSION, requiressimulacron) from tests.integration.util import assert_quiescent_pool_state +from tests.integration.simulacron import SimulacronBase from tests.integration.simulacron.utils import (NO_THEN, PrimeOptions, prime_query, prime_request, start_and_prime_cluster_defaults, start_and_prime_singledc, - stop_simulacron) + clear_queries) class TrackDownListener(HostStateListener): - hosts_marked_down = [] + def __init__(self): + self.hosts_marked_down = [] def on_down(self, host): self.hosts_marked_down.append(host) @@ -50,8 +52,21 @@ def submit(self, fn, *args, **kwargs): self.called_functions.append(fn.__name__) return super(ThreadTracker, self).submit(fn, *args, **kwargs) + +class OrderedRoundRobinPolicy(RoundRobinPolicy): + + def make_query_plan(self, working_keyspace=None, query=None): + self._position += 1 + + hosts = [] + for _ in range(10): + hosts.extend(sorted(self._live_hosts, key=lambda x : x.address)) + + return hosts + + @requiressimulacron -class ConnectionTest(unittest.TestCase): +class ConnectionTests(SimulacronBase): def test_heart_beat_timeout(self): """ @@ -64,7 +79,7 @@ def test_heart_beat_timeout(self): @test_category metadata """ number_of_dcs = 3 - nodes_per_dc = 100 + nodes_per_dc = 20 query_to_prime = "INSERT INTO test3rf.test (k, v) VALUES (0, 1);" @@ -72,16 +87,15 @@ def test_heart_beat_timeout(self): idle_heartbeat_interval = 1 start_and_prime_cluster_defaults(number_of_dcs, nodes_per_dc, CASSANDRA_VERSION) - self.addCleanup(stop_simulacron) listener = TrackDownListener() - executor = ThreadTracker(max_workers=16) + executor = ThreadTracker(max_workers=8) # We need to disable compression since it's not supported in simulacron cluster = Cluster(compression=False, idle_heartbeat_interval=idle_heartbeat_interval, idle_heartbeat_timeout=idle_heartbeat_timeout, - executor_threads=16, + executor_threads=8, execution_profiles={ EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=RoundRobinPolicy())}) self.addCleanup(cluster.shutdown) @@ -112,7 +126,7 @@ def test_heart_beat_timeout(self): # We allow from some extra time for all the hosts to be to on_down # The callbacks should start happening after idle_heartbeat_timeout + idle_heartbeat_interval - time.sleep((idle_heartbeat_timeout + idle_heartbeat_interval) * 2) + time.sleep((idle_heartbeat_timeout + idle_heartbeat_interval) * 2.5) for host in cluster.metadata.all_hosts(): self.assertIn(host, listener.hosts_marked_down) @@ -133,7 +147,6 @@ def test_callbacks_and_pool_when_oto(self): @test_category metadata """ start_and_prime_singledc() - self.addCleanup(stop_simulacron) cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False) session = cluster.connect() @@ -155,3 +168,163 @@ def test_callbacks_and_pool_when_oto(self): # PYTHON-630 -- only the errback should be called errback.assert_called_once() callback.assert_not_called() + + def test_close_when_query(self): + """ + Test to ensure the driver behaves correctly if the connection is closed + just when querying + @since 3.12 + @expected_result NoHostAvailable is risen + + @test_category connection + """ + start_and_prime_singledc() + + cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False) + session = cluster.connect() + self.addCleanup(cluster.shutdown) + + query_to_prime = "SELECT * from testkesypace.testtable" + + for close_type in ("disconnect", "shutdown_read", "shutdown_write"): + then = { + "result": "close_connection", + "delay_in_ms": 0, + "close_type": close_type, + "scope": "connection" + } + + prime_query(query_to_prime, then=then) + self.assertRaises(NoHostAvailable, session.execute, query_to_prime) + + def test_retry_after_defunct(self): + """ + We test cluster._retry is called if an the connection is defunct + in the middle of a query + + Finally we verify the driver recovers correctly in the event + of a network partition + + @since 3.12 + @expected_result the driver is able to query even if a host is marked + as down in the middle of the query, it will go to the next one if the timeout + hasn't expired + + @test_category connection + """ + number_of_dcs = 3 + nodes_per_dc = 2 + + query_to_prime = "INSERT INTO test3rf.test (k, v) VALUES (0, 1);" + + idle_heartbeat_timeout = 1 + idle_heartbeat_interval = 5 + + simulacron_cluster = start_and_prime_cluster_defaults(number_of_dcs, nodes_per_dc, CASSANDRA_VERSION) + + dc_ids = sorted(simulacron_cluster.data_center_ids) + last_host = dc_ids.pop() + prime_query(query_to_prime, + cluster_name="{}/{}".format(simulacron_cluster.cluster_name, last_host)) + + roundrobin_lbp = OrderedRoundRobinPolicy() + cluster = Cluster(compression=False, + idle_heartbeat_interval=idle_heartbeat_interval, + idle_heartbeat_timeout=idle_heartbeat_timeout, + execution_profiles={ + EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=roundrobin_lbp)}) + + session = cluster.connect(wait_for_all_pools=True) + self.addCleanup(cluster.shutdown) + + # This simulates we only have access to one DC + for dc_id in dc_ids: + datacenter_path = "{}/{}".format(simulacron_cluster.cluster_name, dc_id) + prime_query(query_to_prime, then=NO_THEN, cluster_name=datacenter_path) + prime_request(PrimeOptions(then=NO_THEN, cluster_name=datacenter_path)) + + # Only the last datacenter will respond, therefore the first host won't + # We want to make sure the returned hosts are 127.0.0.1, 127.0.0.2, ... 127.0.0.8 + roundrobin_lbp._position = 0 + + # After 3 + 1 seconds the connection should be marked and down and another host retried + response_future = session.execute_async(query_to_prime, timeout=4 * idle_heartbeat_interval + + idle_heartbeat_timeout) + response_future.result() + self.assertGreater(len(response_future.attempted_hosts), 1) + + # No error should be raised here since the hosts have been marked + # as down and there's still 1 DC available + for _ in range(10): + session.execute(query_to_prime) + + # Might take some time to close the previous connections and reconnect + time.sleep(10) + assert_quiescent_pool_state(self, cluster) + clear_queries() + + time.sleep(10) + assert_quiescent_pool_state(self, cluster) + + def test_idle_connection_is_not_closed(self): + """ + Test to ensure that the connections aren't closed if they are idle + @since 3.12 + @jira_ticket PYTHON-573 + @expected_result the connections aren't closed nor the hosts are + set to down if the connection is idle + + @test_category connection + """ + start_and_prime_singledc() + + idle_heartbeat_timeout = 1 + idle_heartbeat_interval = 1 + + listener = TrackDownListener() + cluster = Cluster(compression=False, + idle_heartbeat_interval=idle_heartbeat_interval, + idle_heartbeat_timeout=idle_heartbeat_timeout) + session = cluster.connect(wait_for_all_pools=True) + cluster.register_listener(listener) + + self.addCleanup(cluster.shutdown) + + time.sleep(20) + + self.assertEqual(listener.hosts_marked_down, []) + + def test_host_is_not_set_to_down_after_query_oto(self): + """ + Test to ensure that the connections aren't closed if there's an + OperationTimedOut in a normal query. This should only happen from the + heart beat thread (in the case of a OperationTimedOut) with the default + configuration + @since 3.12 + @expected_result the connections aren't closed nor the hosts are + set to down + + @test_category connection + """ + start_and_prime_singledc() + + query_to_prime = "SELECT * FROM madeup_keyspace.madeup_table" + + prime_query(query_to_prime, then=NO_THEN) + + listener = TrackDownListener() + cluster = Cluster(compression=False) + session = cluster.connect(wait_for_all_pools=True) + cluster.register_listener(listener) + + futures = [] + for _ in range(10): + future = session.execute_async(query_to_prime) + futures.append(future) + + for f in futures: + f._event.wait() + self.assertIsInstance(f._final_exception, OperationTimedOut) + + self.assertEqual(listener.hosts_marked_down, []) + assert_quiescent_pool_state(self, cluster) diff --git a/tests/integration/simulacron/test_policies.py b/tests/integration/simulacron/test_policies.py index 98ba62eaee..5f1191e149 100644 --- a/tests/integration/simulacron/test_policies.py +++ b/tests/integration/simulacron/test_policies.py @@ -22,7 +22,6 @@ from cassandra.policies import ConstantSpeculativeExecutionPolicy, RoundRobinPolicy from tests.integration import PROTOCOL_VERSION, greaterthancass21, requiressimulacron, SIMULACRON_JAR -from tests import notwindows from tests.integration.simulacron.utils import start_and_prime_singledc, prime_query, \ stop_simulacron, NO_THEN, clear_queries diff --git a/tests/integration/simulacron/utils.py b/tests/integration/simulacron/utils.py index 409d9d6b4c..716b8996a2 100644 --- a/tests/integration/simulacron/utils.py +++ b/tests/integration/simulacron/utils.py @@ -17,6 +17,7 @@ from tests.integration import CASSANDRA_VERSION, SIMULACRON_JAR import subprocess import time +import nose DEFAULT_CLUSTER = "python_simulacron_cluster" @@ -280,7 +281,7 @@ def start_and_prime_singledc(cluster_name=DEFAULT_CLUSTER): :param cluster_name: name of the cluster to start and prime :return: """ - start_and_prime_cluster_defaults(number_of_dc=1, nodes_per_dc=3, cluster_name=cluster_name) + return start_and_prime_cluster_defaults(number_of_dc=1, nodes_per_dc=3, cluster_name=cluster_name) def start_and_prime_cluster_defaults(number_of_dc=1, nodes_per_dc=3, version=None, cluster_name=DEFAULT_CLUSTER): @@ -291,9 +292,11 @@ def start_and_prime_cluster_defaults(number_of_dc=1, nodes_per_dc=3, version=Non """ start_simulacron() data_centers = ",".join([str(nodes_per_dc)] * number_of_dc) - prime_cluster(data_centers=data_centers, version=version, cluster_name=cluster_name) + simulacron_cluster = prime_cluster(data_centers=data_centers, version=version, cluster_name=cluster_name) prime_driver_defaults() + return simulacron_cluster + default_column_types = { "key": "bigint", @@ -316,6 +319,13 @@ def prime_query(query, rows=default_rows, column_types=default_column_types, whe Shortcut function for priming a query :return: """ + # If then is set, then rows and column_types should not + if then: + nose.tools.assert_equal(rows, default_rows) + nose.tools.assert_equal(column_types, default_column_types) + rows=None + column_types=None + query = PrimeQuery(query, rows=rows, column_types=column_types, when=when, then=then, cluster_name=cluster_name) response = prime_request(query) return response diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index dc2297824b..fd6c915071 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -30,6 +30,7 @@ from cassandra.policies import (RoundRobinPolicy, ExponentialReconnectionPolicy, RetryPolicy, SimpleConvictionPolicy, HostDistance, AddressTranslator, TokenAwarePolicy, HostFilterPolicy) +from cassandra import ConsistencyLevel from cassandra.pool import Host from cassandra.query import SimpleStatement, TraceUnavailable, tuple_factory @@ -559,20 +560,13 @@ def test_trace(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect() - def check_trace(trace): - self.assertIsNotNone(trace.request_type) - self.assertIsNotNone(trace.duration) - self.assertIsNotNone(trace.started_at) - self.assertIsNotNone(trace.coordinator) - self.assertIsNotNone(trace.events) - result = session.execute( "SELECT * FROM system.local", trace=True) - check_trace(result.get_query_trace()) + self._check_trace(result.get_query_trace()) query = "SELECT * FROM system.local" statement = SimpleStatement(query) result = session.execute(statement, trace=True) - check_trace(result.get_query_trace()) + self._check_trace(result.get_query_trace()) query = "SELECT * FROM system.local" statement = SimpleStatement(query) @@ -582,7 +576,7 @@ def check_trace(trace): statement2 = SimpleStatement(query) future = session.execute_async(statement2, trace=True) future.result() - check_trace(future.get_query_trace()) + self._check_trace(future.get_query_trace()) statement2 = SimpleStatement(query) future = session.execute_async(statement2) @@ -592,7 +586,7 @@ def check_trace(trace): prepared = session.prepare("SELECT * FROM system.local") future = session.execute_async(prepared, parameters=(), trace=True) future.result() - check_trace(future.get_query_trace()) + self._check_trace(future.get_query_trace()) cluster.shutdown() def test_trace_unavailable(self): @@ -623,7 +617,7 @@ def test_trace_unavailable(self): try: result = future.get_query_trace(-1.0) # In case the result has time to come back before this timeout due to a race condition - self.check_trace(result) + self._check_trace(result) except TraceUnavailable: break else: @@ -635,7 +629,7 @@ def test_trace_unavailable(self): try: result = future.get_query_trace(max_wait=120) # In case the result has been set check the trace - self.check_trace(result) + self._check_trace(result) except TraceUnavailable: break else: @@ -1044,10 +1038,13 @@ def test_replicas_are_queried(self): WHERE k1 = ? AND k2 = ? AND k3 = ? AND k4 = ?""") for i in range(10): result = session.execute(prepared, (i, i, i, i), trace=True) - queried_hosts = self._assert_replica_queried(result.get_query_trace(), only_replicas=True) + trace = result.response_future.get_query_trace(query_cl=ConsistencyLevel.ALL) + queried_hosts = self._assert_replica_queried(trace, only_replicas=True) last_i = i only_replica = queried_hosts.pop() + log = logging.getLogger(__name__) + log.info("The only replica found was: {}".format(only_replica)) available_hosts = [host for host in ["127.0.0.1", "127.0.0.2", "127.0.0.3"] if host != only_replica] with Cluster(contact_points=available_hosts, protocol_version=PROTOCOL_VERSION, @@ -1059,7 +1056,8 @@ def test_replicas_are_queried(self): WHERE k1 = ? AND k2 = ? AND k3 = ? AND k4 = ?""") for _ in range(10): result = session.execute(prepared, (last_i, last_i, last_i, last_i), trace=True) - self._assert_replica_queried(result.get_query_trace(), only_replicas=False) + trace = result.response_future.get_query_trace(query_cl=ConsistencyLevel.ALL) + self._assert_replica_queried(trace, only_replicas=False) session.execute('''DROP TABLE test1rf.table_with_big_key''') @@ -1073,6 +1071,13 @@ def _assert_replica_queried(self, trace, only_replicas=True): self.assertGreater(len(queried_hosts), 1, "The host queried was {}".format(queried_hosts)) return queried_hosts + def _check_trace(self, trace): + self.assertIsNotNone(trace.request_type) + self.assertIsNotNone(trace.duration) + self.assertIsNotNone(trace.started_at) + self.assertIsNotNone(trace.coordinator) + self.assertIsNotNone(trace.events) + class LocalHostAdressTranslator(AddressTranslator): From d94cece1be224493581b43f6c158f90cf4c3bee8 Mon Sep 17 00:00:00 2001 From: Josh McKenzie Date: Fri, 30 Jun 2017 22:57:03 -0500 Subject: [PATCH 1251/2005] add CDCWriteException --- CHANGELOG.rst | 1 + cassandra/__init__.py | 8 ++++++++ cassandra/protocol.py | 5 +++++ docs/api/cassandra.rst | 3 +++ 4 files changed, 17 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index b6e41a230a..7f0beb9ea8 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -37,6 +37,7 @@ Features -------- * Add idle_heartbeat_timeout cluster option to tune how long to wait for heartbeat responses. (PYTHON-762) * Add HostFilterPolicy (PYTHON-761) +* Add new exception type for CDC (PYTHON-837) Bug Fixes --------- diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 678663cccd..28194a6636 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -387,6 +387,14 @@ def __init__(self, message, write_type=None, **kwargs): self.write_type = write_type +class CDCWriteFailure(RequestExecutionException): + """ + Hit limit on data in CDC folder, writes are rejected + """ + def __init__(self, message): + Exception.__init__(self, message) + + class CoordinationFailure(RequestExecutionException): """ Replicas sent a failure to the coordinator. diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 2901654001..6ba8905357 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -331,6 +331,11 @@ def to_exception(self): return WriteFailure(self.summary_msg(), **self.info) +class CDCWriteException(RequestExecutionException): + summary = 'Failed to execute write due to CDC space exhaustion.' + error_code = 0x1600 + + class SyntaxException(RequestValidationException): summary = 'Syntax error in CQL query' error_code = 0x2000 diff --git a/docs/api/cassandra.rst b/docs/api/cassandra.rst index d46aae56cb..b18fdc20db 100644 --- a/docs/api/cassandra.rst +++ b/docs/api/cassandra.rst @@ -31,6 +31,9 @@ .. autoexception:: RequestExecutionException() :members: +.. autoexception:: CDCWriteException() + :members: + .. autoexception:: Unavailable() :members: From cd3085e6c5344e2bfb82ce16bd97d2dade18f2d8 Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 6 Sep 2017 18:11:01 -0400 Subject: [PATCH 1252/2005] Added tests for PYTHON-808 --- .../standard/test_prepared_statements.py | 131 +++++++++++++++--- 1 file changed, 111 insertions(+), 20 deletions(-) diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index a3a291cf69..6be0d7a32a 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -24,7 +24,7 @@ from cassandra import ConsistencyLevel from cassandra.cluster import Cluster from cassandra.query import PreparedStatement, UNSET_VALUE, tuple_factory -from tests.integration import get_server_versions +from tests.integration import get_server_versions, greaterthanorequalcass40, BasicSharedKeyspaceUnitTestCase def setup_module(): @@ -390,8 +390,21 @@ def test_raise_error_on_prepared_statement_execution_dropped_table(self): with self.assertRaises(InvalidRequest): self.session.execute(prepared, [0]) - # TODO revisit this test, it on hold now due to CASSANDRA-10786 - @unittest.skip + +@greaterthanorequalcass40 +class PreparedStatementInvalidationTest(BasicSharedKeyspaceUnitTestCase): + + def setUp(self): + self.table_name = "{}.prepared_statement_invalidation_test".format(self.keyspace_name) + self.session.execute("CREATE TABLE {} (a int PRIMARY KEY, b int, d int);".format(self.table_name)) + self.session.execute("INSERT INTO {} (a, b, d) VALUES (1, 1, 1);".format(self.table_name)) + self.session.execute("INSERT INTO {} (a, b, d) VALUES (2, 2, 2);".format(self.table_name)) + self.session.execute("INSERT INTO {} (a, b, d) VALUES (3, 3, 3);".format(self.table_name)) + self.session.execute("INSERT INTO {} (a, b, d) VALUES (4, 4, 4);".format(self.table_name)) + + def tearDown(self): + self.session.execute("DROP TABLE {}".format(self.table_name)) + def test_invalidated_result_metadata(self): """ Tests to make sure cached metadata is updated when an invalidated prepared statement is reprepared. @@ -402,29 +415,107 @@ def test_invalidated_result_metadata(self): Prior to this fix, the request would blow up with a protocol error when the result was decoded expecting a different number of columns. """ - s = self.session - s.result_factory = tuple_factory - - table = "test1rf.%s" % self._testMethodName.lower() - - s.execute("DROP TABLE IF EXISTS %s" % table) - s.execute("CREATE TABLE %s (k int PRIMARY KEY, a int, b int, c int)" % table) - s.execute("INSERT INTO %s (k, a, b, c) VALUES (0, 0, 0, 0)" % table) - - wildcard_prepared = s.prepare("SELECT * FROM %s" % table) + wildcard_prepared = self.session.prepare("SELECT * FROM {}".format(self.table_name)) original_result_metadata = wildcard_prepared.result_metadata - self.assertEqual(len(original_result_metadata), 4) + self.assertEqual(len(original_result_metadata), 3) - r = s.execute(wildcard_prepared) - self.assertEqual(r[0], (0, 0, 0, 0)) + r = self.session.execute(wildcard_prepared) + self.assertEqual(r[0], (1, 1, 1)) - s.execute("ALTER TABLE %s DROP c" % table) + self.session.execute("ALTER TABLE {} DROP d".format(self.table_name)) # Get a bunch of requests in the pipeline with varying states of result_meta, reprepare, resolved - futures = set(s.execute_async(wildcard_prepared.bind(None)) for _ in range(200)) + futures = set(self.session.execute_async(wildcard_prepared.bind(None)) for _ in range(200)) for f in futures: + self.assertEqual(f.result()[0], (1, 1)) - self.assertEqual(f.result()[0], (0, 0, 0)) self.assertIsNot(wildcard_prepared.result_metadata, original_result_metadata) - s.execute("DROP TABLE %s" % table) + def test_prepared_id_is_update(self): + """ + Tests that checks the query id from the prepared statement + is updated properly if the table that the prepared statement is querying + is altered. + + @since 3.12 + @jira_ticket PYTHON-808 + + The query id from the prepared statment must have changed + """ + prepared_statement = self.session.prepare("SELECT * from {} WHERE a = ?".format(self.table_name)) + id_before = prepared_statement.query_id + + self.session.execute("ALTER TABLE {} ADD c int".format(self.table_name)) + bound_statement = prepared_statement.bind((1, )) + self.session.execute(bound_statement) + + id_after = prepared_statement.query_id + + self.assertNotEqual(id_before, id_after) + + def test_prepared_id_is_updated_across_pages(self): + """ + Test that checks that the query id from the prepared statement + is updated if the table hat the prepared statement is querying + is altered while fetching pages in a single query. + Then it checks that the updated rows have the expected result. + + @since 3.12 + @jira_ticket PYTHON-808 + """ + prepared_statement = self.session.prepare("SELECT * from {}".format(self.table_name)) + id_before = prepared_statement.query_id + + prepared_statement.fetch_size = 2 + result = self.session.execute(prepared_statement.bind((None))) + + + self.assertTrue(result.has_more_pages) + + self.session.execute("ALTER TABLE {} ADD c int".format(self.table_name)) + + result_set = set(x for x in ((1, 1, 1),(2, 2, 2), (3, 3, None, 3), (4, 4, None, 4))) + expected_result_set = set(row for row in result) + + id_after = prepared_statement.query_id + + self.assertEqual(result_set, expected_result_set) + self.assertNotEqual(id_before, id_after) + + def test_prepare_id_is_updated_across_session(self): + """ + Test that checks that the query id from the prepared statement + is updated if the table hat the prepared statement is querying + is altered by a different session + + @since 3.12 + @jira_ticket PYTHON-808 + """ + one_cluster = Cluster() + one_session = one_cluster.connect() + self.addCleanup(one_cluster.shutdown) + + stm = "SELECT * from {} WHERE a = ?".format(self.table_name) + one_prepared_stm = one_session.prepare(stm) + + one_id_before = one_prepared_stm.query_id + + self.session.execute("ALTER TABLE {} ADD c int".format(self.table_name)) + one_session.execute(one_prepared_stm, (1, )) + + one_id_after = one_prepared_stm.query_id + self.assertNotEqual(one_id_before, one_id_after) + + def test_not_reprepare_invalid_statements(self): + """ + Test that checks that an InvalidRequest is arisen if a column + expected by the prepared statement is dropped. + + @since 3.12 + @jira_ticket PYTHON-808 + """ + prepared_statement = self.session.prepare( + "SELECT a, b, d FROM {} WHERE a = ?".format(self.table_name)) + self.session.execute("ALTER TABLE {} DROP d".format(self.table_name)) + with self.assertRaises(InvalidRequest): + self.session.execute(prepared_statement.bind((1, ))) From 1429349a195f5e71b4300c30453598d571aec41e Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 13 Sep 2017 17:48:15 -0400 Subject: [PATCH 1253/2005] add prepared metadata hash to protocol --- cassandra/__init__.py | 4 +++ cassandra/cluster.py | 16 +++++---- cassandra/marshal.py | 2 +- cassandra/protocol.py | 35 +++++++++++++------ cassandra/query.py | 12 ++++--- tests/integration/__init__.py | 20 ++++++----- .../standard/test_prepared_statements.py | 28 ++++++++------- 7 files changed, 73 insertions(+), 44 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 678663cccd..6bb228dc07 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -200,6 +200,10 @@ def uses_int_query_flags(cls, version): def uses_prepare_flags(cls, version): return version >= cls.V5 + @classmethod + def uses_prepared_metadata(cls, version): + return version >= cls.V5 + @classmethod def uses_error_code_map(cls, version): return version >= cls.V5 diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 2870b4c64a..d9e8393f2b 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2163,7 +2163,8 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time message = ExecuteMessage( prepared_statement.query_id, query.values, cl, serial_cl, fetch_size, - timestamp=timestamp, skip_meta=bool(prepared_statement.result_metadata)) + timestamp=timestamp, skip_meta=bool(prepared_statement.result_metadata), + result_metadata_id=prepared_statement.result_metadata_id) elif isinstance(query, BatchStatement): if self._protocol_version < 2: raise UnsupportedOperation( @@ -2286,15 +2287,15 @@ def prepare(self, query, custom_payload=None, keyspace=None): future = ResponseFuture(self, message, query=None, timeout=self.default_timeout) try: future.send_request() - query_id, bind_metadata, pk_indexes, result_metadata = future.result() + query_id, bind_metadata, pk_indexes, result_metadata, result_metadata_id = future.result() except Exception: log.exception("Error preparing query:") raise prepared_keyspace = keyspace if keyspace else self.keyspace prepared_statement = PreparedStatement.from_message( - query_id, bind_metadata, pk_indexes, self.cluster.metadata, query, prepared_keyspace, - self._protocol_version, result_metadata) + query_id, bind_metadata, pk_indexes, self.cluster.metadata, query, self.keyspace, + self._protocol_version, result_metadata, result_metadata_id) prepared_statement.custom_payload = future.custom_payload self.cluster.add_prepared(query_id, prepared_statement) @@ -3784,8 +3785,11 @@ def _execute_after_prepare(self, host, connection, pool, response): if self.prepared_statement: # result metadata is the only thing that could have # changed from an alter - _, _, _, result_metadata = response.results - self.prepared_statement.result_metadata = result_metadata + (_, _, _, + self.prepared_statement.result_metadata, + new_metadata_id) = response.results + if new_metadata_id is not None: + self.prepared_statement.result_metadata_id = new_metadata_id # use self._query to re-use the same host and # at the same time properly borrow the connection diff --git a/cassandra/marshal.py b/cassandra/marshal.py index f8f5123033..6d04ea25c9 100644 --- a/cassandra/marshal.py +++ b/cassandra/marshal.py @@ -154,4 +154,4 @@ def vints_pack(values): revbytes.append(abs(v)) revbytes.reverse() - return six.binary_type(revbytes) \ No newline at end of file + return six.binary_type(revbytes) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 2901654001..26f8dbeea5 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -629,7 +629,7 @@ def recv_body(cls, f, protocol_version, user_type_map, result_metadata): if kind == RESULT_KIND_VOID: results = None elif kind == RESULT_KIND_ROWS: - paging_state, col_types, results = cls.recv_results_rows( + paging_state, col_types, results, result_metadata_id = cls.recv_results_rows( f, protocol_version, user_type_map, result_metadata) elif kind == RESULT_KIND_SET_KEYSPACE: ksname = read_string(f) @@ -644,7 +644,7 @@ def recv_body(cls, f, protocol_version, user_type_map, result_metadata): @classmethod def recv_results_rows(cls, f, protocol_version, user_type_map, result_metadata): - paging_state, column_metadata = cls.recv_results_metadata(f, user_type_map) + paging_state, column_metadata, result_metadata_id = cls.recv_results_metadata(f, user_type_map) column_metadata = column_metadata or result_metadata rowcount = read_int(f) rows = [cls.recv_row(f, len(column_metadata)) for _ in range(rowcount)] @@ -664,13 +664,17 @@ def recv_results_rows(cls, f, protocol_version, user_type_map, result_metadata): raise DriverException('Failed decoding result column "%s" of type %s: %s' % (colnames[i], coltypes[i].cql_parameterized_type(), str(e))) - return paging_state, coltypes, (colnames, parsed_rows) + return paging_state, coltypes, (colnames, parsed_rows), result_metadata_id @classmethod def recv_results_prepared(cls, f, protocol_version, user_type_map): query_id = read_binary_string(f) - bind_metadata, pk_indexes, result_metadata = cls.recv_prepared_metadata(f, protocol_version, user_type_map) - return query_id, bind_metadata, pk_indexes, result_metadata + if ProtocolVersion.uses_prepared_metadata(protocol_version): + result_metadata_id = read_binary_string(f) + else: + result_metadata_id = None + bind_metadata, pk_indexes, result_metadata, _ = cls.recv_prepared_metadata(f, protocol_version, user_type_map) + return query_id, bind_metadata, pk_indexes, result_metadata, result_metadata_id @classmethod def recv_results_metadata(cls, f, user_type_map): @@ -682,9 +686,14 @@ def recv_results_metadata(cls, f, user_type_map): else: paging_state = None + if flags & 0x0008: + result_metadata_id = read_binary_string(f) + else: + result_metadata_id = None + no_meta = bool(flags & cls._NO_METADATA_FLAG) if no_meta: - return paging_state, [] + return paging_state, [], result_metadata_id glob_tblspec = bool(flags & cls._FLAGS_GLOBAL_TABLES_SPEC) if glob_tblspec: @@ -701,7 +710,7 @@ def recv_results_metadata(cls, f, user_type_map): colname = read_string(f) coltype = cls.read_type(f, user_type_map) column_metadata.append((colksname, colcfname, colname, coltype)) - return paging_state, column_metadata + return paging_state, column_metadata, result_metadata_id @classmethod def recv_prepared_metadata(cls, f, protocol_version, user_type_map): @@ -729,10 +738,10 @@ def recv_prepared_metadata(cls, f, protocol_version, user_type_map): bind_metadata.append(ColumnMetadata(colksname, colcfname, colname, coltype)) if protocol_version >= 2: - _, result_metadata = cls.recv_results_metadata(f, user_type_map) - return bind_metadata, pk_indexes, result_metadata + _, result_metadata, result_metadata_id = cls.recv_results_metadata(f, user_type_map) + return bind_metadata, pk_indexes, result_metadata, result_metadata_id else: - return bind_metadata, pk_indexes, None + return bind_metadata, pk_indexes, None, None @classmethod def recv_results_schema_change(cls, f, protocol_version): @@ -820,7 +829,8 @@ class ExecuteMessage(_MessageType): name = 'EXECUTE' def __init__(self, query_id, query_params, consistency_level, serial_consistency_level=None, fetch_size=None, - paging_state=None, timestamp=None, skip_meta=False): + paging_state=None, timestamp=None, skip_meta=False, + result_metadata_id=None): self.query_id = query_id self.query_params = query_params self.consistency_level = consistency_level @@ -829,9 +839,12 @@ def __init__(self, query_id, query_params, consistency_level, self.paging_state = paging_state self.timestamp = timestamp self.skip_meta = skip_meta + self.result_metadata_id = result_metadata_id def send_body(self, f, protocol_version): write_string(f, self.query_id) + if ProtocolVersion.uses_prepared_metadata(protocol_version): + write_string(f, self.result_metadata_id) if protocol_version == 1: if self.serial_consistency_level: raise UnsupportedOperation( diff --git a/cassandra/query.py b/cassandra/query.py index a3dcc1d85b..ab58f66b26 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -392,12 +392,13 @@ class PreparedStatement(object): query_id = None query_string = None result_metadata = None + result_metadata_id = None routing_key_indexes = None _routing_key_index_set = None serial_consistency_level = None def __init__(self, column_metadata, query_id, routing_key_indexes, query, - keyspace, protocol_version, result_metadata): + keyspace, protocol_version, result_metadata, result_metadata_id): self.column_metadata = column_metadata self.query_id = query_id self.routing_key_indexes = routing_key_indexes @@ -405,13 +406,15 @@ def __init__(self, column_metadata, query_id, routing_key_indexes, query, self.keyspace = keyspace self.protocol_version = protocol_version self.result_metadata = result_metadata + self.result_metadata_id = result_metadata_id self.is_idempotent = False @classmethod def from_message(cls, query_id, column_metadata, pk_indexes, cluster_metadata, - query, prepared_keyspace, protocol_version, result_metadata): + query, prepared_keyspace, protocol_version, result_metadata, + result_metadata_id): if not column_metadata: - return PreparedStatement(column_metadata, query_id, None, query, prepared_keyspace, protocol_version, result_metadata) + return PreparedStatement(column_metadata, query_id, None, query, prepared_keyspace, protocol_version, result_metadata, result_metadata_id) if pk_indexes: routing_key_indexes = pk_indexes @@ -436,7 +439,8 @@ def from_message(cls, query_id, column_metadata, pk_indexes, cluster_metadata, pass # statement; just leave routing_key_indexes as None return PreparedStatement(column_metadata, query_id, routing_key_indexes, - query, prepared_keyspace, protocol_version, result_metadata) + query, prepared_keyspace, protocol_version, result_metadata, + result_metadata_id) def bind(self, values): """ diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 11a8f98e72..3194281596 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -170,14 +170,15 @@ def set_default_beta_flag_true(): def get_default_protocol(): - if Version(CASSANDRA_VERSION) >= Version('4.0'): + version = Version(CASSANDRA_VERSION) + if version >= Version('4.0'): set_default_beta_flag_true() return 5 - if Version(CASSANDRA_VERSION) >= Version('2.2'): + elif version >= Version('2.2'): return 4 - elif Version(CASSANDRA_VERSION) >= Version('2.1'): + elif version >= Version('2.1'): return 3 - elif Version(CASSANDRA_VERSION) >= Version('2.0'): + elif version >= Version('2.0'): return 2 else: return 1 @@ -192,15 +193,16 @@ def get_supported_protocol_versions(): 3.X -> 4, 3 3.10 -> 5(beta),4,3 ` """ - if Version(CASSANDRA_VERSION) >= Version('3.10'): + version = Version(CASSANDRA_VERSION) + if version >= Version('3.10'): return (3, 4, 5) - elif Version(CASSANDRA_VERSION) >= Version('3.0'): + elif version >= Version('3.0'): return (3, 4) - elif Version(CASSANDRA_VERSION) >= Version('2.2'): + elif version >= Version('2.2'): return (1, 2, 3, 4) - elif Version(CASSANDRA_VERSION) >= Version('2.1'): + elif version >= Version('2.1'): return (1, 2, 3) - elif Version(CASSANDRA_VERSION) >= Version('2.0'): + elif version >= Version('2.0'): return (1, 2) else: return (1) diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index 6be0d7a32a..8eafc882e8 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -21,10 +21,12 @@ import unittest # noqa from cassandra import InvalidRequest -from cassandra import ConsistencyLevel +from cassandra import ConsistencyLevel, ProtocolVersion from cassandra.cluster import Cluster from cassandra.query import PreparedStatement, UNSET_VALUE, tuple_factory -from tests.integration import get_server_versions, greaterthanorequalcass40, BasicSharedKeyspaceUnitTestCase +from tests.integration import (get_server_versions, greaterthanorequalcass40, + set_default_beta_flag_true, + BasicSharedKeyspaceUnitTestCase) def setup_module(): @@ -38,7 +40,8 @@ def setUpClass(cls): cls.cass_version = get_server_versions() def setUp(self): - self.cluster = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION) + self.cluster = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION, + allow_beta_protocol_version=True) self.session = self.cluster.connect() def tearDown(self): @@ -443,13 +446,13 @@ def test_prepared_id_is_update(self): The query id from the prepared statment must have changed """ prepared_statement = self.session.prepare("SELECT * from {} WHERE a = ?".format(self.table_name)) - id_before = prepared_statement.query_id + id_before = prepared_statement.result_metadata_id self.session.execute("ALTER TABLE {} ADD c int".format(self.table_name)) bound_statement = prepared_statement.bind((1, )) - self.session.execute(bound_statement) + self.session.execute(bound_statement, timeout=1) - id_after = prepared_statement.query_id + id_after = prepared_statement.result_metadata_id self.assertNotEqual(id_before, id_after) @@ -464,20 +467,19 @@ def test_prepared_id_is_updated_across_pages(self): @jira_ticket PYTHON-808 """ prepared_statement = self.session.prepare("SELECT * from {}".format(self.table_name)) - id_before = prepared_statement.query_id + id_before = prepared_statement.result_metadata_id prepared_statement.fetch_size = 2 result = self.session.execute(prepared_statement.bind((None))) - self.assertTrue(result.has_more_pages) self.session.execute("ALTER TABLE {} ADD c int".format(self.table_name)) - result_set = set(x for x in ((1, 1, 1),(2, 2, 2), (3, 3, None, 3), (4, 4, None, 4))) + result_set = set(x for x in ((1, 1, 1), (2, 2, 2), (3, 3, None, 3), (4, 4, None, 4))) expected_result_set = set(row for row in result) - id_after = prepared_statement.query_id + id_after = prepared_statement.result_metadata_id self.assertEqual(result_set, expected_result_set) self.assertNotEqual(id_before, id_after) @@ -491,19 +493,19 @@ def test_prepare_id_is_updated_across_session(self): @since 3.12 @jira_ticket PYTHON-808 """ - one_cluster = Cluster() + one_cluster = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION) one_session = one_cluster.connect() self.addCleanup(one_cluster.shutdown) stm = "SELECT * from {} WHERE a = ?".format(self.table_name) one_prepared_stm = one_session.prepare(stm) - one_id_before = one_prepared_stm.query_id + one_id_before = one_prepared_stm.result_metadata_id self.session.execute("ALTER TABLE {} ADD c int".format(self.table_name)) one_session.execute(one_prepared_stm, (1, )) - one_id_after = one_prepared_stm.query_id + one_id_after = one_prepared_stm.result_metadata_id self.assertNotEqual(one_id_before, one_id_after) def test_not_reprepare_invalid_statements(self): From 395281035593dafb64cf4b2d535c8a4546c2b1a3 Mon Sep 17 00:00:00 2001 From: Alex Petrov Date: Mon, 18 Sep 2017 23:21:16 +0200 Subject: [PATCH 1254/2005] Fix dtests (#825) * Fix dtests * fix custom protocol tests --- cassandra/row_parser.pyx | 4 ++-- .../integration/standard/test_custom_protocol_handler.py | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cassandra/row_parser.pyx b/cassandra/row_parser.pyx index c85da3d5fb..a16717eb2b 100644 --- a/cassandra/row_parser.pyx +++ b/cassandra/row_parser.pyx @@ -24,7 +24,7 @@ def make_recv_results_rows(ColumnParser colparser): Parse protocol data given as a BytesIO f into a set of columns (e.g. list of tuples) This is used as the recv_results_rows method of (Fast)ResultMessage """ - paging_state, column_metadata = cls.recv_results_metadata(f, user_type_map) + paging_state, column_metadata, result_metadata_id = cls.recv_results_metadata(f, user_type_map) column_metadata = column_metadata or result_metadata @@ -45,6 +45,6 @@ def make_recv_results_rows(ColumnParser colparser): for i in range(rowcount): rowparser.unpack_row(reader, desc) - return (paging_state, coltypes, (colnames, parsed_rows)) + return (paging_state, coltypes, (colnames, parsed_rows), result_metadata_id) return recv_results_rows diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index d9d8b6b2f0..c1a8f3d479 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -179,12 +179,12 @@ class CustomResultMessageRaw(ResultMessage): @classmethod def recv_results_rows(cls, f, protocol_version, user_type_map, result_metadata): - paging_state, column_metadata = cls.recv_results_metadata(f, user_type_map) + paging_state, column_metadata, result_metadata_id = cls.recv_results_metadata(f, user_type_map) rowcount = read_int(f) rows = [cls.recv_row(f, len(column_metadata)) for _ in range(rowcount)] colnames = [c[2] for c in column_metadata] coltypes = [c[3] for c in column_metadata] - return paging_state, coltypes, (colnames, rows) + return paging_state, coltypes, (colnames, rows), result_metadata_id class CustomTestRawRowType(ProtocolHandler): @@ -209,7 +209,7 @@ class CustomResultMessageTracked(ResultMessage): @classmethod def recv_results_rows(cls, f, protocol_version, user_type_map, result_metadata): - paging_state, column_metadata = cls.recv_results_metadata(f, user_type_map) + paging_state, column_metadata, result_metadata_id = cls.recv_results_metadata(f, user_type_map) rowcount = read_int(f) rows = [cls.recv_row(f, len(column_metadata)) for _ in range(rowcount)] colnames = [c[2] for c in column_metadata] @@ -219,7 +219,7 @@ def recv_results_rows(cls, f, protocol_version, user_type_map, result_metadata): tuple(ctype.from_binary(val, protocol_version) for ctype, val in zip(coltypes, row)) for row in rows] - return paging_state, coltypes, (colnames, parsed_rows) + return paging_state, coltypes, (colnames, parsed_rows), result_metadata_id class CustomProtocolHandlerResultMessageTracked(ProtocolHandler): From 0115d2104b7b3e0cc56f44cbbdb4533dde6b79f4 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 18 Sep 2017 17:23:56 -0400 Subject: [PATCH 1255/2005] CHANGELOG entry & test fixes --- CHANGELOG.rst | 1 + cassandra/protocol.py | 3 ++- tests/unit/test_parameter_binding.py | 32 ++++++++++++++++------------ tests/unit/test_protocol.py | 6 +++++- tests/unit/test_response_future.py | 2 +- 5 files changed, 27 insertions(+), 17 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index b6e41a230a..6cecd76422 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -7,6 +7,7 @@ Features * Add IPv4Address/IPv6Address support for inet types (PYTHON-751) * WriteType.CDC and VIEW missing (PYTHON-794) * Warn on Cluster init if contact points are specified but LBP isn't (PYTHON-812) +* Include hash of result set metadata in prepared stmt id (PYTHON-808) Bug Fixes --------- diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 26f8dbeea5..b2f8df659b 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -614,6 +614,7 @@ class ResultMessage(_MessageType): _FLAGS_GLOBAL_TABLES_SPEC = 0x0001 _HAS_MORE_PAGES_FLAG = 0x0002 _NO_METADATA_FLAG = 0x0004 + _METADATA_ID_FLAG = 0x0008 def __init__(self, kind, results, paging_state=None, col_types=None): self.kind = kind @@ -686,7 +687,7 @@ def recv_results_metadata(cls, f, user_type_map): else: paging_state = None - if flags & 0x0008: + if flags & cls._METADATA_ID_FLAG: result_metadata_id = read_binary_string(f) else: result_metadata_id = None diff --git a/tests/unit/test_parameter_binding.py b/tests/unit/test_parameter_binding.py index ef03b3c5bc..e3b667dab7 100644 --- a/tests/unit/test_parameter_binding.py +++ b/tests/unit/test_parameter_binding.py @@ -77,21 +77,21 @@ def test_float_precision(self): class BoundStatementTestV1(unittest.TestCase): - protocol_version=1 + protocol_version = 1 @classmethod def setUpClass(cls): - cls.prepared = PreparedStatement(column_metadata=[ - ColumnMetadata('keyspace', 'cf', 'rk0', Int32Type), - ColumnMetadata('keyspace', 'cf', 'rk1', Int32Type), - ColumnMetadata('keyspace', 'cf', 'ck0', Int32Type), - ColumnMetadata('keyspace', 'cf', 'v0', Int32Type) - ], + column_metadata = [ColumnMetadata('keyspace', 'cf', 'rk0', Int32Type), + ColumnMetadata('keyspace', 'cf', 'rk1', Int32Type), + ColumnMetadata('keyspace', 'cf', 'ck0', Int32Type), + ColumnMetadata('keyspace', 'cf', 'v0', Int32Type)] + cls.prepared = PreparedStatement(column_metadata=column_metadata, query_id=None, routing_key_indexes=[1, 0], query=None, keyspace='keyspace', - protocol_version=cls.protocol_version, result_metadata=None) + protocol_version=cls.protocol_version, result_metadata=None, + result_metadata_id=None) cls.bound = BoundStatement(prepared_statement=cls.prepared) def test_invalid_argument_type(self): @@ -131,7 +131,8 @@ def test_inherit_fetch_size(self): query=None, keyspace=keyspace, protocol_version=self.protocol_version, - result_metadata=None) + result_metadata=None, + result_metadata_id=None) prepared_statement.fetch_size = 1234 bound_statement = BoundStatement(prepared_statement=prepared_statement) self.assertEqual(1234, bound_statement.fetch_size) @@ -165,7 +166,8 @@ def test_values_none(self): query=None, keyspace='whatever', protocol_version=self.protocol_version, - result_metadata=None) + result_metadata=None, + result_metadata_id=None) bound = prepared_statement.bind(None) self.assertListEqual(bound.values, []) @@ -184,15 +186,15 @@ def test_unset_value(self): class BoundStatementTestV2(BoundStatementTestV1): - protocol_version=2 + protocol_version = 2 class BoundStatementTestV3(BoundStatementTestV1): - protocol_version=3 + protocol_version = 3 class BoundStatementTestV4(BoundStatementTestV1): - protocol_version=4 + protocol_version = 4 def test_dict_missing_routing_key(self): # in v4 it implicitly binds UNSET_VALUE for missing items, @@ -214,6 +216,8 @@ def test_unset_value(self): self.bound.bind({'rk0': 0, 'rk1': 0, 'ck0': 0, 'v0': UNSET_VALUE}) self.assertEqual(self.bound.values[-1], UNSET_VALUE) - old_values = self.bound.values self.bound.bind((0, 0, 0, UNSET_VALUE)) self.assertEqual(self.bound.values[-1], UNSET_VALUE) + +class BoundStatementTestV5(BoundStatementTestV4): + protocol_version = 5 diff --git a/tests/unit/test_protocol.py b/tests/unit/test_protocol.py index a4ee3385eb..21223d27d1 100644 --- a/tests/unit/test_protocol.py +++ b/tests/unit/test_protocol.py @@ -40,9 +40,13 @@ def test_execute_message(self): self._check_calls(io, [(b'\x00\x01',), (b'1',), (b'\x00\x04',), (b'\x01',), (b'\x00\x00',)]) io.reset_mock() + message.result_metadata_id = 'foo' message.send_body(io, 5) - self._check_calls(io, [(b'\x00\x01',), (b'1',), (b'\x00\x04',), (b'\x00\x00\x00\x01',), (b'\x00\x00',)]) + self._check_calls(io, [(b'\x00\x01',), (b'1',), + (b'\x00\x03',), (b'foo',), + (b'\x00\x04',), + (b'\x00\x00\x00\x01',), (b'\x00\x00',)]) def test_query_message(self): """ diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index e861301304..be25b42b5a 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -517,7 +517,7 @@ def test_repeat_orig_query_after_succesful_reprepare(self): rf = self.make_response_future(session) response = Mock(spec=ResultMessage, kind=RESULT_KIND_PREPARED) - response.results = (None, None, None, None) + response.results = (None, None, None, None, None) rf._query = Mock(return_value=True) rf._execute_after_prepare('host', None, None, response) From ec70a54ea30e90c3e57f618c6934a6c7713b8d75 Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 5 Oct 2017 13:33:52 -0400 Subject: [PATCH 1256/2005] Added tests for ignoring and rethrowing in RetryPolicy --- tests/integration/simulacron/test_policies.py | 72 +++++++++++++++++-- 1 file changed, 68 insertions(+), 4 deletions(-) diff --git a/tests/integration/simulacron/test_policies.py b/tests/integration/simulacron/test_policies.py index 5f1191e149..4759e3b36a 100644 --- a/tests/integration/simulacron/test_policies.py +++ b/tests/integration/simulacron/test_policies.py @@ -16,10 +16,10 @@ except ImportError: import unittest # noqa -from cassandra import OperationTimedOut +from cassandra import OperationTimedOut, WriteTimeout from cassandra.cluster import Cluster, ExecutionProfile from cassandra.query import SimpleStatement -from cassandra.policies import ConstantSpeculativeExecutionPolicy, RoundRobinPolicy +from cassandra.policies import ConstantSpeculativeExecutionPolicy, RoundRobinPolicy, RetryPolicy, WriteType from tests.integration import PROTOCOL_VERSION, greaterthancass21, requiressimulacron, SIMULACRON_JAR from tests.integration.simulacron.utils import start_and_prime_singledc, prime_query, \ @@ -139,8 +139,6 @@ def test_speculative_execution(self): result = self.session.execute(prepared_statement, ("0",), execution_profile='spec_ep_brr') self.assertLess(1, len(result.response_future.attempted_hosts)) - - def test_speculative_and_timeout(self): """ Test to ensure the timeout is honored when using speculative execution @@ -166,3 +164,69 @@ def test_speculative_and_timeout(self): # This is because 14 / 4 + 1 = 4 self.assertEqual(len(response_future.attempted_hosts), 4) + + +class CustomRetryPolicy(RetryPolicy): + def on_write_timeout(self, query, consistency, write_type, + required_responses, received_responses, retry_num): + if retry_num != 0: + return self.RETHROW, None + elif write_type == WriteType.SIMPLE: + return self.RETHROW, None + elif write_type == WriteType.CDC: + return self.IGNORE, None + + +@requiressimulacron +class RetryPolicyTets(unittest.TestCase): + @classmethod + def setUpClass(cls): + if SIMULACRON_JAR is None: + return + start_and_prime_singledc() + + cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False, + default_retry_policy=CustomRetryPolicy()) + cls.session = cls.cluster.connect(wait_for_all_pools=True) + + @classmethod + def tearDownClass(cls): + if SIMULACRON_JAR is None: + return + cls.cluster.shutdown() + stop_simulacron() + + def tearDown(self): + clear_queries() + + def test_retry_policy_ignores_and_rethrows(self): + """ + Test to verify :class:`~cassandra.protocol.WriteTimeoutErrorMessage` is decoded correctly and that + :attr:`.~cassandra.policies.RetryPolicy.RETHROW` and + :attr:`.~cassandra.policies.RetryPolicy.IGNORE` are respected + to localhost + + @since 3.12 + @jira_ticket PYTHON-812 + @expected_result the retry policy functions as expected + + @test_category connection + """ + query_to_prime_simple = "SELECT * from simulacron_keyspace.simple" + query_to_prime_cdc = "SELECT * from simulacron_keyspace.cdc" + then = { + "result": "write_timeout", + "delay_in_ms": 0, + "consistency_level": "LOCAL_QUORUM", + "received": 1, + "block_for": 2, + "write_type": "SIMPLE" + } + prime_query(query_to_prime_simple, then=then) + then["write_type"] = "CDC" + prime_query(query_to_prime_cdc, then=then) + + with self.assertRaises(WriteTimeout): + self.session.execute(query_to_prime_simple) + #CDC should be ignored + self.session.execute(query_to_prime_cdc) From c4efbe0769f819fd44c68328b6113771774bcb68 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 12 Oct 2017 13:11:51 -0400 Subject: [PATCH 1257/2005] fix 3.11 failures after CASSANDRA-10786 --- tests/integration/__init__.py | 4 +++- tests/integration/long/test_failure_types.py | 14 +++----------- 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 3194281596..fbc0654ddf 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -194,8 +194,10 @@ def get_supported_protocol_versions(): 3.10 -> 5(beta),4,3 ` """ version = Version(CASSANDRA_VERSION) - if version >= Version('3.10'): + if version >= Version('4.0'): return (3, 4, 5) + elif version >= Version('3.10'): + return (3, 4) elif version >= Version('3.0'): return (3, 4) elif version >= Version('2.2'): diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py index 10059d320f..7a5a8d3946 100644 --- a/tests/integration/long/test_failure_types.py +++ b/tests/integration/long/test_failure_types.py @@ -65,20 +65,12 @@ def setUp(self): """ Test is skipped if run with native protocol version <4 """ - self.support_v5 = True if PROTOCOL_VERSION < 4: raise unittest.SkipTest( "Native protocol 4,0+ is required for custom payloads, currently using %r" % (PROTOCOL_VERSION,)) - try: - self.cluster = Cluster(protocol_version=ProtocolVersion.MAX_SUPPORTED, allow_beta_protocol_version=True) - self.session = self.cluster.connect() - except NoHostAvailable: - log.info("Protocol Version 5 not supported,") - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) - self.session = self.cluster.connect() - self.support_v5 = False - + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.session = self.cluster.connect() self.nodes_currently_failing = [] self.node1, self.node2, self.node3 = get_cluster().nodes.values() @@ -157,7 +149,7 @@ def _perform_cql_statement(self, text, consistency_level, expected_exception, se else: with self.assertRaises(expected_exception) as cm: self.execute_helper(session, statement) - if self.support_v5 and (isinstance(cm.exception, WriteFailure) or isinstance(cm.exception, ReadFailure)): + if ProtocolVersion.uses_error_code_map(PROTOCOL_VERSION) and (isinstance(cm.exception, WriteFailure) or isinstance(cm.exception, ReadFailure)): if isinstance(cm.exception, ReadFailure): self.assertEqual(list(cm.exception.error_code_map.values())[0], 1) else: From ee6f9d6ba695ba485cbca032d2a376219e5ea11d Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 12 Oct 2017 14:27:19 -0400 Subject: [PATCH 1258/2005] simplify redundant error type checking --- tests/integration/long/test_failure_types.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py index 7a5a8d3946..6ce80d3338 100644 --- a/tests/integration/long/test_failure_types.py +++ b/tests/integration/long/test_failure_types.py @@ -149,10 +149,10 @@ def _perform_cql_statement(self, text, consistency_level, expected_exception, se else: with self.assertRaises(expected_exception) as cm: self.execute_helper(session, statement) - if ProtocolVersion.uses_error_code_map(PROTOCOL_VERSION) and (isinstance(cm.exception, WriteFailure) or isinstance(cm.exception, ReadFailure)): + if ProtocolVersion.uses_error_code_map(PROTOCOL_VERSION): if isinstance(cm.exception, ReadFailure): self.assertEqual(list(cm.exception.error_code_map.values())[0], 1) - else: + if isinstance(cm.exception, WriteFailure): self.assertEqual(list(cm.exception.error_code_map.values())[0], 0) def test_write_failures_from_coordinator(self): From 22571afaea37f466cdec4b00c5c867079d858817 Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 9 Oct 2017 09:34:35 -0400 Subject: [PATCH 1259/2005] Fixed eventlet tests --- tests/unit/io/test_eventletreactor.py | 5 +++-- tests/unit/io/test_geventreactor.py | 5 +++-- tests/unit/test_cluster.py | 6 +++--- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/tests/unit/io/test_eventletreactor.py b/tests/unit/io/test_eventletreactor.py index 05ee73d38b..678e8e4715 100644 --- a/tests/unit/io/test_eventletreactor.py +++ b/tests/unit/io/test_eventletreactor.py @@ -28,16 +28,17 @@ except ImportError: EventletConnection = None # noqa +skip_condition = EventletConnection is None or MONKEY_PATCH_LOOP != "eventlet" # There are some issues with some versions of pypy and eventlet @notpypy -@unittest.skipIf(EventletConnection is None, "Skpping the eventlet tests because it's not installed") +@unittest.skipIf(skip_condition, "Skipping the eventlet tests because it's not installed") @notmonkeypatch class EventletTimerTest(unittest.TestCase, TimerConnectionTests): @classmethod def setUpClass(cls): # This is run even though the class is skipped, so we need # to make sure no monkey patching is happening - if not MONKEY_PATCH_LOOP: + if skip_condition: return # This is being added temporarily due to a bug in eventlet: diff --git a/tests/unit/io/test_geventreactor.py b/tests/unit/io/test_geventreactor.py index 9db1e2ef93..e865fe9a43 100644 --- a/tests/unit/io/test_geventreactor.py +++ b/tests/unit/io/test_geventreactor.py @@ -27,14 +27,15 @@ GeventConnection = None # noqa -@unittest.skipIf(GeventConnection is None, "Skpping the gevent tests because it's not installed") +skip_condition = GeventConnection is None or MONKEY_PATCH_LOOP != "gevent" +@unittest.skipIf(skip_condition, "Skipping the gevent tests because it's not installed") @notmonkeypatch class GeventTimerTest(unittest.TestCase, TimerConnectionTests): @classmethod def setUpClass(cls): # This is run even though the class is skipped, so we need # to make sure no monkey patching is happening - if not MONKEY_PATCH_LOOP: + if skip_condition: return gevent.monkey.patch_all() cls.connection_class = GeventConnection diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 7d7238376e..c84838a21b 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -377,11 +377,11 @@ def test_warning_on_no_lbp_with_contact_points(self): @test_category configuration """ with patch('cassandra.cluster.log') as patched_logger: - Cluster(contact_points=['1']) + Cluster(contact_points=['127.0.0.1']) patched_logger.warn.assert_called_once() warning_message = patched_logger.warn.call_args[0][0] self.assertIn('no load_balancing_policy', warning_message) - self.assertIn("contact_points = ['1']", warning_message) + self.assertIn("contact_points = ['127.0.0.1']", warning_message) self.assertIn('lbp = None', warning_message) @mock_session_pools @@ -397,5 +397,5 @@ def test_no_warning_on_contact_points_with_lbp(self): @test_category configuration """ with patch('cassandra.cluster.log') as patched_logger: - Cluster(contact_points=['1'], load_balancing_policy=object()) + Cluster(contact_points=['127.0.0.1'], load_balancing_policy=object()) patched_logger.warn.assert_not_called() From 66c2b3384ac1557b35934c77b0576b346be04869 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 24 Oct 2017 12:16:43 -0500 Subject: [PATCH 1260/2005] timestamp gen docstrings: sec-->usec --- cassandra/cluster.py | 2 +- cassandra/timestamps.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index d9e8393f2b..b05d9123f3 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1940,7 +1940,7 @@ def default_serial_consistency_level(self, cl): increasing timestamps across clusters, or set it to to ``lambda: int(time.time() * 1e6)`` if losing records over clock inconsistencies is acceptable for the application. Custom :attr:`timestamp_generator` s should - be callable, and calling them should return an integer representing seconds + be callable, and calling them should return an integer representing microseconds since some point in time, typically UNIX epoch. .. versionadded:: 3.8.0 diff --git a/cassandra/timestamps.py b/cassandra/timestamps.py index 2ebf0b7f6c..36850a4115 100644 --- a/cassandra/timestamps.py +++ b/cassandra/timestamps.py @@ -70,7 +70,7 @@ def _next_timestamp(self, now, last): call an instantiated ``MonotonicTimestampGenerator`` object. :param int now: an integer to be used as the current time, typically - representing the current time in seconds since the UNIX epoch + representing the current time in microseconds since the UNIX epoch :param int last: an integer representing the last timestamp returned by this object """ From 67e473403fc4516db2cad3f2941adc3078cf7f7e Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 19 Oct 2017 09:36:11 -0400 Subject: [PATCH 1261/2005] Added upgrade tests --- build.yaml | 23 +- tests/integration/__init__.py | 35 ++- tests/integration/long/upgrade/__init__.py | 179 ++++++++++++ .../integration/long/upgrade/test_upgrade.py | 275 ++++++++++++++++++ 4 files changed, 499 insertions(+), 13 deletions(-) create mode 100644 tests/integration/long/upgrade/__init__.py create mode 100644 tests/integration/long/upgrade/test_upgrade.py diff --git a/build.yaml b/build.yaml index cfc4269878..ec70de7b46 100644 --- a/build.yaml +++ b/build.yaml @@ -58,6 +58,18 @@ schedules: env_vars: | EVENT_LOOP_MANAGER='twisted' + upgrade_tests: + schedule: adhoc + branches: + include: [master, python-546] + env_vars: | + EVENT_LOOP_MANAGER='libev' + JUST_UPGRADE=True + matrix: + exclude: + - python: [3.4, 3.6] + - cassandra: ['2.0', '2.1', '2.2', '3.0'] + python: - 2.7 - 3.4 @@ -91,6 +103,7 @@ build: pip install -r test-requirements.txt pip install nose-ignore-docstring + pip install nose-exclude FORCE_CYTHON=False if [[ $CYTHON == 'CYTHON' ]]; then FORCE_CYTHON=True @@ -118,6 +131,14 @@ build: popd + + echo "JUST_UPGRADE: $JUST_UPGRADE" + if [[ $JUST_UPGRADE == 'True' ]]; then + EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=upgrade_results.xml tests/integration/long/upgrade || true + exit 0 + fi + + # Run the unit tests, this is not done in travis because # it takes too much time for the whole matrix to build with cython if [[ $CYTHON == 'CYTHON' ]]; then @@ -139,6 +160,6 @@ build: EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true echo "==========RUNNING LONG INTEGRATION TESTS==========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true + EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true - xunit: - "*_results.xml" diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index fbc0654ddf..2059297b69 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -31,9 +31,11 @@ from threading import Event from subprocess import call from itertools import groupby +import six from cassandra import OperationTimedOut, ReadTimeout, ReadFailure, WriteTimeout, WriteFailure, AlreadyExists, \ InvalidRequest +from cassandra.cluster import NoHostAvailable from cassandra.protocol import ConfigurationException @@ -339,9 +341,14 @@ def is_current_cluster(cluster_name, node_counts): return False -def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[]): +def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[], ccm_options=None, + configuration_options={}): set_default_cass_ip() + if ccm_options is None: + ccm_options = CCM_KWARGS + cassandra_version = ccm_options["version"] if "version" in ccm_options else CASSANDRA_VERSION + global CCM_CLUSTER if USE_CASS_EXTERNAL: if CCM_CLUSTER: @@ -362,27 +369,25 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[]): CCM_CLUSTER = CCMClusterFactory.load(path, cluster_name) log.debug("Found existing CCM cluster, {0}; clearing.".format(cluster_name)) CCM_CLUSTER.clear() - CCM_CLUSTER.set_install_dir(**CCM_KWARGS) + CCM_CLUSTER.set_install_dir(**ccm_options) except Exception: ex_type, ex, tb = sys.exc_info() log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb - log.debug("Creating new CCM cluster, {0}, with args {1}".format(cluster_name, CCM_KWARGS)) + log.debug("Creating new CCM cluster, {0}, with args {1}".format(cluster_name, ccm_options)) if DSE_VERSION: log.error("creating dse cluster") - CCM_CLUSTER = DseCluster(path, cluster_name, **CCM_KWARGS) + CCM_CLUSTER = DseCluster(path, cluster_name, **ccm_options) else: - CCM_CLUSTER = CCMCluster(path, cluster_name, **CCM_KWARGS) + CCM_CLUSTER = CCMCluster(path, cluster_name, **ccm_options) CCM_CLUSTER.set_configuration_options({'start_native_transport': True}) - if CASSANDRA_VERSION >= '2.2': + if cassandra_version >= '2.2': CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True}) - if CASSANDRA_VERSION >= '3.0': + if cassandra_version >= '3.0': CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True}) - if 'spark' in workloads: - config_options = {"initial_spark_worker_resources": 0.1} - CCM_CLUSTER.set_dse_configuration_options(config_options) common.switch_cluster(path, cluster_name) + CCM_CLUSTER.set_configuration_options(configuration_options) CCM_CLUSTER.populate(nodes, ipformat=ipformat) try: jvm_args = [] @@ -400,18 +405,24 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[]): # Added to wait for slow nodes to start up for node in CCM_CLUSTER.nodes.values(): wait_for_node_socket(node, 120) - setup_keyspace(ipformat=ipformat) + try: + setup_keyspace(ipformat=ipformat) + # This could happen if authentication was set on the server + # through configuration_options + except NoHostAvailable: + pass except Exception: log.exception("Failed to start CCM cluster; removing cluster.") if os.name == "nt": if CCM_CLUSTER: - for node in CCM_CLUSTER.nodes.itervalues(): + for node in six.itervalues(CCM_CLUSTER.nodes): os.system("taskkill /F /PID " + str(node.pid)) else: call(["pkill", "-9", "-f", ".ccm"]) remove_cluster() raise + return CCM_CLUSTER def teardown_package(): diff --git a/tests/integration/long/upgrade/__init__.py b/tests/integration/long/upgrade/__init__.py new file mode 100644 index 0000000000..1d22ba8cea --- /dev/null +++ b/tests/integration/long/upgrade/__init__.py @@ -0,0 +1,179 @@ +# Copyright 2013-2017 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from tests.integration import CCM_KWARGS, use_cluster, remove_cluster, MockLoggingHandler +from tests.integration import setup_keyspace + +from cassandra.cluster import Cluster +from cassandra import cluster + +from collections import namedtuple +from functools import wraps +from threading import Thread, Event +from ccmlib.node import TimeoutError +import time +import logging + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + + +def setup_module(): + remove_cluster() + + +UPGRADE_CLUSTER = "upgrade_cluster" +UpgradePath = namedtuple('UpgradePath', ('name', 'starting_version', 'upgrade_version', 'configuration_options')) + + +class upgrade_paths(object): + """ + Decorator used to specify the upgrade paths for a particular method + """ + def __init__(self, paths): + self.paths = paths + + def __call__(self, method): + @wraps(method) + def wrapper(*args, **kwargs): + for path in self.paths: + self_from_decorated = args[0] + self_from_decorated.UPGRADE_PATH = path + self_from_decorated._setUp() + method(*args, **kwargs) + self_from_decorated._tearDown() + return wrapper + + +class UpgradeBase(unittest.TestCase): + """ + Base class for the upgrade tests. The _setup method + will clean the environment and start the appropriate C* version according + to the upgrade path. The upgrade can be done in a different thread using the + start_upgrade upgrade_method (this would be the most realistic scenario) + or node by node, waiting for the upgrade to happen, using _upgrade_one_node method + """ + UPGRADE_PATH = None + start_cluster = True + + @classmethod + def setUpClass(cls): + cls.logger_handler = MockLoggingHandler() + logger = logging.getLogger(cluster.__name__) + logger.addHandler(cls.logger_handler) + + def _setUp(self): + """ + This is not the regular _setUp method because it will be called from + the decorator instead of letting nose handle it. + This setup method will start a cluster with the right version according + to the variable UPGRADE_PATH. + """ + remove_cluster() + self.cluster = use_cluster(UPGRADE_CLUSTER + self.UPGRADE_PATH.name, [3], + ccm_options=self.UPGRADE_PATH.starting_version, + configuration_options=self.UPGRADE_PATH.configuration_options) + self.nodes = self.cluster.nodelist() + self.last_node_upgraded = None + self.upgrade_done = Event() + self.upgrade_thread = None + + if self.start_cluster: + setup_keyspace() + + self.cluster_driver = Cluster() + self.session = self.cluster_driver.connect() + self.logger_handler.reset() + + def _tearDown(self): + """ + special tearDown method called by the decorator after the method has ended + """ + if self.upgrade_thread: + self.upgrade_thread.join(timeout=5) + self.upgrade_thread = None + + if self.start_cluster: + self.cluster_driver.shutdown() + + def start_upgrade(self, time_node_upgrade): + """ + Starts the upgrade in a different thread + """ + self.upgrade_thread = Thread(target=self._upgrade, args=(time_node_upgrade,)) + self.upgrade_thread.start() + + def _upgrade(self, time_node_upgrade): + """ + Starts the upgrade in the same thread + """ + start_time = time.time() + while self._upgrade_one_node(): + end_time = time.time() + time_to_upgrade = end_time - start_time + if time_node_upgrade > time_to_upgrade: + time.sleep(time_node_upgrade - time_to_upgrade) + self.upgrade_done.set() + + def is_upgraded(self): + """ + Returns True if the upgrade has finished and False otherwise + """ + return self.upgrade_done.is_set() + + def wait_for_upgrade(self, timeout=None): + """ + Waits until the upgrade has completed + """ + self.upgrade_done.wait(timeout=timeout) + + def _upgrade_one_node(self): + """ + Upgrades only one node. Return True if the upgrade + has finished and False otherwise + """ + if self.last_node_upgraded is None: + node_to_upgrade = self.nodes[0] + self.last_node_upgraded = 0 + else: + if len(self.nodes) - 1 == self.last_node_upgraded: + return False + self.last_node_upgraded += 1 + node_to_upgrade = self.nodes[self.last_node_upgraded] + + node_to_upgrade.drain() + node_to_upgrade.stop(gently=True) + + node_to_upgrade.set_install_dir(**self.UPGRADE_PATH.upgrade_version) + + # There must be a cleaner way of doing this, but it's necessary here + # to call the private method from cluster __update_topology_files + self.cluster._Cluster__update_topology_files() + try: + node_to_upgrade.start(wait_for_binary_proto=True, wait_other_notice=True) + except TimeoutError: + self.fail("Error starting C* node while upgrading") + + return True + + +class UpgradeBaseAuth(UpgradeBase): + """ + Base class of authentication test, the authentication parameters for + C* still have to be specified within the upgrade path variable + """ + start_cluster = False diff --git a/tests/integration/long/upgrade/test_upgrade.py b/tests/integration/long/upgrade/test_upgrade.py new file mode 100644 index 0000000000..02d2443962 --- /dev/null +++ b/tests/integration/long/upgrade/test_upgrade.py @@ -0,0 +1,275 @@ +# Copyright 2013-2017 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from tests.integration.long.upgrade import UpgradeBase, UpgradeBaseAuth, UpgradePath, upgrade_paths + +from cassandra.cluster import ConsistencyLevel, Cluster, DriverException, ExecutionProfile +from cassandra.policies import ConstantSpeculativeExecutionPolicy +from cassandra.auth import PlainTextAuthProvider, SaslAuthProvider + +from itertools import count +import time + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + + +two_to_three_path = upgrade_paths([ + UpgradePath("2.2.9-3.11", {"version": "2.2.9"}, {"version": "3.11"}, {}), + ]) +class UpgradeTests(UpgradeBase): + @two_to_three_path + def test_can_write(self): + """ + Verify that the driver will keep querying C* even if there is a host down while being + upgraded and that all the writes will eventually succeed + @since 3.12 + @jira_ticket PYTHON-546 + @expected_result all the writes succeed + + @test_category upgrade + """ + self.start_upgrade(0) + + c = count() + while not self.is_upgraded(): + self.session.execute("INSERT INTO test3rf.test(k, v) VALUES (%s, 0)", (next(c), )) + time.sleep(0.0001) + + self.session.default_consistency_level = ConsistencyLevel.ALL + total_number_of_inserted = self.session.execute("SELECT COUNT(*) from test3rf.test")[0][0] + self.assertEqual(total_number_of_inserted, next(c)) + + self.session.default_consistency_level = ConsistencyLevel.LOCAL_ONE + self.assertEqual(self.logger_handler.get_message_count("error", ""), 0) + + @two_to_three_path + def test_can_connect(self): + """ + Verify that the driver can connect to all the nodes + despite some nodes being in different versions + @since 3.12 + @jira_ticket PYTHON-546 + @expected_result the driver connects successfully and can execute queries against + all the hosts + + @test_category upgrade + """ + def connect_and_shutdown(): + cluster = Cluster() + session = cluster.connect(wait_for_all_pools=True) + queried_hosts = set() + for _ in range(10): + results = session.execute("SELECT * from system.local") + self.assertGreater(len(results.current_rows), 0) + self.assertEqual(len(results.response_future.attempted_hosts), 1) + queried_hosts.add(results.response_future.attempted_hosts[0]) + self.assertEqual(len(queried_hosts), 3) + cluster.shutdown() + + connect_and_shutdown() + while self._upgrade_one_node(): + connect_and_shutdown() + + connect_and_shutdown() + + +class UpgradeTestsMetadata(UpgradeBase): + @two_to_three_path + def test_can_write(self): + """ + Verify that the driver will keep querying C* even if there is a host down while being + upgraded and that all the writes will eventually succeed + @since 3.12 + @jira_ticket PYTHON-546 + @expected_result all the writes succeed + + @test_category upgrade + """ + self.start_upgrade(0) + + c = count() + while not self.is_upgraded(): + self.session.execute("INSERT INTO test3rf.test(k, v) VALUES (%s, 0)", (next(c),)) + time.sleep(0.0001) + + self.session.default_consistency_level = ConsistencyLevel.ALL + total_number_of_inserted = self.session.execute("SELECT COUNT(*) from test3rf.test")[0][0] + self.assertEqual(total_number_of_inserted, next(c)) + + self.session.default_consistency_level = ConsistencyLevel.LOCAL_ONE + self.assertEqual(self.logger_handler.get_message_count("error", ""), 0) + @two_to_three_path + def test_schema_metadata_gets_refreshed(self): + """ + Verify that the driver fails to update the metadata while connected against + different versions of nodes. This won't succeed because each node will report a + different schema version + + @since 3.12 + @jira_ticket PYTHON-546 + @expected_result the driver raises DriverException when updating the schema + metadata while upgrading + all the hosts + + @test_category metadata + """ + original_meta = self.cluster_driver.metadata.keyspaces + number_of_nodes = len(self.cluster.nodelist()) + for _ in range(number_of_nodes - 1): + self._upgrade_one_node() + # Wait for the control connection to reconnect + time.sleep(20) + + with self.assertRaises(DriverException): + self.cluster_driver.refresh_schema_metadata(max_schema_agreement_wait=10) + + self._upgrade_one_node() + self.cluster_driver.refresh_schema_metadata(max_schema_agreement_wait=10) + self.assertNotEqual(original_meta, self.cluster_driver.metadata.keyspaces) + + @two_to_three_path + def test_schema_nodes_gets_refreshed(self): + """ + Verify that the driver token map and node list gets rebuild correctly while upgrading. + The token map and the node list should be the same after each node upgrade + + @since 3.12 + @jira_ticket PYTHON-546 + @expected_result the token map and the node list stays consistent with each node upgrade + metadata while upgrading + all the hosts + + @test_category metadata + """ + number_of_nodes = len(self.cluster.nodelist()) + for _ in range(number_of_nodes): + token_map = self.cluster_driver.metadata.token_map + self._upgrade_one_node() + # Wait for the control connection to reconnect + time.sleep(20) + + self.cluster_driver.refresh_nodes(force_token_rebuild=True) + self._assert_same_token_map(token_map, self.cluster_driver.metadata.token_map) + + def _assert_same_token_map(self, original, new): + self.assertIsNot(original, new) + self.assertEqual(original.tokens_to_hosts_by_ks, new.tokens_to_hosts_by_ks) + self.assertEqual(original.token_to_host_owner, new.token_to_host_owner) + self.assertEqual(original.ring, new.ring) + + +two_to_three_with_auth_path = upgrade_paths([ + UpgradePath("2.2.9-3.11-auth", {"version": "2.2.9"}, {"version": "3.11"}, + {'authenticator': 'PasswordAuthenticator', + 'authorizer': 'CassandraAuthorizer'}), + ]) +class UpgradeTestsAuthentication(UpgradeBaseAuth): + @two_to_three_with_auth_path + def test_can_connect_auth_plain(self): + """ + Verify that the driver can connect despite some nodes being in different versions + with plain authentication + @since 3.12 + @jira_ticket PYTHON-546 + @expected_result the driver connects successfully and can execute queries against + all the hosts + + @test_category upgrade + """ + auth_provider = PlainTextAuthProvider( + username="cassandra", + password="cassandra" + ) + self.connect_and_shutdown(auth_provider) + while self._upgrade_one_node(): + self.connect_and_shutdown(auth_provider) + + self.connect_and_shutdown(auth_provider) + + @two_to_three_with_auth_path + def test_can_connect_auth_sasl(self): + """ + Verify that the driver can connect despite some nodes being in different versions + with ssl authentication + @since 3.12 + @jira_ticket PYTHON-546 + @expected_result the driver connects successfully and can execute queries against + all the hosts + + @test_category upgrade + """ + sasl_kwargs = {'service': 'cassandra', + 'mechanism': 'PLAIN', + 'qops': ['auth'], + 'username': 'cassandra', + 'password': 'cassandra'} + auth_provider = SaslAuthProvider(**sasl_kwargs) + self.connect_and_shutdown(auth_provider) + while self._upgrade_one_node(): + self.connect_and_shutdown(auth_provider) + + self.connect_and_shutdown(auth_provider) + + def connect_and_shutdown(self, auth_provider): + cluster = Cluster(idle_heartbeat_interval=0, + auth_provider=auth_provider) + session = cluster.connect(wait_for_all_pools=True) + queried_hosts = set() + for _ in range(10): + results = session.execute("SELECT * from system.local") + self.assertGreater(len(results.current_rows), 0) + self.assertEqual(len(results.response_future.attempted_hosts), 1) + queried_hosts.add(results.response_future.attempted_hosts[0]) + self.assertEqual(len(queried_hosts), 3) + cluster.shutdown() + + +class UpgradeTestsPolicies(UpgradeBase): + @two_to_three_path + def test_can_write_speculative(self): + """ + Verify that the driver will keep querying C* even if there is a host down while being + upgraded and that all the writes will eventually succeed using the ConstantSpeculativeExecutionPolicy + policy + @since 3.12 + @jira_ticket PYTHON-546 + @expected_result all the writes succeed + + @test_category upgrade + """ + spec_ep_rr = ExecutionProfile(speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.5, 10), + request_timeout=12) + cluster = Cluster() + self.addCleanup(cluster.shutdown) + cluster.add_execution_profile("spec_ep_rr", spec_ep_rr) + + session = cluster.connect() + + self.start_upgrade(0) + + c = count() + while not self.is_upgraded(): + session.execute("INSERT INTO test3rf.test(k, v) VALUES (%s, 0)", (next(c),), + execution_profile='spec_ep_rr') + time.sleep(0.0001) + + session.default_consistency_level = ConsistencyLevel.ALL + total_number_of_inserted = session.execute("SELECT COUNT(*) from test3rf.test")[0][0] + self.assertEqual(total_number_of_inserted, next(c)) + + self.assertEqual(self.logger_handler.get_message_count("error", ""), 0) From 55167353c8b5cd83cac40a797d9e2caca7bd45de Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 17 Oct 2017 14:48:33 -0400 Subject: [PATCH 1262/2005] cleanup pre-PYTHON-838 - remove new sentinel; reuse _NOT_SET - add file-terminating newline --- cassandra/cluster.py | 6 ++---- tests/__init__.py | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index d9e8393f2b..e6559cfc3b 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -312,8 +312,6 @@ class _ConfigMode(object): PROFILES = 2 -_UNSET_ARG = object() - class Cluster(object): """ The main class to use when interacting with a Cassandra cluster. @@ -735,7 +733,7 @@ def token_metadata_enabled(self, enabled): _listener_lock = None def __init__(self, - contact_points=_UNSET_ARG, + contact_points=_NOT_SET, port=9042, compression=True, auth_provider=None, @@ -772,7 +770,7 @@ def __init__(self, Any of the mutable Cluster attributes may be set as keyword arguments to the constructor. """ - if contact_points is not _UNSET_ARG and load_balancing_policy is None: + if contact_points is not _NOT_SET and load_balancing_policy is None: log.warn('Cluster.__init__ called with contact_points specified, ' 'but no load_balancing_policy. In the next major ' 'version, this will raise an error; please specify a ' diff --git a/tests/__init__.py b/tests/__init__.py index b84a8ed1d3..f5bcf73ed2 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -90,4 +90,4 @@ def is_monkey_patched(): notwindows = unittest.skipUnless(not "Windows" in platform.system(), "This test is not adequate for windows") notpypy = unittest.skipUnless(not platform.python_implementation() == 'PyPy', "This tests is not suitable for pypy") -notmonkeypatch = unittest.skipUnless(MONKEY_PATCH_LOOP, "Skipping this test because monkey patching is required") \ No newline at end of file +notmonkeypatch = unittest.skipUnless(MONKEY_PATCH_LOOP, "Skipping this test because monkey patching is required") From d69c746bfe0ea7cc11d3fd53e22f157b4c823cc5 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 17 Oct 2017 15:32:13 -0400 Subject: [PATCH 1263/2005] add profile mode contact point checking --- CHANGELOG.rst | 3 +- cassandra/cluster.py | 81 ++++++++++++++++++++++---- tests/unit/test_cluster.py | 113 +++++++++++++++++++++++++++++++++---- 3 files changed, 175 insertions(+), 22 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 1b321f0b83..eaa65ba965 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -6,7 +6,8 @@ Features * Send keyspace in QUERY, PREPARE, and BATCH messages (PYTHON-678) * Add IPv4Address/IPv6Address support for inet types (PYTHON-751) * WriteType.CDC and VIEW missing (PYTHON-794) -* Warn on Cluster init if contact points are specified but LBP isn't (PYTHON-812) +* Warn on Cluster init if contact points are specified but LBP isn't (legacy mode) (PYTHON-812) +* Warn on Cluster init if contact points are specified but LBP isn't (exection profile mode) (PYTHON-838) * Include hash of result set metadata in prepared stmt id (PYTHON-808) Bug Fixes diff --git a/cassandra/cluster.py b/cassandra/cluster.py index e6559cfc3b..4b6222dad3 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -242,10 +242,19 @@ class ExecutionProfile(object): Defaults to :class:`.NoSpeculativeExecutionPolicy` if not specified """ - def __init__(self, load_balancing_policy=None, retry_policy=None, + # indicates if lbp was set explicitly or uses default values + _load_balancing_policy_explicit = False + + def __init__(self, load_balancing_policy=_NOT_SET, retry_policy=None, consistency_level=ConsistencyLevel.LOCAL_ONE, serial_consistency_level=None, request_timeout=10.0, row_factory=named_tuple_factory, speculative_execution_policy=None): - self.load_balancing_policy = load_balancing_policy or default_lbp_factory() + + if load_balancing_policy is _NOT_SET: + self._load_balancing_policy_explicit = False + self.load_balancing_policy = default_lbp_factory() + else: + self._load_balancing_policy_explicit = True + self.load_balancing_policy = load_balancing_policy self.retry_policy = retry_policy or RetryPolicy() self.consistency_level = consistency_level self.serial_consistency_level = serial_consistency_level @@ -259,6 +268,15 @@ class ProfileManager(object): def __init__(self): self.profiles = dict() + def _profiles_without_explicit_lbps(self): + names = (profile_name for + profile_name, profile in self.profiles.items() + if not profile._load_balancing_policy_explicit) + return tuple( + 'EXEC_PROFILE_DEFAULT' if n is EXEC_PROFILE_DEFAULT else n + for n in names + ) + def distance(self, host): distances = set(p.load_balancing_policy.distance(host) for p in self.profiles.values()) return HostDistance.LOCAL if HostDistance.LOCAL in distances else \ @@ -341,7 +359,14 @@ class Cluster(object): local_dc set (as is the default), the DC is chosen from an arbitrary host in contact_points. In this case, contact_points should contain only nodes from a single, local DC. + + Note: In the next major version, if you specify contact points, you will + also be required to also explicitly specify a load-balancing policy. This + change will help prevent cases where users had hard-to-debug issues + surrounding unintuitive default load-balancing policy behavior. """ + # tracks if contact_points was set explicitly or with default values + _contact_points_explicit = None port = 9042 """ @@ -770,17 +795,12 @@ def __init__(self, Any of the mutable Cluster attributes may be set as keyword arguments to the constructor. """ - if contact_points is not _NOT_SET and load_balancing_policy is None: - log.warn('Cluster.__init__ called with contact_points specified, ' - 'but no load_balancing_policy. In the next major ' - 'version, this will raise an error; please specify a ' - 'load balancing policy. ' - '(contact_points = {cp}, lbp = {lbp}' - ''.format(cp=contact_points, lbp=load_balancing_policy)) - if contact_points is not None: - if contact_points is _UNSET_ARG: + if contact_points is _NOT_SET: + self._contact_points_explicit = False contact_points = ['127.0.0.1'] + else: + self._contact_points_explicit = True if isinstance(contact_points, six.string_types): raise TypeError("contact_points should not be a string, it should be a sequence (e.g. list) of strings") @@ -853,11 +873,35 @@ def __init__(self, raise ValueError("Clusters constructed with execution_profiles should not specify legacy parameters " "load_balancing_policy or default_retry_policy. Configure this in a profile instead.") self._config_mode = _ConfigMode.LEGACY + else: if execution_profiles: self.profile_manager.profiles.update(execution_profiles) self._config_mode = _ConfigMode.PROFILES + if self._contact_points_explicit: + if self._config_mode is _ConfigMode.PROFILES: + default_lbp_profiles = self.profile_manager._profiles_without_explicit_lbps() + if default_lbp_profiles: + log.warn( + 'Cluster.__init__ called with contact_points ' + 'specified, but load-balancing policies are not ' + 'specified in some ExecutionProfiles. In the next ' + 'major version, this will raise an error; please ' + 'specify a load-balancing policy. ' + '(contact_points = {cp}, ' + 'EPs without explicit LBPs = {eps})' + ''.format(cp=contact_points, eps=default_lbp_profiles)) + else: + if load_balancing_policy is None: + log.warn( + 'Cluster.__init__ called with contact_points ' + 'specified, but no load_balancing_policy. In the next ' + 'major version, this will raise an error; please ' + 'specify a load-balancing policy. ' + '(contact_points = {cp}, lbp = {lbp})' + ''.format(cp=contact_points, lbp=load_balancing_policy)) + self.metrics_enabled = metrics_enabled self.ssl_options = ssl_options self.sockopts = sockopts @@ -999,6 +1043,21 @@ def add_execution_profile(self, name, profile, pool_wait_timeout=5): raise ValueError("Cannot add execution profiles when legacy parameters are set explicitly.") if name in self.profile_manager.profiles: raise ValueError("Profile %s already exists") + contact_points_but_no_lbp = ( + self._contact_points_explicit and not + profile._load_balancing_policy_explicit) + if contact_points_but_no_lbp: + log.warn( + 'Tried to add an ExecutionProfile with name {name}. ' + '{self} was explicitly configured with contact_points, but ' + '{ep} was not explicitly configured with a ' + 'load_balancing_policy. In the next major version, trying to ' + 'add an ExecutionProfile without an explicitly configured LBP ' + 'to a cluster with explicitly configured contact_points will ' + 'raise an exception; please specify a load-balancing policy ' + 'in the ExecutionProfile.' + ''.format(name=repr(name), self=self, ep=profile)) + self.profile_manager.profiles[name] = profile profile.load_balancing_policy.populate(self, self.metadata.all_hosts()) # on_up after populate allows things like DCA LBP to choose default local dc diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index c84838a21b..a7ae88cd70 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -16,7 +16,9 @@ except ImportError: import unittest # noqa -from mock import patch +import logging + +from mock import patch, Mock from cassandra import ConsistencyLevel, DriverException, Timeout, Unavailable, RequestExecutionException, ReadTimeout, WriteTimeout, CoordinationFailure, ReadFailure, WriteFailure, FunctionFailure, AlreadyExists,\ InvalidRequest, Unauthorized, AuthenticationFailed, OperationTimedOut, UnsupportedOperation, RequestValidationException, ConfigurationException @@ -30,6 +32,8 @@ from tests import connection_class +log = logging.getLogger(__name__) + class ExceptionTypeTest(unittest.TestCase): def test_exception_types(self): @@ -364,11 +368,26 @@ def test_no_profiles_same_name(self): # cannot add a profile added dynamically self.assertRaises(ValueError, cluster.add_execution_profile, 'two', ExecutionProfile()) - @mock_session_pools - def test_warning_on_no_lbp_with_contact_points(self): + def test_warning_on_no_lbp_with_contact_points_legacy_mode(self): + """ + Test that users are warned when they instantiate a Cluster object in + legacy mode with contact points but no load-balancing policy. + + @since 3.12.0 + @jira_ticket PYTHON-812 + @expected_result logs + + @test_category configuration + """ + self._check_warning_on_no_lbp_with_contact_points( + cluster_kwargs={'contact_points': ['127.0.0.1']} + ) + + def test_warning_on_no_lbp_with_contact_points_profile_mode(self): """ - Test that users are warned when they instantiate a Cluster object with - contact points but no load-balancing policy. + Test that users are warned when they instantiate a Cluster object in + execution profile mode with contact points but no load-balancing + policy. @since 3.12.0 @jira_ticket PYTHON-812 @@ -376,16 +395,58 @@ def test_warning_on_no_lbp_with_contact_points(self): @test_category configuration """ + self._check_warning_on_no_lbp_with_contact_points(cluster_kwargs={ + 'contact_points': ['127.0.0.1'], + 'execution_profiles': {EXEC_PROFILE_DEFAULT: ExecutionProfile()} + }) + + @mock_session_pools + def _check_warning_on_no_lbp_with_contact_points(self, cluster_kwargs): with patch('cassandra.cluster.log') as patched_logger: - Cluster(contact_points=['127.0.0.1']) + Cluster(**cluster_kwargs) patched_logger.warn.assert_called_once() warning_message = patched_logger.warn.call_args[0][0] - self.assertIn('no load_balancing_policy', warning_message) + self.assertIn('please specify a load-balancing policy', warning_message) self.assertIn("contact_points = ['127.0.0.1']", warning_message) - self.assertIn('lbp = None', warning_message) + + def test_no_warning_on_contact_points_with_lbp_legacy_mode(self): + """ + Test that users aren't warned when they instantiate a Cluster object + with contact points and a load-balancing policy in legacy mode. + + @since 3.12.0 + @jira_ticket PYTHON-812 + @expected_result no logs + + @test_category configuration + """ + self._check_no_warning_on_contact_points_with_lbp({ + 'contact_points': ['127.0.0.1'], + 'load_balancing_policy': object() + }) + + def test_no_warning_on_contact_points_with_lbp_profiles_mode(self): + """ + Test that users aren't warned when they instantiate a Cluster object + with contact points and a load-balancing policy in execution profile + mode. + + @since 3.12.0 + @jira_ticket PYTHON-812 + @expected_result no logs + + @test_category configuration + """ + ep_with_lbp = ExecutionProfile(load_balancing_policy=object()) + self._check_no_warning_on_contact_points_with_lbp(cluster_kwargs={ + 'contact_points': ['127.0.0.1'], + 'execution_profiles': { + EXEC_PROFILE_DEFAULT: ep_with_lbp + } + }) @mock_session_pools - def test_no_warning_on_contact_points_with_lbp(self): + def _check_no_warning_on_contact_points_with_lbp(self, cluster_kwargs): """ Test that users aren't warned when they instantiate a Cluster object with contact points and a load-balancing policy. @@ -397,5 +458,37 @@ def test_no_warning_on_contact_points_with_lbp(self): @test_category configuration """ with patch('cassandra.cluster.log') as patched_logger: - Cluster(contact_points=['127.0.0.1'], load_balancing_policy=object()) + Cluster(**cluster_kwargs) + patched_logger.warn.assert_not_called() + + @mock_session_pools + def test_warning_adding_no_lbp_ep_to_cluster_with_contact_points(self): + ep_with_lbp = ExecutionProfile(load_balancing_policy=object()) + cluster = Cluster( + contact_points=['127.0.0.1'], + execution_profiles={EXEC_PROFILE_DEFAULT: ep_with_lbp}) + with patch('cassandra.cluster.log') as patched_logger: + cluster.add_execution_profile( + name='no_lbp', + profile=ExecutionProfile() + ) + + patched_logger.warn.assert_called_once() + warning_message = patched_logger.warn.call_args[0][0] + self.assertIn('no_lbp', warning_message) + self.assertIn('trying to add', warning_message) + self.assertIn('please specify a load-balancing policy', warning_message) + + @mock_session_pools + def test_no_warning_adding_lbp_ep_to_cluster_with_contact_points(self): + ep_with_lbp = ExecutionProfile(load_balancing_policy=object()) + cluster = Cluster( + contact_points=['127.0.0.1'], + execution_profiles={EXEC_PROFILE_DEFAULT: ep_with_lbp}) + with patch('cassandra.cluster.log') as patched_logger: + cluster.add_execution_profile( + name='with_lbp', + profile=ExecutionProfile(load_balancing_policy=Mock(name='lbp')) + ) + patched_logger.warn.assert_not_called() From bb315d4e40a5212578d826f2ab704a774f026dbd Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 27 Oct 2017 09:59:52 -0400 Subject: [PATCH 1264/2005] whitespace --- .../integration/long/upgrade/test_upgrade.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/tests/integration/long/upgrade/test_upgrade.py b/tests/integration/long/upgrade/test_upgrade.py index 02d2443962..8264d818db 100644 --- a/tests/integration/long/upgrade/test_upgrade.py +++ b/tests/integration/long/upgrade/test_upgrade.py @@ -29,8 +29,8 @@ two_to_three_path = upgrade_paths([ - UpgradePath("2.2.9-3.11", {"version": "2.2.9"}, {"version": "3.11"}, {}), - ]) + UpgradePath("2.2.9-3.11", {"version": "2.2.9"}, {"version": "3.11"}, {}), +]) class UpgradeTests(UpgradeBase): @two_to_three_path def test_can_write(self): @@ -113,6 +113,7 @@ def test_can_write(self): self.session.default_consistency_level = ConsistencyLevel.LOCAL_ONE self.assertEqual(self.logger_handler.get_message_count("error", ""), 0) + @two_to_three_path def test_schema_metadata_gets_refreshed(self): """ @@ -174,10 +175,10 @@ def _assert_same_token_map(self, original, new): two_to_three_with_auth_path = upgrade_paths([ - UpgradePath("2.2.9-3.11-auth", {"version": "2.2.9"}, {"version": "3.11"}, - {'authenticator': 'PasswordAuthenticator', - 'authorizer': 'CassandraAuthorizer'}), - ]) + UpgradePath("2.2.9-3.11-auth", {"version": "2.2.9"}, {"version": "3.11"}, + {'authenticator': 'PasswordAuthenticator', + 'authorizer': 'CassandraAuthorizer'}), +]) class UpgradeTestsAuthentication(UpgradeBaseAuth): @two_to_three_with_auth_path def test_can_connect_auth_plain(self): @@ -192,9 +193,9 @@ def test_can_connect_auth_plain(self): @test_category upgrade """ auth_provider = PlainTextAuthProvider( - username="cassandra", - password="cassandra" - ) + username="cassandra", + password="cassandra" + ) self.connect_and_shutdown(auth_provider) while self._upgrade_one_node(): self.connect_and_shutdown(auth_provider) From c966d805fe31c5c2123bd2866bb771468040f381 Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 26 Oct 2017 19:03:04 -0400 Subject: [PATCH 1265/2005] Don't set the keyspace if it's already set in the session --- CHANGELOG.rst | 1 + cassandra/cluster.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index eaa65ba965..509c105763 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -20,6 +20,7 @@ Bug Fixes * Fix sites where `sessions` can change during iteration (PYTHON-793) * cqlengine: allow min_length=0 for Ascii and Text column types (PYTHON-735) * Rare exception when "sys.exit(0)" after query timeouts (PYTHON-752) +* Dont set the session keyspace when preparing statements (PYTHON-843) Others ------ diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 4b6222dad3..5175517b4b 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2349,7 +2349,7 @@ def prepare(self, query, custom_payload=None, keyspace=None): log.exception("Error preparing query:") raise - prepared_keyspace = keyspace if keyspace else self.keyspace + prepared_keyspace = keyspace if keyspace else None prepared_statement = PreparedStatement.from_message( query_id, bind_metadata, pk_indexes, self.cluster.metadata, query, self.keyspace, self._protocol_version, result_metadata, result_metadata_id) From ee515ac26e52be94eba14272a4bf6e0e1ddf6e4a Mon Sep 17 00:00:00 2001 From: Alex Petrov Date: Fri, 27 Oct 2017 19:00:12 +0200 Subject: [PATCH 1266/2005] Add `NO_COMPACT` version to CQL protocol, patch for CASSANDRA-10857. (#846) * Add `NO_COMPACT` version to CQL protocol, patch for CASSANDRA-10857. * add changelog entry; preserve Connection.__init__ API --- CHANGELOG.rst | 1 + cassandra/cluster.py | 9 ++++++++- cassandra/connection.py | 12 ++++++++---- cassandra/protocol.py | 1 + 4 files changed, 18 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index eaa65ba965..b1a4646ac8 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -9,6 +9,7 @@ Features * Warn on Cluster init if contact points are specified but LBP isn't (legacy mode) (PYTHON-812) * Warn on Cluster init if contact points are specified but LBP isn't (exection profile mode) (PYTHON-838) * Include hash of result set metadata in prepared stmt id (PYTHON-808) +* Add NO_COMPACT startup option (PYTHON-839) Bug Fixes --------- diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 4b6222dad3..7753a97399 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -395,6 +395,9 @@ class Cluster(object): """ allow_beta_protocol_version = False + + no_compact = False + """ Setting true injects a flag in all messages that makes the server accept and use "beta" protocol version. Used for testing new protocol features incrementally before the new version is complete. @@ -788,7 +791,8 @@ def __init__(self, execution_profiles=None, allow_beta_protocol_version=False, timestamp_generator=None, - idle_heartbeat_timeout=30): + idle_heartbeat_timeout=30, + no_compact=False): """ ``executor_threads`` defines the number of threads in a pool for handling asynchronous tasks such as extablishing connection pools or refreshing metadata. @@ -821,6 +825,8 @@ def __init__(self, self._protocol_version_explicit = True self.allow_beta_protocol_version = allow_beta_protocol_version + self.no_compact = no_compact + self.auth_provider = auth_provider if load_balancing_policy is not None: @@ -1204,6 +1210,7 @@ def _make_connection_kwargs(self, address, kwargs_dict): kwargs_dict.setdefault('protocol_version', self.protocol_version) kwargs_dict.setdefault('user_type_map', self._user_types) kwargs_dict.setdefault('allow_beta_protocol_version', self.allow_beta_protocol_version) + kwargs_dict.setdefault('no_compact', self.no_compact) return kwargs_dict diff --git a/cassandra/connection.py b/cassandra/connection.py index 0f98d30279..af9b0d45a3 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -203,6 +203,7 @@ class Connection(object): out_buffer_size = 4096 cql_version = None + no_compact = False protocol_version = ProtocolVersion.MAX_SUPPORTED keyspace = None @@ -259,7 +260,7 @@ class Connection(object): def __init__(self, host='127.0.0.1', port=9042, authenticator=None, ssl_options=None, sockopts=None, compression=True, cql_version=None, protocol_version=ProtocolVersion.MAX_SUPPORTED, is_control_connection=False, - user_type_map=None, connect_timeout=None, allow_beta_protocol_version=False): + user_type_map=None, connect_timeout=None, allow_beta_protocol_version=False, no_compact=False): self.host = host self.port = port self.authenticator = authenticator @@ -272,6 +273,7 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self.user_type_map = user_type_map self.connect_timeout = connect_timeout self.allow_beta_protocol_version = allow_beta_protocol_version + self.no_compact = no_compact self._push_watchers = defaultdict(set) self._requests = {} self._iobuf = io.BytesIO() @@ -637,7 +639,7 @@ def _send_options_message(self): "specified", id(self), self.host) self._compressor = None self.cql_version = DEFAULT_CQL_VERSION - self._send_startup_message() + self._send_startup_message(no_compact=self.no_compact) else: log.debug("Sending initial options message for new connection (%s) to %s", id(self), self.host) self.send_msg(OptionsMessage(), self.get_request_id(), self._handle_options_response) @@ -703,14 +705,16 @@ def _handle_options_response(self, options_response): self._compressor, self.decompressor = \ locally_supported_compressions[compression_type] - self._send_startup_message(compression_type) + self._send_startup_message(compression_type, no_compact=self.no_compact) @defunct_on_error - def _send_startup_message(self, compression=None): + def _send_startup_message(self, compression=None, no_compact=False): log.debug("Sending StartupMessage on %s", self) opts = {} if compression: opts['COMPRESSION'] = compression + if no_compact: + opts['NO_COMPACT'] = 'true' sm = StartupMessage(cqlversion=self.cql_version, options=opts) self.send_msg(sm, self.get_request_id(), cb=self._handle_startup_response) log.debug("Sent StartupMessage on %s", self) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index b7d71bc65c..a4bda892f0 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -394,6 +394,7 @@ class StartupMessage(_MessageType): KNOWN_OPTION_KEYS = set(( 'CQL_VERSION', 'COMPRESSION', + 'NO_COMPACT' )) def __init__(self, cqlversion, options): From 778bb440af79d7851c38e2a0038ba058470171e7 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 30 Oct 2017 11:39:58 -0400 Subject: [PATCH 1267/2005] add logging to upgrade tests --- tests/__init__.py | 2 +- tests/integration/long/upgrade/__init__.py | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/tests/__init__.py b/tests/__init__.py index b84a8ed1d3..f5bcf73ed2 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -90,4 +90,4 @@ def is_monkey_patched(): notwindows = unittest.skipUnless(not "Windows" in platform.system(), "This test is not adequate for windows") notpypy = unittest.skipUnless(not platform.python_implementation() == 'PyPy', "This tests is not suitable for pypy") -notmonkeypatch = unittest.skipUnless(MONKEY_PATCH_LOOP, "Skipping this test because monkey patching is required") \ No newline at end of file +notmonkeypatch = unittest.skipUnless(MONKEY_PATCH_LOOP, "Skipping this test because monkey patching is required") diff --git a/tests/integration/long/upgrade/__init__.py b/tests/integration/long/upgrade/__init__.py index 1d22ba8cea..d738fb27dc 100644 --- a/tests/integration/long/upgrade/__init__.py +++ b/tests/integration/long/upgrade/__init__.py @@ -21,6 +21,7 @@ from collections import namedtuple from functools import wraps +import logging from threading import Thread, Event from ccmlib.node import TimeoutError import time @@ -39,6 +40,8 @@ def setup_module(): UPGRADE_CLUSTER = "upgrade_cluster" UpgradePath = namedtuple('UpgradePath', ('name', 'starting_version', 'upgrade_version', 'configuration_options')) +log = logging.getLogger(__name__) + class upgrade_paths(object): """ @@ -52,9 +55,11 @@ def __call__(self, method): def wrapper(*args, **kwargs): for path in self.paths: self_from_decorated = args[0] + log.debug('setting up {path}'.format(path=path)) self_from_decorated.UPGRADE_PATH = path self_from_decorated._setUp() method(*args, **kwargs) + log.debug('tearing down {path}'.format(path=path)) self_from_decorated._tearDown() return wrapper @@ -114,6 +119,7 @@ def start_upgrade(self, time_node_upgrade): """ Starts the upgrade in a different thread """ + log.debug('Starting upgrade in new thread') self.upgrade_thread = Thread(target=self._upgrade, args=(time_node_upgrade,)) self.upgrade_thread.start() @@ -155,6 +161,7 @@ def _upgrade_one_node(self): self.last_node_upgraded += 1 node_to_upgrade = self.nodes[self.last_node_upgraded] + log.debug('Upgrading node {}'.format(node_to_upgrade)) node_to_upgrade.drain() node_to_upgrade.stop(gently=True) From ed3dadd899a9c6006ff26bfb89a15348b417a031 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 30 Oct 2017 14:40:48 -0400 Subject: [PATCH 1268/2005] clearer constant name When I read UPGRADE_CLUSTER, I interpret it as a bool meaning "the cluster should be upgraded" --- tests/integration/long/upgrade/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/long/upgrade/__init__.py b/tests/integration/long/upgrade/__init__.py index d738fb27dc..b53a741a5e 100644 --- a/tests/integration/long/upgrade/__init__.py +++ b/tests/integration/long/upgrade/__init__.py @@ -37,7 +37,7 @@ def setup_module(): remove_cluster() -UPGRADE_CLUSTER = "upgrade_cluster" +UPGRADE_CLUSTER_NAME = "upgrade_cluster" UpgradePath = namedtuple('UpgradePath', ('name', 'starting_version', 'upgrade_version', 'configuration_options')) log = logging.getLogger(__name__) @@ -89,7 +89,7 @@ def _setUp(self): to the variable UPGRADE_PATH. """ remove_cluster() - self.cluster = use_cluster(UPGRADE_CLUSTER + self.UPGRADE_PATH.name, [3], + self.cluster = use_cluster(UPGRADE_CLUSTER_NAME + self.UPGRADE_PATH.name, [3], ccm_options=self.UPGRADE_PATH.starting_version, configuration_options=self.UPGRADE_PATH.configuration_options) self.nodes = self.cluster.nodelist() From 0f3b5bee3d2fe4dbc55133d1cde633272e43bf96 Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 25 Oct 2017 15:15:46 -0400 Subject: [PATCH 1269/2005] Added tests for PYTHON-839 --- tests/integration/__init__.py | 3 +- tests/integration/standard/test_cluster.py | 62 +++++++++++++++++++++- 2 files changed, 62 insertions(+), 3 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index fbc0654ddf..83bda91b36 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -162,7 +162,7 @@ def set_default_cass_ip(): def set_default_beta_flag_true(): defaults = list(Cluster.__init__.__defaults__) - defaults = defaults[:-3] + [True] + defaults[-2:] + defaults = (defaults[:28] + [True] + defaults[29:]) try: Cluster.__init__.__defaults__ = tuple(defaults) except: @@ -255,6 +255,7 @@ def get_unsupported_upper_protocol(): greaterthanorequalcass3_10 = unittest.skipUnless(CASSANDRA_VERSION >= '3.10', 'Cassandra version 3.10 or greater required') greaterthanorequalcass3_11 = unittest.skipUnless(CASSANDRA_VERSION >= '3.11', 'Cassandra version 3.10 or greater required') greaterthanorequalcass40 = unittest.skipUnless(CASSANDRA_VERSION >= '4.0', 'Cassandra version 4.0 or greater required') +lessthanorequalcass40 = unittest.skipIf(CASSANDRA_VERSION >= '4.0', 'Cassandra version 4.0 or greater required') lessthancass30 = unittest.skipUnless(CASSANDRA_VERSION < '3.0', 'Cassandra version less then 3.0 required') dseonly = unittest.skipUnless(DSE_VERSION, "Test is only applicalbe to DSE clusters") pypy = unittest.skipUnless(platform.python_implementation() == "PyPy", "Test is skipped unless it's on PyPy") diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 76ec65ad84..0e726308cd 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -38,7 +38,7 @@ from tests import notwindows from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, CASSANDRA_VERSION, \ execute_until_pass, execute_with_long_wait_retry, get_node, MockLoggingHandler, get_unsupported_lower_protocol, \ - get_unsupported_upper_protocol, protocolv5, local, CASSANDRA_IP + get_unsupported_upper_protocol, protocolv5, local, CASSANDRA_IP, greaterthanorequalcass30, lessthanorequalcass40 from tests.integration.util import assert_quiescent_pool_state import sys @@ -1062,6 +1062,63 @@ def test_replicas_are_queried(self): session.execute('''DROP TABLE test1rf.table_with_big_key''') + @unittest.skip + @greaterthanorequalcass30 + @lessthanorequalcass40 + def test_compact_option(self): + """ + Test the driver can connect with the no_compact option and the results + are as expected. This test is very similar to the corresponding dtest + + @since 3.12 + @jira_ticket PYTHON-366 + @expected_result only one hosts' metadata will be populated + + @test_category connection + """ + nc_cluster = Cluster(protocol_version=PROTOCOL_VERSION, no_compact=True) + nc_session = nc_cluster.connect() + + cluster = Cluster(protocol_version=PROTOCOL_VERSION, no_compact=False) + session = cluster.connect() + + self.addCleanup(cluster.shutdown) + self.addCleanup(nc_cluster.shutdown) + + nc_session.set_keyspace("test3rf") + session.set_keyspace("test3rf") + + nc_session.execute( + "CREATE TABLE IF NOT EXISTS compact_table (k int PRIMARY KEY, v1 int, v2 int) WITH COMPACT STORAGE;") + + for i in range(1, 5): + nc_session.execute( + "INSERT INTO compact_table (k, column1, v1, v2, value) VALUES " + "({i}, 'a{i}', {i}, {i}, textAsBlob('b{i}'))".format(i=i)) + nc_session.execute( + "INSERT INTO compact_table (k, column1, v1, v2, value) VALUES " + "({i}, 'a{i}{i}', {i}{i}, {i}{i}, textAsBlob('b{i}{i}'))".format(i=i)) + + nc_results = nc_session.execute("SELECT * FROM compact_table") + self.assertEqual( + set(nc_results.current_rows), + {(1, u'a1', 11, 11, 'b1'), + (1, u'a11', 11, 11, 'b11'), + (2, u'a2', 22, 22, 'b2'), + (2, u'a22', 22, 22, 'b22'), + (3, u'a3', 33, 33, 'b3'), + (3, u'a33', 33, 33, 'b33'), + (4, u'a4', 44, 44, 'b4'), + (4, u'a44', 44, 44, 'b44')}) + + results = session.execute("SELECT * FROM compact_table") + self.assertEqual( + set(results.current_rows), + {(1, 11, 11), + (2, 22, 22), + (3, 33, 33), + (4, 44, 44)}) + def _assert_replica_queried(self, trace, only_replicas=True): queried_hosts = set() for row in trace.events: @@ -1255,6 +1312,7 @@ def test_down_event_with_active_connection(self): time.sleep(.01) self.assertTrue(was_marked_down) + @local class DontPrepareOnIgnoredHostsTest(unittest.TestCase): ignored_addresses = ['127.0.0.3'] @@ -1292,6 +1350,7 @@ def test_prepare_on_ignored_hosts(self): self.assertEqual(call(unignored_address), c) cluster.shutdown() + @local class DuplicateRpcTest(unittest.TestCase): @@ -1331,7 +1390,6 @@ def test_duplicate(self): test_cluster.shutdown() - @protocolv5 class BetaProtocolTest(unittest.TestCase): From 72a0935d75a24e26c927f38dab1242e83d1a5091 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 2 Nov 2017 16:26:54 -0400 Subject: [PATCH 1270/2005] bump release version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index c2750fc023..68356c0517 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 11, 0, 'post0') +__version_info__ = (3, 12, 0) __version__ = '.'.join(map(str, __version_info__)) From 0855cbde125b57d65dea771a2e99db90b13e31fa Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 2 Nov 2017 16:30:25 -0400 Subject: [PATCH 1271/2005] add new version to docs.yaml --- docs.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs.yaml b/docs.yaml index f2e3768a0e..38181227e5 100644 --- a/docs.yaml +++ b/docs.yaml @@ -13,6 +13,8 @@ sections: prebuilds: - python setup.py build_ext --inplace --force --no-murmur3 --no-cython versions: + - name: '3.12' + ref: '72a0935d' - name: '3.11' ref: '3.11.0' - name: '3.10' From a89563ff426fd5360dc6801ba952756b4f75a862 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 6 Nov 2017 12:33:41 -0500 Subject: [PATCH 1272/2005] version 3.12.0 changelog --- CHANGELOG.rst | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 16ef2f2adb..4093c72e8d 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,5 +1,6 @@ 3.12.0 ====== +November 6, 2017 Features -------- @@ -10,6 +11,7 @@ Features * Warn on Cluster init if contact points are specified but LBP isn't (exection profile mode) (PYTHON-838) * Include hash of result set metadata in prepared stmt id (PYTHON-808) * Add NO_COMPACT startup option (PYTHON-839) +* Add new exception type for CDC (PYTHON-837) Bug Fixes --------- @@ -22,14 +24,11 @@ Bug Fixes * cqlengine: allow min_length=0 for Ascii and Text column types (PYTHON-735) * Rare exception when "sys.exit(0)" after query timeouts (PYTHON-752) * Dont set the session keyspace when preparing statements (PYTHON-843) +* Use of DCAwareRoundRobinPolicy raises NoHostAvailable exception (PYTHON-781) -Others +Other ------ * Remove DeprecationWarning when using WhiteListRoundRobinPolicy (PYTHON-810) - -Other ------ - * Bump Cython dependency version to 0.27 (PYTHON-833) 3.11.0 @@ -41,7 +40,6 @@ Features -------- * Add idle_heartbeat_timeout cluster option to tune how long to wait for heartbeat responses. (PYTHON-762) * Add HostFilterPolicy (PYTHON-761) -* Add new exception type for CDC (PYTHON-837) Bug Fixes --------- From 43b9c995e7fb335e4f88e931f69f575435ce2ba5 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 6 Nov 2017 12:43:59 -0500 Subject: [PATCH 1273/2005] fix doc generation --- docs/api/cassandra.rst | 3 --- 1 file changed, 3 deletions(-) diff --git a/docs/api/cassandra.rst b/docs/api/cassandra.rst index b18fdc20db..d46aae56cb 100644 --- a/docs/api/cassandra.rst +++ b/docs/api/cassandra.rst @@ -31,9 +31,6 @@ .. autoexception:: RequestExecutionException() :members: -.. autoexception:: CDCWriteException() - :members: - .. autoexception:: Unavailable() :members: From 40393ffbbac4e7b5fff8519ef3d639169563aea5 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 6 Nov 2017 12:46:22 -0500 Subject: [PATCH 1274/2005] fix 3.12.0 docs ref --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index 38181227e5..8de9bff5e9 100644 --- a/docs.yaml +++ b/docs.yaml @@ -14,7 +14,7 @@ prebuilds: - python setup.py build_ext --inplace --force --no-murmur3 --no-cython versions: - name: '3.12' - ref: '72a0935d' + ref: '43b9c995' - name: '3.11' ref: '3.11.0' - name: '3.10' From 15ef6acefbb3806130cd388f1528804e8f179292 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 6 Nov 2017 12:50:30 -0500 Subject: [PATCH 1275/2005] bump to post0 version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 68356c0517..7a88ad9bc5 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 12, 0) +__version_info__ = (3, 12, 0, 'post0') __version__ = '.'.join(map(str, __version_info__)) From 08777cb8c37a413dea4cc0c7d9dd8dd2ca032703 Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 7 Nov 2017 15:52:36 -0500 Subject: [PATCH 1276/2005] Added tests for prepared metadata id and conditional statements --- .../standard/test_prepared_statements.py | 62 +++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index 8eafc882e8..3cbd36a2c5 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -447,6 +447,7 @@ def test_prepared_id_is_update(self): """ prepared_statement = self.session.prepare("SELECT * from {} WHERE a = ?".format(self.table_name)) id_before = prepared_statement.result_metadata_id + self.assertEqual(len(prepared_statement.result_metadata), 3) self.session.execute("ALTER TABLE {} ADD c int".format(self.table_name)) bound_statement = prepared_statement.bind((1, )) @@ -455,6 +456,7 @@ def test_prepared_id_is_update(self): id_after = prepared_statement.result_metadata_id self.assertNotEqual(id_before, id_after) + self.assertEqual(len(prepared_statement.result_metadata), 4) def test_prepared_id_is_updated_across_pages(self): """ @@ -468,6 +470,7 @@ def test_prepared_id_is_updated_across_pages(self): """ prepared_statement = self.session.prepare("SELECT * from {}".format(self.table_name)) id_before = prepared_statement.result_metadata_id + self.assertEqual(len(prepared_statement.result_metadata), 3) prepared_statement.fetch_size = 2 result = self.session.execute(prepared_statement.bind((None))) @@ -483,6 +486,7 @@ def test_prepared_id_is_updated_across_pages(self): self.assertEqual(result_set, expected_result_set) self.assertNotEqual(id_before, id_after) + self.assertEqual(len(prepared_statement.result_metadata), 4) def test_prepare_id_is_updated_across_session(self): """ @@ -499,6 +503,7 @@ def test_prepare_id_is_updated_across_session(self): stm = "SELECT * from {} WHERE a = ?".format(self.table_name) one_prepared_stm = one_session.prepare(stm) + self.assertEqual(len(one_prepared_stm.result_metadata), 3) one_id_before = one_prepared_stm.result_metadata_id @@ -507,6 +512,7 @@ def test_prepare_id_is_updated_across_session(self): one_id_after = one_prepared_stm.result_metadata_id self.assertNotEqual(one_id_before, one_id_after) + self.assertEqual(len(one_prepared_stm.result_metadata), 4) def test_not_reprepare_invalid_statements(self): """ @@ -521,3 +527,59 @@ def test_not_reprepare_invalid_statements(self): self.session.execute("ALTER TABLE {} DROP d".format(self.table_name)) with self.assertRaises(InvalidRequest): self.session.execute(prepared_statement.bind((1, ))) + + def test_id_is_not_updated_conditional_v4(self): + """ + Test that verifies that the result_metadata and the + result_metadata_id are udpated correctly in conditional statements + in protocol V4 + + @since 3.13 + @jira_ticket PYTHON-847 + """ + cluster = Cluster(protocol_version=ProtocolVersion.V4) + session = cluster.connect() + self.addCleanup(cluster.shutdown) + self._test_updated_conditional(session, 9) + + def test_id_is_not_updated_conditional_v5(self): + """ + Test that verifies that the result_metadata and the + result_metadata_id are udpated correctly in conditional statements + in protocol V5 + + @since 3.13 + @jira_ticket PYTHON-847 + """ + cluster = Cluster(protocol_version=ProtocolVersion.V5) + session = cluster.connect() + self.addCleanup(cluster.shutdown) + self._test_updated_conditional(session, 10) + + def _test_updated_conditional(self, session, value): + prepared_statement = session.prepare("INSERT INTO {}(a, b, d) VALUES " + "(?, ? , ?) IF NOT EXISTS".format(self.table_name)) + first_id = prepared_statement.result_metadata_id + self.assertEqual(prepared_statement.result_metadata, []) + + # Sucessfull conditional update + result = session.execute(prepared_statement, (value, value, value)) + self.assertEqual(result[0], (True, )) + second_id = prepared_statement.result_metadata_id + self.assertEqual(first_id, second_id) + + # Failed conditional update + result = session.execute(prepared_statement, (value, value, value)) + self.assertEqual(result[0], (False, value, value, value)) + third_id = prepared_statement.result_metadata_id + self.assertEqual(first_id, third_id) + self.assertEqual(prepared_statement.result_metadata, []) + + session.execute("ALTER TABLE {} ADD c int".format(self.table_name)) + + # Failed conditional update + result = session.execute(prepared_statement, (value, value, value)) + self.assertEqual(result[0], (False, value, value, None, value)) + fourth_id = prepared_statement.result_metadata_id + self.assertEqual(first_id, fourth_id) + self.assertEqual(prepared_statement.result_metadata, []) From 768325e250f8707770ad2dd77bcf8de0b085cc5b Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 19 Oct 2017 09:36:11 -0400 Subject: [PATCH 1277/2005] Added upgrade tests --- build.yaml | 20 ++ tests/integration/__init__.py | 35 ++- tests/integration/long/upgrade/__init__.py | 183 ++++++++++++ .../integration/long/upgrade/test_upgrade.py | 278 ++++++++++++++++++ 4 files changed, 503 insertions(+), 13 deletions(-) create mode 100644 tests/integration/long/upgrade/__init__.py create mode 100644 tests/integration/long/upgrade/test_upgrade.py diff --git a/build.yaml b/build.yaml index cfc4269878..dde5355cbb 100644 --- a/build.yaml +++ b/build.yaml @@ -58,6 +58,18 @@ schedules: env_vars: | EVENT_LOOP_MANAGER='twisted' + upgrade_tests: + schedule: adhoc + branches: + include: [master, python-546] + env_vars: | + EVENT_LOOP_MANAGER='libev' + JUST_UPGRADE=True + matrix: + exclude: + - python: [3.4, 3.6] + - cassandra: ['2.0', '2.1', '2.2', '3.0'] + python: - 2.7 - 3.4 @@ -118,6 +130,14 @@ build: popd + + echo "JUST_UPGRADE: $JUST_UPGRADE" + if [[ $JUST_UPGRADE == 'True' ]]; then + EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=upgrade_results.xml tests/integration/upgrade || true + exit 0 + fi + + # Run the unit tests, this is not done in travis because # it takes too much time for the whole matrix to build with cython if [[ $CYTHON == 'CYTHON' ]]; then diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index fbc0654ddf..bf140e98f7 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -31,9 +31,11 @@ from threading import Event from subprocess import call from itertools import groupby +import six from cassandra import OperationTimedOut, ReadTimeout, ReadFailure, WriteTimeout, WriteFailure, AlreadyExists, \ InvalidRequest +from cassandra.cluster import NoHostAvailable from cassandra.protocol import ConfigurationException @@ -339,16 +341,22 @@ def is_current_cluster(cluster_name, node_counts): return False -def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[]): +def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[], set_keyspace=True, ccm_options=None, + configuration_options={}): set_default_cass_ip() + if ccm_options is None: + ccm_options = CCM_KWARGS + cassandra_version = ccm_options.get('version', CASSANDRA_VERSION) + global CCM_CLUSTER if USE_CASS_EXTERNAL: if CCM_CLUSTER: log.debug("Using external CCM cluster {0}".format(CCM_CLUSTER.name)) else: log.debug("Using unnamed external cluster") - setup_keyspace(ipformat=ipformat, wait=False) + if set_keyspace and start: + setup_keyspace(ipformat=ipformat, wait=False) return if is_current_cluster(cluster_name, nodes): @@ -362,27 +370,26 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[]): CCM_CLUSTER = CCMClusterFactory.load(path, cluster_name) log.debug("Found existing CCM cluster, {0}; clearing.".format(cluster_name)) CCM_CLUSTER.clear() - CCM_CLUSTER.set_install_dir(**CCM_KWARGS) + CCM_CLUSTER.set_install_dir(**ccm_options) + CCM_CLUSTER.set_configuration_options(configuration_options) except Exception: ex_type, ex, tb = sys.exc_info() log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb - log.debug("Creating new CCM cluster, {0}, with args {1}".format(cluster_name, CCM_KWARGS)) + log.debug("Creating new CCM cluster, {0}, with args {1}".format(cluster_name, ccm_options)) if DSE_VERSION: log.error("creating dse cluster") - CCM_CLUSTER = DseCluster(path, cluster_name, **CCM_KWARGS) + CCM_CLUSTER = DseCluster(path, cluster_name, **ccm_options) else: - CCM_CLUSTER = CCMCluster(path, cluster_name, **CCM_KWARGS) + CCM_CLUSTER = CCMCluster(path, cluster_name, **ccm_options) CCM_CLUSTER.set_configuration_options({'start_native_transport': True}) - if CASSANDRA_VERSION >= '2.2': + if cassandra_version >= '2.2': CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True}) - if CASSANDRA_VERSION >= '3.0': + if cassandra_version >= '3.0': CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True}) - if 'spark' in workloads: - config_options = {"initial_spark_worker_resources": 0.1} - CCM_CLUSTER.set_dse_configuration_options(config_options) common.switch_cluster(path, cluster_name) + CCM_CLUSTER.set_configuration_options(configuration_options) CCM_CLUSTER.populate(nodes, ipformat=ipformat) try: jvm_args = [] @@ -400,18 +407,20 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[]): # Added to wait for slow nodes to start up for node in CCM_CLUSTER.nodes.values(): wait_for_node_socket(node, 120) - setup_keyspace(ipformat=ipformat) + if set_keyspace: + setup_keyspace(ipformat=ipformat) except Exception: log.exception("Failed to start CCM cluster; removing cluster.") if os.name == "nt": if CCM_CLUSTER: - for node in CCM_CLUSTER.nodes.itervalues(): + for node in six.itervalues(CCM_CLUSTER.nodes): os.system("taskkill /F /PID " + str(node.pid)) else: call(["pkill", "-9", "-f", ".ccm"]) remove_cluster() raise + return CCM_CLUSTER def teardown_package(): diff --git a/tests/integration/long/upgrade/__init__.py b/tests/integration/long/upgrade/__init__.py new file mode 100644 index 0000000000..27e424635d --- /dev/null +++ b/tests/integration/long/upgrade/__init__.py @@ -0,0 +1,183 @@ +# Copyright 2013-2017 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from tests.integration import CCM_KWARGS, use_cluster, remove_cluster, MockLoggingHandler +from tests.integration import setup_keyspace + +from cassandra.cluster import Cluster +from cassandra import cluster + +from collections import namedtuple +from functools import wraps +from threading import Thread, Event +from ccmlib.node import TimeoutError +import time +import logging + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + + +def setup_module(): + remove_cluster() + + +UPGRADE_CLUSTER = "upgrade_cluster" +UpgradePath = namedtuple('UpgradePath', ('name', 'starting_version', 'upgrade_version', 'configuration_options')) + + +class upgrade_paths(object): + """ + Decorator used to specify the upgrade paths for a particular method + """ + def __init__(self, paths): + self.paths = paths + + def __call__(self, method): + @wraps(method) + def wrapper(*args, **kwargs): + for path in self.paths: + self_from_decorated = args[0] + self_from_decorated.UPGRADE_PATH = path + self_from_decorated._upgrade_step_setup() + method(*args, **kwargs) + self_from_decorated._upgrade_step_teardown() + return wrapper + + +class UpgradeBase(unittest.TestCase): + """ + Base class for the upgrade tests. The _setup method + will clean the environment and start the appropriate C* version according + to the upgrade path. The upgrade can be done in a different thread using the + start_upgrade upgrade_method (this would be the most realistic scenario) + or node by node, waiting for the upgrade to happen, using _upgrade_one_node method + """ + UPGRADE_PATH = None + start_cluster = True + set_keyspace = True + + @classmethod + def setUpClass(cls): + cls.logger_handler = MockLoggingHandler() + logger = logging.getLogger(cluster.__name__) + logger.addHandler(cls.logger_handler) + + def _upgrade_step_setup(self): + """ + This is not the regular _setUp method because it will be called from + the decorator instead of letting nose handle it. + This setup method will start a cluster with the right version according + to the variable UPGRADE_PATH. + """ + remove_cluster() + self.cluster = use_cluster(UPGRADE_CLUSTER + self.UPGRADE_PATH.name, [3], + ccm_options=self.UPGRADE_PATH.starting_version, set_keyspace=self.set_keyspace, + configuration_options=self.UPGRADE_PATH.configuration_options) + self.nodes = self.cluster.nodelist() + self.last_node_upgraded = None + self.upgrade_done = Event() + self.upgrade_thread = None + + if self.start_cluster: + setup_keyspace() + + self.cluster_driver = Cluster() + self.session = self.cluster_driver.connect() + self.logger_handler.reset() + + def _upgrade_step_teardown(self): + """ + special tearDown method called by the decorator after the method has ended + """ + if self.upgrade_thread: + self.upgrade_thread.join(timeout=5) + self.upgrade_thread = None + + if self.start_cluster: + self.cluster_driver.shutdown() + + def start_upgrade(self, time_node_upgrade): + """ + Starts the upgrade in a different thread + """ + self.upgrade_thread = Thread(target=self._upgrade, args=(time_node_upgrade,)) + self.upgrade_thread.start() + + def _upgrade(self, time_node_upgrade): + """ + Starts the upgrade in the same thread + """ + start_time = time.time() + for node in self.nodes: + self.upgrade_node(node) + end_time = time.time() + time_to_upgrade = end_time - start_time + if time_node_upgrade > time_to_upgrade: + time.sleep(time_node_upgrade - time_to_upgrade) + self.upgrade_done.set() + + def is_upgraded(self): + """ + Returns True if the upgrade has finished and False otherwise + """ + return self.upgrade_done.is_set() + + def wait_for_upgrade(self, timeout=None): + """ + Waits until the upgrade has completed + """ + self.upgrade_done.wait(timeout=timeout) + + def upgrade_node(self, node): + """ + Upgrades only one node. Return True if the upgrade + has finished and False otherwise + """ + node.drain() + node.stop(gently=True) + + node.set_install_dir(**self.UPGRADE_PATH.upgrade_version) + + # There must be a cleaner way of doing this, but it's necessary here + # to call the private method from cluster __update_topology_files + self.cluster._Cluster__update_topology_files() + try: + node.start(wait_for_binary_proto=True, wait_other_notice=True) + except TimeoutError: + self.fail("Error starting C* node while upgrading") + + return True + + +class UpgradeBaseAuth(UpgradeBase): + """ + Base class of authentication test, the authentication parameters for + C* still have to be specified within the upgrade path variable + """ + start_cluster = False + set_keyspace = False + + + def _upgrade_step_setup(self): + """ + We sleep here for the same reason as we do in test_authentication.py: + there seems to be some race, with some versions of C* taking longer to + get the auth (and default user) setup. Sleep here to give it a chance + """ + super(UpgradeBaseAuth, self)._upgrade_step_setup() + time.sleep(10) diff --git a/tests/integration/long/upgrade/test_upgrade.py b/tests/integration/long/upgrade/test_upgrade.py new file mode 100644 index 0000000000..514f46f183 --- /dev/null +++ b/tests/integration/long/upgrade/test_upgrade.py @@ -0,0 +1,278 @@ +# Copyright 2013-2017 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import time +from itertools import count + +from cassandra.auth import PlainTextAuthProvider, SaslAuthProvider +from cassandra.cluster import ConsistencyLevel, Cluster, DriverException, ExecutionProfile +from cassandra.policies import ConstantSpeculativeExecutionPolicy +from tests.integration.long.upgrade import UpgradeBase, UpgradeBaseAuth, UpgradePath, upgrade_paths + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + + +two_to_three_path = upgrade_paths([ + UpgradePath("2.2.9-3.11", {"version": "2.2.9"}, {"version": "3.11"}, {}), + ]) +class UpgradeTests(UpgradeBase): + @two_to_three_path + def test_can_write(self): + """ + Verify that the driver will keep querying C* even if there is a host down while being + upgraded and that all the writes will eventually succeed + @since 3.12 + @jira_ticket PYTHON-546 + @expected_result all the writes succeed + + @test_category upgrade + """ + self.start_upgrade(0) + + c = count() + while not self.is_upgraded(): + self.session.execute("INSERT INTO test3rf.test(k, v) VALUES (%s, 0)", (next(c), )) + time.sleep(0.0001) + + self.session.default_consistency_level = ConsistencyLevel.ALL + total_number_of_inserted = self.session.execute("SELECT COUNT(*) from test3rf.test")[0][0] + self.assertEqual(total_number_of_inserted, next(c)) + + self.session.default_consistency_level = ConsistencyLevel.LOCAL_ONE + self.assertEqual(self.logger_handler.get_message_count("error", ""), 0) + + @two_to_three_path + def test_can_connect(self): + """ + Verify that the driver can connect to all the nodes + despite some nodes being in different versions + @since 3.12 + @jira_ticket PYTHON-546 + @expected_result the driver connects successfully and can execute queries against + all the hosts + + @test_category upgrade + """ + def connect_and_shutdown(): + cluster = Cluster() + session = cluster.connect(wait_for_all_pools=True) + queried_hosts = set() + for _ in range(10): + results = session.execute("SELECT * from system.local") + self.assertGreater(len(results.current_rows), 0) + self.assertEqual(len(results.response_future.attempted_hosts), 1) + queried_hosts.add(results.response_future.attempted_hosts[0]) + self.assertEqual(len(queried_hosts), 3) + cluster.shutdown() + + connect_and_shutdown() + for node in self.nodes: + self.upgrade_node(node) + connect_and_shutdown() + + connect_and_shutdown() + + +class UpgradeTestsMetadata(UpgradeBase): + @two_to_three_path + def test_can_write(self): + """ + Verify that the driver will keep querying C* even if there is a host down while being + upgraded and that all the writes will eventually succeed + @since 3.12 + @jira_ticket PYTHON-546 + @expected_result all the writes succeed + + @test_category upgrade + """ + self.start_upgrade(0) + + c = count() + while not self.is_upgraded(): + self.session.execute("INSERT INTO test3rf.test(k, v) VALUES (%s, 0)", (next(c),)) + time.sleep(0.0001) + + self.session.default_consistency_level = ConsistencyLevel.ALL + total_number_of_inserted = self.session.execute("SELECT COUNT(*) from test3rf.test")[0][0] + self.assertEqual(total_number_of_inserted, next(c)) + + self.session.default_consistency_level = ConsistencyLevel.LOCAL_ONE + self.assertEqual(self.logger_handler.get_message_count("error", ""), 0) + @two_to_three_path + def test_schema_metadata_gets_refreshed(self): + """ + Verify that the driver fails to update the metadata while connected against + different versions of nodes. This won't succeed because each node will report a + different schema version + + @since 3.12 + @jira_ticket PYTHON-546 + @expected_result the driver raises DriverException when updating the schema + metadata while upgrading + all the hosts + + @test_category metadata + """ + original_meta = self.cluster_driver.metadata.keyspaces + number_of_nodes = len(self.cluster.nodelist()) + + nodes = self.nodes + for node in nodes[1:]: + self.upgrade_node(node) + # Wait for the control connection to reconnect + time.sleep(20) + + with self.assertRaises(DriverException): + self.cluster_driver.refresh_schema_metadata(max_schema_agreement_wait=10) + + self.upgrade_node(nodes[0]) + self.cluster_driver.refresh_schema_metadata(max_schema_agreement_wait=20) + self.assertNotEqual(original_meta, self.cluster_driver.metadata.keyspaces) + + @two_to_three_path + def test_schema_nodes_gets_refreshed(self): + """ + Verify that the driver token map and node list gets rebuild correctly while upgrading. + The token map and the node list should be the same after each node upgrade + + @since 3.12 + @jira_ticket PYTHON-546 + @expected_result the token map and the node list stays consistent with each node upgrade + metadata while upgrading + all the hosts + + @test_category metadata + """ + for node in self.nodes: + token_map = self.cluster_driver.metadata.token_map + self.upgrade_node(node) + # Wait for the control connection to reconnect + time.sleep(20) + + self.cluster_driver.refresh_nodes(force_token_rebuild=True) + self._assert_same_token_map(token_map, self.cluster_driver.metadata.token_map) + + def _assert_same_token_map(self, original, new): + self.assertIsNot(original, new) + self.assertEqual(original.tokens_to_hosts_by_ks, new.tokens_to_hosts_by_ks) + self.assertEqual(original.token_to_host_owner, new.token_to_host_owner) + self.assertEqual(original.ring, new.ring) + + +two_to_three_with_auth_path = upgrade_paths([ + UpgradePath("2.2.9-3.11-auth", {"version": "2.2.9"}, {"version": "3.11"}, + {'authenticator': 'PasswordAuthenticator', + 'authorizer': 'CassandraAuthorizer'}), + ]) +class UpgradeTestsAuthentication(UpgradeBaseAuth): + @two_to_three_with_auth_path + def test_can_connect_auth_plain(self): + """ + Verify that the driver can connect despite some nodes being in different versions + with plain authentication + @since 3.12 + @jira_ticket PYTHON-546 + @expected_result the driver connects successfully and can execute queries against + all the hosts + + @test_category upgrade + """ + auth_provider = PlainTextAuthProvider( + username="cassandra", + password="cassandra" + ) + self.connect_and_shutdown(auth_provider) + for node in self.nodes: + self.upgrade_node(node) + self.connect_and_shutdown(auth_provider) + + self.connect_and_shutdown(auth_provider) + + @two_to_three_with_auth_path + def test_can_connect_auth_sasl(self): + """ + Verify that the driver can connect despite some nodes being in different versions + with ssl authentication + @since 3.12 + @jira_ticket PYTHON-546 + @expected_result the driver connects successfully and can execute queries against + all the hosts + + @test_category upgrade + """ + sasl_kwargs = {'service': 'cassandra', + 'mechanism': 'PLAIN', + 'qops': ['auth'], + 'username': 'cassandra', + 'password': 'cassandra'} + auth_provider = SaslAuthProvider(**sasl_kwargs) + self.connect_and_shutdown(auth_provider) + for node in self.nodes: + self.upgrade_node(node) + self.connect_and_shutdown(auth_provider) + + self.connect_and_shutdown(auth_provider) + + def connect_and_shutdown(self, auth_provider): + cluster = Cluster(idle_heartbeat_interval=0, + auth_provider=auth_provider) + session = cluster.connect(wait_for_all_pools=True) + queried_hosts = set() + for _ in range(10): + results = session.execute("SELECT * from system.local") + self.assertGreater(len(results.current_rows), 0) + self.assertEqual(len(results.response_future.attempted_hosts), 1) + queried_hosts.add(results.response_future.attempted_hosts[0]) + self.assertEqual(len(queried_hosts), 3) + cluster.shutdown() + + +class UpgradeTestsPolicies(UpgradeBase): + @two_to_three_path + def test_can_write_speculative(self): + """ + Verify that the driver will keep querying C* even if there is a host down while being + upgraded and that all the writes will eventually succeed using the ConstantSpeculativeExecutionPolicy + policy + @since 3.12 + @jira_ticket PYTHON-546 + @expected_result all the writes succeed + + @test_category upgrade + """ + spec_ep_rr = ExecutionProfile(speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.5, 10), + request_timeout=12) + cluster = Cluster() + self.addCleanup(cluster.shutdown) + cluster.add_execution_profile("spec_ep_rr", spec_ep_rr) + + session = cluster.connect() + + self.start_upgrade(0) + + c = count() + while not self.is_upgraded(): + session.execute("INSERT INTO test3rf.test(k, v) VALUES (%s, 0)", (next(c),), + execution_profile='spec_ep_rr') + time.sleep(0.0001) + + session.default_consistency_level = ConsistencyLevel.ALL + total_number_of_inserted = session.execute("SELECT COUNT(*) from test3rf.test")[0][0] + self.assertEqual(total_number_of_inserted, next(c)) + + self.assertEqual(self.logger_handler.get_message_count("error", ""), 0) From 570cb321185534770eb2ea6c88439c1db5f5424a Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 10 Nov 2017 10:43:52 -0500 Subject: [PATCH 1278/2005] Refactored prepared metadata id conditional tests --- .../standard/test_prepared_statements.py | 38 ++++++++++--------- 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index 3cbd36a2c5..715fbcd83a 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -28,6 +28,10 @@ set_default_beta_flag_true, BasicSharedKeyspaceUnitTestCase) +import logging + + +LOG = logging.getLogger(__name__) def setup_module(): use_singledc() @@ -557,29 +561,27 @@ def test_id_is_not_updated_conditional_v5(self): self._test_updated_conditional(session, 10) def _test_updated_conditional(self, session, value): - prepared_statement = session.prepare("INSERT INTO {}(a, b, d) VALUES " - "(?, ? , ?) IF NOT EXISTS".format(self.table_name)) + prepared_statement = session.prepare( + "INSERT INTO {}(a, b, d) VALUES " + "(?, ? , ?) IF NOT EXISTS".format(self.table_name)) first_id = prepared_statement.result_metadata_id - self.assertEqual(prepared_statement.result_metadata, []) + LOG.debug('initial result_metadata_id: {}'.format(first_id)) + + def check_result_and_metadata(expected): + self.assertEqual( + session.execute(prepared_statement, (value, value, value))[0], + expected + ) + self.assertEqual(prepared_statement.result_metadata_id, first_id) + self.assertEqual(prepared_statement.result_metadata, []) - # Sucessfull conditional update - result = session.execute(prepared_statement, (value, value, value)) - self.assertEqual(result[0], (True, )) - second_id = prepared_statement.result_metadata_id - self.assertEqual(first_id, second_id) + # Successful conditional update + check_result_and_metadata((True,)) # Failed conditional update - result = session.execute(prepared_statement, (value, value, value)) - self.assertEqual(result[0], (False, value, value, value)) - third_id = prepared_statement.result_metadata_id - self.assertEqual(first_id, third_id) - self.assertEqual(prepared_statement.result_metadata, []) + check_result_and_metadata((False, value, value, value)) session.execute("ALTER TABLE {} ADD c int".format(self.table_name)) # Failed conditional update - result = session.execute(prepared_statement, (value, value, value)) - self.assertEqual(result[0], (False, value, value, None, value)) - fourth_id = prepared_statement.result_metadata_id - self.assertEqual(first_id, fourth_id) - self.assertEqual(prepared_statement.result_metadata, []) + check_result_and_metadata((False, value, value, None, value)) From 575f8de67743b5b5dccd56b2dc3b6e43fcc9b9b6 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 13 Nov 2017 15:22:19 -0500 Subject: [PATCH 1279/2005] Set alabaster as theme for documentation (#854) --- docs/conf.py | 9 ++++++++- docs/themes/custom/static/custom.css_t | 19 ++++++++++++++++++- docs/themes/custom/theme.conf | 9 ++++++++- 3 files changed, 34 insertions(+), 3 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 99cc4c7e25..b2bfbe0c00 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -136,7 +136,14 @@ #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +html_sidebars = { + '**': [ + 'about.html', + 'navigation.html', + 'relations.html', + 'searchbox.html' + ] +} # Additional templates that should be rendered to pages, maps page names to # template names. diff --git a/docs/themes/custom/static/custom.css_t b/docs/themes/custom/static/custom.css_t index 00d9b2cf62..c3460e75a5 100644 --- a/docs/themes/custom/static/custom.css_t +++ b/docs/themes/custom/static/custom.css_t @@ -1,4 +1,21 @@ -@import url("sphinxdoc.css"); +@import url("alabaster.css"); + +div.document { + width: 1200px; +} + +div.sphinxsidebar h1.logo a { + font-size: 24px; +} + +code.descname { + color: #4885ed; +} + +th.field-name { + min-width: 100px; + color: #3cba54; +} div.versionmodified { font-weight: bold diff --git a/docs/themes/custom/theme.conf b/docs/themes/custom/theme.conf index f4b51356f2..b0fbb6961e 100644 --- a/docs/themes/custom/theme.conf +++ b/docs/themes/custom/theme.conf @@ -1,4 +1,11 @@ [theme] -inherit = sphinxdoc +inherit = alabaster stylesheet = custom.css pygments_style = friendly + +[options] +description = Python driver for Cassandra +github_user = datastax +github_repo = python-driver +github_button = true +github_type = star \ No newline at end of file From 0056c0a0ed222769f305e0d78266c3515037073b Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 8 Nov 2017 13:52:51 -0500 Subject: [PATCH 1280/2005] Test delay can be zero in ConstantSpeculativeExecutionPolicy --- tests/integration/standard/test_policies.py | 41 ++++++++++++++++++--- 1 file changed, 35 insertions(+), 6 deletions(-) diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index c1ad12d720..0527d41a74 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -12,21 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. -from tests.integration import use_singledc - try: import unittest2 as unittest except ImportError: import unittest # noqa -from cassandra.cluster import Cluster, ExecutionProfile -from cassandra.policies import HostFilterPolicy, RoundRobinPolicy, SimpleConvictionPolicy, WhiteListRoundRobinPolicy +from cassandra.cluster import Cluster, ExecutionProfile, ResponseFuture +from cassandra.policies import HostFilterPolicy, RoundRobinPolicy, SimpleConvictionPolicy, \ + WhiteListRoundRobinPolicy, ConstantSpeculativeExecutionPolicy from cassandra.pool import Host +from cassandra.query import SimpleStatement from tests.integration import PROTOCOL_VERSION, local +from tests.integration import use_singledc +from unittest.mock import patch from concurrent.futures import wait as wait_futures - +from itertools import count def setup_module(): use_singledc() @@ -36,7 +38,7 @@ class HostFilterPolicyTests(unittest.TestCase): def test_predicate_changes(self): """ - Test to validate hostfilter reacts correctly when the predicate return + Test to validate host filter reacts correctly when the predicate return a different subset of the hosts HostFilterPolicy @since 3.8 @@ -91,3 +93,30 @@ def test_only_connects_to_subset(self): queried_hosts.update(response.response_future.attempted_hosts) queried_hosts = set(host.address for host in queried_hosts) self.assertEqual(queried_hosts, only_connect_hosts) + + +class SpeculativeExecutionPolicy(unittest.TestCase): + def test_delay_can_be_0(self): + """ + Test to validate that the delay can be zero for the ConstantSpeculativeExecutionPolicy + @since 3.13 + @jira_ticket PYTHON-836 + @expected_result all the queries are executed immediately + @test_category policy + """ + number_of_requests = 6 + spec = ExecutionProfile(speculative_execution_policy=ConstantSpeculativeExecutionPolicy(0, number_of_requests)) + + cluster = Cluster() + cluster.add_execution_profile("spec", spec) + session = cluster.connect(wait_for_all_pools=True) + self.addCleanup(cluster.shutdown) + + with patch.object(ResponseFuture, "_on_speculative_execute", + side_effect=ResponseFuture._on_speculative_execute, + autospec=True) as on_speculative_mocked: + stmt = SimpleStatement("INSERT INTO test3rf.test(k, v) VALUES (1, 2)") + stmt.is_idempotent = True + results = session.execute(stmt, execution_profile="spec") + self.assertEqual(len(results.response_future.attempted_hosts), 3) + self.assertEqual(on_speculative_mocked.call_count, number_of_requests) From e914c84ad26b8f46da56772285e69d9f25cd489b Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 10 Nov 2017 12:19:35 -0500 Subject: [PATCH 1281/2005] Removed some dse references in tests and compact storage --- tests/integration/__init__.py | 43 +----------- .../integration/simulacron/test_connection.py | 2 +- tests/integration/simulacron/test_policies.py | 4 +- tests/integration/simulacron/utils.py | 6 -- tests/integration/standard/test_metadata.py | 70 ++++++------------- 5 files changed, 26 insertions(+), 99 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 83bda91b36..caefb298ec 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -39,7 +39,6 @@ try: from ccmlib.cluster import Cluster as CCMCluster - from ccmlib.dse_cluster import DseCluster from ccmlib.cluster_factory import ClusterFactory as CCMClusterFactory from ccmlib import common except ImportError as e: @@ -105,30 +104,9 @@ def _tuple_version(version_string): default_cassandra_version = '2.2.0' - -def _get_cass_version_from_dse(dse_version): - if dse_version.startswith('4.6') or dse_version.startswith('4.5'): - cass_ver = "2.0" - elif dse_version.startswith('4.7') or dse_version.startswith('4.8'): - cass_ver = "2.1" - elif dse_version.startswith('5.0'): - cass_ver = "3.0" - elif dse_version.startswith("5.1"): - cass_ver = "3.10" - else: - log.error("Uknown dse version found {0}, defaulting to 2.1".format(dse_version)) - cass_ver = "2.1" - - return cass_ver - CASSANDRA_IP = os.getenv('CASSANDRA_IP', '127.0.0.1') CASSANDRA_DIR = os.getenv('CASSANDRA_DIR', None) -DSE_VERSION = os.getenv('DSE_VERSION', None) -DSE_CRED = os.getenv('DSE_CREDS', None) -if DSE_VERSION: - CASSANDRA_VERSION = _get_cass_version_from_dse(DSE_VERSION) -else: - CASSANDRA_VERSION = os.getenv('CASSANDRA_VERSION', default_cassandra_version) +CASSANDRA_VERSION = os.getenv('CASSANDRA_VERSION', default_cassandra_version) CCM_KWARGS = {} if CASSANDRA_DIR: @@ -139,15 +117,6 @@ def _get_cass_version_from_dse(dse_version): log.info('Using Cassandra version: %s', CASSANDRA_VERSION) CCM_KWARGS['version'] = CASSANDRA_VERSION -if DSE_VERSION: - log.info('Using DSE version: %s', DSE_VERSION) - if not CASSANDRA_DIR: - CCM_KWARGS['version'] = DSE_VERSION - if DSE_CRED: - log.info("Using DSE credentials file located at {0}".format(DSE_CRED)) - CCM_KWARGS['dse_credentials_file'] = DSE_CRED - - #This changes the default contact_point parameter in Cluster def set_default_cass_ip(): if CASSANDRA_IP.startswith("127.0.0."): @@ -257,7 +226,6 @@ def get_unsupported_upper_protocol(): greaterthanorequalcass40 = unittest.skipUnless(CASSANDRA_VERSION >= '4.0', 'Cassandra version 4.0 or greater required') lessthanorequalcass40 = unittest.skipIf(CASSANDRA_VERSION >= '4.0', 'Cassandra version 4.0 or greater required') lessthancass30 = unittest.skipUnless(CASSANDRA_VERSION < '3.0', 'Cassandra version less then 3.0 required') -dseonly = unittest.skipUnless(DSE_VERSION, "Test is only applicalbe to DSE clusters") pypy = unittest.skipUnless(platform.python_implementation() == "PyPy", "Test is skipped unless it's on PyPy") notpy3 = unittest.skipIf(sys.version_info >= (3, 0), "Test not applicable for Python 3.x runtime") requiresmallclockgranularity = unittest.skipIf("Windows" in platform.system() or "async" in EVENT_LOOP_MANAGER, @@ -370,19 +338,12 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[]): del tb log.debug("Creating new CCM cluster, {0}, with args {1}".format(cluster_name, CCM_KWARGS)) - if DSE_VERSION: - log.error("creating dse cluster") - CCM_CLUSTER = DseCluster(path, cluster_name, **CCM_KWARGS) - else: - CCM_CLUSTER = CCMCluster(path, cluster_name, **CCM_KWARGS) + CCM_CLUSTER = CCMCluster(path, cluster_name, **CCM_KWARGS) CCM_CLUSTER.set_configuration_options({'start_native_transport': True}) if CASSANDRA_VERSION >= '2.2': CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True}) if CASSANDRA_VERSION >= '3.0': CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True}) - if 'spark' in workloads: - config_options = {"initial_spark_worker_resources": 0.1} - CCM_CLUSTER.set_dse_configuration_options(config_options) common.switch_cluster(path, cluster_name) CCM_CLUSTER.populate(nodes, ipformat=ipformat) try: diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index bc2ae9f00a..a8aebb78ef 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -194,7 +194,7 @@ def test_close_when_query(self): "scope": "connection" } - prime_query(query_to_prime, then=then) + prime_query(query_to_prime, then=then, rows=None, column_types=None) self.assertRaises(NoHostAvailable, session.execute, query_to_prime) def test_retry_after_defunct(self): diff --git a/tests/integration/simulacron/test_policies.py b/tests/integration/simulacron/test_policies.py index 4759e3b36a..7d108c7f11 100644 --- a/tests/integration/simulacron/test_policies.py +++ b/tests/integration/simulacron/test_policies.py @@ -222,9 +222,9 @@ def test_retry_policy_ignores_and_rethrows(self): "block_for": 2, "write_type": "SIMPLE" } - prime_query(query_to_prime_simple, then=then) + prime_query(query_to_prime_simple, then=then, rows=None, column_types=None) then["write_type"] = "CDC" - prime_query(query_to_prime_cdc, then=then) + prime_query(query_to_prime_cdc, then=then, rows=None, column_types=None) with self.assertRaises(WriteTimeout): self.session.execute(query_to_prime_simple) diff --git a/tests/integration/simulacron/utils.py b/tests/integration/simulacron/utils.py index 716b8996a2..7b7768bf5f 100644 --- a/tests/integration/simulacron/utils.py +++ b/tests/integration/simulacron/utils.py @@ -320,12 +320,6 @@ def prime_query(query, rows=default_rows, column_types=default_column_types, whe :return: """ # If then is set, then rows and column_types should not - if then: - nose.tools.assert_equal(rows, default_rows) - nose.tools.assert_equal(column_types, default_column_types) - rows=None - column_types=None - query = PrimeQuery(query, rows=rows, column_types=column_types, when=when, then=then, cluster_name=cluster_name) response = prime_request(query) return response diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index df5b7720f6..4e2f6222ef 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -33,7 +33,6 @@ from tests.integration import (get_cluster, use_singledc, PROTOCOL_VERSION, get_server_versions, execute_until_pass, BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, BasicExistingKeyspaceUnitTestCase, drop_keyspace_shutdown_cluster, CASSANDRA_VERSION, - BasicExistingSegregatedKeyspaceUnitTestCase, dseonly, DSE_VERSION, get_supported_protocol_versions, greaterthanorequalcass30, lessthancass30, local) from tests.integration import greaterthancass21 @@ -134,7 +133,7 @@ def test_schema_metadata_disable(self): no_schema.shutdown() no_token.shutdown() - def make_create_statement(self, partition_cols, clustering_cols=None, other_cols=None, compact=False): + def make_create_statement(self, partition_cols, clustering_cols=None, other_cols=None): clustering_cols = clustering_cols or [] other_cols = other_cols or [] @@ -162,8 +161,6 @@ def make_create_statement(self, partition_cols, clustering_cols=None, other_cols statement += ")" statement += ")" - if compact: - statement += " WITH COMPACT STORAGE" return statement @@ -278,8 +275,8 @@ def test_composite_in_compound_primary_key(self): self.check_create_statement(tablemeta, create_statement) def test_compound_primary_keys_compact(self): - create_statement = self.make_create_statement(["a"], ["b"], ["c"], compact=True) - create_statement += " AND CLUSTERING ORDER BY (b ASC)" + create_statement = self.make_create_statement(["a"], ["b"], ["c"]) + create_statement += " WITH CLUSTERING ORDER BY (b ASC)" self.session.execute(create_statement) tablemeta = self.get_table_metadata() @@ -302,8 +299,8 @@ def test_cluster_column_ordering_reversed_metadata(self): @test_category metadata """ - create_statement = self.make_create_statement(["a"], ["b", "c"], ["d"], compact=True) - create_statement += " AND CLUSTERING ORDER BY (b ASC, c DESC)" + create_statement = self.make_create_statement(["a"], ["b", "c"], ["d"]) + create_statement += " WITH CLUSTERING ORDER BY (b ASC, c DESC)" self.session.execute(create_statement) tablemeta = self.get_table_metadata() b_column = tablemeta.columns['b'] @@ -312,8 +309,8 @@ def test_cluster_column_ordering_reversed_metadata(self): self.assertTrue(c_column.is_reversed) def test_compound_primary_keys_more_columns_compact(self): - create_statement = self.make_create_statement(["a"], ["b", "c"], ["d"], compact=True) - create_statement += " AND CLUSTERING ORDER BY (b ASC, c ASC)" + create_statement = self.make_create_statement(["a"], ["b", "c"], ["d"]) + create_statement += " WITH CLUSTERING ORDER BY (b ASC, c ASC)" self.session.execute(create_statement) tablemeta = self.get_table_metadata() @@ -324,7 +321,7 @@ def test_compound_primary_keys_more_columns_compact(self): self.check_create_statement(tablemeta, create_statement) def test_composite_primary_key_compact(self): - create_statement = self.make_create_statement(["a", "b"], [], ["c"], compact=True) + create_statement = self.make_create_statement(["a", "b"], [], ["c"]) self.session.execute(create_statement) tablemeta = self.get_table_metadata() @@ -335,8 +332,8 @@ def test_composite_primary_key_compact(self): self.check_create_statement(tablemeta, create_statement) def test_composite_in_compound_primary_key_compact(self): - create_statement = self.make_create_statement(["a", "b"], ["c"], ["d"], compact=True) - create_statement += " AND CLUSTERING ORDER BY (c ASC)" + create_statement = self.make_create_statement(["a", "b"], ["c"], ["d"]) + create_statement += " WITH CLUSTERING ORDER BY (c ASC)" self.session.execute(create_statement) tablemeta = self.get_table_metadata() @@ -352,7 +349,7 @@ def test_cql_compatibility(self): # having more than one non-PK column is okay if there aren't any # clustering columns - create_statement = self.make_create_statement(["a"], [], ["b", "c", "d"], compact=True) + create_statement = self.make_create_statement(["a"], [], ["b", "c", "d"]) self.session.execute(create_statement) tablemeta = self.get_table_metadata() @@ -362,12 +359,12 @@ def test_cql_compatibility(self): self.assertTrue(tablemeta.is_cql_compatible) - # ... but if there are clustering columns, it's not CQL compatible. - # This is a hacky way to simulate having clustering columns. + # It will be cql compatible after CASSANDRA-10857 + # since compact storage is being dropped tablemeta.clustering_key = ["foo", "bar"] tablemeta.columns["foo"] = None tablemeta.columns["bar"] = None - self.assertFalse(tablemeta.is_cql_compatible) + self.assertTrue(tablemeta.is_cql_compatible) def test_compound_primary_keys_ordering(self): create_statement = self.make_create_statement(["a"], ["b"], ["c"]) @@ -1297,8 +1294,7 @@ def test_legacy_tables(self): "b@6869746d65776974686d75736963" blob, "b@6d616d6d616a616d6d61" blob, PRIMARY KEY (key, column1) -) WITH COMPACT STORAGE - AND CLUSTERING ORDER BY (column1 ASC) +) WITH CLUSTERING ORDER BY (column1 ASC) AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' AND comment = 'Stores file meta data' AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'} @@ -1320,8 +1316,7 @@ def test_legacy_tables(self): key2 bigint, full_name text, PRIMARY KEY ((key, key2)) -) WITH COMPACT STORAGE - AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' +) WITH caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} @@ -1339,8 +1334,7 @@ def test_legacy_tables(self): key2 text, col_with_meta text, PRIMARY KEY ((key, key2)) -) WITH COMPACT STORAGE - AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' +) WITH caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} @@ -1359,8 +1353,7 @@ def test_legacy_tables(self): column1 text, value text, PRIMARY KEY ((key, key2), column1) -) WITH COMPACT STORAGE - AND CLUSTERING ORDER BY (column1 ASC) +) WITH CLUSTERING ORDER BY (column1 ASC) AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} @@ -1377,8 +1370,7 @@ def test_legacy_tables(self): CREATE TABLE legacy.simple_with_col ( key uuid PRIMARY KEY, col_with_meta text -) WITH COMPACT STORAGE - AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' +) WITH caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} @@ -1396,8 +1388,7 @@ def test_legacy_tables(self): column1 text, value text, PRIMARY KEY (key, column1) -) WITH COMPACT STORAGE - AND CLUSTERING ORDER BY (column1 ASC) +) WITH CLUSTERING ORDER BY (column1 ASC) AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} @@ -1416,8 +1407,7 @@ def test_legacy_tables(self): column1 'org.apache.cassandra.db.marshal.DynamicCompositeType(b=>org.apache.cassandra.db.marshal.BytesType, s=>org.apache.cassandra.db.marshal.UTF8Type, t=>org.apache.cassandra.db.marshal.TimeUUIDType)', value blob, PRIMARY KEY (key, column1) -) WITH COMPACT STORAGE - AND CLUSTERING ORDER BY (column1 ASC) +) WITH CLUSTERING ORDER BY (column1 ASC) AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' AND comment = 'Stores file meta data' AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'} @@ -2571,21 +2561,3 @@ def test_metadata_with_quoted_identifiers(self): value_column = mv_columns[2] self.assertIsNotNone(value_column) self.assertEqual(value_column.name, 'the Value') - - -@dseonly -class DSEMetadataTest(BasicExistingSegregatedKeyspaceUnitTestCase): - - def test_dse_specific_meta(self): - """ - Test to ensure DSE metadata is populated appropriately. - @since 3.4 - @jira_ticket PYTHON-555 - @expected_result metadata for dse_version, and dse_workload should be populated on dse clusters - - @test_category metadata - """ - for host in self.cluster.metadata.all_hosts(): - self.assertIsNotNone(host.dse_version, "Dse version not populated as expected") - self.assertEqual(host.dse_version, DSE_VERSION) - self.assertTrue("Cassandra" in host.dse_workload) From 1b5e89289e0ddc92d7d8529bade3aa3470bf37a2 Mon Sep 17 00:00:00 2001 From: bjmb Date: Thu, 16 Nov 2017 13:43:10 +0100 Subject: [PATCH 1282/2005] Use mock instead of unittest.mock --- tests/integration/standard/test_policies.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index 0527d41a74..de8928b6fd 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -26,9 +26,8 @@ from tests.integration import PROTOCOL_VERSION, local from tests.integration import use_singledc -from unittest.mock import patch +from mock import patch from concurrent.futures import wait as wait_futures -from itertools import count def setup_module(): use_singledc() From 3dfbf92f1ef73470769b27981b335613af66f64c Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 13 Nov 2017 09:55:41 +0100 Subject: [PATCH 1283/2005] Set consitency level to Quorum when creating/droping the keyspace in tests --- setup.py | 2 +- tests/integration/long/utils.py | 3 +++ tests/integration/standard/test_policies.py | 29 +++++++++++++-------- 3 files changed, 22 insertions(+), 12 deletions(-) diff --git a/setup.py b/setup.py index 9f7c043709..b1d93c77ef 100644 --- a/setup.py +++ b/setup.py @@ -420,7 +420,7 @@ def run_setup(extensions): keywords='cassandra,cql,orm', include_package_data=True, install_requires=dependencies, - tests_require=['nose', 'mock<=1.0.1', 'PyYAML', 'pytz', 'sure'], + tests_require=['nose', 'mock>=2.0.0', 'PyYAML', 'pytz', 'sure'], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', diff --git a/tests/integration/long/utils.py b/tests/integration/long/utils.py index 40ecc0bbf3..8c9096980a 100644 --- a/tests/integration/long/utils.py +++ b/tests/integration/long/utils.py @@ -20,6 +20,7 @@ from ccmlib.node import Node from cassandra.query import named_tuple_factory +from cassandra.cluster import ConsistencyLevel from tests.integration import get_node, get_cluster, wait_for_node_socket @@ -59,6 +60,7 @@ def create_schema(cluster, session, keyspace, simple_strategy=True, replication_factor=1, replication_strategy=None): row_factory = session.row_factory session.row_factory = named_tuple_factory + session.default_consistency_level = ConsistencyLevel.QUORUM if keyspace in cluster.metadata.keyspaces.keys(): session.execute('DROP KEYSPACE %s' % keyspace, timeout=20) @@ -80,6 +82,7 @@ def create_schema(cluster, session, keyspace, simple_strategy=True, session.execute('USE %s' % keyspace) session.row_factory = row_factory + session.default_consistency_level = ConsistencyLevel.LOCAL_ONE def start(node): diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index de8928b6fd..ddf2214e83 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -23,10 +23,10 @@ from cassandra.pool import Host from cassandra.query import SimpleStatement -from tests.integration import PROTOCOL_VERSION, local -from tests.integration import use_singledc +from tests.integration import PROTOCOL_VERSION, local, use_singledc +from tests import notwindows -from mock import patch +from itertools import count from concurrent.futures import wait as wait_futures def setup_module(): @@ -95,6 +95,7 @@ def test_only_connects_to_subset(self): class SpeculativeExecutionPolicy(unittest.TestCase): + @notwindows def test_delay_can_be_0(self): """ Test to validate that the delay can be zero for the ConstantSpeculativeExecutionPolicy @@ -111,11 +112,17 @@ def test_delay_can_be_0(self): session = cluster.connect(wait_for_all_pools=True) self.addCleanup(cluster.shutdown) - with patch.object(ResponseFuture, "_on_speculative_execute", - side_effect=ResponseFuture._on_speculative_execute, - autospec=True) as on_speculative_mocked: - stmt = SimpleStatement("INSERT INTO test3rf.test(k, v) VALUES (1, 2)") - stmt.is_idempotent = True - results = session.execute(stmt, execution_profile="spec") - self.assertEqual(len(results.response_future.attempted_hosts), 3) - self.assertEqual(on_speculative_mocked.call_count, number_of_requests) + counter = count() + + def patch_and_count(f): + def patched(*args, **kwargs): + next(counter) + f(*args, **kwargs) + return patched + + ResponseFuture._on_speculative_execute = patch_and_count(ResponseFuture._on_speculative_execute) + stmt = SimpleStatement("INSERT INTO test3rf.test(k, v) VALUES (1, 2)") + stmt.is_idempotent = True + results = session.execute(stmt, execution_profile="spec") + self.assertEqual(len(results.response_future.attempted_hosts), 3) + self.assertEqual(next(counter), number_of_requests) From 0cee88ec920eab34455de179d7eeea37ada3e6d7 Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 17 Nov 2017 10:47:59 +0100 Subject: [PATCH 1284/2005] Don't test anymore against C* 2.0 --- build.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/build.yaml b/build.yaml index cfc4269878..bd75db98e9 100644 --- a/build.yaml +++ b/build.yaml @@ -68,7 +68,6 @@ os: - ubuntu/trusty64 cassandra: - - '2.0' - '2.1' - '2.2' - '3.0' From 4ceea3e048bda5d9a121f42efddc33d130148bec Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 10 Nov 2017 15:23:27 -0500 Subject: [PATCH 1285/2005] Move upgrade tests under integration --- tests/integration/__init__.py | 2 +- tests/integration/simulacron/test_policies.py | 11 ++++++----- tests/integration/{long => }/upgrade/__init__.py | 0 tests/integration/{long => }/upgrade/test_upgrade.py | 6 ++++-- 4 files changed, 11 insertions(+), 8 deletions(-) rename tests/integration/{long => }/upgrade/__init__.py (100%) rename tests/integration/{long => }/upgrade/test_upgrade.py (98%) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index bf140e98f7..29a89f7b41 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -263,7 +263,7 @@ def get_unsupported_upper_protocol(): notpy3 = unittest.skipIf(sys.version_info >= (3, 0), "Test not applicable for Python 3.x runtime") requiresmallclockgranularity = unittest.skipIf("Windows" in platform.system() or "async" in EVENT_LOOP_MANAGER, "This test is not suitible for environments with large clock granularity") -requiressimulacron = unittest.skipIf(SIMULACRON_JAR is None, "Simulacron jar hasn't been specified") +requiressimulacron = unittest.skipIf(SIMULACRON_JAR is None or CASSANDRA_VERSION < "2.1", "Simulacron jar hasn't been specified or C* version is 2.0") def wait_for_node_socket(node, timeout): diff --git a/tests/integration/simulacron/test_policies.py b/tests/integration/simulacron/test_policies.py index 4759e3b36a..54af4eb5dc 100644 --- a/tests/integration/simulacron/test_policies.py +++ b/tests/integration/simulacron/test_policies.py @@ -21,7 +21,8 @@ from cassandra.query import SimpleStatement from cassandra.policies import ConstantSpeculativeExecutionPolicy, RoundRobinPolicy, RetryPolicy, WriteType -from tests.integration import PROTOCOL_VERSION, greaterthancass21, requiressimulacron, SIMULACRON_JAR +from tests.integration import PROTOCOL_VERSION, greaterthancass21, requiressimulacron, SIMULACRON_JAR, \ + CASSANDRA_VERSION from tests.integration.simulacron.utils import start_and_prime_singledc, prime_query, \ stop_simulacron, NO_THEN, clear_queries @@ -45,7 +46,7 @@ class SpecExecTest(unittest.TestCase): @classmethod def setUpClass(cls): - if SIMULACRON_JAR is None: + if SIMULACRON_JAR is None or CASSANDRA_VERSION < "2.1": return start_and_prime_singledc() @@ -70,7 +71,7 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - if SIMULACRON_JAR is None: + if SIMULACRON_JAR is None or CASSANDRA_VERSION < "2.1": return cls.cluster.shutdown() @@ -181,7 +182,7 @@ def on_write_timeout(self, query, consistency, write_type, class RetryPolicyTets(unittest.TestCase): @classmethod def setUpClass(cls): - if SIMULACRON_JAR is None: + if SIMULACRON_JAR is None or CASSANDRA_VERSION < "2.1": return start_and_prime_singledc() @@ -191,7 +192,7 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - if SIMULACRON_JAR is None: + if SIMULACRON_JAR is None or CASSANDRA_VERSION < "2.1": return cls.cluster.shutdown() stop_simulacron() diff --git a/tests/integration/long/upgrade/__init__.py b/tests/integration/upgrade/__init__.py similarity index 100% rename from tests/integration/long/upgrade/__init__.py rename to tests/integration/upgrade/__init__.py diff --git a/tests/integration/long/upgrade/test_upgrade.py b/tests/integration/upgrade/test_upgrade.py similarity index 98% rename from tests/integration/long/upgrade/test_upgrade.py rename to tests/integration/upgrade/test_upgrade.py index 34a842b9ba..b3270cd6e6 100644 --- a/tests/integration/long/upgrade/test_upgrade.py +++ b/tests/integration/upgrade/test_upgrade.py @@ -18,7 +18,7 @@ from cassandra.auth import PlainTextAuthProvider, SaslAuthProvider from cassandra.cluster import ConsistencyLevel, Cluster, DriverException, ExecutionProfile from cassandra.policies import ConstantSpeculativeExecutionPolicy -from tests.integration.long.upgrade import UpgradeBase, UpgradeBaseAuth, UpgradePath, upgrade_paths +from tests.integration.upgrade import UpgradeBase, UpgradeBaseAuth, UpgradePath, upgrade_paths try: import unittest2 as unittest @@ -140,7 +140,9 @@ def test_schema_metadata_gets_refreshed(self): self.cluster_driver.refresh_schema_metadata(max_schema_agreement_wait=10) self.upgrade_node(nodes[0]) - self.cluster_driver.refresh_schema_metadata(max_schema_agreement_wait=20) + # Wait for the control connection to reconnect + time.sleep(20) + self.cluster_driver.refresh_schema_metadata(max_schema_agreement_wait=40) self.assertNotEqual(original_meta, self.cluster_driver.metadata.keyspaces) @two_to_three_path From e5e781c9475387d8eb933ed5729fb5ddd86dca10 Mon Sep 17 00:00:00 2001 From: Corentin Chary Date: Fri, 17 Nov 2017 13:58:05 +0100 Subject: [PATCH 1286/2005] query: make sure the retry_policy is passed to BoundStatements Before this patch all prepared statements would end up with the default RetryPolicy. --- CHANGELOG.rst | 7 +++++++ cassandra/query.py | 5 ++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 4093c72e8d..783b0eb150 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,10 @@ +3.13.0 +====== + +Bug Fixes +--------- +* Support retry_policy in PreparedStatement (PYTHON-861) + 3.12.0 ====== November 6, 2017 diff --git a/cassandra/query.py b/cassandra/query.py index ab58f66b26..efc30ddbe1 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -228,7 +228,8 @@ def __init__(self, retry_policy=None, consistency_level=None, routing_key=None, is_idempotent=False): if retry_policy and not hasattr(retry_policy, 'on_read_timeout'): # just checking one method to detect positional parameter errors raise ValueError('retry_policy should implement cassandra.policies.RetryPolicy') - self.retry_policy = retry_policy + if retry_policy is not None: + self.retry_policy = retry_policy if consistency_level is not None: self.consistency_level = consistency_level self._routing_key = routing_key @@ -384,6 +385,7 @@ class PreparedStatement(object): """ column_metadata = None #TODO: make this bind_metadata in next major + retry_policy = None consistency_level = None custom_payload = None fetch_size = FETCH_SIZE_UNSET @@ -488,6 +490,7 @@ def __init__(self, prepared_statement, retry_policy=None, consistency_level=None """ self.prepared_statement = prepared_statement + self.retry_policy = prepared_statement.retry_policy self.consistency_level = prepared_statement.consistency_level self.serial_consistency_level = prepared_statement.serial_consistency_level self.fetch_size = prepared_statement.fetch_size From 52db59ad6038747420f3fc37b3d3099b01978b53 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 28 Nov 2017 13:33:17 -0500 Subject: [PATCH 1287/2005] PYTHON-512: cqlengine: Add LIKE filter operator (#863) cqlengine: Add LIKE filter operator --- CHANGELOG.rst | 8 ++++++++ cassandra/cqlengine/operators.py | 5 +++++ docs/cqlengine/queryset.rst | 15 +++++++++++++++ .../cqlengine/operators/test_where_operators.py | 2 ++ 4 files changed, 30 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 4093c72e8d..5f41a3376f 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,11 @@ +3.13.0 +====== + +Features +-------- + +* cqlengine: LIKE filter operator (PYTHON-512) + 3.12.0 ====== November 6, 2017 diff --git a/cassandra/cqlengine/operators.py b/cassandra/cqlengine/operators.py index dcaf1081eb..a6702bdec9 100644 --- a/cassandra/cqlengine/operators.py +++ b/cassandra/cqlengine/operators.py @@ -93,3 +93,8 @@ class LessThanOrEqualOperator(BaseWhereOperator): class ContainsOperator(EqualsOperator): symbol = "CONTAINS" cql_symbol = 'CONTAINS' + + +class LikeOperator(EqualsOperator): + symbol = "LIKE" + cql_symbol = 'LIKE' diff --git a/docs/cqlengine/queryset.rst b/docs/cqlengine/queryset.rst index 85dadc97d9..5920cbb093 100644 --- a/docs/cqlengine/queryset.rst +++ b/docs/cqlengine/queryset.rst @@ -191,6 +191,21 @@ Filtering Operators Note that we need to use allow_filtering() since the *options* column has no secondary index. + :attr:`LIKE (__like) ` + + The LIKE operator is available for text columns that have a SASI secondary index. + + .. code-block:: python + + q = Automobile.objects.filter(model__like='%Civic%').allow_filtering() + + Limitations: + - Currently, cqlengine does not support SASI index creation. To use this feature, + you need to create the SASI index using the core driver. + - Queries using LIKE must use allow_filtering() since the *model* column has no + standard secondary index. Note that the server will use the SASI index properly + when executing the query. + TimeUUID Functions ================== diff --git a/tests/integration/cqlengine/operators/test_where_operators.py b/tests/integration/cqlengine/operators/test_where_operators.py index 203ffe62a5..ff1f882f4d 100644 --- a/tests/integration/cqlengine/operators/test_where_operators.py +++ b/tests/integration/cqlengine/operators/test_where_operators.py @@ -35,6 +35,7 @@ def check_lookup(symbol, expected): check_lookup('LT', LessThanOperator) check_lookup('LTE', LessThanOrEqualOperator) check_lookup('CONTAINS', ContainsOperator) + check_lookup('LIKE', LikeOperator) def test_operator_rendering(self): """ tests symbols are rendered properly """ @@ -46,5 +47,6 @@ def test_operator_rendering(self): self.assertEqual("<", six.text_type(LessThanOperator())) self.assertEqual("<=", six.text_type(LessThanOrEqualOperator())) self.assertEqual("CONTAINS", six.text_type(ContainsOperator())) + self.assertEqual("LIKE", six.text_type(LikeOperator())) From a28e247116493e25ca045cce6668b3813e4672fd Mon Sep 17 00:00:00 2001 From: bjmb Date: Tue, 28 Nov 2017 17:32:47 -0500 Subject: [PATCH 1288/2005] Added a test for PYTHON-512 --- tests/integration/__init__.py | 2 +- .../operators/test_where_operators.py | 2 - .../statements/test_base_statement.py | 76 +++++++++++++++---- 3 files changed, 62 insertions(+), 18 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 896a914605..04038cfeb6 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -178,7 +178,7 @@ def get_supported_protocol_versions(): elif version >= Version('2.0'): return (1, 2) else: - return (1) + return (1, ) def get_unsupported_lower_protocol(): diff --git a/tests/integration/cqlengine/operators/test_where_operators.py b/tests/integration/cqlengine/operators/test_where_operators.py index ff1f882f4d..62025b302e 100644 --- a/tests/integration/cqlengine/operators/test_where_operators.py +++ b/tests/integration/cqlengine/operators/test_where_operators.py @@ -48,5 +48,3 @@ def test_operator_rendering(self): self.assertEqual("<=", six.text_type(LessThanOrEqualOperator())) self.assertEqual("CONTAINS", six.text_type(ContainsOperator())) self.assertEqual("LIKE", six.text_type(LikeOperator())) - - diff --git a/tests/integration/cqlengine/statements/test_base_statement.py b/tests/integration/cqlengine/statements/test_base_statement.py index 21388d0c7e..313ad20146 100644 --- a/tests/integration/cqlengine/statements/test_base_statement.py +++ b/tests/integration/cqlengine/statements/test_base_statement.py @@ -17,17 +17,21 @@ import unittest # noqa from uuid import uuid4 +import six from cassandra.query import FETCH_SIZE_UNSET +from cassandra.cluster import Cluster, ConsistencyLevel from cassandra.cqlengine.statements import BaseCQLStatement from cassandra.cqlengine.management import sync_table, drop_table from cassandra.cqlengine.statements import InsertStatement, UpdateStatement, SelectStatement, DeleteStatement, \ WhereClause -from cassandra.cqlengine.operators import EqualsOperator +from cassandra.cqlengine.operators import EqualsOperator, LikeOperator from cassandra.cqlengine.columns import Column from tests.integration.cqlengine.base import BaseCassEngTestCase, TestQueryUpdateModel from tests.integration.cqlengine import DEFAULT_KEYSPACE +from tests.integration import greaterthanorequalcass3_10 + from cassandra.cqlengine.connection import execute @@ -46,6 +50,8 @@ def test_fetch_size(self): class ExecuteStatementTest(BaseCassEngTestCase): + text = "text_for_db" + @classmethod def setUpClass(cls): super(ExecuteStatementTest, cls).setUpClass() @@ -79,20 +85,7 @@ def test_insert_statement_execute(self): """ partition = uuid4() cluster = 1 - - #Verifying insert statement - st = InsertStatement(self.table_name) - st.add_assignment(Column(db_field='partition'), partition) - st.add_assignment(Column(db_field='cluster'), cluster) - - st.add_assignment(Column(db_field='count'), 1) - st.add_assignment(Column(db_field='text'), "text_for_db") - st.add_assignment(Column(db_field='text_set'), set(("foo", "bar"))) - st.add_assignment(Column(db_field='text_list'), ["foo", "bar"]) - st.add_assignment(Column(db_field='text_map'), {"foo": '1', "bar": '2'}) - - execute(st) - self._verify_statement(st) + self._insert_statement(partition, cluster) # Verifying update statement where = [WhereClause('partition', EqualsOperator(), partition), @@ -111,3 +104,56 @@ def test_insert_statement_execute(self): # Verifying delete statement execute(DeleteStatement(self.table_name, where=where)) self.assertEqual(TestQueryUpdateModel.objects.count(), 0) + + @greaterthanorequalcass3_10 + def test_like_operator(self): + """ + Test to verify the like operator works appropriately + + @since 3.13 + @jira_ticket PYTHON-512 + @expected_result the expected row is read using LIKE + + @test_category data_types:object_mapper + """ + cluster = Cluster() + session = cluster.connect() + self.addCleanup(cluster.shutdown) + + session.execute("""CREATE CUSTOM INDEX text_index ON {} (text) + USING 'org.apache.cassandra.index.sasi.SASIIndex';""".format(self.table_name)) + self.addCleanup(session.execute, "DROP INDEX {}.text_index".format(DEFAULT_KEYSPACE)) + + partition = uuid4() + cluster = 1 + self._insert_statement(partition, cluster) + + ss = SelectStatement(self.table_name) + like_clause = "text_for_%" + ss.add_where(Column(db_field='text'), LikeOperator(), like_clause) + self.assertEqual(six.text_type(ss), + 'SELECT * FROM {} WHERE "text" LIKE %(0)s'.format(self.table_name)) + + result = execute(ss) + self.assertEqual(result[0]["text"], self.text) + + q = TestQueryUpdateModel.objects.filter(text__like=like_clause).allow_filtering() + self.assertEqual(q[0].text, self.text) + + q = TestQueryUpdateModel.objects.filter(text__like=like_clause) + self.assertEqual(q[0].text, self.text) + + def _insert_statement(self, partition, cluster): + # Verifying insert statement + st = InsertStatement(self.table_name) + st.add_assignment(Column(db_field='partition'), partition) + st.add_assignment(Column(db_field='cluster'), cluster) + + st.add_assignment(Column(db_field='count'), 1) + st.add_assignment(Column(db_field='text'), self.text) + st.add_assignment(Column(db_field='text_set'), set(("foo", "bar"))) + st.add_assignment(Column(db_field='text_list'), ["foo", "bar"]) + st.add_assignment(Column(db_field='text_map'), {"foo": '1', "bar": '2'}) + + execute(st) + self._verify_statement(st) From c0fa37bc351a73b03bfb3cb4575da7b3a3e72116 Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 1 Dec 2017 10:36:43 -0500 Subject: [PATCH 1289/2005] Removed compact storage test --- tests/integration/standard/test_metadata.py | 255 -------------------- 1 file changed, 255 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 4e2f6222ef..4c1cf3549e 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1181,261 +1181,6 @@ def test_token_map(self): self.assertEqual(set(get_replicas('test1rf', token)), set([owners[(i + 1) % 3]])) cluster.shutdown() - @local - def test_legacy_tables(self): - - if CASS_SERVER_VERSION < (2, 1, 0): - raise unittest.SkipTest('Test schema output assumes 2.1.0+ options') - - if CASS_SERVER_VERSION >= (2, 2, 0): - raise unittest.SkipTest('Cannot test cli script on Cassandra 2.2.0+') - - if sys.version_info[0:2] != (2, 7): - raise unittest.SkipTest('This test compares static strings generated from dict items, which may change orders. Test with 2.7.') - - cli_script = """CREATE KEYSPACE legacy -WITH placement_strategy = 'SimpleStrategy' -AND strategy_options = {replication_factor:1}; - -USE legacy; - -CREATE COLUMN FAMILY simple_no_col - WITH comparator = UTF8Type - AND key_validation_class = UUIDType - AND default_validation_class = UTF8Type; - -CREATE COLUMN FAMILY simple_with_col - WITH comparator = UTF8Type - and key_validation_class = UUIDType - and default_validation_class = UTF8Type - AND column_metadata = [ - {column_name: col_with_meta, validation_class: UTF8Type} - ]; - -CREATE COLUMN FAMILY composite_partition_no_col - WITH comparator = UTF8Type - AND key_validation_class = 'CompositeType(UUIDType,UTF8Type)' - AND default_validation_class = UTF8Type; - -CREATE COLUMN FAMILY composite_partition_with_col - WITH comparator = UTF8Type - AND key_validation_class = 'CompositeType(UUIDType,UTF8Type)' - AND default_validation_class = UTF8Type - AND column_metadata = [ - {column_name: col_with_meta, validation_class: UTF8Type} - ]; - -CREATE COLUMN FAMILY nested_composite_key - WITH comparator = UTF8Type - and key_validation_class = 'CompositeType(CompositeType(UUIDType,UTF8Type), LongType)' - and default_validation_class = UTF8Type - AND column_metadata = [ - {column_name: full_name, validation_class: UTF8Type} - ]; - -create column family composite_comp_no_col - with column_type = 'Standard' - and comparator = 'DynamicCompositeType(t=>org.apache.cassandra.db.marshal.TimeUUIDType,s=>org.apache.cassandra.db.marshal.UTF8Type,b=>org.apache.cassandra.db.marshal.BytesType)' - and default_validation_class = 'BytesType' - and key_validation_class = 'BytesType' - and read_repair_chance = 0.0 - and dclocal_read_repair_chance = 0.1 - and gc_grace = 864000 - and min_compaction_threshold = 4 - and max_compaction_threshold = 32 - and compaction_strategy = 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy' - and caching = 'KEYS_ONLY' - and cells_per_row_to_cache = '0' - and default_time_to_live = 0 - and speculative_retry = 'NONE' - and comment = 'Stores file meta data'; - -create column family composite_comp_with_col - with column_type = 'Standard' - and comparator = 'DynamicCompositeType(t=>org.apache.cassandra.db.marshal.TimeUUIDType,s=>org.apache.cassandra.db.marshal.UTF8Type,b=>org.apache.cassandra.db.marshal.BytesType)' - and default_validation_class = 'BytesType' - and key_validation_class = 'BytesType' - and read_repair_chance = 0.0 - and dclocal_read_repair_chance = 0.1 - and gc_grace = 864000 - and min_compaction_threshold = 4 - and max_compaction_threshold = 32 - and compaction_strategy = 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy' - and caching = 'KEYS_ONLY' - and cells_per_row_to_cache = '0' - and default_time_to_live = 0 - and speculative_retry = 'NONE' - and comment = 'Stores file meta data' - and column_metadata = [ - {column_name : 'b@6d616d6d616a616d6d61', - validation_class : BytesType, - index_name : 'idx_one', - index_type : 0}, - {column_name : 'b@6869746d65776974686d75736963', - validation_class : BytesType, - index_name : 'idx_two', - index_type : 0}] - and compression_options = {'sstable_compression' : 'org.apache.cassandra.io.compress.LZ4Compressor'};""" - - # note: the inner key type for legacy.nested_composite_key - # (org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UUIDType, org.apache.cassandra.db.marshal.UTF8Type)) - # is a bit strange, but it replays in CQL with desired results - expected_string = """CREATE KEYSPACE legacy WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} AND durable_writes = true; - -/* -Warning: Table legacy.composite_comp_with_col omitted because it has constructs not compatible with CQL (was created via legacy API). - -Approximate structure, for reference: -(this should not be used to reproduce this schema) - -CREATE TABLE legacy.composite_comp_with_col ( - key blob, - column1 'org.apache.cassandra.db.marshal.DynamicCompositeType(b=>org.apache.cassandra.db.marshal.BytesType, s=>org.apache.cassandra.db.marshal.UTF8Type, t=>org.apache.cassandra.db.marshal.TimeUUIDType)', - "b@6869746d65776974686d75736963" blob, - "b@6d616d6d616a616d6d61" blob, - PRIMARY KEY (key, column1) -) WITH CLUSTERING ORDER BY (column1 ASC) - AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' - AND comment = 'Stores file meta data' - AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'} - AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = 'NONE'; -CREATE INDEX idx_two ON legacy.composite_comp_with_col ("b@6869746d65776974686d75736963"); -CREATE INDEX idx_one ON legacy.composite_comp_with_col ("b@6d616d6d616a616d6d61"); -*/ - -CREATE TABLE legacy.nested_composite_key ( - key 'org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UUIDType, org.apache.cassandra.db.marshal.UTF8Type)', - key2 bigint, - full_name text, - PRIMARY KEY ((key, key2)) -) WITH caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' - AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} - AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = 'NONE'; - -CREATE TABLE legacy.composite_partition_with_col ( - key uuid, - key2 text, - col_with_meta text, - PRIMARY KEY ((key, key2)) -) WITH caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' - AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} - AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = 'NONE'; - -CREATE TABLE legacy.composite_partition_no_col ( - key uuid, - key2 text, - column1 text, - value text, - PRIMARY KEY ((key, key2), column1) -) WITH CLUSTERING ORDER BY (column1 ASC) - AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' - AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} - AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = 'NONE'; - -CREATE TABLE legacy.simple_with_col ( - key uuid PRIMARY KEY, - col_with_meta text -) WITH caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' - AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} - AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = 'NONE'; - -CREATE TABLE legacy.simple_no_col ( - key uuid, - column1 text, - value text, - PRIMARY KEY (key, column1) -) WITH CLUSTERING ORDER BY (column1 ASC) - AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' - AND comment = '' - AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} - AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = 'NONE'; - -CREATE TABLE legacy.composite_comp_no_col ( - key blob, - column1 'org.apache.cassandra.db.marshal.DynamicCompositeType(b=>org.apache.cassandra.db.marshal.BytesType, s=>org.apache.cassandra.db.marshal.UTF8Type, t=>org.apache.cassandra.db.marshal.TimeUUIDType)', - value blob, - PRIMARY KEY (key, column1) -) WITH CLUSTERING ORDER BY (column1 ASC) - AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' - AND comment = 'Stores file meta data' - AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'} - AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = 'NONE';""" - - ccm = get_cluster() - livenodes = [node for node in list(ccm.nodelist()) if node.is_live()] - livenodes[0].run_cli(cli_script) - - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - - legacy_meta = cluster.metadata.keyspaces['legacy'] - print(legacy_meta.export_as_string()) - self.assert_equal_diff(legacy_meta.export_as_string(), expected_string) - - session.execute('DROP KEYSPACE legacy') - - cluster.shutdown() - class TokenMetadataTest(unittest.TestCase): """ From 5f2684634f1c96a76bb71a36115cd71357ba7874 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 1 Dec 2017 14:15:16 -0500 Subject: [PATCH 1290/2005] Fix _del__ method in Session is throwing an exception (#865) --- CHANGELOG.rst | 2 +- cassandra/cluster.py | 9 +++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index e4bbf08b93..3c82e0df8a 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -8,7 +8,7 @@ Features Bug Fixes --------- * Support retry_policy in PreparedStatement (PYTHON-861) - +* __del__ method in Session is throwing an exception (PYTHON-813) 3.12.0 ====== diff --git a/cassandra/cluster.py b/cassandra/cluster.py index b6d11d5158..153f324343 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2435,8 +2435,13 @@ def __exit__(self, *args): self.shutdown() def __del__(self): - # Ensure all connections are closed, in case the Session object is deleted by the GC - self.shutdown() + try: + # Ensure all connections are closed, in case the Session object is deleted by the GC + self.shutdown() + except: + # Ignore all errors. Shutdown errors can be caught by the user + # when cluster.shutdown() is called explicitly. + pass def add_or_renew_pool(self, host, is_host_addition): """ From e230c4533d3c6180cd0b18564b4dbb561dc7f8d6 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 24 Nov 2017 09:29:53 -0500 Subject: [PATCH 1291/2005] avoid warning when unregistering connection on shutdown --- CHANGELOG.rst | 4 ++++ cassandra/cqlengine/connection.py | 1 - 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 3c82e0df8a..6c8459c812 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -10,6 +10,10 @@ Bug Fixes * Support retry_policy in PreparedStatement (PYTHON-861) * __del__ method in Session is throwing an exception (PYTHON-813) +Other +----- +* cqlengine: avoid warning when unregistering connection on shutdown (PYTHON-865) + 3.12.0 ====== November 6, 2017 diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index bb4c8bf42f..262f921eb0 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -211,7 +211,6 @@ def unregister_connection(name): del _connections[DEFAULT_CONNECTION] cluster = None session = None - log.warning("Unregistering default connection '{0}'. Use set_default_connection to set a new one.".format(name)) conn = _connections[name] if conn.cluster: From ce58a63aaaf1876eff329d56f13ce3463961a23d Mon Sep 17 00:00:00 2001 From: bjmb Date: Mon, 20 Nov 2017 18:40:53 +0100 Subject: [PATCH 1292/2005] PYTHON-861: Added retry policy and prepared statement tests --- appveyor/appveyor.ps1 | 4 +- tests/integration/simulacron/test_policies.py | 127 +++++++++++++++++- 2 files changed, 122 insertions(+), 9 deletions(-) diff --git a/appveyor/appveyor.ps1 b/appveyor/appveyor.ps1 index bca80a7ada..06cc82b090 100644 --- a/appveyor/appveyor.ps1 +++ b/appveyor/appveyor.ps1 @@ -3,7 +3,7 @@ $env:PATH="$($env:JAVA_HOME)\bin;$($env:PATH)" $env:CCM_PATH="C:\Users\appveyor\ccm" $env:CASSANDRA_VERSION=$env:cassandra_version $env:EVENT_LOOP_MANAGER="async" -$env:SIMULACRON_JAR="C:\Users\appveyor\simulacron-standalone-0.5.0.jar" +$env:SIMULACRON_JAR="C:\Users\appveyor\simulacron-standalone-0.7.0.jar" python --version python -c "import platform; print(platform.architecture())" @@ -43,7 +43,7 @@ If (!(Test-Path $jce_indicator)) { } # Download simulacron -$simulacron_url = "https://github.com/datastax/simulacron/releases/download/0.5.0/simulacron-standalone-0.5.0.jar" +$simulacron_url = "https://github.com/datastax/simulacron/releases/download/0.7.0/simulacron-standalone-0.7.0.jar" $simulacron_jar = $env:SIMULACRON_JAR if(!(Test-Path $simulacron_jar)) { (new-object System.Net.WebClient).DownloadFile($simulacron_url, $simulacron_jar) diff --git a/tests/integration/simulacron/test_policies.py b/tests/integration/simulacron/test_policies.py index 5dc71408af..85ab2278e6 100644 --- a/tests/integration/simulacron/test_policies.py +++ b/tests/integration/simulacron/test_policies.py @@ -26,6 +26,8 @@ from tests.integration.simulacron.utils import start_and_prime_singledc, prime_query, \ stop_simulacron, NO_THEN, clear_queries +from itertools import count + class BadRoundRobinPolicy(RoundRobinPolicy): @@ -178,23 +180,45 @@ def on_write_timeout(self, query, consistency, write_type, return self.IGNORE, None +class CounterRetryPolicy(RetryPolicy): + def __init__(self): + self.write_timeout = count() + self.read_timeout = count() + self.unavailable = count() + + def on_read_timeout(self, query, consistency, required_responses, + received_responses, data_retrieved, retry_num): + next(self.read_timeout) + return self.IGNORE, None + + def on_write_timeout(self, query, consistency, write_type, + required_responses, received_responses, retry_num): + print("counter on_write_timeout") + next(self.write_timeout) + return self.IGNORE, None + + def on_unavailable(self, query, consistency, required_replicas, alive_replicas, retry_num): + next(self.unavailable) + return self.IGNORE, None + + def reset_counters(self): + self.write_timeout = count() + self.read_timeout = count() + self.unavailable = count() + + @requiressimulacron -class RetryPolicyTets(unittest.TestCase): +class RetryPolicyTests(unittest.TestCase): @classmethod def setUpClass(cls): if SIMULACRON_JAR is None or CASSANDRA_VERSION < "2.1": return start_and_prime_singledc() - cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False, - default_retry_policy=CustomRetryPolicy()) - cls.session = cls.cluster.connect(wait_for_all_pools=True) - @classmethod def tearDownClass(cls): if SIMULACRON_JAR is None or CASSANDRA_VERSION < "2.1": return - cls.cluster.shutdown() stop_simulacron() def tearDown(self): @@ -213,6 +237,7 @@ def test_retry_policy_ignores_and_rethrows(self): @test_category connection """ + self.set_cluster(CustomRetryPolicy()) query_to_prime_simple = "SELECT * from simulacron_keyspace.simple" query_to_prime_cdc = "SELECT * from simulacron_keyspace.cdc" then = { @@ -221,7 +246,8 @@ def test_retry_policy_ignores_and_rethrows(self): "consistency_level": "LOCAL_QUORUM", "received": 1, "block_for": 2, - "write_type": "SIMPLE" + "write_type": "SIMPLE", + "ignore_on_prepare": True } prime_query(query_to_prime_simple, then=then, rows=None, column_types=None) then["write_type"] = "CDC" @@ -231,3 +257,90 @@ def test_retry_policy_ignores_and_rethrows(self): self.session.execute(query_to_prime_simple) #CDC should be ignored self.session.execute(query_to_prime_cdc) + + def test_retry_policy_with_prepared(self): + """ + Test to verify that the retry policy is called as expected + for bound and prepared statements when set at the cluster level + + @since 3.13 + @jira_ticket PYTHON-861 + @expected_result the appropriate retry policy is called + + @test_category connection + """ + counter_policy = CounterRetryPolicy() + self.set_cluster(counter_policy) + query_to_prime = "SELECT * from simulacron_keyspace.simulacron_table" + then = { + "result": "write_timeout", + "delay_in_ms": 0, + "consistency_level": "LOCAL_QUORUM", + "received": 1, + "block_for": 2, + "write_type": "SIMPLE", + "ignore_on_prepare": True + } + prime_query(query_to_prime, then=then, rows=None, column_types=None) + self.session.execute(query_to_prime) + self.assertEqual(next(counter_policy.write_timeout), 1) + counter_policy.reset_counters() + + query_to_prime_prepared = "SELECT * from simulacron_keyspace.simulacron_table WHERE key = :key" + when = {"params": {"key": "0"}, "param_types": {"key": "ascii"}} + + prime_query(query_to_prime_prepared, when=when, then=then, rows=None, column_types=None) + + prepared_stmt = self.session.prepare(query_to_prime_prepared) + + bound_stm = prepared_stmt.bind({"key": "0"}) + self.session.execute(bound_stm) + self.assertEqual(next(counter_policy.write_timeout), 1) + + counter_policy.reset_counters() + self.session.execute(prepared_stmt, ("0",)) + self.assertEqual(next(counter_policy.write_timeout), 1) + + def test_setting_retry_policy_to_statement(self): + """ + Test to verify that the retry policy is called as expected + for bound and prepared statements when set to the prepared statement + + @since 3.13 + @jira_ticket PYTHON-861 + @expected_result the appropriate retry policy is called + + @test_category connection + """ + retry_policy = RetryPolicy() + self.set_cluster(retry_policy) + then = { + "result": "write_timeout", + "delay_in_ms": 0, + "consistency_level": "LOCAL_QUORUM", + "received": 1, + "block_for": 2, + "write_type": "SIMPLE", + "ignore_on_prepare": True + } + query_to_prime_prepared = "SELECT * from simulacron_keyspace.simulacron_table WHERE key = :key" + when = {"params": {"key": "0"}, "param_types": {"key": "ascii"}} + prime_query(query_to_prime_prepared, when=when, then=then, rows=None, column_types=None) + + counter_policy = CounterRetryPolicy() + prepared_stmt = self.session.prepare(query_to_prime_prepared) + prepared_stmt.retry_policy = counter_policy + self.session.execute(prepared_stmt, ("0",)) + self.assertEqual(next(counter_policy.write_timeout), 1) + + counter_policy.reset_counters() + bound_stmt = prepared_stmt.bind({"key": "0"}) + bound_stmt.retry_policy = counter_policy + self.session.execute(bound_stmt) + self.assertEqual(next(counter_policy.write_timeout), 1) + + def set_cluster(self, retry_policy): + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False, + default_retry_policy=retry_policy) + self.session = self.cluster.connect(wait_for_all_pools=True) + self.addCleanup(self.cluster.shutdown) \ No newline at end of file From 1cf623f7aaf3f298b6d7cc2dc4af3fbdb6e48aaa Mon Sep 17 00:00:00 2001 From: bjmb Date: Fri, 1 Dec 2017 16:22:17 -0500 Subject: [PATCH 1293/2005] PYTHON-836: Added lock to query plan for speculative execution --- cassandra/cluster.py | 19 ++++++++++++++++++- tests/integration/standard/test_policies.py | 2 +- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 153f324343..2385ab6990 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3460,12 +3460,12 @@ def __init__(self, session, message, query, timeout, metrics=None, prepared_stat self.prepared_statement = prepared_statement self._callback_lock = Lock() self._start_time = start_time or time.time() + self._spec_execution_plan = speculative_execution_plan or self._spec_execution_plan self._make_query_plan() self._event = Event() self._errors = {} self._callbacks = [] self._errbacks = [] - self._spec_execution_plan = speculative_execution_plan or self._spec_execution_plan self.attempted_hosts = [] self._start_timer() @@ -3534,6 +3534,10 @@ def _make_query_plan(self): # calls to send_request (which retries may do) will resume where # they last left off self.query_plan = iter(self._load_balancer.make_query_plan(self.session.keyspace, self.query)) + # Make iterator thread safe when there can be a speculative delay since it could + # from different threads + if isinstance(self._spec_execution_plan, NoSpeculativeExecutionPlan): + self.query_plan = _threadsafe_iter(self.query_plan) def send_request(self, error_no_hosts=True): """ Internal """ @@ -4306,3 +4310,16 @@ def paging_state(self): avoid sending this to untrusted parties. """ return self.response_future._paging_state + + +class _threadsafe_iter(six.Iterator): + def __init__(self, it): + self.it = it + self.lock = Lock() + + def __iter__(self): + return self + + def __next__(self): + with self.lock: + return next(self.it) diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index ddf2214e83..b8f17c6872 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -104,7 +104,7 @@ def test_delay_can_be_0(self): @expected_result all the queries are executed immediately @test_category policy """ - number_of_requests = 6 + number_of_requests = 4 spec = ExecutionProfile(speculative_execution_policy=ConstantSpeculativeExecutionPolicy(0, number_of_requests)) cluster = Cluster() From cc1571ea565805907a1d557325a57a5fab1682e1 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 29 Nov 2017 08:48:19 -0500 Subject: [PATCH 1294/2005] Fix DeprecationWarning of log.warn --- CHANGELOG.rst | 1 + cassandra/cluster.py | 6 +++--- cassandra/cqltypes.py | 10 ++++++---- cassandra/timestamps.py | 2 +- tests/unit/test_cluster.py | 12 ++++++------ tests/unit/test_timestamps.py | 20 ++++++++++---------- 6 files changed, 27 insertions(+), 24 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 6c8459c812..f344a58c79 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -13,6 +13,7 @@ Bug Fixes Other ----- * cqlengine: avoid warning when unregistering connection on shutdown (PYTHON-865) +* Fix DeprecationWarning of log.warn (PYTHON-846) 3.12.0 ====== diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 153f324343..3b5704dd4f 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -889,7 +889,7 @@ def __init__(self, if self._config_mode is _ConfigMode.PROFILES: default_lbp_profiles = self.profile_manager._profiles_without_explicit_lbps() if default_lbp_profiles: - log.warn( + log.warning( 'Cluster.__init__ called with contact_points ' 'specified, but load-balancing policies are not ' 'specified in some ExecutionProfiles. In the next ' @@ -900,7 +900,7 @@ def __init__(self, ''.format(cp=contact_points, eps=default_lbp_profiles)) else: if load_balancing_policy is None: - log.warn( + log.warning( 'Cluster.__init__ called with contact_points ' 'specified, but no load_balancing_policy. In the next ' 'major version, this will raise an error; please ' @@ -1053,7 +1053,7 @@ def add_execution_profile(self, name, profile, pool_wait_timeout=5): self._contact_points_explicit and not profile._load_balancing_policy_explicit) if contact_points_but_no_lbp: - log.warn( + log.warning( 'Tried to add an ExecutionProfile with name {name}. ' '{self} was explicitly configured with contact_points, but ' '{ep} was not explicitly configured with a ' diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index fc6ef4595c..3819e288d0 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -971,12 +971,14 @@ def _make_udt_tuple_type(cls, name, field_names): except ValueError: try: t = namedtuple(name, util._positional_rename_invalid_identifiers(field_names)) - log.warn("could not create a namedtuple for '%s' because one or more field names are not valid Python identifiers (%s); " \ - "returning positionally-named fields" % (name, field_names)) + log.warning("could not create a namedtuple for '%s' because one or more " + "field names are not valid Python identifiers (%s); " + "returning positionally-named fields" % (name, field_names)) except ValueError: t = None - log.warn("could not create a namedtuple for '%s' because the name is not a valid Python identifier; " \ - "will return tuples in its place" % (name,)) + log.warning("could not create a namedtuple for '%s' because the name is " + "not a valid Python identifier; will return tuples in " + "its place" % (name,)) return t diff --git a/cassandra/timestamps.py b/cassandra/timestamps.py index 36850a4115..44177eaa41 100644 --- a/cassandra/timestamps.py +++ b/cassandra/timestamps.py @@ -100,7 +100,7 @@ def _maybe_warn(self, now): (diff >= self.warning_threshold * 1e6) and (since_last_warn >= self.warning_interval * 1e6)) if warn: - log.warn( + log.warning( "Clock skew detected: current tick ({now}) was {diff} " "microseconds behind the last generated timestamp " "({last}), returned timestamps will be artificially " diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index a7ae88cd70..7f7fbbc79b 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -404,8 +404,8 @@ def test_warning_on_no_lbp_with_contact_points_profile_mode(self): def _check_warning_on_no_lbp_with_contact_points(self, cluster_kwargs): with patch('cassandra.cluster.log') as patched_logger: Cluster(**cluster_kwargs) - patched_logger.warn.assert_called_once() - warning_message = patched_logger.warn.call_args[0][0] + patched_logger.warning.assert_called_once() + warning_message = patched_logger.warning.call_args[0][0] self.assertIn('please specify a load-balancing policy', warning_message) self.assertIn("contact_points = ['127.0.0.1']", warning_message) @@ -459,7 +459,7 @@ def _check_no_warning_on_contact_points_with_lbp(self, cluster_kwargs): """ with patch('cassandra.cluster.log') as patched_logger: Cluster(**cluster_kwargs) - patched_logger.warn.assert_not_called() + patched_logger.warning.assert_not_called() @mock_session_pools def test_warning_adding_no_lbp_ep_to_cluster_with_contact_points(self): @@ -473,8 +473,8 @@ def test_warning_adding_no_lbp_ep_to_cluster_with_contact_points(self): profile=ExecutionProfile() ) - patched_logger.warn.assert_called_once() - warning_message = patched_logger.warn.call_args[0][0] + patched_logger.warning.assert_called_once() + warning_message = patched_logger.warning.call_args[0][0] self.assertIn('no_lbp', warning_message) self.assertIn('trying to add', warning_message) self.assertIn('please specify a load-balancing policy', warning_message) @@ -491,4 +491,4 @@ def test_no_warning_adding_lbp_ep_to_cluster_with_contact_points(self): profile=ExecutionProfile(load_balancing_policy=Mock(name='lbp')) ) - patched_logger.warn.assert_not_called() + patched_logger.warning.assert_not_called() diff --git a/tests/unit/test_timestamps.py b/tests/unit/test_timestamps.py index 50c0ba92ec..cfbd97e6a4 100644 --- a/tests/unit/test_timestamps.py +++ b/tests/unit/test_timestamps.py @@ -131,12 +131,12 @@ def test_basic_log_content(self): tsg._last_warn = 12 tsg._next_timestamp(20, tsg.last) - self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 0) + self.assertEqual(len(self.patched_timestamp_log.warning.call_args_list), 0) tsg._next_timestamp(16, tsg.last) - self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 1) + self.assertEqual(len(self.patched_timestamp_log.warning.call_args_list), 1) self.assertLastCallArgRegex( - self.patched_timestamp_log.warn.call_args, + self.patched_timestamp_log.warning.call_args, r'Clock skew detected:.*\b16\b.*\b4\b.*\b20\b' ) @@ -154,7 +154,7 @@ def test_disable_logging(self): no_warn_tsg.last = 100 no_warn_tsg._next_timestamp(99, no_warn_tsg.last) - self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 0) + self.assertEqual(len(self.patched_timestamp_log.warning.call_args_list), 0) def test_warning_threshold_respected_no_logging(self): """ @@ -171,7 +171,7 @@ def test_warning_threshold_respected_no_logging(self): ) tsg.last, tsg._last_warn = 100, 97 tsg._next_timestamp(98, tsg.last) - self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 0) + self.assertEqual(len(self.patched_timestamp_log.warning.call_args_list), 0) def test_warning_threshold_respected_logs(self): """ @@ -189,7 +189,7 @@ def test_warning_threshold_respected_logs(self): ) tsg.last, tsg._last_warn = 100, 97 tsg._next_timestamp(98, tsg.last) - self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 1) + self.assertEqual(len(self.patched_timestamp_log.warning.call_args_list), 1) def test_warning_interval_respected_no_logging(self): """ @@ -207,10 +207,10 @@ def test_warning_interval_respected_no_logging(self): ) tsg.last = 100 tsg._next_timestamp(70, tsg.last) - self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 1) + self.assertEqual(len(self.patched_timestamp_log.warning.call_args_list), 1) tsg._next_timestamp(71, tsg.last) - self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 1) + self.assertEqual(len(self.patched_timestamp_log.warning.call_args_list), 1) def test_warning_interval_respected_logs(self): """ @@ -229,10 +229,10 @@ def test_warning_interval_respected_logs(self): ) tsg.last = 100 tsg._next_timestamp(70, tsg.last) - self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 1) + self.assertEqual(len(self.patched_timestamp_log.warning.call_args_list), 1) tsg._next_timestamp(72, tsg.last) - self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 2) + self.assertEqual(len(self.patched_timestamp_log.warning.call_args_list), 2) class TestTimestampGeneratorMultipleThreads(unittest.TestCase): From cd8d8ff4812ea1ee2badb8169cc456e0460d4988 Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Mon, 11 Dec 2017 11:07:00 -0500 Subject: [PATCH 1295/2005] PYTHON-862: use self.create_timer instead of AsyncoreConnection.create_timer --- CHANGELOG.rst | 1 + cassandra/io/asyncorereactor.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 6c8459c812..aae692fc79 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -7,6 +7,7 @@ Features Bug Fixes --------- +* AttributeError: 'NoneType' object has no attribute 'add_timer' (PYTHON-862) * Support retry_policy in PreparedStatement (PYTHON-861) * __del__ method in Session is throwing an exception (PYTHON-813) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 86abe5986b..c5ede7cc39 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -370,7 +370,7 @@ def close(self): self._readable = False # We don't have to wait for this to be closed, we can just schedule it - AsyncoreConnection.create_timer(0, partial(asyncore.dispatcher.close, self)) + self.create_timer(0, partial(asyncore.dispatcher.close, self)) log.debug("Closed socket to %s", self.host) From 1f0bf91735d580478c71d2fee0c2d2361f5bc7bc Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 13 Dec 2017 17:14:11 -0500 Subject: [PATCH 1296/2005] test for PYTHON-862 --- tests/integration/simulacron/test_connection.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index a8aebb78ef..edf3709efa 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -27,6 +27,7 @@ from cassandra.cluster import (EXEC_PROFILE_DEFAULT, Cluster, ExecutionProfile, _Scheduler, NoHostAvailable) from cassandra.policies import HostStateListener, RoundRobinPolicy +from cassandra.io.asyncorereactor import AsyncoreConnection from tests.integration import (CASSANDRA_VERSION, PROTOCOL_VERSION, requiressimulacron) from tests.integration.util import assert_quiescent_pool_state @@ -328,3 +329,13 @@ def test_host_is_not_set_to_down_after_query_oto(self): self.assertEqual(listener.hosts_marked_down, []) assert_quiescent_pool_state(self, cluster) + + def test_can_shutdown_asyncoreconnection_subclass(self): + start_and_prime_singledc() + class ExtendedConnection(AsyncoreConnection): + pass + + cluster = Cluster(contact_points=["127.0.0.2"], + connection_class=ExtendedConnection) + cluster.connect() + cluster.shutdown() From bf214039f6f6110cfa34451a932c690356f455d5 Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Fri, 15 Dec 2017 16:21:32 -0500 Subject: [PATCH 1297/2005] Fixed ResponseFuture unit tests --- tests/unit/test_response_future.py | 44 +++++++++++++++++++----------- 1 file changed, 28 insertions(+), 16 deletions(-) diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index be25b42b5a..fa1ba96275 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -39,10 +39,16 @@ class ResponseFutureTests(unittest.TestCase): def make_basic_session(self): return Mock(spec=Session, row_factory=lambda *x: list(x)) + def make_pool(self): + pool = Mock() + pool.is_shutdown = False + pool.borrow_connection.return_value = [Mock(), Mock()] + return pool + def make_session(self): session = self.make_basic_session() session.cluster._default_load_balancing_policy.make_query_plan.return_value = ['ip1', 'ip2'] - session._pools.get.return_value.is_shutdown = False + session._pools.get.return_value = self.make_pool() return session def make_response_future(self, session): @@ -122,14 +128,13 @@ def test_other_result_message_kind(self): def test_read_timeout_error_message(self): session = self.make_session() query = SimpleStatement("SELECT * FROM foo") - query.retry_policy = Mock() - query.retry_policy.on_read_timeout.return_value = (RetryPolicy.RETHROW, None) message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) rf = ResponseFuture(session, message, query, 1) rf.send_request() - result = Mock(spec=ReadTimeoutErrorMessage, info={}) + result = Mock(spec=ReadTimeoutErrorMessage, info={"data_retrieved": "", "required_responses":2, + "received_responses":1, "consistency": 1}) rf._set_result(None, None, None, result) self.assertRaises(Exception, rf.result) @@ -137,14 +142,13 @@ def test_read_timeout_error_message(self): def test_write_timeout_error_message(self): session = self.make_session() query = SimpleStatement("INSERT INFO foo (a, b) VALUES (1, 2)") - query.retry_policy = Mock() - query.retry_policy.on_write_timeout.return_value = (RetryPolicy.RETHROW, None) message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) rf = ResponseFuture(session, message, query, 1) rf.send_request() - result = Mock(spec=WriteTimeoutErrorMessage, info={}) + result = Mock(spec=WriteTimeoutErrorMessage, info={"write_type": 1, "required_responses":2, + "received_responses":1, "consistency": 1}) rf._set_result(None, None, None, result) self.assertRaises(Exception, rf.result) @@ -156,9 +160,10 @@ def test_unavailable_error_message(self): message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) rf = ResponseFuture(session, message, query, 1) + rf._query_retries = 1 rf.send_request() - result = Mock(spec=UnavailableErrorMessage, info={}) + result = Mock(spec=UnavailableErrorMessage, info={"required_replicas":2, "alive_replicas": 1, "consistency": 1}) rf._set_result(None, None, None, result) self.assertRaises(Exception, rf.result) @@ -295,7 +300,11 @@ def test_first_pool_shutdown(self): session = self.make_basic_session() session.cluster._default_load_balancing_policy.make_query_plan.return_value = ['ip1', 'ip2'] # first return a pool with is_shutdown=True, then is_shutdown=False - session._pools.get.side_effect = [Mock(is_shutdown=True), Mock(is_shutdown=False)] + pool_shutdown = self.make_pool() + pool_shutdown.is_shutdown = True + pool_ok = self.make_pool() + pool_ok.is_shutdown = True + session._pools.get.side_effect = [pool_shutdown, pool_ok] rf = self.make_response_future(session) rf.send_request() @@ -358,16 +367,17 @@ def test_errback(self): pool.borrow_connection.return_value = (connection, 1) query = SimpleStatement("INSERT INFO foo (a, b) VALUES (1, 2)") - query.retry_policy = Mock() - query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETHROW, None) message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) rf = ResponseFuture(session, message, query, 1) + rf._query_retries = 1 rf.send_request() rf.add_errback(self.assertIsInstance, Exception) - result = Mock(spec=UnavailableErrorMessage, info={}) + result = Mock(spec=UnavailableErrorMessage, info={"required_replicas":2, "alive_replicas": 1, "consistency": 1}) + result.to_exception.return_value = Exception() + rf._set_result(None, None, None, result) self.assertRaises(Exception, rf.result) @@ -423,9 +433,10 @@ def test_multiple_errbacks(self): rf.add_errback(callback2, arg2, **kwargs2) expected_exception = Unavailable("message", 1, 2, 3) - result = Mock(spec=UnavailableErrorMessage, info={'something': 'here'}) + result = Mock(spec=UnavailableErrorMessage, info={"required_replicas":2, "alive_replicas": 1, "consistency": 1}) result.to_exception.return_value = expected_exception rf._set_result(None, None, None, result) + rf._event.set() self.assertRaises(Exception, rf.result) callback.assert_called_once_with(expected_exception, arg, **kwargs) @@ -434,19 +445,20 @@ def test_multiple_errbacks(self): def test_add_callbacks(self): session = self.make_session() query = SimpleStatement("INSERT INFO foo (a, b) VALUES (1, 2)") - query.retry_policy = Mock() - query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETHROW, None) message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) # test errback rf = ResponseFuture(session, message, query, 1) + rf._query_retries = 1 rf.send_request() rf.add_callbacks( callback=self.assertEqual, callback_args=([{'col': 'val'}],), errback=self.assertIsInstance, errback_args=(Exception,)) - result = Mock(spec=UnavailableErrorMessage, info={}) + result = Mock(spec=UnavailableErrorMessage, + info={"required_replicas":2, "alive_replicas": 1, "consistency": 1}) + result.to_exception.return_value = Exception() rf._set_result(None, None, None, result) self.assertRaises(Exception, rf.result) From 054a183a115c933c4c993e4f717fd0f071812871 Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Mon, 18 Dec 2017 16:38:32 -0500 Subject: [PATCH 1298/2005] Alternative solution for PYTHON-836 --- CHANGELOG.rst | 1 + cassandra/cluster.py | 29 ++++++------- tests/integration/simulacron/test_policies.py | 42 ++++++++++++++++++- tests/integration/standard/test_policies.py | 39 +---------------- 4 files changed, 54 insertions(+), 57 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index aae692fc79..2454f080a5 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -29,6 +29,7 @@ Features * Include hash of result set metadata in prepared stmt id (PYTHON-808) * Add NO_COMPACT startup option (PYTHON-839) * Add new exception type for CDC (PYTHON-837) +* Allow 0ms in ConstantSpeculativeExecutionPolicy (PYTHON-836) Bug Fixes --------- diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 2385ab6990..006462aaa2 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3521,6 +3521,18 @@ def _on_timeout(self): def _on_speculative_execute(self): self._timer = None if not self._event.is_set(): + + # PYTHON-836, the speculative queries must be after + # the query is sent from the main thread, otherwise the + # query from the main thread may raise NoHostAvailable + # if the _query_plan has been exhausted by the specualtive queries. + # This also prevents a race condition accessing the iterator. + # We reschedule this call until the main thread has succeeded + # making a query + if not self.attempted_hosts: + self._timer = self.session.cluster.connection_class.create_timer(0.01, self._on_speculative_execute) + return + if self._time_remaining is not None: if self._time_remaining <= 0: self._on_timeout() @@ -3534,10 +3546,6 @@ def _make_query_plan(self): # calls to send_request (which retries may do) will resume where # they last left off self.query_plan = iter(self._load_balancer.make_query_plan(self.session.keyspace, self.query)) - # Make iterator thread safe when there can be a speculative delay since it could - # from different threads - if isinstance(self._spec_execution_plan, NoSpeculativeExecutionPlan): - self.query_plan = _threadsafe_iter(self.query_plan) def send_request(self, error_no_hosts=True): """ Internal """ @@ -4310,16 +4318,3 @@ def paging_state(self): avoid sending this to untrusted parties. """ return self.response_future._paging_state - - -class _threadsafe_iter(six.Iterator): - def __init__(self, it): - self.it = it - self.lock = Lock() - - def __iter__(self): - return self - - def __next__(self): - with self.lock: - return next(self.it) diff --git a/tests/integration/simulacron/test_policies.py b/tests/integration/simulacron/test_policies.py index 85ab2278e6..99752c1b07 100644 --- a/tests/integration/simulacron/test_policies.py +++ b/tests/integration/simulacron/test_policies.py @@ -17,7 +17,7 @@ import unittest # noqa from cassandra import OperationTimedOut, WriteTimeout -from cassandra.cluster import Cluster, ExecutionProfile +from cassandra.cluster import Cluster, ExecutionProfile, ResponseFuture from cassandra.query import SimpleStatement from cassandra.policies import ConstantSpeculativeExecutionPolicy, RoundRobinPolicy, RetryPolicy, WriteType @@ -168,6 +168,45 @@ def test_speculative_and_timeout(self): # This is because 14 / 4 + 1 = 4 self.assertEqual(len(response_future.attempted_hosts), 4) + def test_delay_can_be_0(self): + """ + Test to validate that the delay can be zero for the ConstantSpeculativeExecutionPolicy + @since 3.13 + @jira_ticket PYTHON-836 + @expected_result all the queries are executed immediately + @test_category policy + """ + query_to_prime = "INSERT INTO madeup_keyspace.madeup_table(k, v) VALUES (1, 2)" + prime_query(query_to_prime, then={"delay_in_ms": 5000}) + number_of_requests = 4 + spec = ExecutionProfile(load_balancing_policy=RoundRobinPolicy(), + speculative_execution_policy=ConstantSpeculativeExecutionPolicy(0, number_of_requests)) + + cluster = Cluster() + cluster.add_execution_profile("spec", spec) + session = cluster.connect(wait_for_all_pools=True) + self.addCleanup(cluster.shutdown) + + counter = count() + + def patch_and_count(f): + def patched(*args, **kwargs): + next(counter) + print("patched") + f(*args, **kwargs) + return patched + + self.addCleanup(setattr, ResponseFuture, "send_request", ResponseFuture.send_request) + ResponseFuture.send_request = patch_and_count(ResponseFuture.send_request) + stmt = SimpleStatement(query_to_prime) + stmt.is_idempotent = True + results = session.execute(stmt, execution_profile="spec") + self.assertEqual(len(results.response_future.attempted_hosts), 3) + + # send_request is called number_of_requests times for the speculative request + # plus one for the call from the main thread. + self.assertEqual(next(counter), number_of_requests + 1) + class CustomRetryPolicy(RetryPolicy): def on_write_timeout(self, query, consistency, write_type, @@ -193,7 +232,6 @@ def on_read_timeout(self, query, consistency, required_responses, def on_write_timeout(self, query, consistency, write_type, required_responses, received_responses, retry_num): - print("counter on_write_timeout") next(self.write_timeout) return self.IGNORE, None diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index b8f17c6872..405a25b465 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -19,14 +19,11 @@ from cassandra.cluster import Cluster, ExecutionProfile, ResponseFuture from cassandra.policies import HostFilterPolicy, RoundRobinPolicy, SimpleConvictionPolicy, \ - WhiteListRoundRobinPolicy, ConstantSpeculativeExecutionPolicy + WhiteListRoundRobinPolicy from cassandra.pool import Host -from cassandra.query import SimpleStatement from tests.integration import PROTOCOL_VERSION, local, use_singledc -from tests import notwindows -from itertools import count from concurrent.futures import wait as wait_futures def setup_module(): @@ -92,37 +89,3 @@ def test_only_connects_to_subset(self): queried_hosts.update(response.response_future.attempted_hosts) queried_hosts = set(host.address for host in queried_hosts) self.assertEqual(queried_hosts, only_connect_hosts) - - -class SpeculativeExecutionPolicy(unittest.TestCase): - @notwindows - def test_delay_can_be_0(self): - """ - Test to validate that the delay can be zero for the ConstantSpeculativeExecutionPolicy - @since 3.13 - @jira_ticket PYTHON-836 - @expected_result all the queries are executed immediately - @test_category policy - """ - number_of_requests = 4 - spec = ExecutionProfile(speculative_execution_policy=ConstantSpeculativeExecutionPolicy(0, number_of_requests)) - - cluster = Cluster() - cluster.add_execution_profile("spec", spec) - session = cluster.connect(wait_for_all_pools=True) - self.addCleanup(cluster.shutdown) - - counter = count() - - def patch_and_count(f): - def patched(*args, **kwargs): - next(counter) - f(*args, **kwargs) - return patched - - ResponseFuture._on_speculative_execute = patch_and_count(ResponseFuture._on_speculative_execute) - stmt = SimpleStatement("INSERT INTO test3rf.test(k, v) VALUES (1, 2)") - stmt.is_idempotent = True - results = session.execute(stmt, execution_profile="spec") - self.assertEqual(len(results.response_future.attempted_hosts), 3) - self.assertEqual(next(counter), number_of_requests) From c9ef7c39f0f57c4c2fa04d30d149f3c98ade7db9 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 28 Dec 2017 10:34:06 -0500 Subject: [PATCH 1299/2005] Support cassandra.query.BatchType with cqlengine BatchQuery --- CHANGELOG.rst | 1 + cassandra/cqlengine/query.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index aae692fc79..3606b96eb0 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,6 +4,7 @@ Features -------- * cqlengine: LIKE filter operator (PYTHON-512) +* Support cassandra.query.BatchType with cqlengine BatchQuery (PYTHON-888) Bug Fixes --------- diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 615a650b05..44d604ab6c 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -151,7 +151,7 @@ def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on timeout=conn.NOT_SET, connection=None): """ :param batch_type: (optional) One of batch type values available through BatchType enum - :type batch_type: str or None + :type batch_type: BatchType, str or None :param timestamp: (optional) A datetime or timedelta object with desired timestamp to be applied to the batch conditional. :type timestamp: datetime or timedelta or None @@ -225,7 +225,7 @@ def execute(self): self._execute_callbacks() return - opener = 'BEGIN ' + (self.batch_type + ' ' if self.batch_type else '') + ' BATCH' + opener = 'BEGIN ' + (str(self.batch_type) + ' ' if self.batch_type else '') + ' BATCH' if self.timestamp: if isinstance(self.timestamp, six.integer_types): From 2684b242dc6842118df457c96e199465aba653b0 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 28 Dec 2017 11:49:41 -0500 Subject: [PATCH 1300/2005] Add Cluster.set_meta_refresh_enabled deprecation warning --- cassandra/cluster.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 2385ab6990..f7f413fc47 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -25,6 +25,7 @@ from functools import partial, wraps from itertools import groupby, count import logging +from warnings import warn from random import random import six from six.moves import filter, range, queue as Queue @@ -1795,6 +1796,8 @@ def set_meta_refresh_enabled(self, enabled): Meta refresh must be enabled for the driver to become aware of any cluster topology changes or schema updates. """ + warn("Cluster.set_meta_refresh_enabled is deprecated and will be removed in 4.0. Set " + "Cluster.schema_metadata_enabled and Cluster.token_metadata_enabled instead.", DeprecationWarning) self.schema_metadata_enabled = enabled self.token_metadata_enabled = enabled From d88de60d2f502bf0e164365e8da6ed2fc46f8938 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 4 Dec 2017 14:48:25 -0500 Subject: [PATCH 1301/2005] Fix example_mapper.py for python3 --- CHANGELOG.rst | 1 + example_mapper.py | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 3cb1f982e2..b96f64b936 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -15,6 +15,7 @@ Other ----- * cqlengine: avoid warning when unregistering connection on shutdown (PYTHON-865) * Fix DeprecationWarning of log.warn (PYTHON-846) +* Fix example_mapper.py for python3 (PYTHON-860) 3.12.0 ====== diff --git a/example_mapper.py b/example_mapper.py index 9263c61c1e..82ab34d406 100755 --- a/example_mapper.py +++ b/example_mapper.py @@ -76,7 +76,7 @@ def main(): try: nick.iff(birth_year=1988).update(birth_year=1989) except LWTException: - print "precondition not met" + print("precondition not met") log.info("### setting individual column to NULL by updating it to None") nick.update(birth_year=None) @@ -99,15 +99,15 @@ def main(): log.info("### All members") for m in FamilyMembers.all(): - print m, m.birth_year, m.sex + print(m, m.birth_year, m.sex) log.info("### Select by partition key") for m in FamilyMembers.objects(id=simmons.id): - print m, m.birth_year, m.sex + print(m, m.birth_year, m.sex) log.info("### Constrain on clustering key") for m in FamilyMembers.objects(id=simmons.id, surname=simmons.surname): - print m, m.birth_year, m.sex + print(m, m.birth_year, m.sex) log.info("### Constrain on clustering key") kids = FamilyMembers.objects(id=simmons.id, surname=simmons.surname, name__in=['Nick', 'Sophie']) @@ -115,7 +115,7 @@ def main(): log.info("### Delete a record") FamilyMembers(id=hogan_id, surname='Hogan', name='Linda').delete() for m in FamilyMembers.objects(id=hogan_id): - print m, m.birth_year, m.sex + print(m, m.birth_year, m.sex) management.drop_keyspace(KEYSPACE) From 624e41bf17f6be5cd621e82713d4d12823eb9a8d Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 6 Dec 2017 16:22:29 -0500 Subject: [PATCH 1302/2005] Add docs about possible deadlock on cassandra.concurrent.execute_concurrent --- CHANGELOG.rst | 1 + cassandra/concurrent.py | 3 +++ 2 files changed, 4 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index b96f64b936..ea121a8f69 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -16,6 +16,7 @@ Other * cqlengine: avoid warning when unregistering connection on shutdown (PYTHON-865) * Fix DeprecationWarning of log.warn (PYTHON-846) * Fix example_mapper.py for python3 (PYTHON-860) +* Possible deadlock on cassandra.concurrent.execute_concurrent (PYTHON-768) 3.12.0 ====== diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index 204bef964b..afed0f83b2 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -81,6 +81,9 @@ def execute_concurrent(session, statements_and_parameters, concurrency=100, rais else: process_user(result[0]) # result will be a list of rows + Note: in the case that `generators` are used, it is important to ensure the consumers do not + block or attempt further synchronous requests, because no further IO will be processed until + the consumer returns. This may also produce a deadlock in the IO event thread. """ if concurrency <= 0: raise ValueError("concurrency must be greater than 0") From 5443fb81fd403e0b2277f17ed37ae85601608668 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 13 Dec 2017 16:38:24 -0500 Subject: [PATCH 1303/2005] Add some knowns deprecated warnings for 4.x --- CHANGELOG.rst | 1 + cassandra/cluster.py | 3 +++ cassandra/cqlengine/models.py | 1 + cassandra/cqlengine/query.py | 11 ++++++++++- 4 files changed, 15 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index ea121a8f69..48ec6a052a 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -17,6 +17,7 @@ Other * Fix DeprecationWarning of log.warn (PYTHON-846) * Fix example_mapper.py for python3 (PYTHON-860) * Possible deadlock on cassandra.concurrent.execute_concurrent (PYTHON-768) +* Add some known deprecated warnings for 4.x (PYTHON-877) 3.12.0 ====== diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 61b6da5f1b..863a40c452 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -879,7 +879,10 @@ def __init__(self, if execution_profiles: raise ValueError("Clusters constructed with execution_profiles should not specify legacy parameters " "load_balancing_policy or default_retry_policy. Configure this in a profile instead.") + self._config_mode = _ConfigMode.LEGACY + warn("Legacy execution parameters will be removed in 4.0. Consider using " + "execution profiles.", DeprecationWarning) else: if execution_profiles: diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 0b6ad89a10..46ec118770 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -567,6 +567,7 @@ def _raw_column_family_name(cls): if not cls._table_name: if cls.__table_name__: if cls.__table_name_case_sensitive__: + warn("Model __table_name_case_sensitive__ will be removed in 4.0.", PendingDeprecationWarning) cls._table_name = cls.__table_name__ else: table_name = cls.__table_name__.lower() diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 615a650b05..d3bcd09a3e 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -532,6 +532,10 @@ def __getitem__(self, s): if isinstance(s, slice): start = s.start if s.start else 0 + if start < 0 or (s.stop is not None and s.stop < 0): + warn("ModelQuerySet slicing with negative indices support will be removed in 4.0.", + DeprecationWarning) + # calculate the amount of results that need to be loaded end = s.stop if start < 0 or s.stop is None or s.stop < 0: @@ -549,6 +553,10 @@ def __getitem__(self, s): except (ValueError, TypeError): raise TypeError('QuerySet indices must be integers') + if s < 0: + warn("ModelQuerySet indexing with negative indices support will be removed in 4.0.", + DeprecationWarning) + # Using negative indexing is costly since we have to execute a count() if s < 0: num_results = self.count() @@ -1450,7 +1458,8 @@ def save(self): nulled_fields = set() if self.instance._has_counter or self.instance._can_update(): if self.instance._has_counter: - warn("'create' and 'save' actions on Counters are deprecated. A future version will disallow this. Use the 'update' mechanism instead.") + warn("'create' and 'save' actions on Counters are deprecated. It will be disallowed in 4.0. " + "Use the 'update' mechanism instead.", DeprecationWarning) return self.update() else: insert = InsertStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp, if_not_exists=self._if_not_exists) From 05fb16a664e9ddd13affd7d051c38cef27aad1b6 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 21 Dec 2017 08:45:03 -0500 Subject: [PATCH 1304/2005] Remove copyright dates from copyright notices --- CHANGELOG.rst | 1 + benchmarks/base.py | 2 +- benchmarks/callback_full_pipeline.py | 2 +- benchmarks/future_batches.py | 2 +- benchmarks/future_full_pipeline.py | 2 +- benchmarks/future_full_throttle.py | 2 +- benchmarks/sync.py | 2 +- cassandra/__init__.py | 2 +- cassandra/auth.py | 2 +- cassandra/buffer.pxd | 2 +- cassandra/bytesio.pxd | 2 +- cassandra/bytesio.pyx | 2 +- cassandra/cluster.py | 2 +- cassandra/cmurmur3.c | 2 +- cassandra/concurrent.py | 2 +- cassandra/connection.py | 2 +- cassandra/cqlengine/__init__.py | 2 +- cassandra/cqlengine/columns.py | 2 +- cassandra/cqlengine/connection.py | 2 +- cassandra/cqlengine/functions.py | 2 +- cassandra/cqlengine/management.py | 2 +- cassandra/cqlengine/models.py | 2 +- cassandra/cqlengine/named.py | 2 +- cassandra/cqlengine/operators.py | 2 +- cassandra/cqlengine/query.py | 2 +- cassandra/cqlengine/statements.py | 2 +- cassandra/cqltypes.py | 2 +- cassandra/cython_marshal.pyx | 2 +- cassandra/cython_utils.pyx | 2 +- cassandra/deserializers.pxd | 2 +- cassandra/deserializers.pyx | 2 +- cassandra/encoder.py | 2 +- cassandra/io/__init__.py | 2 +- cassandra/io/asyncorereactor.py | 2 +- cassandra/io/eventletreactor.py | 2 +- cassandra/io/geventreactor.py | 2 +- cassandra/io/libevreactor.py | 2 +- cassandra/io/twistedreactor.py | 2 +- cassandra/ioutils.pyx | 2 +- cassandra/marshal.py | 2 +- cassandra/metadata.py | 2 +- cassandra/metrics.py | 2 +- cassandra/numpy_parser.pyx | 2 +- cassandra/obj_parser.pyx | 2 +- cassandra/parsing.pxd | 2 +- cassandra/parsing.pyx | 2 +- cassandra/policies.py | 2 +- cassandra/pool.py | 2 +- cassandra/protocol.py | 2 +- cassandra/query.py | 2 +- cassandra/row_parser.pyx | 2 +- cassandra/timestamps.py | 2 +- cassandra/tuple.pxd | 2 +- cassandra/type_codes.pxd | 2 +- cassandra/util.py | 2 +- example_core.py | 2 +- example_mapper.py | 2 +- examples/request_init_listener.py | 2 +- setup.py | 2 +- tests/__init__.py | 2 +- tests/integration/__init__.py | 2 +- tests/integration/cqlengine/__init__.py | 2 +- tests/integration/cqlengine/base.py | 2 +- tests/integration/cqlengine/columns/__init__.py | 2 +- tests/integration/cqlengine/columns/test_container_columns.py | 2 +- tests/integration/cqlengine/columns/test_counter_column.py | 2 +- tests/integration/cqlengine/columns/test_static_column.py | 2 +- tests/integration/cqlengine/columns/test_validation.py | 2 +- tests/integration/cqlengine/columns/test_value_io.py | 2 +- tests/integration/cqlengine/connections/__init__.py | 2 +- tests/integration/cqlengine/connections/test_connection.py | 2 +- tests/integration/cqlengine/management/__init__.py | 2 +- .../cqlengine/management/test_compaction_settings.py | 2 +- tests/integration/cqlengine/management/test_management.py | 2 +- tests/integration/cqlengine/model/__init__.py | 2 +- tests/integration/cqlengine/model/test_class_construction.py | 2 +- tests/integration/cqlengine/model/test_equality_operations.py | 2 +- tests/integration/cqlengine/model/test_model.py | 2 +- tests/integration/cqlengine/model/test_model_io.py | 2 +- tests/integration/cqlengine/model/test_polymorphism.py | 2 +- tests/integration/cqlengine/model/test_udts.py | 2 +- tests/integration/cqlengine/model/test_updates.py | 2 +- tests/integration/cqlengine/model/test_value_lists.py | 2 +- tests/integration/cqlengine/operators/__init__.py | 2 +- tests/integration/cqlengine/operators/test_where_operators.py | 2 +- tests/integration/cqlengine/query/__init__.py | 2 +- tests/integration/cqlengine/query/test_batch_query.py | 2 +- tests/integration/cqlengine/query/test_datetime_queries.py | 2 +- tests/integration/cqlengine/query/test_named.py | 2 +- tests/integration/cqlengine/query/test_queryoperators.py | 2 +- tests/integration/cqlengine/query/test_queryset.py | 2 +- tests/integration/cqlengine/query/test_updates.py | 2 +- tests/integration/cqlengine/statements/__init__.py | 2 +- .../integration/cqlengine/statements/test_assignment_clauses.py | 2 +- tests/integration/cqlengine/statements/test_base_clause.py | 2 +- tests/integration/cqlengine/statements/test_base_statement.py | 2 +- tests/integration/cqlengine/statements/test_delete_statement.py | 2 +- tests/integration/cqlengine/statements/test_insert_statement.py | 2 +- tests/integration/cqlengine/statements/test_select_statement.py | 2 +- tests/integration/cqlengine/statements/test_update_statement.py | 2 +- tests/integration/cqlengine/statements/test_where_clause.py | 2 +- tests/integration/cqlengine/test_batch_query.py | 2 +- tests/integration/cqlengine/test_connections.py | 2 +- tests/integration/cqlengine/test_consistency.py | 2 +- tests/integration/cqlengine/test_context_query.py | 2 +- tests/integration/cqlengine/test_ifexists.py | 2 +- tests/integration/cqlengine/test_ifnotexists.py | 2 +- tests/integration/cqlengine/test_lwt_conditional.py | 2 +- tests/integration/cqlengine/test_timestamp.py | 2 +- tests/integration/cqlengine/test_ttl.py | 2 +- tests/integration/datatype_utils.py | 2 +- tests/integration/long/__init__.py | 2 +- tests/integration/long/test_consistency.py | 2 +- tests/integration/long/test_failure_types.py | 2 +- tests/integration/long/test_ipv6.py | 2 +- tests/integration/long/test_large_data.py | 2 +- tests/integration/long/test_loadbalancingpolicies.py | 2 +- tests/integration/long/test_schema.py | 2 +- tests/integration/long/test_ssl.py | 2 +- tests/integration/long/utils.py | 2 +- tests/integration/simulacron/__init__.py | 2 +- tests/integration/simulacron/test_connection.py | 2 +- tests/integration/simulacron/test_policies.py | 2 +- tests/integration/simulacron/utils.py | 2 +- tests/integration/standard/__init__.py | 2 +- tests/integration/standard/test_authentication.py | 2 +- tests/integration/standard/test_client_warnings.py | 2 +- tests/integration/standard/test_cluster.py | 2 +- tests/integration/standard/test_concurrent.py | 2 +- tests/integration/standard/test_connection.py | 2 +- tests/integration/standard/test_control_connection.py | 2 +- tests/integration/standard/test_custom_payload.py | 2 +- tests/integration/standard/test_custom_protocol_handler.py | 2 +- tests/integration/standard/test_metadata.py | 2 +- tests/integration/standard/test_metrics.py | 2 +- tests/integration/standard/test_policies.py | 2 +- tests/integration/standard/test_prepared_statements.py | 2 +- tests/integration/standard/test_query.py | 2 +- tests/integration/standard/test_query_paging.py | 2 +- tests/integration/standard/test_routing.py | 2 +- tests/integration/standard/test_row_factories.py | 2 +- tests/integration/standard/test_types.py | 2 +- tests/integration/standard/test_udts.py | 2 +- tests/integration/upgrade/__init__.py | 2 +- tests/integration/upgrade/test_upgrade.py | 2 +- tests/integration/util.py | 2 +- tests/stress_tests/test_load.py | 2 +- tests/stress_tests/test_multi_inserts.py | 2 +- tests/unit/__init__.py | 2 +- tests/unit/cqlengine/__init__.py | 2 +- tests/unit/cqlengine/test_columns.py | 2 +- tests/unit/cqlengine/test_connection.py | 2 +- tests/unit/cqlengine/test_udt.py | 2 +- tests/unit/cython/__init__.py | 2 +- tests/unit/cython/bytesio_testhelper.pyx | 2 +- tests/unit/cython/test_bytesio.py | 2 +- tests/unit/cython/test_types.py | 2 +- tests/unit/cython/test_utils.py | 2 +- tests/unit/cython/types_testhelper.pyx | 2 +- tests/unit/cython/utils.py | 2 +- tests/unit/cython/utils_testhelper.pyx | 2 +- tests/unit/io/__init__.py | 2 +- tests/unit/io/eventlet_utils.py | 2 +- tests/unit/io/gevent_utils.py | 2 +- tests/unit/io/test_asyncorereactor.py | 2 +- tests/unit/io/test_eventletreactor.py | 2 +- tests/unit/io/test_geventreactor.py | 2 +- tests/unit/io/test_libevreactor.py | 2 +- tests/unit/io/test_libevtimer.py | 2 +- tests/unit/io/test_twistedreactor.py | 2 +- tests/unit/io/utils.py | 2 +- tests/unit/test_cluster.py | 2 +- tests/unit/test_concurrent.py | 2 +- tests/unit/test_connection.py | 2 +- tests/unit/test_control_connection.py | 2 +- tests/unit/test_exception.py | 2 +- tests/unit/test_host_connection_pool.py | 2 +- tests/unit/test_marshalling.py | 2 +- tests/unit/test_metadata.py | 2 +- tests/unit/test_orderedmap.py | 2 +- tests/unit/test_parameter_binding.py | 2 +- tests/unit/test_policies.py | 2 +- tests/unit/test_query.py | 2 +- tests/unit/test_response_future.py | 2 +- tests/unit/test_resultset.py | 2 +- tests/unit/test_sortedset.py | 2 +- tests/unit/test_time_util.py | 2 +- tests/unit/test_timestamps.py | 2 +- tests/unit/test_types.py | 2 +- tests/unit/test_util_types.py | 2 +- 190 files changed, 190 insertions(+), 189 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 48ec6a052a..f2bddfced7 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -18,6 +18,7 @@ Other * Fix example_mapper.py for python3 (PYTHON-860) * Possible deadlock on cassandra.concurrent.execute_concurrent (PYTHON-768) * Add some known deprecated warnings for 4.x (PYTHON-877) +* Remove copyright dates from copyright notices (PYTHON-863) 3.12.0 ====== diff --git a/benchmarks/base.py b/benchmarks/base.py index fb97d64dc3..fac750e140 100644 --- a/benchmarks/base.py +++ b/benchmarks/base.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmarks/callback_full_pipeline.py b/benchmarks/callback_full_pipeline.py index 68b54f4d52..e3ecfe3be5 100644 --- a/benchmarks/callback_full_pipeline.py +++ b/benchmarks/callback_full_pipeline.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmarks/future_batches.py b/benchmarks/future_batches.py index d3c47e280c..8cd915ebab 100644 --- a/benchmarks/future_batches.py +++ b/benchmarks/future_batches.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmarks/future_full_pipeline.py b/benchmarks/future_full_pipeline.py index 58105f1197..9a9fcfcd50 100644 --- a/benchmarks/future_full_pipeline.py +++ b/benchmarks/future_full_pipeline.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmarks/future_full_throttle.py b/benchmarks/future_full_throttle.py index d42c98be91..b4ba951c28 100644 --- a/benchmarks/future_full_throttle.py +++ b/benchmarks/future_full_throttle.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmarks/sync.py b/benchmarks/sync.py index a9ea19ce58..f2a45fcd7d 100644 --- a/benchmarks/sync.py +++ b/benchmarks/sync.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 7a88ad9bc5..ba3816519d 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/auth.py b/cassandra/auth.py index 773c77ac94..1d94817b64 100644 --- a/cassandra/auth.py +++ b/cassandra/auth.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/buffer.pxd b/cassandra/buffer.pxd index 248ae334ea..0bbb1d5f57 100644 --- a/cassandra/buffer.pxd +++ b/cassandra/buffer.pxd @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/bytesio.pxd b/cassandra/bytesio.pxd index 6bc7510ec4..d52d3fa8fe 100644 --- a/cassandra/bytesio.pxd +++ b/cassandra/bytesio.pxd @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/bytesio.pyx b/cassandra/bytesio.pyx index eaa3d861ec..1a57911fcf 100644 --- a/cassandra/bytesio.pyx +++ b/cassandra/bytesio.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 863a40c452..75d437d633 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cmurmur3.c b/cassandra/cmurmur3.c index b28ddb54cb..bce513217d 100644 --- a/cassandra/cmurmur3.c +++ b/cassandra/cmurmur3.c @@ -6,7 +6,7 @@ * * Copyright (c) 2011 Austin Appleby (Murmur3 routine) * Copyright (c) 2011 Patrick Hensley (Python wrapper, packaging) - * Copyright 2013-2017 DataStax (Minor modifications to match Cassandra's MM3 hashes) + * Copyright DataStax (Minor modifications to match Cassandra's MM3 hashes) * */ diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index afed0f83b2..cb9fd1bf48 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/connection.py b/cassandra/connection.py index af9b0d45a3..cc4d18a869 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/__init__.py b/cassandra/cqlengine/__init__.py index 2783513843..e2a952d682 100644 --- a/cassandra/cqlengine/__init__.py +++ b/cassandra/cqlengine/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 511084ce95..cfceea32ce 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 262f921eb0..88371e9b7e 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/functions.py b/cassandra/cqlengine/functions.py index 761d5437a3..5cb0f673d1 100644 --- a/cassandra/cqlengine/functions.py +++ b/cassandra/cqlengine/functions.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index a4d7e59084..42ded8a6c2 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 46ec118770..9fe5d3e642 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/named.py b/cassandra/cqlengine/named.py index 9acee17f9f..265d5c91e4 100644 --- a/cassandra/cqlengine/named.py +++ b/cassandra/cqlengine/named.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/operators.py b/cassandra/cqlengine/operators.py index a6702bdec9..8585b9c619 100644 --- a/cassandra/cqlengine/operators.py +++ b/cassandra/cqlengine/operators.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index d3bcd09a3e..aeacea9a8e 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index b05a59059a..a138e755ef 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 3819e288d0..22dec9ada1 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cython_marshal.pyx b/cassandra/cython_marshal.pyx index 1c94921ccd..e4f30e6a85 100644 --- a/cassandra/cython_marshal.pyx +++ b/cassandra/cython_marshal.pyx @@ -1,6 +1,6 @@ # -- cython: profile=True # -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cython_utils.pyx b/cassandra/cython_utils.pyx index 551e6a6a7b..7539f33f31 100644 --- a/cassandra/cython_utils.pyx +++ b/cassandra/cython_utils.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/deserializers.pxd b/cassandra/deserializers.pxd index 2fddf7cd4c..7b307226ad 100644 --- a/cassandra/deserializers.pxd +++ b/cassandra/deserializers.pxd @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/deserializers.pyx b/cassandra/deserializers.pyx index a0d6c7a755..7de6949099 100644 --- a/cassandra/deserializers.pyx +++ b/cassandra/deserializers.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/encoder.py b/cassandra/encoder.py index 881b21f532..1f02348a8d 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/io/__init__.py b/cassandra/io/__init__.py index 077defb32e..386372eb4a 100644 --- a/cassandra/io/__init__.py +++ b/cassandra/io/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index c5ede7cc39..f7b7cac421 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index d85bef5d6e..bc01f755c9 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -1,5 +1,5 @@ # Copyright 2014 Symantec Corporation -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index 53518f5531..bbf9e83eb0 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index dc1a6e6a91..90bd76158a 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/io/twistedreactor.py b/cassandra/io/twistedreactor.py index ec872fdaff..337d7a4265 100644 --- a/cassandra/io/twistedreactor.py +++ b/cassandra/io/twistedreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/ioutils.pyx b/cassandra/ioutils.pyx index 5f56503140..b0ab4f16cb 100644 --- a/cassandra/ioutils.pyx +++ b/cassandra/ioutils.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/marshal.py b/cassandra/marshal.py index 6d04ea25c9..3b80f34043 100644 --- a/cassandra/marshal.py +++ b/cassandra/marshal.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 1a935db84e..224dfe5e27 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/metrics.py b/cassandra/metrics.py index deb6f07031..64c1d54fb5 100644 --- a/cassandra/metrics.py +++ b/cassandra/metrics.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/numpy_parser.pyx b/cassandra/numpy_parser.pyx index fbb040102d..bb5b9a1c8c 100644 --- a/cassandra/numpy_parser.pyx +++ b/cassandra/numpy_parser.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/obj_parser.pyx b/cassandra/obj_parser.pyx index d709a36dbf..a0b5316a33 100644 --- a/cassandra/obj_parser.pyx +++ b/cassandra/obj_parser.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/parsing.pxd b/cassandra/parsing.pxd index 49fa8b6d0e..aa9478cd14 100644 --- a/cassandra/parsing.pxd +++ b/cassandra/parsing.pxd @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/parsing.pyx b/cassandra/parsing.pyx index 4fb033a33e..d2bc0a3abe 100644 --- a/cassandra/parsing.pyx +++ b/cassandra/parsing.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/policies.py b/cassandra/policies.py index cc471be51c..13c112d0d1 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/pool.py b/cassandra/pool.py index 5043c9d114..1d6bcf4e6e 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/protocol.py b/cassandra/protocol.py index a4bda892f0..e9b6dab5c1 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/query.py b/cassandra/query.py index efc30ddbe1..56b470db74 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/row_parser.pyx b/cassandra/row_parser.pyx index a16717eb2b..49fafd32e5 100644 --- a/cassandra/row_parser.pyx +++ b/cassandra/row_parser.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/timestamps.py b/cassandra/timestamps.py index 44177eaa41..d11359cf13 100644 --- a/cassandra/timestamps.py +++ b/cassandra/timestamps.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/tuple.pxd b/cassandra/tuple.pxd index 514bb52984..08d95b6c1f 100644 --- a/cassandra/tuple.pxd +++ b/cassandra/tuple.pxd @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/type_codes.pxd b/cassandra/type_codes.pxd index 7078b61c6d..076cacd3de 100644 --- a/cassandra/type_codes.pxd +++ b/cassandra/type_codes.pxd @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/util.py b/cassandra/util.py index 829658d6ad..694b1bc032 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/example_core.py b/example_core.py index bed7c259f2..01c766e109 100644 --- a/example_core.py +++ b/example_core.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/example_mapper.py b/example_mapper.py index 82ab34d406..35100471c7 100755 --- a/example_mapper.py +++ b/example_mapper.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/request_init_listener.py b/examples/request_init_listener.py index 80616eaa44..b90bce6b64 100644 --- a/examples/request_init_listener.py +++ b/examples/request_init_listener.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/setup.py b/setup.py index b1d93c77ef..a49bb1e21b 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/__init__.py b/tests/__init__.py index f5bcf73ed2..6f29cf3f2f 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 04038cfeb6..8940085bef 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/__init__.py b/tests/integration/cqlengine/__init__.py index 2cb2e28d3d..d098ea7014 100644 --- a/tests/integration/cqlengine/__init__.py +++ b/tests/integration/cqlengine/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/base.py b/tests/integration/cqlengine/base.py index 7be5894774..8a6903350f 100644 --- a/tests/integration/cqlengine/base.py +++ b/tests/integration/cqlengine/base.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/columns/__init__.py b/tests/integration/cqlengine/columns/__init__.py index 077defb32e..386372eb4a 100644 --- a/tests/integration/cqlengine/columns/__init__.py +++ b/tests/integration/cqlengine/columns/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/columns/test_container_columns.py b/tests/integration/cqlengine/columns/test_container_columns.py index 7ab5ded19c..92dab467b1 100644 --- a/tests/integration/cqlengine/columns/test_container_columns.py +++ b/tests/integration/cqlengine/columns/test_container_columns.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/columns/test_counter_column.py b/tests/integration/cqlengine/columns/test_counter_column.py index a751ab8425..95792dd452 100644 --- a/tests/integration/cqlengine/columns/test_counter_column.py +++ b/tests/integration/cqlengine/columns/test_counter_column.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/columns/test_static_column.py b/tests/integration/cqlengine/columns/test_static_column.py index 08e8ce7f9f..69e222d2b9 100644 --- a/tests/integration/cqlengine/columns/test_static_column.py +++ b/tests/integration/cqlengine/columns/test_static_column.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 43eb0fe678..b8c432be3f 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/columns/test_value_io.py b/tests/integration/cqlengine/columns/test_value_io.py index 95d3cde886..243c2b0fdb 100644 --- a/tests/integration/cqlengine/columns/test_value_io.py +++ b/tests/integration/cqlengine/columns/test_value_io.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/connections/__init__.py b/tests/integration/cqlengine/connections/__init__.py index bcdbd2269e..2c9ca172f8 100644 --- a/tests/integration/cqlengine/connections/__init__.py +++ b/tests/integration/cqlengine/connections/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/connections/test_connection.py b/tests/integration/cqlengine/connections/test_connection.py index e3158c6143..c10a56e66b 100644 --- a/tests/integration/cqlengine/connections/test_connection.py +++ b/tests/integration/cqlengine/connections/test_connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/management/__init__.py b/tests/integration/cqlengine/management/__init__.py index 077defb32e..386372eb4a 100644 --- a/tests/integration/cqlengine/management/__init__.py +++ b/tests/integration/cqlengine/management/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/management/test_compaction_settings.py b/tests/integration/cqlengine/management/test_compaction_settings.py index c0c1359a16..d5dea12744 100644 --- a/tests/integration/cqlengine/management/test_compaction_settings.py +++ b/tests/integration/cqlengine/management/test_compaction_settings.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index a5fd39b9a3..b4c7a6140f 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/__init__.py b/tests/integration/cqlengine/model/__init__.py index 077defb32e..386372eb4a 100644 --- a/tests/integration/cqlengine/model/__init__.py +++ b/tests/integration/cqlengine/model/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/test_class_construction.py b/tests/integration/cqlengine/model/test_class_construction.py index 5124b1c091..9c5afecbfc 100644 --- a/tests/integration/cqlengine/model/test_class_construction.py +++ b/tests/integration/cqlengine/model/test_class_construction.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/test_equality_operations.py b/tests/integration/cqlengine/model/test_equality_operations.py index ba7b34d396..3b40ed4bf3 100644 --- a/tests/integration/cqlengine/model/test_equality_operations.py +++ b/tests/integration/cqlengine/model/test_equality_operations.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/test_model.py b/tests/integration/cqlengine/model/test_model.py index 8d346f80c3..26d4f1a56f 100644 --- a/tests/integration/cqlengine/model/test_model.py +++ b/tests/integration/cqlengine/model/test_model.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index e2776e297d..32ace5363f 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/test_polymorphism.py b/tests/integration/cqlengine/model/test_polymorphism.py index 3400847a52..e78fef498e 100644 --- a/tests/integration/cqlengine/model/test_polymorphism.py +++ b/tests/integration/cqlengine/model/test_polymorphism.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index fe0096a2d8..82973436ac 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/test_updates.py b/tests/integration/cqlengine/model/test_updates.py index 15fe7bf3a2..17eed8ddd9 100644 --- a/tests/integration/cqlengine/model/test_updates.py +++ b/tests/integration/cqlengine/model/test_updates.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/model/test_value_lists.py b/tests/integration/cqlengine/model/test_value_lists.py index 9027266bec..0c913158cf 100644 --- a/tests/integration/cqlengine/model/test_value_lists.py +++ b/tests/integration/cqlengine/model/test_value_lists.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/operators/__init__.py b/tests/integration/cqlengine/operators/__init__.py index bcdbd2269e..2c9ca172f8 100644 --- a/tests/integration/cqlengine/operators/__init__.py +++ b/tests/integration/cqlengine/operators/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/operators/test_where_operators.py b/tests/integration/cqlengine/operators/test_where_operators.py index 62025b302e..da4e9c5b30 100644 --- a/tests/integration/cqlengine/operators/test_where_operators.py +++ b/tests/integration/cqlengine/operators/test_where_operators.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/query/__init__.py b/tests/integration/cqlengine/query/__init__.py index 077defb32e..386372eb4a 100644 --- a/tests/integration/cqlengine/query/__init__.py +++ b/tests/integration/cqlengine/query/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/query/test_batch_query.py b/tests/integration/cqlengine/query/test_batch_query.py index 91727c3836..6323ec7d32 100644 --- a/tests/integration/cqlengine/query/test_batch_query.py +++ b/tests/integration/cqlengine/query/test_batch_query.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/query/test_datetime_queries.py b/tests/integration/cqlengine/query/test_datetime_queries.py index 2c08f6fe99..ba1c90bb9e 100644 --- a/tests/integration/cqlengine/query/test_datetime_queries.py +++ b/tests/integration/cqlengine/query/test_datetime_queries.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/query/test_named.py b/tests/integration/cqlengine/query/test_named.py index 3fbe847ad1..4907c26661 100644 --- a/tests/integration/cqlengine/query/test_named.py +++ b/tests/integration/cqlengine/query/test_named.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/query/test_queryoperators.py b/tests/integration/cqlengine/query/test_queryoperators.py index 52a824fa1d..7f7c0ac1c5 100644 --- a/tests/integration/cqlengine/query/test_queryoperators.py +++ b/tests/integration/cqlengine/query/test_queryoperators.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 004f21a037..83586d1c30 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/query/test_updates.py b/tests/integration/cqlengine/query/test_updates.py index 7c4917be7d..fb6082bfe2 100644 --- a/tests/integration/cqlengine/query/test_updates.py +++ b/tests/integration/cqlengine/query/test_updates.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/__init__.py b/tests/integration/cqlengine/statements/__init__.py index bcdbd2269e..2c9ca172f8 100644 --- a/tests/integration/cqlengine/statements/__init__.py +++ b/tests/integration/cqlengine/statements/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/test_assignment_clauses.py b/tests/integration/cqlengine/statements/test_assignment_clauses.py index c8a242efa1..594224d72d 100644 --- a/tests/integration/cqlengine/statements/test_assignment_clauses.py +++ b/tests/integration/cqlengine/statements/test_assignment_clauses.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/test_base_clause.py b/tests/integration/cqlengine/statements/test_base_clause.py index ef85de2622..351983806b 100644 --- a/tests/integration/cqlengine/statements/test_base_clause.py +++ b/tests/integration/cqlengine/statements/test_base_clause.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/test_base_statement.py b/tests/integration/cqlengine/statements/test_base_statement.py index 313ad20146..db7d1ebd6a 100644 --- a/tests/integration/cqlengine/statements/test_base_statement.py +++ b/tests/integration/cqlengine/statements/test_base_statement.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/test_delete_statement.py b/tests/integration/cqlengine/statements/test_delete_statement.py index dc051b76ae..5e2894a06b 100644 --- a/tests/integration/cqlengine/statements/test_delete_statement.py +++ b/tests/integration/cqlengine/statements/test_delete_statement.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/test_insert_statement.py b/tests/integration/cqlengine/statements/test_insert_statement.py index b788052b88..3bf90ec313 100644 --- a/tests/integration/cqlengine/statements/test_insert_statement.py +++ b/tests/integration/cqlengine/statements/test_insert_statement.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/test_select_statement.py b/tests/integration/cqlengine/statements/test_select_statement.py index 791b755049..90c14bcfb6 100644 --- a/tests/integration/cqlengine/statements/test_select_statement.py +++ b/tests/integration/cqlengine/statements/test_select_statement.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/test_update_statement.py b/tests/integration/cqlengine/statements/test_update_statement.py index 7be341727c..c6ed228d91 100644 --- a/tests/integration/cqlengine/statements/test_update_statement.py +++ b/tests/integration/cqlengine/statements/test_update_statement.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/statements/test_where_clause.py b/tests/integration/cqlengine/statements/test_where_clause.py index c6d05b916e..3173320f7c 100644 --- a/tests/integration/cqlengine/statements/test_where_clause.py +++ b/tests/integration/cqlengine/statements/test_where_clause.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/test_batch_query.py b/tests/integration/cqlengine/test_batch_query.py index 424d0b27a7..7b78fa9979 100644 --- a/tests/integration/cqlengine/test_batch_query.py +++ b/tests/integration/cqlengine/test_batch_query.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/test_connections.py b/tests/integration/cqlengine/test_connections.py index 4be46ce4f6..ce4f4cc631 100644 --- a/tests/integration/cqlengine/test_connections.py +++ b/tests/integration/cqlengine/test_connections.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/test_consistency.py b/tests/integration/cqlengine/test_consistency.py index cb04bd75ec..dc0aa32c64 100644 --- a/tests/integration/cqlengine/test_consistency.py +++ b/tests/integration/cqlengine/test_consistency.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/test_context_query.py b/tests/integration/cqlengine/test_context_query.py index 9a4fa05203..6f2a161352 100644 --- a/tests/integration/cqlengine/test_context_query.py +++ b/tests/integration/cqlengine/test_context_query.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/test_ifexists.py b/tests/integration/cqlengine/test_ifexists.py index 4ff0aff49e..2797edd846 100644 --- a/tests/integration/cqlengine/test_ifexists.py +++ b/tests/integration/cqlengine/test_ifexists.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/test_ifnotexists.py b/tests/integration/cqlengine/test_ifnotexists.py index 0bd7fb9a21..206101f1b2 100644 --- a/tests/integration/cqlengine/test_ifnotexists.py +++ b/tests/integration/cqlengine/test_ifnotexists.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/test_lwt_conditional.py b/tests/integration/cqlengine/test_lwt_conditional.py index 0b7d0e578b..efd3b33603 100644 --- a/tests/integration/cqlengine/test_lwt_conditional.py +++ b/tests/integration/cqlengine/test_lwt_conditional.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/test_timestamp.py b/tests/integration/cqlengine/test_timestamp.py index 90a6c2ea35..88ed5e91df 100644 --- a/tests/integration/cqlengine/test_timestamp.py +++ b/tests/integration/cqlengine/test_timestamp.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/cqlengine/test_ttl.py b/tests/integration/cqlengine/test_ttl.py index ed7bb3d5cd..fd6a01440b 100644 --- a/tests/integration/cqlengine/test_ttl.py +++ b/tests/integration/cqlengine/test_ttl.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/datatype_utils.py b/tests/integration/datatype_utils.py index f2314467e8..8a1c813baa 100644 --- a/tests/integration/datatype_utils.py +++ b/tests/integration/datatype_utils.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/__init__.py b/tests/integration/long/__init__.py index bbab66014b..447f4885cc 100644 --- a/tests/integration/long/__init__.py +++ b/tests/integration/long/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/test_consistency.py b/tests/integration/long/test_consistency.py index 2091859dbf..5b73d9a390 100644 --- a/tests/integration/long/test_consistency.py +++ b/tests/integration/long/test_consistency.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py index 6ce80d3338..44094de03a 100644 --- a/tests/integration/long/test_failure_types.py +++ b/tests/integration/long/test_failure_types.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/test_ipv6.py b/tests/integration/long/test_ipv6.py index 2943dbdef4..5b27b94d68 100644 --- a/tests/integration/long/test_ipv6.py +++ b/tests/integration/long/test_ipv6.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/test_large_data.py b/tests/integration/long/test_large_data.py index bbd0cf23bd..c29950fe81 100644 --- a/tests/integration/long/test_large_data.py +++ b/tests/integration/long/test_large_data.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index c92bf6402a..30a298e166 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/test_schema.py b/tests/integration/long/test_schema.py index 086130776a..5163066e43 100644 --- a/tests/integration/long/test_schema.py +++ b/tests/integration/long/test_schema.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index a5d16c4544..972cf1487e 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/utils.py b/tests/integration/long/utils.py index 8c9096980a..e09cc349d2 100644 --- a/tests/integration/long/utils.py +++ b/tests/integration/long/utils.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/simulacron/__init__.py b/tests/integration/simulacron/__init__.py index 842fa50348..79868c38bf 100644 --- a/tests/integration/simulacron/__init__.py +++ b/tests/integration/simulacron/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2016 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index edf3709efa..1bded2e749 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/simulacron/test_policies.py b/tests/integration/simulacron/test_policies.py index 85ab2278e6..fc244ba8ed 100644 --- a/tests/integration/simulacron/test_policies.py +++ b/tests/integration/simulacron/test_policies.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/simulacron/utils.py b/tests/integration/simulacron/utils.py index 7b7768bf5f..f3965009bb 100644 --- a/tests/integration/simulacron/utils.py +++ b/tests/integration/simulacron/utils.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/__init__.py b/tests/integration/standard/__init__.py index b72d6c4e63..e54b6fd6bd 100644 --- a/tests/integration/standard/__init__.py +++ b/tests/integration/standard/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index 1eb6fbf6c1..bb6fedfa1c 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_client_warnings.py b/tests/integration/standard/test_client_warnings.py index 6c9f7c52a0..1092af7776 100644 --- a/tests/integration/standard/test_client_warnings.py +++ b/tests/integration/standard/test_client_warnings.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 0e726308cd..d9ec659460 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_concurrent.py b/tests/integration/standard/test_concurrent.py index b11e031f87..5c81daa024 100644 --- a/tests/integration/standard/test_concurrent.py +++ b/tests/integration/standard/test_concurrent.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index c6e6363069..81238a2e5e 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_control_connection.py b/tests/integration/standard/test_control_connection.py index 89042cd060..b928cd2b68 100644 --- a/tests/integration/standard/test_control_connection.py +++ b/tests/integration/standard/test_control_connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_custom_payload.py b/tests/integration/standard/test_custom_payload.py index 22bc45759b..c68e9ef843 100644 --- a/tests/integration/standard/test_custom_payload.py +++ b/tests/integration/standard/test_custom_payload.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index c1a8f3d479..e76972b9b8 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 4c1cf3549e..6cd9b019bf 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 90af9ee4a9..d5052ace57 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index b8f17c6872..0d8b9b475c 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index 715fbcd83a..76073d7cba 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index e2b422603b..6cc70055f1 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_query_paging.py b/tests/integration/standard/test_query_paging.py index 88694047a7..dfe9f70df8 100644 --- a/tests/integration/standard/test_query_paging.py +++ b/tests/integration/standard/test_query_paging.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_routing.py b/tests/integration/standard/test_routing.py index c27a53684c..bf4c7878b6 100644 --- a/tests/integration/standard/test_routing.py +++ b/tests/integration/standard/test_routing.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_row_factories.py b/tests/integration/standard/test_row_factories.py index 527ddeea32..dea7fb4933 100644 --- a/tests/integration/standard/test_row_factories.py +++ b/tests/integration/standard/test_row_factories.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index cda3c9379f..b49ee06ca1 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index 91cc12441d..514c562390 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/upgrade/__init__.py b/tests/integration/upgrade/__init__.py index 71e404c17c..d2b9076bc2 100644 --- a/tests/integration/upgrade/__init__.py +++ b/tests/integration/upgrade/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/upgrade/test_upgrade.py b/tests/integration/upgrade/test_upgrade.py index b3270cd6e6..1e4fbb2e92 100644 --- a/tests/integration/upgrade/test_upgrade.py +++ b/tests/integration/upgrade/test_upgrade.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/util.py b/tests/integration/util.py index 26f2ca7739..0a95d81dfb 100644 --- a/tests/integration/util.py +++ b/tests/integration/util.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/stress_tests/test_load.py b/tests/stress_tests/test_load.py index 14c8778b4c..a9771147ce 100644 --- a/tests/stress_tests/test_load.py +++ b/tests/stress_tests/test_load.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/stress_tests/test_multi_inserts.py b/tests/stress_tests/test_multi_inserts.py index 8aa1d68143..65bbe2a4e4 100644 --- a/tests/stress_tests/test_multi_inserts.py +++ b/tests/stress_tests/test_multi_inserts.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index 077defb32e..386372eb4a 100644 --- a/tests/unit/__init__.py +++ b/tests/unit/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cqlengine/__init__.py b/tests/unit/cqlengine/__init__.py index 077defb32e..386372eb4a 100644 --- a/tests/unit/cqlengine/__init__.py +++ b/tests/unit/cqlengine/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cqlengine/test_columns.py b/tests/unit/cqlengine/test_columns.py index 47c55b9715..bcb174a8c0 100644 --- a/tests/unit/cqlengine/test_columns.py +++ b/tests/unit/cqlengine/test_columns.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cqlengine/test_connection.py b/tests/unit/cqlengine/test_connection.py index b6646ec756..3efff57369 100644 --- a/tests/unit/cqlengine/test_connection.py +++ b/tests/unit/cqlengine/test_connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cqlengine/test_udt.py b/tests/unit/cqlengine/test_udt.py index bedeef8e3d..ebe1139fd0 100644 --- a/tests/unit/cqlengine/test_udt.py +++ b/tests/unit/cqlengine/test_udt.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cython/__init__.py b/tests/unit/cython/__init__.py index 077defb32e..386372eb4a 100644 --- a/tests/unit/cython/__init__.py +++ b/tests/unit/cython/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cython/bytesio_testhelper.pyx b/tests/unit/cython/bytesio_testhelper.pyx index b78b329170..7ba91bc4c0 100644 --- a/tests/unit/cython/bytesio_testhelper.pyx +++ b/tests/unit/cython/bytesio_testhelper.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cython/test_bytesio.py b/tests/unit/cython/test_bytesio.py index 488f18bd97..a156fc1272 100644 --- a/tests/unit/cython/test_bytesio.py +++ b/tests/unit/cython/test_bytesio.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cython/test_types.py b/tests/unit/cython/test_types.py index 04014934dd..a0d2138c6d 100644 --- a/tests/unit/cython/test_types.py +++ b/tests/unit/cython/test_types.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cython/test_utils.py b/tests/unit/cython/test_utils.py index 18d4b65312..dc8745e471 100644 --- a/tests/unit/cython/test_utils.py +++ b/tests/unit/cython/test_utils.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cython/types_testhelper.pyx b/tests/unit/cython/types_testhelper.pyx index 538ce43c23..55fd310837 100644 --- a/tests/unit/cython/types_testhelper.pyx +++ b/tests/unit/cython/types_testhelper.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cython/utils.py b/tests/unit/cython/utils.py index 6531894d5b..ee5cb1a9c5 100644 --- a/tests/unit/cython/utils.py +++ b/tests/unit/cython/utils.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/cython/utils_testhelper.pyx b/tests/unit/cython/utils_testhelper.pyx index 489fbd1b7f..fe67691aa8 100644 --- a/tests/unit/cython/utils_testhelper.pyx +++ b/tests/unit/cython/utils_testhelper.pyx @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/__init__.py b/tests/unit/io/__init__.py index 077defb32e..386372eb4a 100644 --- a/tests/unit/io/__init__.py +++ b/tests/unit/io/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/eventlet_utils.py b/tests/unit/io/eventlet_utils.py index c06f4cdc0e..785856be20 100644 --- a/tests/unit/io/eventlet_utils.py +++ b/tests/unit/io/eventlet_utils.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/gevent_utils.py b/tests/unit/io/gevent_utils.py index 4d368ea6e9..a341fd9385 100644 --- a/tests/unit/io/gevent_utils.py +++ b/tests/unit/io/gevent_utils.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index 3309c6a8f2..19562bc23a 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/test_eventletreactor.py b/tests/unit/io/test_eventletreactor.py index 678e8e4715..746b365956 100644 --- a/tests/unit/io/test_eventletreactor.py +++ b/tests/unit/io/test_eventletreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/test_geventreactor.py b/tests/unit/io/test_geventreactor.py index e865fe9a43..d62bf20036 100644 --- a/tests/unit/io/test_geventreactor.py +++ b/tests/unit/io/test_geventreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index f08af0f650..cfb06e4afd 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/test_libevtimer.py b/tests/unit/io/test_libevtimer.py index e0799605dc..082fa69c63 100644 --- a/tests/unit/io/test_libevtimer.py +++ b/tests/unit/io/test_libevtimer.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/test_twistedreactor.py b/tests/unit/io/test_twistedreactor.py index 76a766fe3d..c6f603a30e 100644 --- a/tests/unit/io/test_twistedreactor.py +++ b/tests/unit/io/test_twistedreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index 0c8ab4b24d..9f02fa007a 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 7f7fbbc79b..e52658b704 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_concurrent.py b/tests/unit/test_concurrent.py index f8c22cb831..cc6c12cdaa 100644 --- a/tests/unit/test_concurrent.py +++ b/tests/unit/test_concurrent.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index 9d312b85db..1d1bb39b0e 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index e9efc2d021..6cabd923c2 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_exception.py b/tests/unit/test_exception.py index e01e782f83..3a082f7363 100644 --- a/tests/unit/test_exception.py +++ b/tests/unit/test_exception.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_host_connection_pool.py b/tests/unit/test_host_connection_pool.py index e4ea0a98d8..733ccb55a1 100644 --- a/tests/unit/test_host_connection_pool.py +++ b/tests/unit/test_host_connection_pool.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index c7b1c9ca4f..c2363e0adc 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 1f425d94d6..3cc810e89a 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_orderedmap.py b/tests/unit/test_orderedmap.py index 71470ec90a..f2baab40f0 100644 --- a/tests/unit/test_orderedmap.py +++ b/tests/unit/test_orderedmap.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_parameter_binding.py b/tests/unit/test_parameter_binding.py index e3b667dab7..9c91679bc6 100644 --- a/tests/unit/test_parameter_binding.py +++ b/tests/unit/test_parameter_binding.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 5237a4b793..f1f4e15b74 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_query.py b/tests/unit/test_query.py index e345a61527..7c2bfc0d14 100644 --- a/tests/unit/test_query.py +++ b/tests/unit/test_query.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index fa1ba96275..b785507801 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_resultset.py b/tests/unit/test_resultset.py index 8812c76cf5..f30b9f6f9c 100644 --- a/tests/unit/test_resultset.py +++ b/tests/unit/test_resultset.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_sortedset.py b/tests/unit/test_sortedset.py index 91047d0013..72e557c642 100644 --- a/tests/unit/test_sortedset.py +++ b/tests/unit/test_sortedset.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_time_util.py b/tests/unit/test_time_util.py index a683076fee..7025f151d6 100644 --- a/tests/unit/test_time_util.py +++ b/tests/unit/test_time_util.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_timestamps.py b/tests/unit/test_timestamps.py index cfbd97e6a4..bbca3527f6 100644 --- a/tests/unit/test_timestamps.py +++ b/tests/unit/test_timestamps.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index 21e1a4165b..c8f3011d07 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_util_types.py b/tests/unit/test_util_types.py index de75829d81..05cc8dbdae 100644 --- a/tests/unit/test_util_types.py +++ b/tests/unit/test_util_types.py @@ -1,4 +1,4 @@ -# Copyright 2013-2017 DataStax, Inc. +# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. From e9b9082e1d9ff4e1e2eb2dbf0704e72f23679f73 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 20 Dec 2017 14:27:00 -0500 Subject: [PATCH 1305/2005] Remove "Experimental" tag from execution profiles documentation --- CHANGELOG.rst | 1 + docs/execution_profiles.rst | 12 ++++++------ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f2bddfced7..05a0c1ca09 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -19,6 +19,7 @@ Other * Possible deadlock on cassandra.concurrent.execute_concurrent (PYTHON-768) * Add some known deprecated warnings for 4.x (PYTHON-877) * Remove copyright dates from copyright notices (PYTHON-863) +* Remove "Experimental" tag from execution profiles documentation (PYTHON-840) 3.12.0 ====== diff --git a/docs/execution_profiles.rst b/docs/execution_profiles.rst index 9ccaec5746..0243bb19ab 100644 --- a/docs/execution_profiles.rst +++ b/docs/execution_profiles.rst @@ -1,13 +1,13 @@ -Execution Profiles (experimental) -================================= +Execution Profiles +================== -Execution profiles are an experimental API aimed at making it easier to execute requests in different ways within +Execution profiles aim at making it easier to execute requests in different ways within a single connected ``Session``. Execution profiles are being introduced to deal with the exploding number of configuration options, especially as the database platform evolves more complex workloads. -The Execution Profile API is being introduced now, in an experimental capacity, in order to take advantage of it in -existing projects, and to gauge interest and feedback in the community. For now, the legacy configuration remains -intact, but legacy and Execution Profile APIs cannot be used simultaneously on the same client ``Cluster``. +The legacy configuration remains intact, but legacy and Execution Profile APIs +cannot be used simultaneously on the same client ``Cluster``. Legacy configuration +will be removed in the next major release (4.0). This document explains how Execution Profiles relate to existing settings, and shows how to use the new profiles for request execution. From fcd5659ad53aa3cecf2a37c1a592ac5adbc6e791 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 21 Dec 2017 09:04:58 -0500 Subject: [PATCH 1306/2005] request_timer metrics descriptions are slightly incorrect --- CHANGELOG.rst | 1 + cassandra/metrics.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 05a0c1ca09..339cdd9051 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -20,6 +20,7 @@ Other * Add some known deprecated warnings for 4.x (PYTHON-877) * Remove copyright dates from copyright notices (PYTHON-863) * Remove "Experimental" tag from execution profiles documentation (PYTHON-840) +* request_timer metrics descriptions are slightly incorrect (PYTHON-885) 3.12.0 ====== diff --git a/cassandra/metrics.py b/cassandra/metrics.py index 64c1d54fb5..1431f50bce 100644 --- a/cassandra/metrics.py +++ b/cassandra/metrics.py @@ -39,10 +39,10 @@ class Metrics(object): * min - min latency * max - max latency * mean - mean latency - * stdev - standard deviation for latencies + * stddev - standard deviation for latencies * median - median latency * 75percentile - 75th percentile latencies - * 97percentile - 97th percentile latencies + * 95percentile - 95th percentile latencies * 98percentile - 98th percentile latencies * 99percentile - 99th percentile latencies * 999percentile - 99.9th percentile latencies From 27b8b9b372cf792b3c211944f9f9730957d2f046 Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Tue, 9 Jan 2018 13:43:43 -0500 Subject: [PATCH 1307/2005] Renamed warn to warning in tests (PYTHON-846) --- benchmarks/base.py | 2 +- tests/integration/__init__.py | 20 +++++++++---------- .../columns/test_container_columns.py | 4 ++-- tests/integration/long/test_consistency.py | 2 +- tests/integration/long/test_failure_types.py | 4 ++-- tests/integration/long/test_large_data.py | 8 ++++---- .../long/test_loadbalancingpolicies.py | 6 +++--- tests/integration/long/test_ssl.py | 4 ++-- tests/integration/standard/test_concurrent.py | 4 ++-- 9 files changed, 27 insertions(+), 27 deletions(-) diff --git a/benchmarks/base.py b/benchmarks/base.py index fac750e140..5c23cfe623 100644 --- a/benchmarks/base.py +++ b/benchmarks/base.py @@ -248,7 +248,7 @@ def parse_options(): try: log.setLevel(_log_levels[level]) except KeyError: - log.warn("Unknown log level specified: %s; specify one of %s", options.log_level, _log_levels.keys()) + log.warning("Unknown log level specified: %s; specify one of %s", options.log_level, _log_levels.keys()) if options.asyncore_only: options.supported_reactors = [AsyncoreConnection] diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 8940085bef..0206c91501 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -238,7 +238,7 @@ def get_unsupported_upper_protocol(): def wait_for_node_socket(node, timeout): binary_itf = node.network_interfaces['binary'] if not common.check_socket_listening(binary_itf, timeout=timeout): - log.warn("Unable to connect to binary socket for node " + node.name) + log.warning("Unable to connect to binary socket for node " + node.name) else: log.debug("Node %s is up and listening " % (node.name,)) @@ -293,7 +293,7 @@ def remove_cluster(): return except OSError: ex_type, ex, tb = sys.exc_info() - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb tries += 1 time.sleep(1) @@ -343,7 +343,7 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[], se CCM_CLUSTER.set_configuration_options(configuration_options) except Exception: ex_type, ex, tb = sys.exc_info() - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb log.debug("Creating new CCM cluster, {0}, with args {1}".format(cluster_name, ccm_options)) @@ -413,12 +413,12 @@ def execute_until_pass(session, query): try: return session.execute(query) except (ConfigurationException, AlreadyExists, InvalidRequest): - log.warn("Received already exists from query {0} not exiting".format(query)) + log.warning("Received already exists from query {0} not exiting".format(query)) # keyspace/table was already created/dropped return except (OperationTimedOut, ReadTimeout, ReadFailure, WriteTimeout, WriteFailure): ex_type, ex, tb = sys.exc_info() - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb tries += 1 @@ -431,12 +431,12 @@ def execute_with_long_wait_retry(session, query, timeout=30): try: return session.execute(query, timeout=timeout) except (ConfigurationException, AlreadyExists): - log.warn("Received already exists from query {0} not exiting".format(query)) + log.warning("Received already exists from query {0} not exiting".format(query)) # keyspace/table was already created/dropped return except (OperationTimedOut, ReadTimeout, ReadFailure, WriteTimeout, WriteFailure): ex_type, ex, tb = sys.exc_info() - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb tries += 1 @@ -463,12 +463,12 @@ def drop_keyspace_shutdown_cluster(keyspace_name, session, cluster): try: execute_with_long_wait_retry(session, "DROP KEYSPACE {0}".format(keyspace_name)) except: - log.warn("Error encountered when droping keyspace {0}".format(keyspace_name)) + log.warning("Error encountered when droping keyspace {0}".format(keyspace_name)) ex_type, ex, tb = sys.exc_info() - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb finally: - log.warn("Shutting down cluster") + log.warning("Shutting down cluster") cluster.shutdown() diff --git a/tests/integration/cqlengine/columns/test_container_columns.py b/tests/integration/cqlengine/columns/test_container_columns.py index 92dab467b1..ad653e1db3 100644 --- a/tests/integration/cqlengine/columns/test_container_columns.py +++ b/tests/integration/cqlengine/columns/test_container_columns.py @@ -252,7 +252,7 @@ def test_element_count_validation(self): break except WriteTimeout: ex_type, ex, tb = sys.exc_info() - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb self.assertRaises(ValidationError, TestListModel.create, **{'text_list': [str(uuid4()) for _ in range(65536)]}) @@ -422,7 +422,7 @@ def test_element_count_validation(self): break except WriteTimeout: ex_type, ex, tb = sys.exc_info() - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb self.assertRaises(ValidationError, TestMapModel.create, **{'text_map': dict((str(uuid4()), i) for i in range(65536))}) diff --git a/tests/integration/long/test_consistency.py b/tests/integration/long/test_consistency.py index 5b73d9a390..bb6828aa3b 100644 --- a/tests/integration/long/test_consistency.py +++ b/tests/integration/long/test_consistency.py @@ -83,7 +83,7 @@ def _query(self, session, keyspace, count, consistency_level=ConsistencyLevel.ON break except (OperationTimedOut, ReadTimeout): ex_type, ex, tb = sys.exc_info() - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb tries += 1 time.sleep(1) diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py index 44094de03a..a67c05aa57 100644 --- a/tests/integration/long/test_failure_types.py +++ b/tests/integration/long/test_failure_types.py @@ -89,7 +89,7 @@ def execute_helper(self, session, query): return session.execute(query) except OperationTimedOut: ex_type, ex, tb = sys.exc_info() - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb tries += 1 @@ -102,7 +102,7 @@ def execute_concurrent_args_helper(self, session, query, params): return execute_concurrent_with_args(session, query, params, concurrency=50) except (ReadTimeout, WriteTimeout, OperationTimedOut, ReadFailure, WriteFailure): ex_type, ex, tb = sys.exc_info() - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb tries += 1 diff --git a/tests/integration/long/test_large_data.py b/tests/integration/long/test_large_data.py index c29950fe81..76cafa03b7 100644 --- a/tests/integration/long/test_large_data.py +++ b/tests/integration/long/test_large_data.py @@ -82,7 +82,7 @@ def batch_futures(self, session, statement_generator): except (OperationTimedOut, WriteTimeout): ex_type, ex, tb = sys.exc_info() number_of_timeouts += 1 - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb time.sleep(1) except Empty: @@ -97,7 +97,7 @@ def batch_futures(self, session, statement_generator): except (OperationTimedOut, WriteTimeout): ex_type, ex, tb = sys.exc_info() number_of_timeouts += 1 - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb time.sleep(1) except Empty: @@ -156,8 +156,8 @@ def test_wide_batch_rows(self): #If we timeout on insertion that's bad but it could be just slow underlying c* #Attempt to validate anyway, we will fail if we don't get the right data back. ex_type, ex, tb = sys.exc_info() - log.warn("Batch wide row insertion timed out, this may require additional investigation") - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + log.warning("Batch wide row insertion timed out, this may require additional investigation") + log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb # Verify diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index 30a298e166..dec8b5eedb 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -97,7 +97,7 @@ def _insert(self, session, keyspace, count=12, return except (OperationTimedOut, WriteTimeout, WriteFailure): ex_type, ex, tb = sys.exc_info() - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb tries += 1 @@ -120,7 +120,7 @@ def _query(self, session, keyspace, count=12, break except (OperationTimedOut, ReadTimeout, ReadFailure): ex_type, ex, tb = sys.exc_info() - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb tries += 1 else: @@ -138,7 +138,7 @@ def _query(self, session, keyspace, count=12, break except (OperationTimedOut, ReadTimeout, ReadFailure): ex_type, ex, tb = sys.exc_info() - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb tries += 1 diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index 972cf1487e..ffaa76e9fe 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -80,7 +80,7 @@ def validate_ssl_options(ssl_options): break except Exception: ex_type, ex, tb = sys.exc_info() - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb tries += 1 @@ -158,7 +158,7 @@ def test_can_connect_with_ssl_long_running(self): break except Exception: ex_type, ex, tb = sys.exc_info() - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb tries += 1 diff --git a/tests/integration/standard/test_concurrent.py b/tests/integration/standard/test_concurrent.py index 5c81daa024..c85bb64b94 100644 --- a/tests/integration/standard/test_concurrent.py +++ b/tests/integration/standard/test_concurrent.py @@ -60,7 +60,7 @@ def execute_concurrent_helper(self, session, query, results_generator=False): return execute_concurrent(session, query, results_generator=False) except (ReadTimeout, WriteTimeout, OperationTimedOut, ReadFailure, WriteFailure): ex_type, ex, tb = sys.exc_info() - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb count += 1 @@ -73,7 +73,7 @@ def execute_concurrent_args_helper(self, session, query, params, results_generat return execute_concurrent_with_args(session, query, params, results_generator=results_generator) except (ReadTimeout, WriteTimeout, OperationTimedOut, ReadFailure, WriteFailure): ex_type, ex, tb = sys.exc_info() - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb raise RuntimeError("Failed to execute query after 100 attempts: {0}".format(query)) From 769d658759ffb2d6e3fb2e7d3db237c0990398af Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 15 Jan 2018 10:22:00 -0500 Subject: [PATCH 1308/2005] lz4 import issue with recent versions --- CHANGELOG.rst | 1 + cassandra/connection.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 67eb7c7fba..f77f41f176 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -10,6 +10,7 @@ Bug Fixes * AttributeError: 'NoneType' object has no attribute 'add_timer' (PYTHON-862) * Support retry_policy in PreparedStatement (PYTHON-861) * __del__ method in Session is throwing an exception (PYTHON-813) +* LZ4 import issue with recent versions (PYTHON-897) Other ----- diff --git a/cassandra/connection.py b/cassandra/connection.py index cc4d18a869..3d13cbd4a0 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -64,8 +64,8 @@ # The compress and decompress functions we need were moved from the lz4 to # the lz4.block namespace, so we try both here. try: - lz4_block = lz4.block - except AttributeError: + from lz4 import block as lz4_block + except ImportError: lz4_block = lz4 # Cassandra writes the uncompressed message length in big endian order, From ecc32cc40104e35f9f889b6a98a0459425e6cda9 Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Wed, 10 Jan 2018 12:30:24 -0500 Subject: [PATCH 1309/2005] Added tests for PYTHON-890 and PYTHON-877 --- .../integration/cqlengine/model/test_model.py | 42 +++++++++++++++++++ tests/integration/standard/test_cluster.py | 39 +++++++++++++++++ 2 files changed, 81 insertions(+) diff --git a/tests/integration/cqlengine/model/test_model.py b/tests/integration/cqlengine/model/test_model.py index 26d4f1a56f..81de0ead0c 100644 --- a/tests/integration/cqlengine/model/test_model.py +++ b/tests/integration/cqlengine/model/test_model.py @@ -224,3 +224,45 @@ def test_comparison(self): self.assertLessEqual(TestQueryUpdateModel.partition.column, TestQueryUpdateModel.cluster.column) self.assertGreater(TestQueryUpdateModel.cluster.column, TestQueryUpdateModel.partition.column) self.assertGreaterEqual(TestQueryUpdateModel.cluster.column, TestQueryUpdateModel.partition.column) + + +class TestDeprecationWarning(unittest.TestCase): + def test_deprecation_warnings(self): + """ + Test to some deprecation warning have been added. It tests warnings for + negative index, negative index slicing and table sensitive removal + + This test should be removed in 4.0, that's why the imports are in + this test, so it's easier to remove + + @since 3.13 + @jira_ticket PYTHON-877 + @expected_result the deprecation warnings are emitted + + @test_category logs + """ + import warnings + + class SensitiveModel(Model): + __table_name__ = 'SensitiveModel' + __table_name_case_sensitive__ = True + k = columns.Integer(primary_key=True) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + sync_table(SensitiveModel) + self.addCleanup(drop_table, SensitiveModel) + + SensitiveModel.create(k=0) + + rows = SensitiveModel.objects().all().allow_filtering() + rows[-1] + rows[-1:] + + self.assertEqual(len(w), 4) + self.assertIn("__table_name_case_sensitive__ will be removed in 4.0.", str(w[0].message)) + self.assertIn("__table_name_case_sensitive__ will be removed in 4.0.", str(w[1].message)) + self.assertIn("ModelQuerySet indexing with negative indices support will be removed in 4.0.", + str(w[2].message)) + self.assertIn("ModelQuerySet slicing with negative indices support will be removed in 4.0.", + str(w[3].message)) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index d9ec659460..8857f94389 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -23,6 +23,7 @@ import time from uuid import uuid4 import logging +import warnings import cassandra from cassandra.cluster import Cluster, Session, NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT @@ -45,6 +46,7 @@ def setup_module(): use_singledc() + warnings.simplefilter("always") class IgnoredHostPolicy(RoundRobinPolicy): @@ -1428,3 +1430,40 @@ def test_valid_protocol_version_beta_options_connect(self): self.assertEqual(cluster.protocol_version, cassandra.ProtocolVersion.MAX_SUPPORTED) self.assertTrue(session.execute("select release_version from system.local")[0]) cluster.shutdown() + + +class DeprecationWarningTest(unittest.TestCase): + def test_deprecation_warnings_legacy_parameters(self): + """ + Tests the deprecation warning has been added when using + legacy parameters + + @since 3.13 + @jira_ticket PYTHON-877 + @expected_result the deprecation warning is emitted + + @test_category logs + """ + with warnings.catch_warnings(record=True) as w: + Cluster(load_balancing_policy=RoundRobinPolicy()) + self.assertEqual(len(w), 1) + self.assertIn("Legacy execution parameters will be removed in 4.0. Consider using execution profiles.", + str(w[0].message)) + + def test_deprecation_warnings_meta_refreshed(self): + """ + Tests the deprecation warning has been added when enabling + metadata refreshment + + @since 3.13 + @jira_ticket PYTHON-890 + @expected_result the deprecation warning is emitted + + @test_category logs + """ + with warnings.catch_warnings(record=True) as w: + cluster = Cluster() + cluster.set_meta_refresh_enabled(True) + self.assertEqual(len(w), 1) + self.assertIn("Cluster.set_meta_refresh_enabled is deprecated and will be removed in 4.0.", + str(w[0].message)) From 2d5df2b14a7c06ecaf27f6d2515d77a2f2ea1614 Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Thu, 18 Jan 2018 15:24:38 -0500 Subject: [PATCH 1310/2005] PYTHON-897 Added test for importing lz4 --- .travis.yml | 2 +- tests/unit/test_connection.py | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 4e58ae68c3..35d124fa80 100644 --- a/.travis.yml +++ b/.travis.yml @@ -21,7 +21,7 @@ addons: - libev-dev install: - - pip install tox-travis + - pip install tox-travis lz4 script: - | diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index 1d1bb39b0e..c6085f8275 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -421,6 +421,14 @@ def send_msg(msg, req_id, msg_callback): [call(connection)] * get_holders.call_count) +class LZ4Tests(unittest.TestCase): + def test_lz4_is_correctly_imported(self): + try: + import lz4 + except ImportError: + return + from lz4 import block as lz4_block + class TimerTest(unittest.TestCase): def test_timer_collision(self): From 64039eb6efecaad1785255ad4abb793c1136e19c Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 3 Nov 2017 16:22:27 -0400 Subject: [PATCH 1311/2005] factor out util methods also do some cleanup --- tests/unit/io/test_asyncorereactor.py | 46 +++++++-------------------- tests/unit/io/test_libevreactor.py | 42 +++++++----------------- tests/unit/io/utils.py | 35 ++++++++++++++++++++ 3 files changed, 57 insertions(+), 66 deletions(-) diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index 19562bc23a..bae66e8826 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import sys import six try: @@ -22,22 +21,24 @@ import errno import math import time -from mock import patch, Mock +from mock import patch import os from six import BytesIO import socket from socket import error as socket_error -from cassandra.connection import (HEADER_DIRECTION_TO_CLIENT, - ConnectionException, ProtocolError,Timer) +from cassandra.connection import ConnectionException, ProtocolError from cassandra.io.asyncorereactor import AsyncoreConnection -from cassandra.protocol import (write_stringmultimap, write_int, write_string, - SupportedMessage, ReadyMessage, ServerError) -from cassandra.marshal import uint8_pack, uint32_pack, int32_pack +from cassandra.protocol import (write_int, write_string, SupportedMessage, + ReadyMessage, ServerError) +from cassandra.marshal import uint32_pack, int32_pack from tests import is_monkey_patched -from tests.unit.io.utils import submit_and_wait_for_completion, TimerCallback +from tests.unit.io.utils import submit_and_wait_for_completion, TimerCallback, ReactorTestMixin -class AsyncoreConnectionTest(unittest.TestCase): +class AsyncoreConnectionTest(unittest.TestCase, ReactorTestMixin): + + connection_class = AsyncoreConnection + socket_attr_name = 'socket' @classmethod def setUpClass(cls): @@ -62,28 +63,6 @@ def setUp(self): if is_monkey_patched(): raise unittest.SkipTest("Can't test asyncore with monkey patching") - def make_connection(self): - c = AsyncoreConnection('1.2.3.4', cql_version='3.0.1', connect_timeout=5) - c.socket = Mock() - c.socket.send.side_effect = lambda x: len(x) - return c - - def make_header_prefix(self, message_class, version=2, stream_id=0): - return six.binary_type().join(map(uint8_pack, [ - 0xff & (HEADER_DIRECTION_TO_CLIENT | version), - 0, # flags (compression) - stream_id, - message_class.opcode # opcode - ])) - - def make_options_body(self): - options_buf = BytesIO() - write_stringmultimap(options_buf, { - 'CQL_VERSION': ['3.0.1'], - 'COMPRESSION': [] - }) - return options_buf.getvalue() - def make_error_body(self, code, msg): buf = BytesIO() write_int(buf, code) @@ -305,7 +284,7 @@ def test_multi_timer_validation(self, *args): """ Verify that timer timeouts are honored appropriately """ - c = self.make_connection() + self.make_connection() # Tests timers submitted in order at various timeouts submit_and_wait_for_completion(self, AsyncoreConnection, 0, 100, 1, 100) # Tests timers submitted in reverse order at various timeouts @@ -331,6 +310,3 @@ def test_timer_cancellation(self): self.assertFalse(timer_manager._queue) self.assertFalse(timer_manager._new_timers) self.assertFalse(callback.was_invoked()) - - - diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index cfb06e4afd..dd083d38a6 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -18,26 +18,25 @@ import errno import math -from mock import patch, Mock +from mock import patch import os import weakref import six from six import BytesIO from socket import error as socket_error -from cassandra.connection import (HEADER_DIRECTION_TO_CLIENT, - ConnectionException, ProtocolError) - -from cassandra.protocol import (write_stringmultimap, write_int, write_string, - SupportedMessage, ReadyMessage, ServerError) -from cassandra.marshal import uint8_pack, uint32_pack, int32_pack +from cassandra.connection import ConnectionException, ProtocolError +from cassandra.protocol import (write_int, write_string, SupportedMessage, + ReadyMessage, ServerError) +from cassandra.marshal import uint32_pack, int32_pack from tests import is_monkey_patched +from tests.unit.io.utils import ReactorTestMixin try: from cassandra.io.libevreactor import _cleanup as libev__cleanup - from cassandra.io.libevreactor import LibevConnection, LibevLoop + from cassandra.io.libevreactor import LibevConnection except ImportError: LibevConnection = None # noqa @@ -47,7 +46,10 @@ @patch('cassandra.io.libevwrapper.Prepare') @patch('cassandra.io.libevwrapper.Async') @patch('cassandra.io.libevreactor.LibevLoop.maybe_start') -class LibevConnectionTest(unittest.TestCase): +class LibevConnectionTest(unittest.TestCase, ReactorTestMixin): + + connection_class = LibevConnection + socket_attr_name = '_socket' def setUp(self): if is_monkey_patched(): @@ -56,28 +58,6 @@ def setUp(self): raise unittest.SkipTest('libev does not appear to be installed correctly') LibevConnection.initialize_reactor() - def make_connection(self): - c = LibevConnection('1.2.3.4', cql_version='3.0.1') - c._socket = Mock() - c._socket.send.side_effect = lambda x: len(x) - return c - - def make_header_prefix(self, message_class, version=2, stream_id=0): - return six.binary_type().join(map(uint8_pack, [ - 0xff & (HEADER_DIRECTION_TO_CLIENT | version), - 0, # flags (compression) - stream_id, - message_class.opcode # opcode - ])) - - def make_options_body(self): - options_buf = BytesIO() - write_stringmultimap(options_buf, { - 'CQL_VERSION': ['3.0.1'], - 'COMPRESSION': [] - }) - return options_buf.getvalue() - def make_error_body(self, code, msg): buf = BytesIO() write_int(buf, code) diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index 9f02fa007a..779e370751 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -12,6 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. +from cassandra.connection import HEADER_DIRECTION_TO_CLIENT +from cassandra.marshal import uint8_pack +from cassandra.protocol import write_stringmultimap + +import six +from mock import Mock + try: import unittest2 as unittest except ImportError: @@ -139,3 +146,31 @@ def test_timer_cancellation(self): self.assertFalse(timer_manager._queue) self.assertFalse(timer_manager._new_timers) self.assertFalse(callback.was_invoked()) + + +class ReactorTestMixin(object): + + connection_class = socket_attr_name = None + + def make_header_prefix(self, message_class, version=2, stream_id=0): + return six.binary_type().join(map(uint8_pack, [ + 0xff & (HEADER_DIRECTION_TO_CLIENT | version), + 0, # flags (compression) + stream_id, + message_class.opcode # opcode + ])) + + def make_connection(self): + c = self.connection_class('1.2.3.4', cql_version='3.0.1', connect_timeout=5) + mocket = Mock() + mocket.send.side_effect = lambda x: len(x) + setattr(c, self.socket_attr_name, mocket) + return c + + def make_options_body(self): + options_buf = six.BytesIO() + write_stringmultimap(options_buf, { + 'CQL_VERSION': ['3.0.1'], + 'COMPRESSION': [] + }) + return options_buf.getvalue() From 10ddff5beff5f21de71d6831ffa604f766d09bbe Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 8 Nov 2017 11:30:14 -0500 Subject: [PATCH 1312/2005] move make_error_body --- tests/unit/io/test_asyncorereactor.py | 10 +--------- tests/unit/io/test_libevreactor.py | 10 +--------- tests/unit/io/utils.py | 14 ++++++++++---- 3 files changed, 12 insertions(+), 22 deletions(-) diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index bae66e8826..d591c4b40e 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -23,13 +23,11 @@ import time from mock import patch import os -from six import BytesIO import socket from socket import error as socket_error from cassandra.connection import ConnectionException, ProtocolError from cassandra.io.asyncorereactor import AsyncoreConnection -from cassandra.protocol import (write_int, write_string, SupportedMessage, - ReadyMessage, ServerError) +from cassandra.protocol import (SupportedMessage, ReadyMessage, ServerError) from cassandra.marshal import uint32_pack, int32_pack from tests import is_monkey_patched from tests.unit.io.utils import submit_and_wait_for_completion, TimerCallback, ReactorTestMixin @@ -63,12 +61,6 @@ def setUp(self): if is_monkey_patched(): raise unittest.SkipTest("Can't test asyncore with monkey patching") - def make_error_body(self, code, msg): - buf = BytesIO() - write_int(buf, code) - write_string(buf, msg) - return buf.getvalue() - def make_msg(self, header, body=six.binary_type()): return header + uint32_pack(len(body)) + body diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index dd083d38a6..66485fe6f8 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -22,12 +22,10 @@ import os import weakref import six -from six import BytesIO from socket import error as socket_error from cassandra.connection import ConnectionException, ProtocolError -from cassandra.protocol import (write_int, write_string, SupportedMessage, - ReadyMessage, ServerError) +from cassandra.protocol import (SupportedMessage, ReadyMessage, ServerError) from cassandra.marshal import uint32_pack, int32_pack from tests import is_monkey_patched @@ -58,12 +56,6 @@ def setUp(self): raise unittest.SkipTest('libev does not appear to be installed correctly') LibevConnection.initialize_reactor() - def make_error_body(self, code, msg): - buf = BytesIO() - write_int(buf, code) - write_string(buf, msg) - return buf.getvalue() - def make_msg(self, header, body=six.binary_type()): return header + uint32_pack(len(body)) + body diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index 779e370751..b4aa04e684 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -14,9 +14,9 @@ from cassandra.connection import HEADER_DIRECTION_TO_CLIENT from cassandra.marshal import uint8_pack -from cassandra.protocol import write_stringmultimap +from cassandra.protocol import write_stringmultimap, write_int, write_string -import six +from six import binary_type, BytesIO from mock import Mock try: @@ -153,7 +153,7 @@ class ReactorTestMixin(object): connection_class = socket_attr_name = None def make_header_prefix(self, message_class, version=2, stream_id=0): - return six.binary_type().join(map(uint8_pack, [ + return binary_type().join(map(uint8_pack, [ 0xff & (HEADER_DIRECTION_TO_CLIENT | version), 0, # flags (compression) stream_id, @@ -168,9 +168,15 @@ def make_connection(self): return c def make_options_body(self): - options_buf = six.BytesIO() + options_buf = BytesIO() write_stringmultimap(options_buf, { 'CQL_VERSION': ['3.0.1'], 'COMPRESSION': [] }) return options_buf.getvalue() + + def make_error_body(self, code, msg): + buf = BytesIO() + write_int(buf, code) + write_string(buf, msg) + return buf.getvalue() From 4e928b2611ed0038821aa5b5bdf06f20a837e9e4 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 19 Jan 2018 11:36:23 -0500 Subject: [PATCH 1313/2005] move patching to setUp --- tests/unit/io/test_asyncorereactor.py | 22 +++++++------- tests/unit/io/test_libevreactor.py | 41 ++++++++++++++++----------- 2 files changed, 35 insertions(+), 28 deletions(-) diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index d591c4b40e..d5ec973cd2 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -64,7 +64,7 @@ def setUp(self): def make_msg(self, header, body=six.binary_type()): return header + uint32_pack(len(body)) + body - def test_successful_connection(self, *args): + def test_successful_connection(self): c = self.make_connection() # let it write the OptionsMessage @@ -86,7 +86,7 @@ def test_successful_connection(self, *args): self.assertTrue(c.connected_event.is_set()) return c - def test_egain_on_buffer_size(self, *args): + def test_egain_on_buffer_size(self): # get a connection that's already fully started c = self.test_successful_connection() @@ -119,7 +119,7 @@ def side_effect(*args): pos = c._iobuf.tell() self.assertEqual(pos, 4096 + 4096 + 100) - def test_protocol_error(self, *args): + def test_protocol_error(self): c = self.make_connection() # let it write the OptionsMessage @@ -136,7 +136,7 @@ def test_protocol_error(self, *args): self.assertTrue(c.connected_event.is_set()) self.assertIsInstance(c.last_error, ProtocolError) - def test_error_message_on_startup(self, *args): + def test_error_message_on_startup(self): c = self.make_connection() # let it write the OptionsMessage @@ -161,7 +161,7 @@ def test_error_message_on_startup(self, *args): self.assertIsInstance(c.last_error, ConnectionException) self.assertTrue(c.connected_event.is_set()) - def test_socket_error_on_write(self, *args): + def test_socket_error_on_write(self): c = self.make_connection() # make the OptionsMessage write fail @@ -173,7 +173,7 @@ def test_socket_error_on_write(self, *args): self.assertIsInstance(c.last_error, socket_error) self.assertTrue(c.connected_event.is_set()) - def test_blocking_on_write(self, *args): + def test_blocking_on_write(self): c = self.make_connection() # make the OptionsMessage write block @@ -188,7 +188,7 @@ def test_blocking_on_write(self, *args): self.assertFalse(c.is_defunct) self.assertTrue(c.socket.send.call_args is not None) - def test_partial_send(self, *args): + def test_partial_send(self): c = self.make_connection() # only write the first four bytes of the OptionsMessage @@ -205,7 +205,7 @@ def test_partial_send(self, *args): self.assertEqual(expected_writes, c.socket.send.call_count) self.assertEqual(last_write_size, len(c.socket.send.call_args[0][0])) - def test_socket_error_on_read(self, *args): + def test_socket_error_on_read(self): c = self.make_connection() # let it write the OptionsMessage @@ -220,7 +220,7 @@ def test_socket_error_on_read(self, *args): self.assertIsInstance(c.last_error, socket_error) self.assertTrue(c.connected_event.is_set()) - def test_partial_header_read(self, *args): + def test_partial_header_read(self): c = self.make_connection() header = self.make_header_prefix(SupportedMessage) @@ -245,7 +245,7 @@ def test_partial_header_read(self, *args): self.assertTrue(c.connected_event.is_set()) self.assertFalse(c.is_defunct) - def test_partial_message_read(self, *args): + def test_partial_message_read(self): c = self.make_connection() header = self.make_header_prefix(SupportedMessage) @@ -272,7 +272,7 @@ def test_partial_message_read(self, *args): self.assertTrue(c.connected_event.is_set()) self.assertFalse(c.is_defunct) - def test_multi_timer_validation(self, *args): + def test_multi_timer_validation(self): """ Verify that timer timeouts are honored appropriately """ diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index 66485fe6f8..66aab3a765 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -39,11 +39,6 @@ LibevConnection = None # noqa -@patch('socket.socket') -@patch('cassandra.io.libevwrapper.IO') -@patch('cassandra.io.libevwrapper.Prepare') -@patch('cassandra.io.libevwrapper.Async') -@patch('cassandra.io.libevreactor.LibevLoop.maybe_start') class LibevConnectionTest(unittest.TestCase, ReactorTestMixin): connection_class = LibevConnection @@ -58,8 +53,20 @@ def setUp(self): def make_msg(self, header, body=six.binary_type()): return header + uint32_pack(len(body)) + body - - def test_successful_connection(self, *args): + # we patch here rather than as a decorator so that the Mixin can avoid + # specifying patch args to test methods + patchers = [patch(obj) for obj in + ('socket.socket', + 'cassandra.io.libevwrapper.IO', + 'cassandra.io.libevwrapper.Prepare', + 'cassandra.io.libevwrapper.Async', + 'cassandra.io.libevreactor.LibevLoop.maybe_start')] + for p in patchers: + self.addCleanup(p.stop) + for p in patchers: + p.start() + + def test_successful_connection(self): c = self.make_connection() # let it write the OptionsMessage @@ -81,7 +88,7 @@ def test_successful_connection(self, *args): self.assertTrue(c.connected_event.is_set()) return c - def test_egain_on_buffer_size(self, *args): + def test_egain_on_buffer_size(self): # get a connection that's already fully started c = self.test_successful_connection() @@ -114,7 +121,7 @@ def side_effect(*args): pos = c._iobuf.tell() self.assertEqual(pos, 4096 + 4096 + 100) - def test_protocol_error(self, *args): + def test_protocol_error(self): c = self.make_connection() # let it write the OptionsMessage @@ -131,7 +138,7 @@ def test_protocol_error(self, *args): self.assertTrue(c.connected_event.is_set()) self.assertIsInstance(c.last_error, ProtocolError) - def test_error_message_on_startup(self, *args): + def test_error_message_on_startup(self): c = self.make_connection() # let it write the OptionsMessage @@ -156,7 +163,7 @@ def test_error_message_on_startup(self, *args): self.assertIsInstance(c.last_error, ConnectionException) self.assertTrue(c.connected_event.is_set()) - def test_socket_error_on_write(self, *args): + def test_socket_error_on_write(self): c = self.make_connection() # make the OptionsMessage write fail @@ -168,7 +175,7 @@ def test_socket_error_on_write(self, *args): self.assertIsInstance(c.last_error, socket_error) self.assertTrue(c.connected_event.is_set()) - def test_blocking_on_write(self, *args): + def test_blocking_on_write(self): c = self.make_connection() # make the OptionsMessage write block @@ -183,7 +190,7 @@ def test_blocking_on_write(self, *args): self.assertFalse(c.is_defunct) self.assertTrue(c._socket.send.call_args is not None) - def test_partial_send(self, *args): + def test_partial_send(self): c = self.make_connection() # only write the first four bytes of the OptionsMessage @@ -200,7 +207,7 @@ def test_partial_send(self, *args): self.assertEqual(expected_writes, c._socket.send.call_count) self.assertEqual(last_write_size, len(c._socket.send.call_args[0][0])) - def test_socket_error_on_read(self, *args): + def test_socket_error_on_read(self): c = self.make_connection() # let it write the OptionsMessage @@ -215,7 +222,7 @@ def test_socket_error_on_read(self, *args): self.assertIsInstance(c.last_error, socket_error) self.assertTrue(c.connected_event.is_set()) - def test_partial_header_read(self, *args): + def test_partial_header_read(self): c = self.make_connection() header = self.make_header_prefix(SupportedMessage) @@ -241,7 +248,7 @@ def test_partial_header_read(self, *args): self.assertTrue(c.connected_event.is_set()) self.assertFalse(c.is_defunct) - def test_partial_message_read(self, *args): + def test_partial_message_read(self): c = self.make_connection() header = self.make_header_prefix(SupportedMessage) @@ -268,7 +275,7 @@ def test_partial_message_read(self, *args): self.assertTrue(c.connected_event.is_set()) self.assertFalse(c.is_defunct) - def test_watchers_are_finished(self, *args): + def test_watchers_are_finished(self): """ Test for asserting that watchers are closed in LibevConnection From d324e37a6ca3ce3183681373a7bcf9e498963a90 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 19 Jan 2018 11:36:31 -0500 Subject: [PATCH 1314/2005] move make_msg --- tests/unit/io/test_asyncorereactor.py | 5 +---- tests/unit/io/test_libevreactor.py | 4 +--- tests/unit/io/utils.py | 5 ++++- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index d5ec973cd2..616766910f 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -28,7 +28,7 @@ from cassandra.connection import ConnectionException, ProtocolError from cassandra.io.asyncorereactor import AsyncoreConnection from cassandra.protocol import (SupportedMessage, ReadyMessage, ServerError) -from cassandra.marshal import uint32_pack, int32_pack +from cassandra.marshal import int32_pack from tests import is_monkey_patched from tests.unit.io.utils import submit_and_wait_for_completion, TimerCallback, ReactorTestMixin @@ -61,9 +61,6 @@ def setUp(self): if is_monkey_patched(): raise unittest.SkipTest("Can't test asyncore with monkey patching") - def make_msg(self, header, body=six.binary_type()): - return header + uint32_pack(len(body)) + body - def test_successful_connection(self): c = self.make_connection() diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index 66aab3a765..6af59a1b0b 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -26,7 +26,7 @@ from cassandra.connection import ConnectionException, ProtocolError from cassandra.protocol import (SupportedMessage, ReadyMessage, ServerError) -from cassandra.marshal import uint32_pack, int32_pack +from cassandra.marshal import int32_pack from tests import is_monkey_patched from tests.unit.io.utils import ReactorTestMixin @@ -51,8 +51,6 @@ def setUp(self): raise unittest.SkipTest('libev does not appear to be installed correctly') LibevConnection.initialize_reactor() - def make_msg(self, header, body=six.binary_type()): - return header + uint32_pack(len(body)) + body # we patch here rather than as a decorator so that the Mixin can avoid # specifying patch args to test methods patchers = [patch(obj) for obj in diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index b4aa04e684..0841868d79 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -13,7 +13,7 @@ # limitations under the License. from cassandra.connection import HEADER_DIRECTION_TO_CLIENT -from cassandra.marshal import uint8_pack +from cassandra.marshal import uint8_pack, uint32_pack from cassandra.protocol import write_stringmultimap, write_int, write_string from six import binary_type, BytesIO @@ -180,3 +180,6 @@ def make_error_body(self, code, msg): write_int(buf, code) write_string(buf, msg) return buf.getvalue() + + def make_msg(self, header, body=binary_type()): + return header + uint32_pack(len(body)) + body From 14ce7334279dd95d5206c14f6a88606a074ab383 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 8 Nov 2017 14:47:00 -0500 Subject: [PATCH 1315/2005] add socket-getting utils --- tests/unit/io/test_asyncorereactor.py | 22 ----------------- tests/unit/io/test_libevreactor.py | 23 +----------------- tests/unit/io/utils.py | 34 +++++++++++++++++++++++++-- 3 files changed, 33 insertions(+), 46 deletions(-) diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index 616766910f..7c4cdba115 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -61,28 +61,6 @@ def setUp(self): if is_monkey_patched(): raise unittest.SkipTest("Can't test asyncore with monkey patching") - def test_successful_connection(self): - c = self.make_connection() - - # let it write the OptionsMessage - c.handle_write() - - # read in a SupportedMessage response - header = self.make_header_prefix(SupportedMessage) - options = self.make_options_body() - c.socket.recv.return_value = self.make_msg(header, options) - c.handle_read() - - # let it write out a StartupMessage - c.handle_write() - - header = self.make_header_prefix(ReadyMessage, stream_id=1) - c.socket.recv.return_value = self.make_msg(header) - c.handle_read() - - self.assertTrue(c.connected_event.is_set()) - return c - def test_egain_on_buffer_size(self): # get a connection that's already fully started c = self.test_successful_connection() diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index 6af59a1b0b..0a1ef1b180 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -43,6 +43,7 @@ class LibevConnectionTest(unittest.TestCase, ReactorTestMixin): connection_class = LibevConnection socket_attr_name = '_socket' + null_handle_function_args = None, 0 def setUp(self): if is_monkey_patched(): @@ -64,28 +65,6 @@ def setUp(self): for p in patchers: p.start() - def test_successful_connection(self): - c = self.make_connection() - - # let it write the OptionsMessage - c.handle_write(None, 0) - - # read in a SupportedMessage response - header = self.make_header_prefix(SupportedMessage) - options = self.make_options_body() - c._socket.recv.return_value = self.make_msg(header, options) - c.handle_read(None, 0) - - # let it write out a StartupMessage - c.handle_write(None, 0) - - header = self.make_header_prefix(ReadyMessage, stream_id=1) - c._socket.recv.return_value = self.make_msg(header) - c.handle_read(None, 0) - - self.assertTrue(c.connected_event.is_set()) - return c - def test_egain_on_buffer_size(self): # get a connection that's already fully started c = self.test_successful_connection() diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index 0841868d79..642d6fb4a5 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -14,7 +14,8 @@ from cassandra.connection import HEADER_DIRECTION_TO_CLIENT from cassandra.marshal import uint8_pack, uint32_pack -from cassandra.protocol import write_stringmultimap, write_int, write_string +from cassandra.protocol import (write_stringmultimap, write_int, write_string, + SupportedMessage, ReadyMessage) from six import binary_type, BytesIO from mock import Mock @@ -151,6 +152,13 @@ def test_timer_cancellation(self): class ReactorTestMixin(object): connection_class = socket_attr_name = None + null_handle_function_args = () + + def get_socket(self, connection): + return getattr(connection, self.socket_attr_name) + + def set_socket(self, connection, obj): + return setattr(connection, self.socket_attr_name, obj) def make_header_prefix(self, message_class, version=2, stream_id=0): return binary_type().join(map(uint8_pack, [ @@ -164,7 +172,7 @@ def make_connection(self): c = self.connection_class('1.2.3.4', cql_version='3.0.1', connect_timeout=5) mocket = Mock() mocket.send.side_effect = lambda x: len(x) - setattr(c, self.socket_attr_name, mocket) + self.set_socket(c, mocket) return c def make_options_body(self): @@ -183,3 +191,25 @@ def make_error_body(self, code, msg): def make_msg(self, header, body=binary_type()): return header + uint32_pack(len(body)) + body + + def test_successful_connection(self): + c = self.make_connection() + + # let it write the OptionsMessage + c.handle_write(*self.null_handle_function_args) + + # read in a SupportedMessage response + header = self.make_header_prefix(SupportedMessage) + options = self.make_options_body() + self.get_socket(c).recv.return_value = self.make_msg(header, options) + c.handle_read(*self.null_handle_function_args) + + # let it write out a StartupMessage + c.handle_write(*self.null_handle_function_args) + + header = self.make_header_prefix(ReadyMessage, stream_id=1) + self.get_socket(c).recv.return_value = self.make_msg(header) + c.handle_read(*self.null_handle_function_args) + + self.assertTrue(c.connected_event.is_set()) + return c From efa73a70e25195144a992b61eff19894e8e86af0 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 8 Nov 2017 15:21:57 -0500 Subject: [PATCH 1316/2005] move test_egain_on_buffer_size --- tests/unit/io/test_asyncorereactor.py | 35 ------------------------ tests/unit/io/test_libevreactor.py | 35 ------------------------ tests/unit/io/utils.py | 39 ++++++++++++++++++++++++++- 3 files changed, 38 insertions(+), 71 deletions(-) diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index 7c4cdba115..c2a91155d0 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -22,13 +22,11 @@ import math import time from mock import patch -import os import socket from socket import error as socket_error from cassandra.connection import ConnectionException, ProtocolError from cassandra.io.asyncorereactor import AsyncoreConnection from cassandra.protocol import (SupportedMessage, ReadyMessage, ServerError) -from cassandra.marshal import int32_pack from tests import is_monkey_patched from tests.unit.io.utils import submit_and_wait_for_completion, TimerCallback, ReactorTestMixin @@ -61,39 +59,6 @@ def setUp(self): if is_monkey_patched(): raise unittest.SkipTest("Can't test asyncore with monkey patching") - def test_egain_on_buffer_size(self): - # get a connection that's already fully started - c = self.test_successful_connection() - - header = six.b('\x00\x00\x00\x00') + int32_pack(20000) - responses = [ - header + (six.b('a') * (4096 - len(header))), - six.b('a') * 4096, - socket_error(errno.EAGAIN), - six.b('a') * 100, - socket_error(errno.EAGAIN)] - - def side_effect(*args): - response = responses.pop(0) - if isinstance(response, socket_error): - raise response - else: - return response - - c.socket.recv.side_effect = side_effect - c.handle_read() - self.assertEqual(c._current_frame.end_pos, 20000 + len(header)) - # the EAGAIN prevents it from reading the last 100 bytes - c._iobuf.seek(0, os.SEEK_END) - pos = c._iobuf.tell() - self.assertEqual(pos, 4096 + 4096) - - # now tell it to read the last 100 bytes - c.handle_read() - c._iobuf.seek(0, os.SEEK_END) - pos = c._iobuf.tell() - self.assertEqual(pos, 4096 + 4096 + 100) - def test_protocol_error(self): c = self.make_connection() diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index 0a1ef1b180..11104bac40 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -19,14 +19,12 @@ import errno import math from mock import patch -import os import weakref import six from socket import error as socket_error from cassandra.connection import ConnectionException, ProtocolError from cassandra.protocol import (SupportedMessage, ReadyMessage, ServerError) -from cassandra.marshal import int32_pack from tests import is_monkey_patched from tests.unit.io.utils import ReactorTestMixin @@ -65,39 +63,6 @@ def setUp(self): for p in patchers: p.start() - def test_egain_on_buffer_size(self): - # get a connection that's already fully started - c = self.test_successful_connection() - - header = six.b('\x00\x00\x00\x00') + int32_pack(20000) - responses = [ - header + (six.b('a') * (4096 - len(header))), - six.b('a') * 4096, - socket_error(errno.EAGAIN), - six.b('a') * 100, - socket_error(errno.EAGAIN)] - - def side_effect(*args): - response = responses.pop(0) - if isinstance(response, socket_error): - raise response - else: - return response - - c._socket.recv.side_effect = side_effect - c.handle_read(None, 0) - self.assertEqual(c._current_frame.end_pos, 20000 + len(header)) - # the EAGAIN prevents it from reading the last 100 bytes - c._iobuf.seek(0, os.SEEK_END) - pos = c._iobuf.tell() - self.assertEqual(pos, 4096 + 4096) - - # now tell it to read the last 100 bytes - c.handle_read(None, 0) - c._iobuf.seek(0, os.SEEK_END) - pos = c._iobuf.tell() - self.assertEqual(pos, 4096 + 4096 + 100) - def test_protocol_error(self): c = self.make_connection() diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index 642d6fb4a5..da9e441947 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -13,13 +13,18 @@ # limitations under the License. from cassandra.connection import HEADER_DIRECTION_TO_CLIENT -from cassandra.marshal import uint8_pack, uint32_pack +from cassandra.marshal import int32_pack, uint8_pack, uint32_pack from cassandra.protocol import (write_stringmultimap, write_int, write_string, SupportedMessage, ReadyMessage) +import six from six import binary_type, BytesIO from mock import Mock +import errno +import os +from socket import error as socket_error + try: import unittest2 as unittest except ImportError: @@ -213,3 +218,35 @@ def test_successful_connection(self): self.assertTrue(c.connected_event.is_set()) return c + + def test_egain_on_buffer_size(self): + c = self.test_successful_connection() + + header = six.b('\x00\x00\x00\x00') + int32_pack(20000) + responses = [ + header + (six.b('a') * (4096 - len(header))), + six.b('a') * 4096, + socket_error(errno.EAGAIN), + six.b('a') * 100, + socket_error(errno.EAGAIN)] + + def side_effect(*args): + response = responses.pop(0) + if isinstance(response, socket_error): + raise response + else: + return response + + self.get_socket(c).recv.side_effect = side_effect + c.handle_read(*self.null_handle_function_args) + self.assertEqual(c._current_frame.end_pos, 20000 + len(header)) + # the EAGAIN prevents it from reading the last 100 bytes + c._iobuf.seek(0, os.SEEK_END) + pos = c._iobuf.tell() + self.assertEqual(pos, 4096 + 4096) + + # now tell it to read the last 100 bytes + c.handle_read(*self.null_handle_function_args) + c._iobuf.seek(0, os.SEEK_END) + pos = c._iobuf.tell() + self.assertEqual(pos, 4096 + 4096 + 100) From 539a0f3687d9735197201c9b69169b743b143aac Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 8 Nov 2017 15:23:21 -0500 Subject: [PATCH 1317/2005] rename EAGAIN test --- tests/unit/io/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index da9e441947..db3a2f8ef7 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -219,7 +219,7 @@ def test_successful_connection(self): self.assertTrue(c.connected_event.is_set()) return c - def test_egain_on_buffer_size(self): + def test_eagain_on_buffer_size(self): c = self.test_successful_connection() header = six.b('\x00\x00\x00\x00') + int32_pack(20000) From 97f8f5d5fd3d5e90161159b31bd4b89f5f82595a Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 8 Nov 2017 15:37:28 -0500 Subject: [PATCH 1318/2005] move test_protocol_error --- tests/unit/io/test_asyncorereactor.py | 19 +------------------ tests/unit/io/test_libevreactor.py | 19 +------------------ tests/unit/io/utils.py | 19 ++++++++++++++++++- 3 files changed, 20 insertions(+), 37 deletions(-) diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index c2a91155d0..593cbc5f1a 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -24,7 +24,7 @@ from mock import patch import socket from socket import error as socket_error -from cassandra.connection import ConnectionException, ProtocolError +from cassandra.connection import ConnectionException from cassandra.io.asyncorereactor import AsyncoreConnection from cassandra.protocol import (SupportedMessage, ReadyMessage, ServerError) from tests import is_monkey_patched @@ -59,23 +59,6 @@ def setUp(self): if is_monkey_patched(): raise unittest.SkipTest("Can't test asyncore with monkey patching") - def test_protocol_error(self): - c = self.make_connection() - - # let it write the OptionsMessage - c.handle_write() - - # read in a SupportedMessage response - header = self.make_header_prefix(SupportedMessage, version=0xa4) - options = self.make_options_body() - c.socket.recv.return_value = self.make_msg(header, options) - c.handle_read() - - # make sure it errored correctly - self.assertTrue(c.is_defunct) - self.assertTrue(c.connected_event.is_set()) - self.assertIsInstance(c.last_error, ProtocolError) - def test_error_message_on_startup(self): c = self.make_connection() diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index 11104bac40..7397a0701e 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -23,7 +23,7 @@ import six from socket import error as socket_error -from cassandra.connection import ConnectionException, ProtocolError +from cassandra.connection import ConnectionException from cassandra.protocol import (SupportedMessage, ReadyMessage, ServerError) from tests import is_monkey_patched @@ -63,23 +63,6 @@ def setUp(self): for p in patchers: p.start() - def test_protocol_error(self): - c = self.make_connection() - - # let it write the OptionsMessage - c.handle_write(None, 0) - - # read in a SupportedMessage response - header = self.make_header_prefix(SupportedMessage, version=0xa4) - options = self.make_options_body() - c._socket.recv.return_value = self.make_msg(header, options) - c.handle_read(None, 0) - - # make sure it errored correctly - self.assertTrue(c.is_defunct) - self.assertTrue(c.connected_event.is_set()) - self.assertIsInstance(c.last_error, ProtocolError) - def test_error_message_on_startup(self): c = self.make_connection() diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index db3a2f8ef7..5ad0c9b149 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from cassandra.connection import HEADER_DIRECTION_TO_CLIENT +from cassandra.connection import ProtocolError, HEADER_DIRECTION_TO_CLIENT from cassandra.marshal import int32_pack, uint8_pack, uint32_pack from cassandra.protocol import (write_stringmultimap, write_int, write_string, SupportedMessage, ReadyMessage) @@ -250,3 +250,20 @@ def side_effect(*args): c._iobuf.seek(0, os.SEEK_END) pos = c._iobuf.tell() self.assertEqual(pos, 4096 + 4096 + 100) + + def test_protocol_error(self): + c = self.make_connection() + + # let it write the OptionsMessage + c.handle_write(*self.null_handle_function_args) + + # read in a SupportedMessage response + header = self.make_header_prefix(SupportedMessage, version=0xa4) + options = self.make_options_body() + self.get_socket(c).recv.return_value = self.make_msg(header, options) + c.handle_read(*self.null_handle_function_args) + + # make sure it errored correctly + self.assertTrue(c.is_defunct) + self.assertTrue(c.connected_event.is_set()) + self.assertIsInstance(c.last_error, ProtocolError) From ec13d5f2cf35b9eefa7ff97d8a6059c5131b239f Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 8 Nov 2017 15:46:30 -0500 Subject: [PATCH 1319/2005] move test_error_message_on_startup --- tests/unit/io/test_asyncorereactor.py | 28 +------------------------ tests/unit/io/test_libevreactor.py | 28 +------------------------ tests/unit/io/utils.py | 30 +++++++++++++++++++++++++-- 3 files changed, 30 insertions(+), 56 deletions(-) diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index 593cbc5f1a..7f6128d3b4 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -24,9 +24,8 @@ from mock import patch import socket from socket import error as socket_error -from cassandra.connection import ConnectionException from cassandra.io.asyncorereactor import AsyncoreConnection -from cassandra.protocol import (SupportedMessage, ReadyMessage, ServerError) +from cassandra.protocol import SupportedMessage, ReadyMessage from tests import is_monkey_patched from tests.unit.io.utils import submit_and_wait_for_completion, TimerCallback, ReactorTestMixin @@ -59,31 +58,6 @@ def setUp(self): if is_monkey_patched(): raise unittest.SkipTest("Can't test asyncore with monkey patching") - def test_error_message_on_startup(self): - c = self.make_connection() - - # let it write the OptionsMessage - c.handle_write() - - # read in a SupportedMessage response - header = self.make_header_prefix(SupportedMessage) - options = self.make_options_body() - c.socket.recv.return_value = self.make_msg(header, options) - c.handle_read() - - # let it write out a StartupMessage - c.handle_write() - - header = self.make_header_prefix(ServerError, stream_id=1) - body = self.make_error_body(ServerError.error_code, ServerError.summary) - c.socket.recv.return_value = self.make_msg(header, body) - c.handle_read() - - # make sure it errored correctly - self.assertTrue(c.is_defunct) - self.assertIsInstance(c.last_error, ConnectionException) - self.assertTrue(c.connected_event.is_set()) - def test_socket_error_on_write(self): c = self.make_connection() diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index 7397a0701e..2751c04054 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -23,8 +23,7 @@ import six from socket import error as socket_error -from cassandra.connection import ConnectionException -from cassandra.protocol import (SupportedMessage, ReadyMessage, ServerError) +from cassandra.protocol import SupportedMessage, ReadyMessage from tests import is_monkey_patched from tests.unit.io.utils import ReactorTestMixin @@ -63,31 +62,6 @@ def setUp(self): for p in patchers: p.start() - def test_error_message_on_startup(self): - c = self.make_connection() - - # let it write the OptionsMessage - c.handle_write(None, 0) - - # read in a SupportedMessage response - header = self.make_header_prefix(SupportedMessage) - options = self.make_options_body() - c._socket.recv.return_value = self.make_msg(header, options) - c.handle_read(None, 0) - - # let it write out a StartupMessage - c.handle_write(None, 0) - - header = self.make_header_prefix(ServerError, stream_id=1) - body = self.make_error_body(ServerError.error_code, ServerError.summary) - c._socket.recv.return_value = self.make_msg(header, body) - c.handle_read(None, 0) - - # make sure it errored correctly - self.assertTrue(c.is_defunct) - self.assertIsInstance(c.last_error, ConnectionException) - self.assertTrue(c.connected_event.is_set()) - def test_socket_error_on_write(self): c = self.make_connection() diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index 5ad0c9b149..e117bdb54c 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -12,10 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from cassandra.connection import ProtocolError, HEADER_DIRECTION_TO_CLIENT +from cassandra.connection import (ConnectionException, ProtocolError, + HEADER_DIRECTION_TO_CLIENT) from cassandra.marshal import int32_pack, uint8_pack, uint32_pack from cassandra.protocol import (write_stringmultimap, write_int, write_string, - SupportedMessage, ReadyMessage) + SupportedMessage, ReadyMessage, ServerError) import six from six import binary_type, BytesIO @@ -267,3 +268,28 @@ def test_protocol_error(self): self.assertTrue(c.is_defunct) self.assertTrue(c.connected_event.is_set()) self.assertIsInstance(c.last_error, ProtocolError) + + def test_error_message_on_startup(self): + c = self.make_connection() + + # let it write the OptionsMessage + c.handle_write(*self.null_handle_function_args) + + # read in a SupportedMessage response + header = self.make_header_prefix(SupportedMessage) + options = self.make_options_body() + self.get_socket(c).recv.return_value = self.make_msg(header, options) + c.handle_read(*self.null_handle_function_args) + + # let it write out a StartupMessage + c.handle_write(*self.null_handle_function_args) + + header = self.make_header_prefix(ServerError, stream_id=1) + body = self.make_error_body(ServerError.error_code, ServerError.summary) + self.get_socket(c).recv.return_value = self.make_msg(header, body) + c.handle_read(*self.null_handle_function_args) + + # make sure it errored correctly + self.assertTrue(c.is_defunct) + self.assertIsInstance(c.last_error, ConnectionException) + self.assertTrue(c.connected_event.is_set()) From 8030978cf5909f1b5e3483cd278f9cc287931438 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 8 Nov 2017 16:47:44 -0500 Subject: [PATCH 1320/2005] move test_socket_error_on_write --- tests/unit/io/test_asyncorereactor.py | 12 ------------ tests/unit/io/test_libevreactor.py | 12 ------------ tests/unit/io/utils.py | 12 ++++++++++++ 3 files changed, 12 insertions(+), 24 deletions(-) diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index 7f6128d3b4..490d7b5a23 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -58,18 +58,6 @@ def setUp(self): if is_monkey_patched(): raise unittest.SkipTest("Can't test asyncore with monkey patching") - def test_socket_error_on_write(self): - c = self.make_connection() - - # make the OptionsMessage write fail - c.socket.send.side_effect = socket_error(errno.EIO, "bad stuff!") - c.handle_write() - - # make sure it errored correctly - self.assertTrue(c.is_defunct) - self.assertIsInstance(c.last_error, socket_error) - self.assertTrue(c.connected_event.is_set()) - def test_blocking_on_write(self): c = self.make_connection() diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index 2751c04054..afde5cdd9c 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -62,18 +62,6 @@ def setUp(self): for p in patchers: p.start() - def test_socket_error_on_write(self): - c = self.make_connection() - - # make the OptionsMessage write fail - c._socket.send.side_effect = socket_error(errno.EIO, "bad stuff!") - c.handle_write(None, 0) - - # make sure it errored correctly - self.assertTrue(c.is_defunct) - self.assertIsInstance(c.last_error, socket_error) - self.assertTrue(c.connected_event.is_set()) - def test_blocking_on_write(self): c = self.make_connection() diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index e117bdb54c..4d473d48e0 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -293,3 +293,15 @@ def test_error_message_on_startup(self): self.assertTrue(c.is_defunct) self.assertIsInstance(c.last_error, ConnectionException) self.assertTrue(c.connected_event.is_set()) + + def test_socket_error_on_write(self): + c = self.make_connection() + + # make the OptionsMessage write fail + self.get_socket(c).send.side_effect = socket_error(errno.EIO, "bad stuff!") + c.handle_write(*self.null_handle_function_args) + + # make sure it errored correctly + self.assertTrue(c.is_defunct) + self.assertIsInstance(c.last_error, socket_error) + self.assertTrue(c.connected_event.is_set()) From ade204bb9d462a3c09fe3d4bc5fad6375d3d7354 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 8 Nov 2017 16:59:41 -0500 Subject: [PATCH 1321/2005] move test_blocking_on_write --- tests/unit/io/test_asyncorereactor.py | 15 --------------- tests/unit/io/test_libevreactor.py | 15 --------------- tests/unit/io/utils.py | 16 ++++++++++++++++ 3 files changed, 16 insertions(+), 30 deletions(-) diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index 490d7b5a23..9c7001f247 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -58,21 +58,6 @@ def setUp(self): if is_monkey_patched(): raise unittest.SkipTest("Can't test asyncore with monkey patching") - def test_blocking_on_write(self): - c = self.make_connection() - - # make the OptionsMessage write block - c.socket.send.side_effect = socket_error(errno.EAGAIN, "socket busy") - c.handle_write() - - self.assertFalse(c.is_defunct) - - # try again with normal behavior - c.socket.send.side_effect = lambda x: len(x) - c.handle_write() - self.assertFalse(c.is_defunct) - self.assertTrue(c.socket.send.call_args is not None) - def test_partial_send(self): c = self.make_connection() diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index afde5cdd9c..9fedc78ca3 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -62,21 +62,6 @@ def setUp(self): for p in patchers: p.start() - def test_blocking_on_write(self): - c = self.make_connection() - - # make the OptionsMessage write block - c._socket.send.side_effect = socket_error(errno.EAGAIN, "socket busy") - c.handle_write(None, 0) - - self.assertFalse(c.is_defunct) - - # try again with normal behavior - c._socket.send.side_effect = lambda x: len(x) - c.handle_write(None, 0) - self.assertFalse(c.is_defunct) - self.assertTrue(c._socket.send.call_args is not None) - def test_partial_send(self): c = self.make_connection() diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index 4d473d48e0..e81e41b1c0 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -305,3 +305,19 @@ def test_socket_error_on_write(self): self.assertTrue(c.is_defunct) self.assertIsInstance(c.last_error, socket_error) self.assertTrue(c.connected_event.is_set()) + + def test_blocking_on_write(self): + c = self.make_connection() + + # make the OptionsMessage write block + self.get_socket(c).send.side_effect = socket_error(errno.EAGAIN, + "socket busy") + c.handle_write(*self.null_handle_function_args) + + self.assertFalse(c.is_defunct) + + # try again with normal behavior + self.get_socket(c).send.side_effect = lambda x: len(x) + c.handle_write(*self.null_handle_function_args) + self.assertFalse(c.is_defunct) + self.assertTrue(self.get_socket(c).send.call_args is not None) From 3230a7de45f420a78a338b5abbd7a71dd1f401c5 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 8 Nov 2017 17:20:40 -0500 Subject: [PATCH 1322/2005] move test_partial_send --- tests/unit/io/test_asyncorereactor.py | 17 ----------------- tests/unit/io/test_libevreactor.py | 17 ----------------- tests/unit/io/utils.py | 19 +++++++++++++++++++ 3 files changed, 19 insertions(+), 34 deletions(-) diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index 9c7001f247..1c42a03eae 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -58,23 +58,6 @@ def setUp(self): if is_monkey_patched(): raise unittest.SkipTest("Can't test asyncore with monkey patching") - def test_partial_send(self): - c = self.make_connection() - - # only write the first four bytes of the OptionsMessage - write_size = 4 - c.socket.send.side_effect = None - c.socket.send.return_value = write_size - c.handle_write() - - msg_size = 9 # v3+ frame header - expected_writes = int(math.ceil(float(msg_size) / write_size)) - size_mod = msg_size % write_size - last_write_size = size_mod if size_mod else write_size - self.assertFalse(c.is_defunct) - self.assertEqual(expected_writes, c.socket.send.call_count) - self.assertEqual(last_write_size, len(c.socket.send.call_args[0][0])) - def test_socket_error_on_read(self): c = self.make_connection() diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index 9fedc78ca3..e6ffa4dac4 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -62,23 +62,6 @@ def setUp(self): for p in patchers: p.start() - def test_partial_send(self): - c = self.make_connection() - - # only write the first four bytes of the OptionsMessage - write_size = 4 - c._socket.send.side_effect = None - c._socket.send.return_value = write_size - c.handle_write(None, 0) - - msg_size = 9 # v3+ frame header - expected_writes = int(math.ceil(float(msg_size) / write_size)) - size_mod = msg_size % write_size - last_write_size = size_mod if size_mod else write_size - self.assertFalse(c.is_defunct) - self.assertEqual(expected_writes, c._socket.send.call_count) - self.assertEqual(last_write_size, len(c._socket.send.call_args[0][0])) - def test_socket_error_on_read(self): c = self.make_connection() diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index e81e41b1c0..0687717991 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -23,6 +23,7 @@ from mock import Mock import errno +import math import os from socket import error as socket_error @@ -321,3 +322,21 @@ def test_blocking_on_write(self): c.handle_write(*self.null_handle_function_args) self.assertFalse(c.is_defunct) self.assertTrue(self.get_socket(c).send.call_args is not None) + + def test_partial_send(self): + c = self.make_connection() + + # only write the first four bytes of the OptionsMessage + write_size = 4 + self.get_socket(c).send.side_effect = None + self.get_socket(c).send.return_value = write_size + c.handle_write(*self.null_handle_function_args) + + msg_size = 9 # v3+ frame header + expected_writes = int(math.ceil(float(msg_size) / write_size)) + size_mod = msg_size % write_size + last_write_size = size_mod if size_mod else write_size + self.assertFalse(c.is_defunct) + self.assertEqual(expected_writes, self.get_socket(c).send.call_count) + self.assertEqual(last_write_size, + len(self.get_socket(c).send.call_args[0][0])) From 11b2493e16e182467e83c32a83b2a27d008c737b Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 9 Nov 2017 09:45:38 -0500 Subject: [PATCH 1323/2005] move test_socket_error_on_read --- tests/unit/io/test_asyncorereactor.py | 18 ------------------ tests/unit/io/test_libevreactor.py | 18 ------------------ tests/unit/io/utils.py | 16 ++++++++++++++++ 3 files changed, 16 insertions(+), 36 deletions(-) diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index 1c42a03eae..c77231ff9a 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -18,12 +18,9 @@ except ImportError: import unittest # noqa -import errno -import math import time from mock import patch import socket -from socket import error as socket_error from cassandra.io.asyncorereactor import AsyncoreConnection from cassandra.protocol import SupportedMessage, ReadyMessage from tests import is_monkey_patched @@ -58,21 +55,6 @@ def setUp(self): if is_monkey_patched(): raise unittest.SkipTest("Can't test asyncore with monkey patching") - def test_socket_error_on_read(self): - c = self.make_connection() - - # let it write the OptionsMessage - c.handle_write() - - # read in a SupportedMessage response - c.socket.recv.side_effect = socket_error(errno.EIO, "busy socket") - c.handle_read() - - # make sure it errored correctly - self.assertTrue(c.is_defunct) - self.assertIsInstance(c.last_error, socket_error) - self.assertTrue(c.connected_event.is_set()) - def test_partial_header_read(self): c = self.make_connection() diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index e6ffa4dac4..abbdc63fb7 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -16,12 +16,9 @@ except ImportError: import unittest # noqa -import errno -import math from mock import patch import weakref import six -from socket import error as socket_error from cassandra.protocol import SupportedMessage, ReadyMessage @@ -62,21 +59,6 @@ def setUp(self): for p in patchers: p.start() - def test_socket_error_on_read(self): - c = self.make_connection() - - # let it write the OptionsMessage - c.handle_write(None, 0) - - # read in a SupportedMessage response - c._socket.recv.side_effect = socket_error(errno.EIO, "busy socket") - c.handle_read(None, 0) - - # make sure it errored correctly - self.assertTrue(c.is_defunct) - self.assertIsInstance(c.last_error, socket_error) - self.assertTrue(c.connected_event.is_set()) - def test_partial_header_read(self): c = self.make_connection() diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index 0687717991..8992476dc1 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -340,3 +340,19 @@ def test_partial_send(self): self.assertEqual(expected_writes, self.get_socket(c).send.call_count) self.assertEqual(last_write_size, len(self.get_socket(c).send.call_args[0][0])) + + def test_socket_error_on_read(self): + c = self.make_connection() + + # let it write the OptionsMessage + c.handle_write(*self.null_handle_function_args) + + # read in a SupportedMessage response + self.get_socket(c).recv.side_effect = socket_error(errno.EIO, + "busy socket") + c.handle_read(*self.null_handle_function_args) + + # make sure it errored correctly + self.assertTrue(c.is_defunct) + self.assertIsInstance(c.last_error, socket_error) + self.assertTrue(c.connected_event.is_set()) From 430ebcbb18b13bf78b53c2f529e38b3b9c975414 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 9 Nov 2017 10:09:31 -0500 Subject: [PATCH 1324/2005] move test_partial_message_read --- tests/unit/io/test_asyncorereactor.py | 55 -------------------------- tests/unit/io/test_libevreactor.py | 56 --------------------------- tests/unit/io/utils.py | 52 +++++++++++++++++++++++++ 3 files changed, 52 insertions(+), 111 deletions(-) diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index c77231ff9a..1a02999fed 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -11,8 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import six - try: import unittest2 as unittest except ImportError: @@ -22,7 +20,6 @@ from mock import patch import socket from cassandra.io.asyncorereactor import AsyncoreConnection -from cassandra.protocol import SupportedMessage, ReadyMessage from tests import is_monkey_patched from tests.unit.io.utils import submit_and_wait_for_completion, TimerCallback, ReactorTestMixin @@ -55,58 +52,6 @@ def setUp(self): if is_monkey_patched(): raise unittest.SkipTest("Can't test asyncore with monkey patching") - def test_partial_header_read(self): - c = self.make_connection() - - header = self.make_header_prefix(SupportedMessage) - options = self.make_options_body() - message = self.make_msg(header, options) - - c.socket.recv.return_value = message[0:1] - c.handle_read() - self.assertEqual(c._iobuf.getvalue(), message[0:1]) - - c.socket.recv.return_value = message[1:] - c.handle_read() - self.assertEqual(six.binary_type(), c._iobuf.getvalue()) - - # let it write out a StartupMessage - c.handle_write() - - header = self.make_header_prefix(ReadyMessage, stream_id=1) - c.socket.recv.return_value = self.make_msg(header) - c.handle_read() - - self.assertTrue(c.connected_event.is_set()) - self.assertFalse(c.is_defunct) - - def test_partial_message_read(self): - c = self.make_connection() - - header = self.make_header_prefix(SupportedMessage) - options = self.make_options_body() - message = self.make_msg(header, options) - - # read in the first nine bytes - c.socket.recv.return_value = message[:9] - c.handle_read() - self.assertEqual(c._iobuf.getvalue(), message[:9]) - - # ... then read in the rest - c.socket.recv.return_value = message[9:] - c.handle_read() - self.assertEqual(six.binary_type(), c._iobuf.getvalue()) - - # let it write out a StartupMessage - c.handle_write() - - header = self.make_header_prefix(ReadyMessage, stream_id=1) - c.socket.recv.return_value = self.make_msg(header) - c.handle_read() - - self.assertTrue(c.connected_event.is_set()) - self.assertFalse(c.is_defunct) - def test_multi_timer_validation(self): """ Verify that timer timeouts are honored appropriately diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index abbdc63fb7..b8d1ad43c0 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -18,9 +18,6 @@ from mock import patch import weakref -import six - -from cassandra.protocol import SupportedMessage, ReadyMessage from tests import is_monkey_patched from tests.unit.io.utils import ReactorTestMixin @@ -59,59 +56,6 @@ def setUp(self): for p in patchers: p.start() - def test_partial_header_read(self): - c = self.make_connection() - - header = self.make_header_prefix(SupportedMessage) - options = self.make_options_body() - message = self.make_msg(header, options) - - # read in the first byte - c._socket.recv.return_value = message[0:1] - c.handle_read(None, 0) - self.assertEqual(c._iobuf.getvalue(), message[0:1]) - - c._socket.recv.return_value = message[1:] - c.handle_read(None, 0) - self.assertEqual(six.binary_type(), c._iobuf.getvalue()) - - # let it write out a StartupMessage - c.handle_write(None, 0) - - header = self.make_header_prefix(ReadyMessage, stream_id=1) - c._socket.recv.return_value = self.make_msg(header) - c.handle_read(None, 0) - - self.assertTrue(c.connected_event.is_set()) - self.assertFalse(c.is_defunct) - - def test_partial_message_read(self): - c = self.make_connection() - - header = self.make_header_prefix(SupportedMessage) - options = self.make_options_body() - message = self.make_msg(header, options) - - # read in the first nine bytes - c._socket.recv.return_value = message[:9] - c.handle_read(None, 0) - self.assertEqual(c._iobuf.getvalue(), message[:9]) - - # ... then read in the rest - c._socket.recv.return_value = message[9:] - c.handle_read(None, 0) - self.assertEqual(six.binary_type(), c._iobuf.getvalue()) - - # let it write out a StartupMessage - c.handle_write(None, 0) - - header = self.make_header_prefix(ReadyMessage, stream_id=1) - c._socket.recv.return_value = self.make_msg(header) - c.handle_read(None, 0) - - self.assertTrue(c.connected_event.is_set()) - self.assertFalse(c.is_defunct) - def test_watchers_are_finished(self): """ Test for asserting that watchers are closed in LibevConnection diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index 8992476dc1..5a57ec3def 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -356,3 +356,55 @@ def test_socket_error_on_read(self): self.assertTrue(c.is_defunct) self.assertIsInstance(c.last_error, socket_error) self.assertTrue(c.connected_event.is_set()) + + def test_partial_header_read(self): + c = self.make_connection() + + header = self.make_header_prefix(SupportedMessage) + options = self.make_options_body() + message = self.make_msg(header, options) + + self.get_socket(c).recv.return_value = message[0:1] + c.handle_read(*self.null_handle_function_args) + self.assertEqual(c._iobuf.getvalue(), message[0:1]) + + self.get_socket(c).recv.return_value = message[1:] + c.handle_read(*self.null_handle_function_args) + self.assertEqual(six.binary_type(), c._iobuf.getvalue()) + + # let it write out a StartupMessage + c.handle_write(*self.null_handle_function_args) + + header = self.make_header_prefix(ReadyMessage, stream_id=1) + self.get_socket(c).recv.return_value = self.make_msg(header) + c.handle_read(*self.null_handle_function_args) + + self.assertTrue(c.connected_event.is_set()) + self.assertFalse(c.is_defunct) + + def test_partial_message_read(self): + c = self.make_connection() + + header = self.make_header_prefix(SupportedMessage) + options = self.make_options_body() + message = self.make_msg(header, options) + + # read in the first nine bytes + self.get_socket(c).recv.return_value = message[:9] + c.handle_read(*self.null_handle_function_args) + self.assertEqual(c._iobuf.getvalue(), message[:9]) + + # ... then read in the rest + self.get_socket(c).recv.return_value = message[9:] + c.handle_read(*self.null_handle_function_args) + self.assertEqual(six.binary_type(), c._iobuf.getvalue()) + + # let it write out a StartupMessage + c.handle_write(*self.null_handle_function_args) + + header = self.make_header_prefix(ReadyMessage, stream_id=1) + self.get_socket(c).recv.return_value = self.make_msg(header) + c.handle_read(*self.null_handle_function_args) + + self.assertTrue(c.connected_event.is_set()) + self.assertFalse(c.is_defunct) From e44742edd672848fb6735d657ee04f8e72e4440f Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 9 Nov 2017 10:26:32 -0500 Subject: [PATCH 1325/2005] refactor timer tests --- tests/__init__.py | 2 +- tests/unit/io/test_asyncorereactor.py | 115 ++++++++++++++++---------- tests/unit/io/test_eventletreactor.py | 36 +++++++- tests/unit/io/test_geventreactor.py | 34 ++++++-- tests/unit/io/test_libevreactor.py | 68 +++++++++++++-- tests/unit/io/test_libevtimer.py | 83 ------------------- tests/unit/io/test_twistedreactor.py | 57 ++++--------- tests/unit/io/utils.py | 46 +++++++++-- 8 files changed, 245 insertions(+), 196 deletions(-) delete mode 100644 tests/unit/io/test_libevtimer.py diff --git a/tests/__init__.py b/tests/__init__.py index 6f29cf3f2f..8f42bf0a49 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -86,7 +86,7 @@ def is_monkey_patched(): connection_class = None -MONKEY_PATCH_LOOP = bool(os.getenv('MONKEY_PATCH_LOOP', False)) +MONKEY_PATCH_LOOP = os.getenv('MONKEY_PATCH_LOOP', False) notwindows = unittest.skipUnless(not "Windows" in platform.system(), "This test is not adequate for windows") notpypy = unittest.skipUnless(not platform.python_implementation() == 'PyPy', "This tests is not suitable for pypy") diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index 1a02999fed..0e43b43f98 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -16,69 +16,94 @@ except ImportError: import unittest # noqa -import time from mock import patch import socket from cassandra.io.asyncorereactor import AsyncoreConnection from tests import is_monkey_patched -from tests.unit.io.utils import submit_and_wait_for_completion, TimerCallback, ReactorTestMixin +from tests.unit.io.utils import ReactorTestMixin, TimerTestMixin, noop_if_monkey_patched -class AsyncoreConnectionTest(unittest.TestCase, ReactorTestMixin): - - connection_class = AsyncoreConnection - socket_attr_name = 'socket' +class AsyncorePatcher(unittest.TestCase): @classmethod + @noop_if_monkey_patched def setUpClass(cls): if is_monkey_patched(): return AsyncoreConnection.initialize_reactor() - cls.socket_patcher = patch('socket.socket', spec=socket.socket) - cls.mock_socket = cls.socket_patcher.start() - cls.mock_socket().connect_ex.return_value = 0 - cls.mock_socket().getsockopt.return_value = 0 - cls.mock_socket().fileno.return_value = 100 - AsyncoreConnection.add_channel = lambda *args, **kwargs: None + socket_patcher = patch('socket.socket', spec=socket.socket) + channel_patcher = patch( + 'cassandra.io.asyncorereactor.AsyncoreConnection.add_channel', + new=(lambda *args, **kwargs: None) + ) + + cls.mock_socket = socket_patcher.start() + cls.mock_socket.connect_ex.return_value = 0 + cls.mock_socket.getsockopt.return_value = 0 + cls.mock_socket.fileno.return_value = 100 + + channel_patcher.start() + + cls.patchers = (socket_patcher, channel_patcher) @classmethod + @noop_if_monkey_patched def tearDownClass(cls): - if is_monkey_patched(): - return - cls.socket_patcher.stop() + for p in cls.patchers: + try: + p.stop() + except: + pass + + +class AsyncoreConnectionTest(ReactorTestMixin, AsyncorePatcher): + + connection_class = AsyncoreConnection + socket_attr_name = 'socket' + loop_attr_name = '_loop' + + # @classmethod + # def setUpClass(cls): + # if is_monkey_patched(): + # return + # patch_socket_and_set_up_connection(cls) + + # @classmethod + # def tearDownClass(cls): + # if is_monkey_patched(): + # return + # cls.socket_patcher.stop() def setUp(self): if is_monkey_patched(): raise unittest.SkipTest("Can't test asyncore with monkey patching") - def test_multi_timer_validation(self): - """ - Verify that timer timeouts are honored appropriately - """ - self.make_connection() - # Tests timers submitted in order at various timeouts - submit_and_wait_for_completion(self, AsyncoreConnection, 0, 100, 1, 100) - # Tests timers submitted in reverse order at various timeouts - submit_and_wait_for_completion(self, AsyncoreConnection, 100, 0, -1, 100) - # Tests timers submitted in varying order at various timeouts - submit_and_wait_for_completion(self, AsyncoreConnection, 0, 100, 1, 100, True) - - def test_timer_cancellation(self): - """ - Verify that timer cancellation is honored - """ - - # Various lists for tracking callback stage - connection = self.make_connection() - timeout = .1 - callback = TimerCallback(timeout) - timer = connection.create_timer(timeout, callback.invoke) - timer.cancel() - # Release context allow for timer thread to run. - time.sleep(.2) - timer_manager = connection._loop._timers - # Assert that the cancellation was honored - self.assertFalse(timer_manager._queue) - self.assertFalse(timer_manager._new_timers) - self.assertFalse(callback.was_invoked()) + +class TestAsyncoreTimer(TimerTestMixin, AsyncorePatcher): + connection_class = AsyncoreConnection + + @property + def create_timer(self): + return self.connection.create_timer + + @property + def _timers(self): + return self.connection._loop._timers + + # @classmethod + # def setUpClass(cls): + # if is_monkey_patched(): + # return + # patch_socket_and_set_up_connection(cls) + + # @classmethod + # def tearDownClass(cls): + # if is_monkey_patched(): + # return + # cls.socket_patcher.stop() + + def setUp(self): + if is_monkey_patched(): + raise unittest.SkipTest("Can't test asyncore with monkey patching") + super(TestAsyncoreTimer, self).setUp() diff --git a/tests/unit/io/test_eventletreactor.py b/tests/unit/io/test_eventletreactor.py index 746b365956..5bf1b5792b 100644 --- a/tests/unit/io/test_eventletreactor.py +++ b/tests/unit/io/test_eventletreactor.py @@ -18,10 +18,11 @@ except ImportError: import unittest # noqa -from tests.unit.io.utils import TimerConnectionTests +from tests.unit.io.utils import TimerTestMixin from tests import notpypy, MONKEY_PATCH_LOOP, notmonkeypatch from eventlet import monkey_patch +from mock import patch try: from cassandra.io.eventletreactor import EventletConnection @@ -33,7 +34,10 @@ @notpypy @unittest.skipIf(skip_condition, "Skipping the eventlet tests because it's not installed") @notmonkeypatch -class EventletTimerTest(unittest.TestCase, TimerConnectionTests): +class EventletTimerTest(TimerTestMixin, unittest.TestCase): + + connection_class = EventletConnection + @classmethod def setUpClass(cls): # This is run even though the class is skipped, so we need @@ -43,10 +47,34 @@ def setUpClass(cls): # This is being added temporarily due to a bug in eventlet: # https://github.com/eventlet/eventlet/issues/401 - import eventlet; eventlet.sleep() + import eventlet + eventlet.sleep() monkey_patch() - cls.connection_class = EventletConnection + # cls.connection_class = EventletConnection + EventletConnection.initialize_reactor() + assert EventletConnection._timers is not None + + def setUp(self): + socket_patcher = patch('eventlet.green.socket.socket') + self.addCleanup(socket_patcher.stop) + socket_patcher.start() + + super(EventletTimerTest, self).setUp() + + recv_patcher = patch.object(self.connection._socket, + 'recv', + return_value=b'') + self.addCleanup(recv_patcher.stop) + recv_patcher.start() + + @property + def create_timer(self): + return self.connection.create_timer + + @property + def _timers(self): + return self.connection._timers # There is no unpatching because there is not a clear way # of doing it reliably diff --git a/tests/unit/io/test_geventreactor.py b/tests/unit/io/test_geventreactor.py index d62bf20036..1b474a921a 100644 --- a/tests/unit/io/test_geventreactor.py +++ b/tests/unit/io/test_geventreactor.py @@ -18,7 +18,7 @@ import unittest # noqa -from tests.unit.io.utils import TimerConnectionTests +from tests.unit.io.utils import TimerTestMixin from tests import MONKEY_PATCH_LOOP, notmonkeypatch try: from cassandra.io.geventreactor import GeventConnection @@ -26,20 +26,44 @@ except ImportError: GeventConnection = None # noqa +from mock import patch + skip_condition = GeventConnection is None or MONKEY_PATCH_LOOP != "gevent" @unittest.skipIf(skip_condition, "Skipping the gevent tests because it's not installed") @notmonkeypatch -class GeventTimerTest(unittest.TestCase, TimerConnectionTests): +class GeventTimerTest(TimerTestMixin, unittest.TestCase): + + connection_class = GeventConnection + @classmethod def setUpClass(cls): # This is run even though the class is skipped, so we need # to make sure no monkey patching is happening if skip_condition: return + # There is no unpatching because there is not a clear way + # of doing it reliably gevent.monkey.patch_all() - cls.connection_class = GeventConnection GeventConnection.initialize_reactor() - # There is no unpatching because there is not a clear way - # of doing it reliably + def setUp(self): + socket_patcher = patch('gevent.socket.socket') + self.addCleanup(socket_patcher.stop) + socket_patcher.start() + + super(GeventTimerTest, self).setUp() + + recv_patcher = patch.object(self.connection._socket, + 'recv', + return_value=b'') + self.addCleanup(recv_patcher.stop) + recv_patcher.start() + + @property + def create_timer(self): + return self.connection.create_timer + + @property + def _timers(self): + return self.connection._timers diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index b8d1ad43c0..4220d69851 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -16,11 +16,12 @@ except ImportError: import unittest # noqa -from mock import patch +from mock import patch, Mock import weakref +import socket from tests import is_monkey_patched -from tests.unit.io.utils import ReactorTestMixin +from tests.unit.io.utils import ReactorTestMixin, TimerTestMixin, noop_if_monkey_patched try: @@ -30,10 +31,11 @@ LibevConnection = None # noqa -class LibevConnectionTest(unittest.TestCase, ReactorTestMixin): +class LibevConnectionTest(ReactorTestMixin, unittest.TestCase): connection_class = LibevConnection socket_attr_name = '_socket' + loop_attr_name = '_libevloop' null_handle_function_args = None, 0 def setUp(self): @@ -48,9 +50,8 @@ def setUp(self): patchers = [patch(obj) for obj in ('socket.socket', 'cassandra.io.libevwrapper.IO', - 'cassandra.io.libevwrapper.Prepare', - 'cassandra.io.libevwrapper.Async', - 'cassandra.io.libevreactor.LibevLoop.maybe_start')] + 'cassandra.io.libevreactor.LibevLoop.maybe_start' + )] for p in patchers: self.addCleanup(p.stop) for p in patchers: @@ -71,7 +72,7 @@ def test_watchers_are_finished(self): @test_category connection """ - with patch.object(LibevConnection._libevloop, "_thread"), \ + with patch.object(LibevConnection._libevloop, "_thread"),\ patch.object(LibevConnection._libevloop, "notify"): self.make_connection() @@ -85,7 +86,56 @@ def test_watchers_are_finished(self): # be called libev__cleanup(weakref.ref(LibevConnection._libevloop)) for conn in live_connections: - for watcher in (conn._write_watcher, conn._read_watcher): - self.assertTrue(watcher.stop.mock_calls) + self.assertTrue(conn._write_watcher.stop.mock_calls) + self.assertTrue(conn._read_watcher.stop.mock_calls) LibevConnection._libevloop._shutdown = False + + +class LibevTimerPatcher(unittest.TestCase): + + @classmethod + @noop_if_monkey_patched + def setUpClass(cls): + cls.patchers = [ + patch('socket.socket', spec=socket.socket), + patch('cassandra.io.libevwrapper.IO') + ] + for p in cls.patchers: + p.start() + + @classmethod + @noop_if_monkey_patched + def tearDownClass(cls): + for p in cls.patchers: + try: + p.stop() + except: + pass + + +class LibevTimerTest(TimerTestMixin, LibevTimerPatcher): + connection_class = LibevConnection + + @property + def create_timer(self): + return self.connection.create_timer + + @property + def _timers(self): + return self.connection._libevloop._timers + + def make_connection(self): + c = LibevConnection('1.2.3.4', cql_version='3.0.1') + c._socket_impl = Mock() + c._socket.return_value.send.side_effect = lambda x: len(x) + return c + + def setUp(self): + if is_monkey_patched(): + raise unittest.SkipTest("Can't test libev with monkey patching.") + if LibevConnection is None: + raise unittest.SkipTest('libev does not appear to be installed correctly') + + LibevConnection.initialize_reactor() + super(LibevTimerTest, self).setUp() diff --git a/tests/unit/io/test_libevtimer.py b/tests/unit/io/test_libevtimer.py deleted file mode 100644 index 082fa69c63..0000000000 --- a/tests/unit/io/test_libevtimer.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa - - -from mock import patch, Mock - -import time - -from tests.unit.io.utils import submit_and_wait_for_completion, TimerCallback -from tests.unit.io.utils import TimerConnectionTests -from tests import is_monkey_patched - - -try: - from cassandra.io.libevreactor import LibevConnection -except ImportError: - LibevConnection = None # noqa - - -@patch('socket.socket') -@patch('cassandra.io.libevwrapper.IO') -class LibevTimerTest(unittest.TestCase): - - def setUp(self): - if is_monkey_patched(): - raise unittest.SkipTest("Can't test libev with monkey patching") - if LibevConnection is None: - raise unittest.SkipTest('libev does not appear to be installed correctly') - LibevConnection.initialize_reactor() - - def make_connection(self): - c = LibevConnection('1.2.3.4', cql_version='3.0.1') - c._socket = Mock() - c._socket.send.side_effect = lambda x: len(x) - return c - - def test_multi_timer_validation(self, *args): - """ - Verify that timer timeouts are honored appropriately - """ - c = self.make_connection() - c.initialize_reactor() - # Tests timers submitted in order at various timeouts - submit_and_wait_for_completion(self, c, 0, 100, 1, 100) - # Tests timers submitted in reverse order at various timeouts - submit_and_wait_for_completion(self, c, 100, 0, -1, 100) - # Tests timers submitted in varying order at various timeouts - submit_and_wait_for_completion(self, c, 0, 100, 1, 100, True) - - def test_timer_cancellation(self, *args): - """ - Verify that timer cancellation is honored - """ - - # Various lists for tracking callback stage - connection = self.make_connection() - timeout = .1 - callback = TimerCallback(timeout) - timer = connection.create_timer(timeout, callback.invoke) - timer.cancel() - # Release context allow for timer thread to run. - time.sleep(.2) - timer_manager = connection._libevloop._timers - # Assert that the cancellation was honored - self.assertFalse(timer_manager._queue) - self.assertFalse(timer_manager._new_timers) - self.assertFalse(callback.was_invoked()) - diff --git a/tests/unit/io/test_twistedreactor.py b/tests/unit/io/test_twistedreactor.py index c6f603a30e..982ce11b55 100644 --- a/tests/unit/io/test_twistedreactor.py +++ b/tests/unit/io/test_twistedreactor.py @@ -17,63 +17,41 @@ except ImportError: import unittest from mock import Mock, patch -import time try: from twisted.test import proto_helpers from twisted.python.failure import Failure from cassandra.io import twistedreactor + from cassandra.io.twistedreactor import TwistedConnection except ImportError: - twistedreactor = None # NOQA + twistedreactor = TwistedConnection = None # NOQA + from cassandra.connection import _Frame -from tests.unit.io.utils import submit_and_wait_for_completion, TimerCallback +from tests.unit.io.utils import TimerTestMixin -class TestTwistedTimer(unittest.TestCase): +class TestTwistedTimer(TimerTestMixin, unittest.TestCase): """ Simple test class that is used to validate that the TimerManager, and timer classes function appropriately with the twisted infrastructure """ + connection_class = TwistedConnection + + @property + def create_timer(self): + return self.connection.create_timer + + @property + def _timers(self): + return self.connection._loop._timers + def setUp(self): if twistedreactor is None: raise unittest.SkipTest("Twisted libraries not available") twistedreactor.TwistedConnection.initialize_reactor() - - def test_multi_timer_validation(self): - """ - Verify that the timers are called in the correct order - """ - twistedreactor.TwistedConnection.initialize_reactor() - connection = twistedreactor.TwistedConnection('1.2.3.4', - cql_version='3.0.1') - # Tests timers submitted in order at various timeouts - submit_and_wait_for_completion(self, connection, 0, 100, 1, 100) - # Tests timers submitted in reverse order at various timeouts - submit_and_wait_for_completion(self, connection, 100, 0, -1, 100) - # Tests timers submitted in varying order at various timeouts - submit_and_wait_for_completion(self, connection, 0, 100, 1, 100, True) - - def test_timer_cancellation(self, *args): - """ - Verify that timer cancellation is honored - """ - - # Various lists for tracking callback stage - connection = twistedreactor.TwistedConnection('1.2.3.4', - cql_version='3.0.1') - timeout = .1 - callback = TimerCallback(timeout) - timer = connection.create_timer(timeout, callback.invoke) - timer.cancel() - # Release context allow for timer thread to run. - time.sleep(.2) - timer_manager = connection._loop._timers - # Assert that the cancellation was honored - self.assertFalse(timer_manager._queue) - self.assertFalse(timer_manager._new_timers) - self.assertFalse(callback.was_invoked()) + super(TestTwistedTimer, self).setUp() class TestTwistedProtocol(unittest.TestCase): @@ -231,7 +209,7 @@ def test_handle_read__fullmessage(self): self.obj_ut.handle_read() self.assertEqual(self.obj_ut._iobuf.getvalue(), extra) self.obj_ut.process_msg.assert_called_with( - _Frame(version=4, flags=1, stream=2, opcode=3, body_offset=9, end_pos= 9 + len(body)), body) + _Frame(version=4, flags=1, stream=2, opcode=3, body_offset=9, end_pos=9 + len(body)), body) @patch('twisted.internet.reactor.connectTCP') def test_push(self, mock_connectTCP): @@ -242,4 +220,3 @@ def test_push(self, mock_connectTCP): self.obj_ut.push('123 pickup') self.mock_reactor_cft.assert_called_with( self.obj_ut.connector.transport.write, '123 pickup') - diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index 5a57ec3def..4ab3e16b78 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -17,7 +17,9 @@ from cassandra.marshal import int32_pack, uint8_pack, uint32_pack from cassandra.protocol import (write_stringmultimap, write_int, write_string, SupportedMessage, ReadyMessage, ServerError) +from tests import is_monkey_patched +from functools import wraps import six from six import binary_type, BytesIO from mock import Mock @@ -88,7 +90,7 @@ def get_timeout(gross_time, start, end, precision, split_range): return timeout -def submit_and_wait_for_completion(unit_test, connection, start, end, increment, precision, split_range=False): +def submit_and_wait_for_completion(unit_test, create_timer, start, end, increment, precision, split_range=False): """ This will submit a number of timers to the provided connection. It will then ensure that the corresponding callback is invoked in the appropriate amount of time. @@ -109,7 +111,7 @@ def submit_and_wait_for_completion(unit_test, connection, start, end, increment, for gross_time in range(start, end, increment): timeout = get_timeout(gross_time, start, end, precision, split_range) callback = TimerCallback(timeout) - connection.create_timer(timeout, callback.invoke) + create_timer(timeout, callback.invoke) pending_callbacks.append(callback) # wait for all the callbacks associated with the timers to be invoked @@ -125,17 +127,40 @@ def submit_and_wait_for_completion(unit_test, connection, start, end, increment, unit_test.assertAlmostEqual(callback.expected_wait, callback.get_wait_time(), delta=.15) -class TimerConnectionTests(object): +def noop_if_monkey_patched(f): + if is_monkey_patched(): + @wraps(f) + def noop(*args, **kwargs): + return + return noop + + return f + + +class TimerTestMixin(object): + + connection_class = connection = None + # replace with property returning the connection's create_timer and _timers + create_timer = _timers = None + + def setUp(self): + self.connection = self.connection_class( + connect_timeout=5 + ) + + def tearDown(self): + self.connection.close() + def test_multi_timer_validation(self): """ Verify that timer timeouts are honored appropriately """ # Tests timers submitted in order at various timeouts - submit_and_wait_for_completion(self, self.connection_class, 0, 100, 1, 100) + submit_and_wait_for_completion(self, self.create_timer, 0, 100, 1, 100) # Tests timers submitted in reverse order at various timeouts - submit_and_wait_for_completion(self, self.connection_class, 100, 0, -1, 100) + submit_and_wait_for_completion(self, self.create_timer, 100, 0, -1, 100) # Tests timers submitted in varying order at various timeouts - submit_and_wait_for_completion(self, self.connection_class, 0, 100, 1, 100, True), + submit_and_wait_for_completion(self, self.create_timer, 0, 100, 1, 100, True), def test_timer_cancellation(self): """ @@ -145,11 +170,11 @@ def test_timer_cancellation(self): # Various lists for tracking callback stage timeout = .1 callback = TimerCallback(timeout) - timer = self.connection_class.create_timer(timeout, callback.invoke) + timer = self.create_timer(timeout, callback.invoke) timer.cancel() # Release context allow for timer thread to run. time.sleep(.2) - timer_manager = self.connection_class._timers + timer_manager = self._timers # Assert that the cancellation was honored self.assertFalse(timer_manager._queue) self.assertFalse(timer_manager._new_timers) @@ -158,7 +183,7 @@ def test_timer_cancellation(self): class ReactorTestMixin(object): - connection_class = socket_attr_name = None + connection_class = loop_attr_name = socket_attr_name = None null_handle_function_args = () def get_socket(self, connection): @@ -167,6 +192,9 @@ def get_socket(self, connection): def set_socket(self, connection, obj): return setattr(connection, self.socket_attr_name, obj) + def get_loop(self, connection): + return getattr(connection, self.loop_attr_name) + def make_header_prefix(self, message_class, version=2, stream_id=0): return binary_type().join(map(uint8_pack, [ 0xff & (HEADER_DIRECTION_TO_CLIENT | version), From 5327e717f0f39b82f776d978862843807077c0cc Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 1 Dec 2017 16:17:10 -0500 Subject: [PATCH 1326/2005] remove unused get_loop --- tests/unit/io/test_asyncorereactor.py | 1 - tests/unit/io/utils.py | 5 +---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index 0e43b43f98..eabb4398c4 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -61,7 +61,6 @@ class AsyncoreConnectionTest(ReactorTestMixin, AsyncorePatcher): connection_class = AsyncoreConnection socket_attr_name = 'socket' - loop_attr_name = '_loop' # @classmethod # def setUpClass(cls): diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index 4ab3e16b78..8dae866f2c 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -183,7 +183,7 @@ def test_timer_cancellation(self): class ReactorTestMixin(object): - connection_class = loop_attr_name = socket_attr_name = None + connection_class = socket_attr_name = None null_handle_function_args = () def get_socket(self, connection): @@ -192,9 +192,6 @@ def get_socket(self, connection): def set_socket(self, connection, obj): return setattr(connection, self.socket_attr_name, obj) - def get_loop(self, connection): - return getattr(connection, self.loop_attr_name) - def make_header_prefix(self, message_class, version=2, stream_id=0): return binary_type().join(map(uint8_pack, [ 0xff & (HEADER_DIRECTION_TO_CLIENT | version), From 407a739567ea11e120df38e3f13c0821ccf79f85 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 4 Dec 2017 14:10:58 -0500 Subject: [PATCH 1327/2005] add (commented, non-working) wip eventlet connection test --- tests/unit/io/test_eventletreactor.py | 34 ++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/tests/unit/io/test_eventletreactor.py b/tests/unit/io/test_eventletreactor.py index 5bf1b5792b..4dbf177978 100644 --- a/tests/unit/io/test_eventletreactor.py +++ b/tests/unit/io/test_eventletreactor.py @@ -18,7 +18,7 @@ except ImportError: import unittest # noqa -from tests.unit.io.utils import TimerTestMixin +from tests.unit.io.utils import ReactorTestMixin, TimerTestMixin from tests import notpypy, MONKEY_PATCH_LOOP, notmonkeypatch from eventlet import monkey_patch @@ -78,3 +78,35 @@ def _timers(self): # There is no unpatching because there is not a clear way # of doing it reliably + + +# @notpypy +# @unittest.skipIf(skip_condition, "Skipping the eventlet tests because it's not installed") +# @notmonkeypatch +# class EventletConnectionTest(ReactorTestMixin, unittest.TestCase): +# connection_class = EventletConnection +# socket_attr_name = '_socket' + +# @classmethod +# def setUpClass(cls): +# # This is run even though the class is skipped, so we need +# # to make sure no monkey patching is happening +# if skip_condition: +# return + +# # This is being added temporarily due to a bug in eventlet: +# # https://github.com/eventlet/eventlet/issues/401 +# import eventlet +# eventlet.sleep() +# monkey_patch() +# # cls.connection_class = EventletConnection + +# EventletConnection.initialize_reactor() +# assert EventletConnection._timers is not None + +# def setUp(self): +# socket_patcher = patch('eventlet.green.socket.socket') +# self.addCleanup(socket_patcher.stop) +# socket_patcher.start() + +# super(EventletConnectionTest, self).setUp() From f5a932adb3c3e50234113aa86b8f38593f69d099 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 4 Dec 2017 17:22:26 -0500 Subject: [PATCH 1328/2005] remove commented-out code --- tests/unit/io/test_asyncorereactor.py | 24 ------------------- tests/unit/io/test_eventletreactor.py | 34 +-------------------------- 2 files changed, 1 insertion(+), 57 deletions(-) diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index eabb4398c4..8e9dc553ae 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -62,18 +62,6 @@ class AsyncoreConnectionTest(ReactorTestMixin, AsyncorePatcher): connection_class = AsyncoreConnection socket_attr_name = 'socket' - # @classmethod - # def setUpClass(cls): - # if is_monkey_patched(): - # return - # patch_socket_and_set_up_connection(cls) - - # @classmethod - # def tearDownClass(cls): - # if is_monkey_patched(): - # return - # cls.socket_patcher.stop() - def setUp(self): if is_monkey_patched(): raise unittest.SkipTest("Can't test asyncore with monkey patching") @@ -90,18 +78,6 @@ def create_timer(self): def _timers(self): return self.connection._loop._timers - # @classmethod - # def setUpClass(cls): - # if is_monkey_patched(): - # return - # patch_socket_and_set_up_connection(cls) - - # @classmethod - # def tearDownClass(cls): - # if is_monkey_patched(): - # return - # cls.socket_patcher.stop() - def setUp(self): if is_monkey_patched(): raise unittest.SkipTest("Can't test asyncore with monkey patching") diff --git a/tests/unit/io/test_eventletreactor.py b/tests/unit/io/test_eventletreactor.py index 4dbf177978..5bf1b5792b 100644 --- a/tests/unit/io/test_eventletreactor.py +++ b/tests/unit/io/test_eventletreactor.py @@ -18,7 +18,7 @@ except ImportError: import unittest # noqa -from tests.unit.io.utils import ReactorTestMixin, TimerTestMixin +from tests.unit.io.utils import TimerTestMixin from tests import notpypy, MONKEY_PATCH_LOOP, notmonkeypatch from eventlet import monkey_patch @@ -78,35 +78,3 @@ def _timers(self): # There is no unpatching because there is not a clear way # of doing it reliably - - -# @notpypy -# @unittest.skipIf(skip_condition, "Skipping the eventlet tests because it's not installed") -# @notmonkeypatch -# class EventletConnectionTest(ReactorTestMixin, unittest.TestCase): -# connection_class = EventletConnection -# socket_attr_name = '_socket' - -# @classmethod -# def setUpClass(cls): -# # This is run even though the class is skipped, so we need -# # to make sure no monkey patching is happening -# if skip_condition: -# return - -# # This is being added temporarily due to a bug in eventlet: -# # https://github.com/eventlet/eventlet/issues/401 -# import eventlet -# eventlet.sleep() -# monkey_patch() -# # cls.connection_class = EventletConnection - -# EventletConnection.initialize_reactor() -# assert EventletConnection._timers is not None - -# def setUp(self): -# socket_patcher = patch('eventlet.green.socket.socket') -# self.addCleanup(socket_patcher.stop) -# socket_patcher.start() - -# super(EventletConnectionTest, self).setUp() From 4685d43f10c3e034995d57563dde8fe94078548c Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 5 Dec 2017 14:43:21 -0500 Subject: [PATCH 1329/2005] fix build.yaml --- build.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build.yaml b/build.yaml index 09d193e926..81e6507446 100644 --- a/build.yaml +++ b/build.yaml @@ -142,8 +142,8 @@ build: # it takes too much time for the whole matrix to build with cython if [[ $CYTHON == 'CYTHON' ]]; then EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml tests/unit/ || true - MONKEY_PATCH_LOOP=1 EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || true - MONKEY_PATCH_LOOP=1 EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || true + MONKEY_PATCH_LOOP=$EVENT_LOOP_MANAGER EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || true + MONKEY_PATCH_LOOP=$EVENT_LOOP_MANAGER EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || true fi From ec325b9ed30ccb5bbe45cf5fb877d91c639c0ac2 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 1 Nov 2017 17:12:47 -0400 Subject: [PATCH 1330/2005] cleanup pre-tests --- tests/unit/test_connection.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index c6085f8275..3611dd9662 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -57,7 +57,6 @@ def make_header_prefix(self, message_class, version=Connection.protocol_version, message_class.opcode # opcode ])) - def make_options_body(self): options_buf = BytesIO() write_stringmultimap(options_buf, { @@ -116,9 +115,6 @@ def test_unsupported_cql_version(self, *args): c.defunct = Mock() c.cql_version = "3.0.3" - # read in a SupportedMessage response - header = self.make_header_prefix(SupportedMessage) - options_buf = BytesIO() write_stringmultimap(options_buf, { 'CQL_VERSION': ['7.8.9'], @@ -168,9 +164,6 @@ def test_requested_compression_not_available(self, *args): locally_supported_compressions['lz4'] = ('lz4compress', 'lz4decompress') locally_supported_compressions['snappy'] = ('snappycompress', 'snappydecompress') - # read in a SupportedMessage response - header = self.make_header_prefix(SupportedMessage) - # the server only supports snappy options_buf = BytesIO() write_stringmultimap(options_buf, { @@ -198,9 +191,6 @@ def test_use_requested_compression(self, *args): locally_supported_compressions['lz4'] = ('lz4compress', 'lz4decompress') locally_supported_compressions['snappy'] = ('snappycompress', 'snappydecompress') - # read in a SupportedMessage response - header = self.make_header_prefix(SupportedMessage) - # the server only supports snappy options_buf = BytesIO() write_stringmultimap(options_buf, { From f9b2c99cd33c2f785f6deaa11552b063e75ea1ed Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 22 Jan 2018 14:43:12 -0500 Subject: [PATCH 1331/2005] Remove 'Experimental' tag from cqlengine connections documentation --- CHANGELOG.rst | 1 + docs/cqlengine/connections.rst | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f91251b1f0..7bc9052821 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -23,6 +23,7 @@ Other * Remove copyright dates from copyright notices (PYTHON-863) * Remove "Experimental" tag from execution profiles documentation (PYTHON-840) * request_timer metrics descriptions are slightly incorrect (PYTHON-885) +* Remove "Experimental" tag from cqlengine connections documentation (PYTHON-892) 3.12.0 ====== diff --git a/docs/cqlengine/connections.rst b/docs/cqlengine/connections.rst index 6d25682d37..f5c06b3df6 100644 --- a/docs/cqlengine/connections.rst +++ b/docs/cqlengine/connections.rst @@ -1,8 +1,8 @@ -========================== -Connections (experimental) -========================== +=========== +Connections +=========== -Connections are experimental and aimed to ease the use of multiple sessions with cqlengine. Connections can be set on a model class, per query or using a context manager. +Connections aim to ease the use of multiple sessions with cqlengine. Connections can be set on a model class, per query or using a context manager. Register a new connection From 37783126faf9c12a9565fd1ad73a2111f0189f6b Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 4 Jan 2018 14:41:28 -0500 Subject: [PATCH 1332/2005] PYTHON-853: delay timeout --- CHANGELOG.rst | 1 + cassandra/cluster.py | 39 +++++++++++++++++++++++++++------------ 2 files changed, 28 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f91251b1f0..0105fca7ae 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -12,6 +12,7 @@ Bug Fixes * Support retry_policy in PreparedStatement (PYTHON-861) * __del__ method in Session is throwing an exception (PYTHON-813) * LZ4 import issue with recent versions (PYTHON-897) +* ResponseFuture._connection can be None when returning request_id (PYTHON-853) Other ----- diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 87085d8aaa..126552b77a 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3495,22 +3495,37 @@ def _cancel_timer(self): if self._timer: self._timer.cancel() - def _on_timeout(self): + def _on_timeout(self, _attempts=0): + """ + Called when the request associated with this ResponseFuture times out. - try: - self._connection._requests.pop(self._req_id) - # This prevents the race condition of the - # event loop thread just receiving the waited message - # If it arrives after this, it will be ignored - except KeyError: + This function may reschedule itself. The ``_attempts`` parameter tracks + the number of times this has happened. This parameter should only be + set in those cases, where ``_on_timeout`` reschedules itself. + """ + # PYTHON-853: for short timeouts, we sometimes race with our __init__ + if self._connection is None and _attempts < 3: + self._timer = self.session.cluster.connection_class.create_timer( + 0.01, + partial(self._on_timeout, _attempts=_attempts + 1) + ) return - pool = self.session._pools.get(self._current_host) - if pool and not pool.is_shutdown: - with self._connection.lock: - self._connection.request_ids.append(self._req_id) + if self._connection is not None: + try: + self._connection._requests.pop(self._req_id) + # This prevents the race condition of the + # event loop thread just receiving the waited message + # If it arrives after this, it will be ignored + except KeyError: + return + + pool = self.session._pools.get(self._current_host) + if pool and not pool.is_shutdown: + with self._connection.lock: + self._connection.request_ids.append(self._req_id) - pool.return_connection(self._connection) + pool.return_connection(self._connection) errors = self._errors if not errors: From 16158dd9ddbeb2e219896d593dad3e706c2c72cb Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 19 Jan 2018 16:55:58 -0500 Subject: [PATCH 1333/2005] add asyncioreactor --- CHANGELOG.rst | 1 + appveyor/appveyor.ps1 | 4 +- benchmarks/base.py | 12 ++ build.yaml | 12 +- cassandra/cluster.py | 6 + cassandra/io/asyncioreactor.py | 185 +++++++++++++++++++++++ docs/api/cassandra/io/asyncioreactor.rst | 7 + docs/api/index.rst | 1 + test-requirements.txt | 1 + tests/__init__.py | 5 +- tests/integration/__init__.py | 2 +- tests/unit/io/test_asyncioreactor.py | 65 ++++++++ tests/unit/io/test_libevreactor.py | 2 + 13 files changed, 298 insertions(+), 5 deletions(-) create mode 100644 cassandra/io/asyncioreactor.py create mode 100644 docs/api/cassandra/io/asyncioreactor.rst create mode 100644 tests/unit/io/test_asyncioreactor.py diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f77f41f176..a8f497ae0d 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -38,6 +38,7 @@ Features * Add NO_COMPACT startup option (PYTHON-839) * Add new exception type for CDC (PYTHON-837) * Allow 0ms in ConstantSpeculativeExecutionPolicy (PYTHON-836) +* Add asyncio reactor (PYTHON-507) Bug Fixes --------- diff --git a/appveyor/appveyor.ps1 b/appveyor/appveyor.ps1 index 06cc82b090..cc1e6aa76f 100644 --- a/appveyor/appveyor.ps1 +++ b/appveyor/appveyor.ps1 @@ -2,7 +2,7 @@ $env:JAVA_HOME="C:\Program Files\Java\jdk1.8.0" $env:PATH="$($env:JAVA_HOME)\bin;$($env:PATH)" $env:CCM_PATH="C:\Users\appveyor\ccm" $env:CASSANDRA_VERSION=$env:cassandra_version -$env:EVENT_LOOP_MANAGER="async" +$env:EVENT_LOOP_MANAGER="asyncore" $env:SIMULACRON_JAR="C:\Users\appveyor\simulacron-standalone-0.7.0.jar" python --version @@ -77,4 +77,4 @@ If (!(Test-Path C:\Users\appveyor\.ccm\repository\$env:cassandra_version)) { } Start-Process python -ArgumentList "-m pip install -r test-requirements.txt" -Wait -NoNewWindow -Start-Process python -ArgumentList "-m pip install nose-ignore-docstring" -Wait -NoNewWindow \ No newline at end of file +Start-Process python -ArgumentList "-m pip install nose-ignore-docstring" -Wait -NoNewWindow diff --git a/benchmarks/base.py b/benchmarks/base.py index 5c23cfe623..2997840645 100644 --- a/benchmarks/base.py +++ b/benchmarks/base.py @@ -57,6 +57,14 @@ except ImportError as exc: pass +have_asyncio = False +try: + from cassandra.io.asyncioreactor import AsyncioConnection + have_asyncio = True + supported_reactors.append(AsyncioConnection) +except ImportError: + pass + have_twisted = False try: from cassandra.io.twistedreactor import TwistedConnection @@ -216,6 +224,8 @@ def parse_options(): help='number of operations [default: %default]') parser.add_option('--asyncore-only', action='store_true', dest='asyncore_only', help='only benchmark with asyncore connections') + parser.add_option('--asyncio-only', action='store_true', dest='asyncio_only', + help='only benchmark with asyncio connections') parser.add_option('--libev-only', action='store_true', dest='libev_only', help='only benchmark with libev connections') parser.add_option('--twisted-only', action='store_true', dest='twisted_only', @@ -252,6 +262,8 @@ def parse_options(): if options.asyncore_only: options.supported_reactors = [AsyncoreConnection] + elif options.asyncio_only: + options.supported_reactors = [AsyncioConnection] elif options.libev_only: if not have_libev: log.error("libev is not available") diff --git a/build.yaml b/build.yaml index 81e6507446..5c56ae1468 100644 --- a/build.yaml +++ b/build.yaml @@ -44,12 +44,22 @@ schedules: env_vars: | EVENT_LOOP_MANAGER='eventlet' + weekly_asyncio: + schedule: 0 22 * * 6 + branches: + include: [master] + env_vars: | + EVENT_LOOP_MANAGER='asyncio' + matrix: + exclude: + - python: [2.7] + weekly_async: schedule: 0 10 * * 7 branches: include: [master] env_vars: | - EVENT_LOOP_MANAGER='async' + EVENT_LOOP_MANAGER='asyncore' weekly_twister: schedule: 0 14 * * 7 diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 87085d8aaa..688f4bd0d5 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -594,6 +594,7 @@ def default_retry_policy(self, policy): * :class:`cassandra.io.eventletreactor.EventletConnection` (requires monkey-patching - see doc for details) * :class:`cassandra.io.geventreactor.GeventConnection` (requires monkey-patching - see doc for details) * :class:`cassandra.io.twistedreactor.TwistedConnection` + * EXPERIMENTAL: :class:`cassandra.io.asyncioreactor.AsyncioConnection` By default, ``AsyncoreConnection`` will be used, which uses the ``asyncore`` module in the Python standard library. @@ -602,6 +603,11 @@ def default_retry_policy(self, policy): If ``gevent`` or ``eventlet`` monkey-patching is detected, the corresponding connection class will be used automatically. + + ``AsyncioConnection``, which uses the ``asyncio`` module in the Python + standard library, is also available, but currently experimental. Note that + it requires ``asyncio`` features that were only introduced in the 3.4 line + in 3.4.6, and in the 3.5 line in 3.5.1. """ control_connection_timeout = 2.0 diff --git a/cassandra/io/asyncioreactor.py b/cassandra/io/asyncioreactor.py new file mode 100644 index 0000000000..6037b973ac --- /dev/null +++ b/cassandra/io/asyncioreactor.py @@ -0,0 +1,185 @@ +from cassandra.connection import Connection + +import asyncio +import logging +import os +import socket +import ssl +from threading import Lock, Thread + + +log = logging.getLogger(__name__) + + +# This module uses ``yield from`` and ``@asyncio.coroutine`` over ``await`` and +# ``async def`` for pre-Python-3.5 compatibility, so keep in mind that the +# managed coroutines are generator-based, not native coroutines. See PEP 492: +# https://www.python.org/dev/peps/pep-0492/#coroutine-objects + + +try: + asyncio.run_coroutine_threadsafe +except AttributeError: + raise ImportError( + 'Cannot use asyncioreactor without access to ' + 'asyncio.run_coroutine_threadsafe (added in 3.4.6 and 3.5.1)' + ) + + +class AsyncioTimer(object): + """ + An ``asyncioreactor``-specific Timer. Similar to :class:`.connection.Timer, + but with a slightly different API due to limitations in the underlying + ``call_later`` interface. Not meant to be used with a + :class:`.connection.TimerManager`. + """ + + @property + def end(self): + raise NotImplementedError('{} is not compatible with TimerManager and ' + 'does not implement .end()') + + def __init__(self, timeout, callback, loop): + delayed = self._call_delayed_coro(timeout=timeout, + callback=callback, + loop=loop) + self._handle = asyncio.run_coroutine_threadsafe(delayed, loop=loop) + + @staticmethod + @asyncio.coroutine + def _call_delayed_coro(timeout, callback, loop): + yield from asyncio.sleep(timeout, loop=loop) + return callback() + + def __lt__(self, other): + try: + return self._handle < other._handle + except AttributeError: + raise NotImplemented + + def cancel(self): + self._handle.cancel() + + def finish(self): + # connection.Timer method not implemented here because we can't inspect + # the Handle returned from call_later + raise NotImplementedError('{} is not compatible with TimerManager and ' + 'does not implement .finish()') + + +class AsyncioConnection(Connection): + """ + An experimental implementation of :class:`.Connection` that uses the + ``asyncio`` module in the Python standard library for its event loop. + + Note that it requires ``asyncio`` features that were only introduced in the + 3.4 line in 3.4.6, and in the 3.5 line in 3.5.1. + """ + + _loop = None + _pid = os.getpid() + + _lock = Lock() + _loop_thread = None + + _write_queue = None + + def __init__(self, *args, **kwargs): + Connection.__init__(self, *args, **kwargs) + + self._connect_socket() + self._socket.setblocking(0) + + self._write_queue = asyncio.Queue(loop=self._loop) + + # see initialize_reactor -- loop is running in a separate thread, so we + # have to use a threadsafe call + self._read_watcher = asyncio.run_coroutine_threadsafe( + self.handle_read(), loop=self._loop + ) + self._write_watcher = asyncio.run_coroutine_threadsafe( + self.handle_write(), loop=self._loop + ) + self._send_options_message() + + @classmethod + def initialize_reactor(cls): + with cls._lock: + if cls._pid != os.getpid(): + cls._loop = None + if cls._loop is None: + cls._loop = asyncio.get_event_loop() + + if not cls._loop_thread: + # daemonize so the loop will be shut down on interpreter + # shutdown + cls._loop_thread = Thread(target=cls._loop.run_forever, + daemon=True) + cls._loop_thread.start() + + @classmethod + def create_timer(cls, timeout, callback): + return AsyncioTimer(timeout, callback, loop=cls._loop) + + def close(self): + log.debug("Closing connection (%s) to %s" % (id(self), self.host)) + with self.lock: + if self.is_closed: + return + self.is_closed = True + + self._write_watcher.cancel() + self._read_watcher.cancel() + self.connected_event.set() + + def push(self, data): + buff_size = self.out_buffer_size + if len(data) > buff_size: + for i in range(0, len(data), buff_size): + self._push_chunk(data[i:i + buff_size]) + else: + self._push_chunk(data) + + def _push_chunk(self, chunk): + asyncio.run_coroutine_threadsafe( + self._write_queue.put(chunk), + loop=self._loop + ) + + @asyncio.coroutine + def handle_write(self): + while True: + try: + next_msg = yield from self._write_queue.get() + if next_msg: + yield from self._loop.sock_sendall(self._socket, next_msg) + except socket.error as err: + log.debug("Exception in send for %s: %s", self, err) + self.defunct(err) + return + + @asyncio.coroutine + def handle_read(self): + while True: + try: + buf = yield from self._loop.sock_recv(self._socket, self.in_buffer_size) + self._iobuf.write(buf) + # sock_recv expects EWOULDBLOCK if socket provides no data, but + # nonblocking ssl sockets raise these instead, so we handle them + # ourselves by yielding to the event loop, where the socket will + # get the reading/writing it "wants" before retrying + except (ssl.SSLWantWriteError, ssl.SSLWantReadError): + yield + continue + except socket.error as err: + log.debug("Exception during socket recv for %s: %s", + self, err) + self.defunct(err) + return # leave the read loop + + if buf and self._iobuf.tell(): + self.process_io_buffer() + else: + log.debug("Connection %s closed by server", self) + self.close() + return diff --git a/docs/api/cassandra/io/asyncioreactor.rst b/docs/api/cassandra/io/asyncioreactor.rst new file mode 100644 index 0000000000..38ae63ca7f --- /dev/null +++ b/docs/api/cassandra/io/asyncioreactor.rst @@ -0,0 +1,7 @@ +``cassandra.io.asyncioreactor`` - ``asyncio`` Event Loop +===================================================================== + +.. module:: cassandra.io.asyncioreactor + +.. autoclass:: AsyncioConnection + :members: diff --git a/docs/api/index.rst b/docs/api/index.rst index 340a5e0235..a56d12031a 100644 --- a/docs/api/index.rst +++ b/docs/api/index.rst @@ -20,6 +20,7 @@ Core Driver cassandra/concurrent cassandra/connection cassandra/util + cassandra/io/asyncioreactor cassandra/io/asyncorereactor cassandra/io/eventletreactor cassandra/io/libevreactor diff --git a/test-requirements.txt b/test-requirements.txt index ca71b0dd71..ed78256317 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -13,3 +13,4 @@ gevent>=1.0 eventlet cython>=0.20,<0.28 packaging +asynctest diff --git a/tests/__init__.py b/tests/__init__.py index 8f42bf0a49..8ed3d797c4 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -71,12 +71,15 @@ def is_monkey_patched(): from cassandra.io.eventletreactor import EventletConnection connection_class = EventletConnection -elif "async" in EVENT_LOOP_MANAGER: +elif "asyncore" in EVENT_LOOP_MANAGER: from cassandra.io.asyncorereactor import AsyncoreConnection connection_class = AsyncoreConnection elif "twisted" in EVENT_LOOP_MANAGER: from cassandra.io.twistedreactor import TwistedConnection connection_class = TwistedConnection +elif "asyncio" in EVENT_LOOP_MANAGER: + from cassandra.io.asyncioreactor import AsyncioConnection + connection_class = AsyncioConnection else: try: diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 0206c91501..a8e0b29f9f 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -230,7 +230,7 @@ def get_unsupported_upper_protocol(): lessthancass30 = unittest.skipUnless(CASSANDRA_VERSION < '3.0', 'Cassandra version less then 3.0 required') pypy = unittest.skipUnless(platform.python_implementation() == "PyPy", "Test is skipped unless it's on PyPy") notpy3 = unittest.skipIf(sys.version_info >= (3, 0), "Test not applicable for Python 3.x runtime") -requiresmallclockgranularity = unittest.skipIf("Windows" in platform.system() or "async" in EVENT_LOOP_MANAGER, +requiresmallclockgranularity = unittest.skipIf("Windows" in platform.system() or "asyncore" in EVENT_LOOP_MANAGER, "This test is not suitible for environments with large clock granularity") requiressimulacron = unittest.skipIf(SIMULACRON_JAR is None or CASSANDRA_VERSION < "2.1", "Simulacron jar hasn't been specified or C* version is 2.0") diff --git a/tests/unit/io/test_asyncioreactor.py b/tests/unit/io/test_asyncioreactor.py new file mode 100644 index 0000000000..fb7965a5e3 --- /dev/null +++ b/tests/unit/io/test_asyncioreactor.py @@ -0,0 +1,65 @@ + +try: + from cassandra.io.asyncioreactor import AsyncioConnection + import asynctest + ASYNCIO_AVAILABLE = True +except (ImportError, SyntaxError): + ASYNCIO_AVAILABLE = False + +from tests import is_monkey_patched +from tests.unit.io.utils import TimerCallback, TimerTestMixin + +from mock import patch + +import unittest +import time + + +@unittest.skipUnless(ASYNCIO_AVAILABLE, "asyncio is not available for this runtime") +class AsyncioTimerTests(TimerTestMixin, unittest.TestCase): + + @classmethod + def setUpClass(cls): + if is_monkey_patched() or not ASYNCIO_AVAILABLE: + return + cls.connection_class = AsyncioConnection + AsyncioConnection.initialize_reactor() + + @classmethod + def tearDownClass(cls): + if ASYNCIO_AVAILABLE: + AsyncioConnection._loop.stop() + + @property + def create_timer(self): + return self.connection.create_timer + + @property + def _timers(self): + raise RuntimeError('no TimerManager for AsyncioConnection') + + def setUp(self): + socket_patcher = patch('socket.socket') + self.addCleanup(socket_patcher.stop) + socket_patcher.start() + + old_selector = AsyncioConnection._loop._selector + AsyncioConnection._loop._selector = asynctest.TestSelector() + + def reset_selector(): + AsyncioConnection._loop._selector = old_selector + + self.addCleanup(reset_selector) + + super(AsyncioTimerTests, self).setUp() + + def test_timer_cancellation(self): + # Various lists for tracking callback stage + timeout = .1 + callback = TimerCallback(timeout) + timer = self.create_timer(timeout, callback.invoke) + timer.cancel() + # Release context allow for timer thread to run. + time.sleep(.2) + # Assert that the cancellation was honored + self.assertFalse(callback.was_invoked()) diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index 4220d69851..d22bc14ed6 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -97,6 +97,8 @@ class LibevTimerPatcher(unittest.TestCase): @classmethod @noop_if_monkey_patched def setUpClass(cls): + if LibevConnection is None: + raise unittest.SkipTest('libev does not appear to be installed correctly') cls.patchers = [ patch('socket.socket', spec=socket.socket), patch('cassandra.io.libevwrapper.IO') From ce4c925e3427c0df730e6782461b4117c39c434b Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Fri, 19 Jan 2018 17:00:05 -0500 Subject: [PATCH 1334/2005] asyncio fixes --- benchmarks/base.py | 2 +- cassandra/io/asyncioreactor.py | 54 ++++++++++++++++++++++++++-------- 2 files changed, 43 insertions(+), 13 deletions(-) diff --git a/benchmarks/base.py b/benchmarks/base.py index 2997840645..47a03bbd68 100644 --- a/benchmarks/base.py +++ b/benchmarks/base.py @@ -62,7 +62,7 @@ from cassandra.io.asyncioreactor import AsyncioConnection have_asyncio = True supported_reactors.append(AsyncioConnection) -except ImportError: +except (ImportError, SyntaxError): pass have_twisted = False diff --git a/cassandra/io/asyncioreactor.py b/cassandra/io/asyncioreactor.py index 6037b973ac..f8835e897c 100644 --- a/cassandra/io/asyncioreactor.py +++ b/cassandra/io/asyncioreactor.py @@ -1,11 +1,11 @@ -from cassandra.connection import Connection +from cassandra.connection import Connection, ConnectionShutdown import asyncio import logging import os import socket import ssl -from threading import Lock, Thread +from threading import Lock, Thread, get_ident log = logging.getLogger(__name__) @@ -108,13 +108,14 @@ def initialize_reactor(cls): if cls._pid != os.getpid(): cls._loop = None if cls._loop is None: - cls._loop = asyncio.get_event_loop() + cls._loop = asyncio.new_event_loop() + asyncio.set_event_loop(cls._loop) if not cls._loop_thread: # daemonize so the loop will be shut down on interpreter # shutdown cls._loop_thread = Thread(target=cls._loop.run_forever, - daemon=True) + daemon=True, name="asyncio_thread") cls._loop_thread.start() @classmethod @@ -122,15 +123,36 @@ def create_timer(cls, timeout, callback): return AsyncioTimer(timeout, callback, loop=cls._loop) def close(self): - log.debug("Closing connection (%s) to %s" % (id(self), self.host)) with self.lock: if self.is_closed: return self.is_closed = True - self._write_watcher.cancel() - self._read_watcher.cancel() - self.connected_event.set() + # close from the loop thread to avoid races when removing file + # descriptors + asyncio.run_coroutine_threadsafe( + self._close(), loop=self._loop + ) + + @asyncio.coroutine + def _close(self): + log.debug("Closing connection (%s) to %s" % (id(self), self.host)) + if self._write_watcher: + self._write_watcher.cancel() + if self._read_watcher: + self._read_watcher.cancel() + if self._socket: + self._loop.remove_writer(self._socket.fileno()) + self._loop.remove_reader(self._socket.fileno()) + self._socket.close() + + log.debug("Closed socket to %s" % (self.host,)) + + if not self.is_defunct: + self.error_all_requests( + ConnectionShutdown("Connection to %s was closed" % self.host)) + # don't leave in-progress operations hanging + self.connected_event.set() def push(self, data): buff_size = self.out_buffer_size @@ -141,10 +163,14 @@ def push(self, data): self._push_chunk(data) def _push_chunk(self, chunk): - asyncio.run_coroutine_threadsafe( - self._write_queue.put(chunk), - loop=self._loop - ) + if self._loop_thread.ident != get_ident(): + asyncio.run_coroutine_threadsafe( + self._write_queue.put(chunk), + loop=self._loop + ) + else: + # avoid races/hangs by just scheduling this, not using threadsafe + self._loop.create_task(self._write_queue.put(chunk)) @asyncio.coroutine def handle_write(self): @@ -157,6 +183,8 @@ def handle_write(self): log.debug("Exception in send for %s: %s", self, err) self.defunct(err) return + except asyncio.CancelledError: + return @asyncio.coroutine def handle_read(self): @@ -176,6 +204,8 @@ def handle_read(self): self, err) self.defunct(err) return # leave the read loop + except asyncio.CancelledError: + return if buf and self._iobuf.tell(): self.process_io_buffer() From 3a1a058a19374b4d44e6821a093688c001f47568 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 22 Jan 2018 10:37:55 -0500 Subject: [PATCH 1335/2005] minor improvement in ExecutionProfile and Metrics docs --- CHANGELOG.rst | 1 + cassandra/metrics.py | 2 ++ docs/api/cassandra/cluster.rst | 6 +++++- 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 30094208ba..6040fadcc7 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -24,6 +24,7 @@ Other * Remove "Experimental" tag from execution profiles documentation (PYTHON-840) * request_timer metrics descriptions are slightly incorrect (PYTHON-885) * Remove "Experimental" tag from cqlengine connections documentation (PYTHON-892) +* Set in documentation default consistency for operations is LOCAL_ONE (PYTHON-901) 3.12.0 ====== diff --git a/cassandra/metrics.py b/cassandra/metrics.py index 1431f50bce..dce3de2973 100644 --- a/cassandra/metrics.py +++ b/cassandra/metrics.py @@ -28,6 +28,8 @@ class Metrics(object): """ A collection of timers and counters for various performance metrics. + + Timer metrics are represented as floating point seconds. """ request_timer = None diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 86e168ad08..e8f26de3fa 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -114,8 +114,12 @@ .. automethod:: set_meta_refresh_enabled -.. autoclass:: ExecutionProfile +.. autoclass:: ExecutionProfile (load_balancing_policy=, retry_policy=None, consistency_level=LOCAL_ONE, serial_consistency_level=None, request_timeout=10.0, row_factory=, speculative_execution_policy=None) :members: + :exclude-members: consistency_level + + .. autoattribute:: consistency_level + :annotation: = LOCAL_ONE .. autodata:: EXEC_PROFILE_DEFAULT :annotation: From 8c932369cec4613fe23e256117f595150ac23bc0 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 23 Jan 2018 12:30:28 -0500 Subject: [PATCH 1336/2005] Fix cqlengine LOGGED batch statement generation --- cassandra/cqlengine/query.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index fce78ae899..93becd7275 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -19,7 +19,7 @@ import six from warnings import warn -from cassandra.query import SimpleStatement +from cassandra.query import SimpleStatement, BatchType as CBatchType from cassandra.cqlengine import columns, CQLEngineException, ValidationError, UnicodeMixin from cassandra.cqlengine import connection as conn from cassandra.cqlengine.functions import Token, BaseQueryFunction, QueryValue @@ -225,7 +225,8 @@ def execute(self): self._execute_callbacks() return - opener = 'BEGIN ' + (str(self.batch_type) + ' ' if self.batch_type else '') + ' BATCH' + batch_type = None if self.batch_type is CBatchType.LOGGED else self.batch_type + opener = 'BEGIN ' + (str(batch_type) + ' ' if batch_type else '') + ' BATCH' if self.timestamp: if isinstance(self.timestamp, six.integer_types): From 6bae1485dddd5d637b98c25d5a188b0edda88c12 Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Tue, 23 Jan 2018 11:26:40 -0500 Subject: [PATCH 1337/2005] Added tests for PYTHON-888 --- .../cqlengine/query/test_batch_query.py | 80 +++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/tests/integration/cqlengine/query/test_batch_query.py b/tests/integration/cqlengine/query/test_batch_query.py index 6323ec7d32..f0c9c43266 100644 --- a/tests/integration/cqlengine/query/test_batch_query.py +++ b/tests/integration/cqlengine/query/test_batch_query.py @@ -22,6 +22,8 @@ from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration.cqlengine import execute_count from cassandra.cluster import Session +from cassandra.query import BatchType as cassandra_BatchType +from cassandra.cqlengine.query import BatchType as cqlengine_BatchType class TestMultiKeyModel(Model): @@ -37,6 +39,11 @@ class BatchQueryLogModel(Model): k = columns.Integer(primary_key=True) v = columns.Integer() + +class CounterBatchQueryModel(Model): + k = columns.Integer(primary_key=True) + v = columns.Counter() + class BatchQueryTests(BaseCassEngTestCase): @classmethod @@ -209,3 +216,76 @@ def test_batch_execute_no_timeout(self): with BatchQuery() as b: BatchQueryLogModel.batch(b).create(k=2, v=2) self.assertEqual(mock_execute.call_args[-1]['timeout'], NOT_SET) + + +class BatchTypeQueryTests(BaseCassEngTestCase): + def setUp(self): + sync_table(TestMultiKeyModel) + sync_table(CounterBatchQueryModel) + + def tearDown(self): + drop_table(TestMultiKeyModel) + drop_table(CounterBatchQueryModel) + + @execute_count(6) + def test_cassandra_batch_type(self): + """ + Tests the different types of `class: cassandra.query.BatchType` + + @since 3.13 + @jira_ticket PYTHON-88 + @expected_result batch query succeeds and the results + are correctly readen + + @test_category query + """ + with BatchQuery(batch_type=cassandra_BatchType.UNLOGGED) as b: + TestMultiKeyModel.batch(b).create(partition=1, cluster=1) + TestMultiKeyModel.batch(b).create(partition=1, cluster=2) + + obj = TestMultiKeyModel.objects(partition=1) + self.assertEqual(2, len(obj)) + + with BatchQuery(batch_type=cassandra_BatchType.COUNTER) as b: + CounterBatchQueryModel.batch(b).create(k=1, v=1) + CounterBatchQueryModel.batch(b).create(k=1, v=2) + CounterBatchQueryModel.batch(b).create(k=1, v=10) + + obj = CounterBatchQueryModel.objects(k=1) + self.assertEqual(1, len(obj)) + self.assertEqual(obj[0].v, 13) + + with BatchQuery(batch_type=cassandra_BatchType.LOGGED) as b: + TestMultiKeyModel.batch(b).create(partition=1, cluster=1) + TestMultiKeyModel.batch(b).create(partition=1, cluster=2) + + obj = TestMultiKeyModel.objects(partition=1) + self.assertEqual(2, len(obj)) + + @execute_count(4) + def test_cqlengine_batch_type(self): + """ + Tests the different types of `class: cassandra.cqlengine.query.BatchType` + + @since 3.13 + @jira_ticket PYTHON-88 + @expected_result batch query succeeds and the results + are correctly readen + + @test_category query + """ + with BatchQuery(batch_type=cqlengine_BatchType.Unlogged) as b: + TestMultiKeyModel.batch(b).create(partition=1, cluster=1) + TestMultiKeyModel.batch(b).create(partition=1, cluster=2) + + obj = TestMultiKeyModel.objects(partition=1) + self.assertEqual(2, len(obj)) + + with BatchQuery(batch_type=cqlengine_BatchType.Counter) as b: + CounterBatchQueryModel.batch(b).create(k=1, v=1) + CounterBatchQueryModel.batch(b).create(k=1, v=2) + CounterBatchQueryModel.batch(b).create(k=1, v=10) + + obj = CounterBatchQueryModel.objects(k=1) + self.assertEqual(1, len(obj)) + self.assertEqual(obj[0].v, 13) From bf726a1e347688bc0637dfa26cac540b9d2281dd Mon Sep 17 00:00:00 2001 From: bjmb Date: Wed, 11 Oct 2017 12:46:54 -0400 Subject: [PATCH 1338/2005] Added group_keys_by_host --- CHANGELOG.rst | 1 + cassandra/metadata.py | 56 ++++++++++++++++++++- tests/integration/standard/test_metadata.py | 50 +++++++++++++++++- 3 files changed, 105 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index e04820fe52..762ea28a13 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -43,6 +43,7 @@ Features * Add new exception type for CDC (PYTHON-837) * Allow 0ms in ConstantSpeculativeExecutionPolicy (PYTHON-836) * Add asyncio reactor (PYTHON-507) +* Create an utility function to fetch concurrently many keys from the same replica (PYTHON-647) Bug Fixes --------- diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 224dfe5e27..ccbc571c20 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -25,6 +25,8 @@ from six.moves import zip import sys from threading import RLock +import struct +import random murmur3 = None try: @@ -37,8 +39,10 @@ from cassandra.encoder import Encoder from cassandra.marshal import varint_unpack from cassandra.protocol import QueryMessage -from cassandra.query import dict_factory, bind_params +from cassandra.query import dict_factory, bind_params, Statement from cassandra.util import OrderedDict +from cassandra.pool import HostDistance + log = logging.getLogger(__name__) @@ -2565,3 +2569,53 @@ def _cql_from_cass_type(cass_type): return cass_type.subtypes[0].cql_parameterized_type() else: return cass_type.cql_parameterized_type() + + +NO_VALID_REPLICA = object() + + +def group_keys_by_replica(session, keyspace, table, keys): + """ + Returns a :class:`collections.defaultdict` with the keys grouped per host. This can be + used to more accurately group by IN clause or to batch the keys per host. + + If a valid replica is not found for a particular key it will be grouped under + :class:`~.NO_VALID_REPLICA` + + Example usage:: + result = group_keys_by_host(session, "system", "peers", + ("peer", "data_center"), (("127.0.0.1", ), ("127.0.0.2", ))) + """ + cluster = session.cluster + + partition_keys = cluster.metadata.keyspaces[keyspace].tables[table].partition_key + + serializers = list(types._cqltypes[partition_key.cql_type] for partition_key in partition_keys) + keys_per_host = defaultdict(list) + for key in keys: + serialized_key = [serializer.serialize(pk, cluster.protocol_version) + for serializer, pk in zip(serializers, key)] + if len(serialized_key) == 1: + routing_key = serialized_key[0] + else: + routing_key = b"".join(struct.pack(">H%dsB" % len(p), len(p), p, 0) for p in serialized_key) + all_replicas = cluster.metadata.get_replicas(keyspace, routing_key) + # First check if there are local replicas + valid_replicas = [host for host in all_replicas if + host.is_up and cluster._default_load_balancing_policy.distance( + host) == HostDistance.LOCAL] + if not valid_replicas: + valid_replicas = [host for host in all_replicas if host.is_up] + + if valid_replicas: + for replica in valid_replicas: + if replica in keys_per_host: + keys_per_host[replica].append(key) + break + else: + keys_per_host[random.choice(valid_replicas)].append(key) + else: + # We will group under this statement all the keys for which + # we haven't found a valid replica + keys_per_host[NO_VALID_REPLICA].append(key) + return keys_per_host diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 6cd9b019bf..bc3a384694 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -28,7 +28,8 @@ from cassandra.cluster import Cluster from cassandra.encoder import Encoder from cassandra.metadata import (IndexMetadata, Token, murmur3, Function, Aggregate, protect_name, protect_names, - RegisteredTableExtension, _RegisteredExtensionType, get_schema_parser,) + RegisteredTableExtension, _RegisteredExtensionType, get_schema_parser, + group_keys_by_replica, NO_VALID_REPLICA) from tests.integration import (get_cluster, use_singledc, PROTOCOL_VERSION, get_server_versions, execute_until_pass, BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, @@ -2306,3 +2307,50 @@ def test_metadata_with_quoted_identifiers(self): value_column = mv_columns[2] self.assertIsNotNone(value_column) self.assertEqual(value_column.name, 'the Value') + +class GroupPerHost(BasicSharedKeyspaceUnitTestCase): + @classmethod + def setUpClass(cls): + cls.common_setup(rf=1, create_class_table=True) + cls.table_two_pk = "table_with_two_pk" + cls.session.execute( + ''' + CREATE TABLE {0}.{1} ( + k_one int, + k_two int, + v int, + PRIMARY KEY ((k_one, k_two)) + )'''.format(cls.ks_name, cls.table_two_pk) + ) + + def test_group_keys_by_host(self): + """ + Test to ensure group_keys_by_host functions as expected. It is tried + with a table with a single field for the partition key and a table + with two fields for the partition key + @since 3.13 + @jira_ticket PYTHON-647 + @expected_result group_keys_by_host return the expected value + + @test_category metadata + """ + stmt = """SELECT * FROM {}.{} + WHERE k_one = ? AND k_two = ? """.format(self.ks_name, self.table_two_pk) + keys = ((1, 2), (2, 2), (2, 3), (3, 4)) + self._assert_group_keys_by_host(keys, self.table_two_pk, stmt) + + stmt = """SELECT * FROM {}.{} + WHERE k = ? """.format(self.ks_name, self.ks_name) + keys = ((1, ), (2, ), (2, ), (3, )) + self._assert_group_keys_by_host(keys, self.ks_name, stmt) + + def _assert_group_keys_by_host(self, keys, table_name, stmt): + keys_per_host = group_keys_by_replica(self.session, self.ks_name, table_name, keys) + self.assertNotIn(NO_VALID_REPLICA, keys_per_host) + + prepared_stmt = self.session.prepare(stmt) + for key in keys: + routing_key = prepared_stmt.bind(key).routing_key + hosts = self.cluster.metadata.get_replicas(self.ks_name, routing_key) + self.assertEqual(1, len(hosts)) # RF is 1 for this keyspace + self.assertIn(key, keys_per_host[hosts[0]]) From 09ef4bee461005c9724d075173b968de183e2bea Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 30 Jan 2018 09:47:47 -0500 Subject: [PATCH 1339/2005] version 3.13.0 --- CHANGELOG.rst | 1 + cassandra/__init__.py | 2 +- docs.yaml | 2 ++ 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index e04820fe52..ac9914ab05 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,5 +1,6 @@ 3.13.0 ====== +January 30, 2018 Features -------- diff --git a/cassandra/__init__.py b/cassandra/__init__.py index ba3816519d..943584bbd8 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 12, 0, 'post0') +__version_info__ = (3, 13, 0) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs.yaml b/docs.yaml index 8de9bff5e9..9250f15b1c 100644 --- a/docs.yaml +++ b/docs.yaml @@ -13,6 +13,8 @@ sections: prebuilds: - python setup.py build_ext --inplace --force --no-murmur3 --no-cython versions: + - name: '3.13' + ref: '3.13.0' - name: '3.12' ref: '43b9c995' - name: '3.11' From f4329589685fd076fae1480333f88f633fe54141 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 30 Jan 2018 14:23:24 -0500 Subject: [PATCH 1340/2005] bump to post0 version (3.13.0.post0) --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 943584bbd8..df34f9191f 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 13, 0) +__version_info__ = (3, 13, 0, 'post0') __version__ = '.'.join(map(str, __version_info__)) From d56fede690bbafb78dce9e28a5a1d4fcec48d212 Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Tue, 30 Jan 2018 13:37:14 -0500 Subject: [PATCH 1341/2005] PYTHON-871 Added test simulating network isolation --- .../integration/simulacron/test_connection.py | 46 +++++++++++++++- tests/integration/simulacron/utils.py | 52 ++++++++++++++++++- 2 files changed, 95 insertions(+), 3 deletions(-) diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index 1bded2e749..c5059d6ff1 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -36,7 +36,8 @@ prime_query, prime_request, start_and_prime_cluster_defaults, start_and_prime_singledc, - clear_queries) + clear_queries, RejectConnections, + RejectType, AcceptConnections) class TrackDownListener(HostStateListener): @@ -46,6 +47,15 @@ def __init__(self): def on_down(self, host): self.hosts_marked_down.append(host) + def on_up(self, host): + pass + + def on_add(self, host): + pass + + def on_remove(self, host): + pass + class ThreadTracker(ThreadPoolExecutor): called_functions = [] @@ -339,3 +349,37 @@ class ExtendedConnection(AsyncoreConnection): connection_class=ExtendedConnection) cluster.connect() cluster.shutdown() + + def test_driver_recovers_nework_isolation(self): + start_and_prime_singledc() + + idle_heartbeat_timeout = 3 + idle_heartbeat_interval = 1 + + listener = TrackDownListener() + + cluster = Cluster(['127.0.0.1'], + load_balancing_policy=RoundRobinPolicy(), + idle_heartbeat_timeout=idle_heartbeat_timeout, + idle_heartbeat_interval=idle_heartbeat_interval, + executor_threads=16) + session = cluster.connect(wait_for_all_pools=True) + + cluster.register_listener(listener) + + prime_request(PrimeOptions(then=NO_THEN)) + prime_request(RejectConnections(RejectType.REJECT_STARTUP)) + + time.sleep((idle_heartbeat_timeout + idle_heartbeat_interval) * 2) + + for host in cluster.metadata.all_hosts(): + self.assertIn(host, listener.hosts_marked_down) + + self.assertRaises(NoHostAvailable, session.execute, "SELECT * from system.local") + + clear_queries() + prime_request(AcceptConnections()) + + time.sleep(idle_heartbeat_timeout + idle_heartbeat_interval + 2) + + self.assertIsNotNone(session.execute("SELECT * from system.local")) diff --git a/tests/integration/simulacron/utils.py b/tests/integration/simulacron/utils.py index f3965009bb..4ee793831e 100644 --- a/tests/integration/simulacron/utils.py +++ b/tests/integration/simulacron/utils.py @@ -103,7 +103,7 @@ def submit_request(self, query): request = Request("http://{}/{}{}".format( self.admin_addr, query.path, query.fetch_url_params()), data=data) - request.get_method = lambda: 'POST' + request.get_method = lambda: query.method request.add_header("Content-Type", 'application/json') request.add_header("Content-Length", len(data)) @@ -142,9 +142,13 @@ def clear_all_queries(self, cluster_name=DEFAULT_CLUSTER): class SimulacronRequest(object): def fetch_json(self): - raise NotImplementedError() + return {} def fetch_url_params(self): + return "" + + @property + def method(self): raise NotImplementedError() @@ -175,6 +179,44 @@ def fetch_json(self): def fetch_url_params(self): return "" + @property + def method(self): + return "POST" + + +class RejectType(): + UNBIND = "UNBIND" + STOP = "STOP" + REJECT_STARTUP = "REJECT_STARTUP" + + +class RejectConnections(SimulacronRequest): + """ + Class used for making simulacron reject new connections + """ + def __init__(self, reject_type, cluster_name=DEFAULT_CLUSTER): + self.path = "listener/{}".format(cluster_name) + self.reject_type = reject_type + + def fetch_url_params(self): + return "?type={0}".format(self.reject_type) + + @property + def method(self): + return "DELETE" + + +class AcceptConnections(SimulacronRequest): + """ + Class used for making simulacron reject new connections + """ + def __init__(self, cluster_name=DEFAULT_CLUSTER): + self.path = "listener/{}".format(cluster_name) + + @property + def method(self): + return "PUT" + class PrimeQuery(SimulacronRequest): """ @@ -228,6 +270,9 @@ def set_node(self, cluster_id, datacenter_id, node_id): def fetch_url_params(self): return "" + @property + def method(self): + return "POST" class ClusterQuery(SimulacronRequest): """ @@ -251,6 +296,9 @@ def fetch_url_params(self): return "?cassandra_version={0}&data_centers={1}&name={2}".\ format(self.cassandra_version, self.data_centers, self.cluster_name) + @property + def method(self): + return "POST" def prime_driver_defaults(): """ From ad54008f10cf6a2c3ca695d24dce1742dc132dc2 Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Mon, 29 Jan 2018 14:04:32 -0500 Subject: [PATCH 1342/2005] Replaced CASS_SERVER_VERSION for CASSANDRA_VERSION in tests --- tests/integration/standard/test_metadata.py | 51 ++++++++------------- tests/integration/standard/test_query.py | 9 +--- 2 files changed, 20 insertions(+), 40 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index bc3a384694..5d51fd1c3e 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -31,17 +31,16 @@ RegisteredTableExtension, _RegisteredExtensionType, get_schema_parser, group_keys_by_replica, NO_VALID_REPLICA) -from tests.integration import (get_cluster, use_singledc, PROTOCOL_VERSION, get_server_versions, execute_until_pass, +from tests.integration import (get_cluster, use_singledc, PROTOCOL_VERSION, execute_until_pass, BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, BasicExistingKeyspaceUnitTestCase, drop_keyspace_shutdown_cluster, CASSANDRA_VERSION, - get_supported_protocol_versions, greaterthanorequalcass30, lessthancass30, local) + get_supported_protocol_versions, greaterthanorequalcass30, lessthancass30, local, + greaterthancass20) from tests.integration import greaterthancass21 def setup_module(): use_singledc() - global CASS_SERVER_VERSION - CASS_SERVER_VERSION = get_server_versions()[0] class HostMetatDataTests(BasicExistingKeyspaceUnitTestCase): @@ -207,7 +206,7 @@ def test_basic_table_meta_properties(self): self.assertEqual([u'a', u'b', u'c'], sorted(tablemeta.columns.keys())) cc = self.cluster.control_connection._connection - parser = get_schema_parser(cc, str(CASS_SERVER_VERSION[0]), 1) + parser = get_schema_parser(cc, CASSANDRA_VERSION, 1) for option in tablemeta.options: self.assertIn(option, parser.recognized_table_options) @@ -344,9 +343,8 @@ def test_composite_in_compound_primary_key_compact(self): self.check_create_statement(tablemeta, create_statement) + @lessthancass30 def test_cql_compatibility(self): - if CASS_SERVER_VERSION >= (3, 0): - raise unittest.SkipTest("cql compatibility does not apply Cassandra 3.0+") # having more than one non-PK column is okay if there aren't any # clustering columns @@ -412,9 +410,8 @@ def test_indexes(self): self.assertIn('CREATE INDEX d_index', statement) self.assertIn('CREATE INDEX e_index', statement) + @greaterthancass21 def test_collection_indexes(self): - if CASS_SERVER_VERSION < (2, 1, 0): - raise unittest.SkipTest("Secondary index on collections were introduced in Cassandra 2.1") self.session.execute("CREATE TABLE %s.%s (a int PRIMARY KEY, b map)" % (self.keyspace_name, self.function_table_name)) @@ -429,11 +426,11 @@ def test_collection_indexes(self): % (self.keyspace_name, self.function_table_name)) tablemeta = self.get_table_metadata() - target = ' (b)' if CASS_SERVER_VERSION < (3, 0) else 'values(b))' # explicit values in C* 3+ + target = ' (b)' if CASSANDRA_VERSION < "3.0" else 'values(b))' # explicit values in C* 3+ self.assertIn(target, tablemeta.export_as_string()) # test full indexes on frozen collections, if available - if CASS_SERVER_VERSION >= (2, 1, 3): + if CASSANDRA_VERSION >= "2.1.3": self.session.execute("DROP TABLE %s.%s" % (self.keyspace_name, self.function_table_name)) self.session.execute("CREATE TABLE %s.%s (a int PRIMARY KEY, b frozen>)" % (self.keyspace_name, self.function_table_name)) @@ -448,7 +445,7 @@ def test_compression_disabled(self): create_statement += " WITH compression = {}" self.session.execute(create_statement) tablemeta = self.get_table_metadata() - expected = "compression = {}" if CASS_SERVER_VERSION < (3, 0) else "compression = {'enabled': 'false'}" + expected = "compression = {}" if CASSANDRA_VERSION < "3.0" else "compression = {'enabled': 'false'}" self.assertIn(expected, tablemeta.export_as_string()) def test_non_size_tiered_compaction(self): @@ -620,6 +617,7 @@ def test_refresh_table_metadata(self): cluster2.shutdown() + @greaterthanorequalcass30 def test_refresh_metadata_for_mv(self): """ test for synchronously refreshing materialized view metadata @@ -639,9 +637,6 @@ def test_refresh_metadata_for_mv(self): @test_category metadata """ - if CASS_SERVER_VERSION < (3, 0): - raise unittest.SkipTest("Materialized views require Cassandra 3.0+") - self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b text)".format(self.keyspace_name, self.function_table_name)) cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) @@ -823,6 +818,7 @@ def test_refresh_user_aggregate_metadata(self): cluster2.shutdown() + @greaterthanorequalcass30 def test_multiple_indices(self): """ test multiple indices on the same column. @@ -835,8 +831,6 @@ def test_multiple_indices(self): @test_category metadata """ - if CASS_SERVER_VERSION < (3, 0): - raise unittest.SkipTest("Materialized views require Cassandra 3.0+") self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b map)".format(self.keyspace_name, self.function_table_name)) self.session.execute("CREATE INDEX index_1 ON {0}.{1}(b)".format(self.keyspace_name, self.function_table_name)) @@ -1001,7 +995,7 @@ def test_export_keyspace_schema_udts(self): Test udt exports """ - if CASS_SERVER_VERSION < (2, 1, 0): + if CASSANDRA_VERSION < "2.1": raise unittest.SkipTest('UDTs were introduced in Cassandra 2.1') if PROTOCOL_VERSION < 3: @@ -1826,17 +1820,13 @@ def setup_class(cls): cls.session.execute("CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}" % cls.keyspace_name) cls.session.set_keyspace(cls.keyspace_name) connection = cls.cluster.control_connection._connection - cls.parser_class = get_schema_parser(connection, str(CASS_SERVER_VERSION[0]), timeout=20).__class__ + cls.parser_class = get_schema_parser(connection, CASSANDRA_VERSION, timeout=20).__class__ cls.cluster.control_connection.reconnect = Mock() @classmethod def teardown_class(cls): drop_keyspace_shutdown_cluster(cls.keyspace_name, cls.session, cls.cluster) - def _skip_if_not_version(self, version): - if CASS_SERVER_VERSION < version: - raise unittest.SkipTest("Requires server version >= %s" % (version,)) - def test_bad_keyspace(self): with patch.object(self.parser_class, '_build_keyspace_metadata_internal', side_effect=self.BadMetaException): self.cluster.refresh_keyspace_metadata(self.keyspace_name) @@ -1861,8 +1851,8 @@ def test_bad_index(self): self.assertIs(m._exc_info[0], self.BadMetaException) self.assertIn("/*\nWarning:", m.export_as_string()) + @greaterthancass20 def test_bad_user_type(self): - self._skip_if_not_version((2, 1, 0)) self.session.execute('CREATE TYPE %s (i int, d double)' % self.function_name) with patch.object(self.parser_class, '_build_user_type', side_effect=self.BadMetaException): self.cluster.refresh_schema_metadata() # presently do not capture these errors on udt direct refresh -- make sure it's contained during full refresh @@ -1870,8 +1860,8 @@ def test_bad_user_type(self): self.assertIs(m._exc_info[0], self.BadMetaException) self.assertIn("/*\nWarning:", m.export_as_string()) + @greaterthancass21 def test_bad_user_function(self): - self._skip_if_not_version((2, 2, 0)) self.session.execute("""CREATE FUNCTION IF NOT EXISTS %s (key int, val int) RETURNS NULL ON NULL INPUT RETURNS int @@ -1882,8 +1872,8 @@ def test_bad_user_function(self): self.assertIs(m._exc_info[0], self.BadMetaException) self.assertIn("/*\nWarning:", m.export_as_string()) + @greaterthancass21 def test_bad_user_aggregate(self): - self._skip_if_not_version((2, 2, 0)) self.session.execute("""CREATE FUNCTION IF NOT EXISTS sum_int (key int, val int) RETURNS NULL ON NULL INPUT RETURNS int @@ -1923,11 +1913,10 @@ def test_dct_alias(self): self.assertTrue("c1'org.apache.cassandra.db.marshal.DynamicCompositeType(s=>org.apache.cassandra.db.marshal.UTF8Type,i=>org.apache.cassandra.db.marshal.Int32Type)'" in dct_table.as_cql_query().replace(" ", "")) +@greaterthanorequalcass30 class Materia3lizedViewMetadataTestSimple(BasicSharedKeyspaceUnitTestCase): def setUp(self): - if CASS_SERVER_VERSION < (3, 0): - raise unittest.SkipTest("Materialized views require Cassandra 3.0+") self.session.execute("CREATE TABLE {0}.{1} (pk int PRIMARY KEY, c int)".format(self.keyspace_name, self.function_table_name)) self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT c FROM {0}.{1} WHERE c IS NOT NULL PRIMARY KEY (pk, c)".format(self.keyspace_name, self.function_table_name)) @@ -2003,12 +1992,8 @@ def test_materialized_view_metadata_drop(self): self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT c FROM {0}.{1} WHERE c IS NOT NULL PRIMARY KEY (pk, c)".format(self.keyspace_name, self.function_table_name)) +@greaterthanorequalcass30 class MaterializedViewMetadataTestComplex(BasicSegregatedKeyspaceUnitTestCase): - def setUp(self): - if CASS_SERVER_VERSION < (3, 0): - raise unittest.SkipTest("Materialized views require Cassandra 3.0+") - super(MaterializedViewMetadataTestComplex, self).setUp() - def test_create_view_metadata(self): """ test to ensure that materialized view metadata is properly constructed diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 6cc70055f1..821a33d491 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -26,7 +26,7 @@ BatchStatement, BatchType, dict_factory, TraceUnavailable) from cassandra.cluster import Cluster, NoHostAvailable, ExecutionProfile from cassandra.policies import HostDistance, RoundRobinPolicy, WhiteListRoundRobinPolicy -from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, get_server_versions, \ +from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, \ greaterthanprotocolv3, MockLoggingHandler, get_supported_protocol_versions, local, get_cluster, setup_keyspace, \ USE_CASS_EXTERNAL, greaterthanorequalcass40 from tests import notwindows @@ -47,8 +47,6 @@ def setup_module(): ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True) setup_keyspace() - global CASS_SERVER_VERSION - CASS_SERVER_VERSION = get_server_versions()[0] class QueryTests(BasicSharedKeyspaceUnitTestCase): @@ -980,12 +978,9 @@ def test_inherit_first_rk_prepared_param(self): self.assertEqual(batch.routing_key, self.prepared.bind((1, 0)).routing_key) +@greaterthanorequalcass30 class MaterializedViewQueryTest(BasicSharedKeyspaceUnitTestCase): - def setUp(self): - if CASS_SERVER_VERSION < (3, 0): - raise unittest.SkipTest("Materialized views require Cassandra 3.0+") - def test_mv_filtering(self): """ Test to ensure that cql filtering where clauses are properly supported in the python driver. From 8e2a6a96b181e8ed60d6f23f05b5f70b76bf018a Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Tue, 30 Jan 2018 10:17:21 -0500 Subject: [PATCH 1343/2005] Removed MONKEY_PATCH_LOOP in tests --- .travis.yml | 3 ++- appveyor/run_test.ps1 | 5 +++-- build.yaml | 4 ++-- tests/__init__.py | 3 --- tests/unit/io/test_eventletreactor.py | 5 ++--- tests/unit/io/test_geventreactor.py | 5 ++--- tox.ini | 17 ++++++++++++++--- 7 files changed, 25 insertions(+), 17 deletions(-) diff --git a/.travis.yml b/.travis.yml index 35d124fa80..276b2297ba 100644 --- a/.travis.yml +++ b/.travis.yml @@ -33,4 +33,5 @@ script: exit 1 fi - tox - - tox -e patched_loops + - tox -e gevent_loop + - tox -e eventlet_loop diff --git a/appveyor/run_test.ps1 b/appveyor/run_test.ps1 index eae7115c5e..bdd0501ac6 100644 --- a/appveyor/run_test.ps1 +++ b/appveyor/run_test.ps1 @@ -17,10 +17,11 @@ if($env:ci_type -eq 'unit'){ echo "Running Unit tests" nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml .\tests\unit - $env:MONKEY_PATCH_LOOP=1 + $env:EVENT_LOOP_MANAGER="gevent" nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml .\tests\unit\io\test_geventreactor.py + $env:EVENT_LOOP_MANAGER="eventlet" nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml .\tests\unit\io\test_eventletreactor.py - Remove-Item $env:MONKEY_PATCH_LOOP + $env:EVENT_LOOP_MANAGER="asyncore" echo "uploading unit results" $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\unit_results.xml)) diff --git a/build.yaml b/build.yaml index 5c56ae1468..3739be0121 100644 --- a/build.yaml +++ b/build.yaml @@ -152,8 +152,8 @@ build: # it takes too much time for the whole matrix to build with cython if [[ $CYTHON == 'CYTHON' ]]; then EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml tests/unit/ || true - MONKEY_PATCH_LOOP=$EVENT_LOOP_MANAGER EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || true - MONKEY_PATCH_LOOP=$EVENT_LOOP_MANAGER EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || true + EVENT_LOOP_MANAGER=eventlet VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || true + EVENT_LOOP_MANAGER=gevent VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || true fi diff --git a/tests/__init__.py b/tests/__init__.py index 8ed3d797c4..b95d7f1ca7 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -89,8 +89,5 @@ def is_monkey_patched(): connection_class = None -MONKEY_PATCH_LOOP = os.getenv('MONKEY_PATCH_LOOP', False) - notwindows = unittest.skipUnless(not "Windows" in platform.system(), "This test is not adequate for windows") notpypy = unittest.skipUnless(not platform.python_implementation() == 'PyPy', "This tests is not suitable for pypy") -notmonkeypatch = unittest.skipUnless(MONKEY_PATCH_LOOP, "Skipping this test because monkey patching is required") diff --git a/tests/unit/io/test_eventletreactor.py b/tests/unit/io/test_eventletreactor.py index 5bf1b5792b..ce828cd6d8 100644 --- a/tests/unit/io/test_eventletreactor.py +++ b/tests/unit/io/test_eventletreactor.py @@ -19,7 +19,7 @@ import unittest # noqa from tests.unit.io.utils import TimerTestMixin -from tests import notpypy, MONKEY_PATCH_LOOP, notmonkeypatch +from tests import notpypy, EVENT_LOOP_MANAGER from eventlet import monkey_patch from mock import patch @@ -29,11 +29,10 @@ except ImportError: EventletConnection = None # noqa -skip_condition = EventletConnection is None or MONKEY_PATCH_LOOP != "eventlet" +skip_condition = EventletConnection is None or EVENT_LOOP_MANAGER != "eventlet" # There are some issues with some versions of pypy and eventlet @notpypy @unittest.skipIf(skip_condition, "Skipping the eventlet tests because it's not installed") -@notmonkeypatch class EventletTimerTest(TimerTestMixin, unittest.TestCase): connection_class = EventletConnection diff --git a/tests/unit/io/test_geventreactor.py b/tests/unit/io/test_geventreactor.py index 1b474a921a..ec64ce34c1 100644 --- a/tests/unit/io/test_geventreactor.py +++ b/tests/unit/io/test_geventreactor.py @@ -19,7 +19,7 @@ from tests.unit.io.utils import TimerTestMixin -from tests import MONKEY_PATCH_LOOP, notmonkeypatch +from tests import EVENT_LOOP_MANAGER try: from cassandra.io.geventreactor import GeventConnection import gevent.monkey @@ -29,9 +29,8 @@ from mock import patch -skip_condition = GeventConnection is None or MONKEY_PATCH_LOOP != "gevent" +skip_condition = GeventConnection is None or EVENT_LOOP_MANAGER != "gevent" @unittest.skipIf(skip_condition, "Skipping the gevent tests because it's not installed") -@notmonkeypatch class GeventTimerTest(TimerTestMixin, unittest.TestCase): connection_class = GeventConnection diff --git a/tox.ini b/tox.ini index 53526cccb4..86c0eafebe 100644 --- a/tox.ini +++ b/tox.ini @@ -20,14 +20,25 @@ changedir = {envtmpdir} commands = nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/ -[testenv:patched_loops] +[testenv:gevent_loop] deps = {[base]deps} gevent setenv = LIBEV_EMBED=0 CARES_EMBED=0 - MONKEY_PATCH_LOOP=1 + EVENT_LOOP_MANAGER=gevent changedir = {envtmpdir} commands = - nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/io/test_eventletreactor.py nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/io/test_geventreactor.py + + +[testenv:eventlet_loop] +deps = {[base]deps} + gevent + +setenv = LIBEV_EMBED=0 + CARES_EMBED=0 + EVENT_LOOP_MANAGER=eventlet +changedir = {envtmpdir} +commands = + nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/io/test_eventletreactor.py From 7da791d0ea311d445f324cc7e617c93f34b96a52 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 6 Feb 2018 10:29:13 -0500 Subject: [PATCH 1344/2005] fix CQL links and redirects map --- docs.yaml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/docs.yaml b/docs.yaml index 9250f15b1c..db6831e4d5 100644 --- a/docs.yaml +++ b/docs.yaml @@ -31,5 +31,8 @@ versions: ref: 3.6-doc - name: 3.5 ref: 3.5-doc --redirects: -- - \A\/(.*)/\Z: /\1.html +redirects: + - \A\/(.*)/\Z: /\1.html +rewrites: + - search: cassandra.apache.org/doc/cql3/CQL.html + replace: cassandra.apache.org/doc/cql3/CQL-3.0.html From 79551915e7cee2337dc2387482dd7d3305a731ee Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 6 Feb 2018 11:33:56 -0500 Subject: [PATCH 1345/2005] changelog entry for parent commit --- CHANGELOG.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index c1125d5857..552cac57cc 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,10 @@ +3.14.0 +====== + +Other +----- +* Fix Broken Links in Docs (PYTHON-916) + 3.13.0 ====== January 30, 2018 From ae85711dd05eb424ddff4645168a590cab6e6c28 Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Tue, 6 Feb 2018 12:12:44 -0500 Subject: [PATCH 1346/2005] Make CASSANDRA_VERSION a Version object --- CHANGELOG.rst | 4 ++ tests/integration/__init__.py | 63 ++++++++++--------- .../columns/test_container_columns.py | 5 +- .../cqlengine/columns/test_validation.py | 13 ++-- .../cqlengine/management/test_management.py | 4 +- .../cqlengine/query/test_queryset.py | 11 ++-- .../cqlengine/test_lwt_conditional.py | 6 +- tests/integration/cqlengine/test_ttl.py | 9 +-- .../integration/simulacron/test_connection.py | 7 +-- tests/integration/simulacron/test_policies.py | 10 +-- tests/integration/simulacron/utils.py | 10 +-- tests/integration/standard/test_cluster.py | 7 ++- tests/integration/standard/test_metadata.py | 20 +++--- 13 files changed, 89 insertions(+), 80 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 552cac57cc..7fe4e516b8 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,6 +4,10 @@ Other ----- * Fix Broken Links in Docs (PYTHON-916) +* Reevaluate MONKEY_PATCH_LOOP in test codebase (PYTHON-903) +* Remove CASS_SERVER_VERSION and replace it for CASSANDRA_VERSION in tests (PYTHON-910) +* Refactor CASSANDRA_VERSION to a some kind of version object (PYTHON-915) + 3.13.0 ====== diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index a8e0b29f9f..9f2f4be59b 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -104,11 +104,11 @@ def _tuple_version(version_string): if(cython_env == 'True'): VERIFY_CYTHON = True -default_cassandra_version = '2.2.0' +default_cassandra_version = Version('2.2.0') CASSANDRA_IP = os.getenv('CASSANDRA_IP', '127.0.0.1') CASSANDRA_DIR = os.getenv('CASSANDRA_DIR', None) -CASSANDRA_VERSION = os.getenv('CASSANDRA_VERSION', default_cassandra_version) +CASSANDRA_VERSION = Version(os.getenv('CASSANDRA_VERSION', default_cassandra_version)) CCM_KWARGS = {} if CASSANDRA_DIR: @@ -141,15 +141,14 @@ def set_default_beta_flag_true(): def get_default_protocol(): - version = Version(CASSANDRA_VERSION) - if version >= Version('4.0'): + if CASSANDRA_VERSION >= Version('4.0'): set_default_beta_flag_true() return 5 - elif version >= Version('2.2'): + elif CASSANDRA_VERSION >= Version('2.2'): return 4 - elif version >= Version('2.1'): + elif CASSANDRA_VERSION >= Version('2.1'): return 3 - elif version >= Version('2.0'): + elif CASSANDRA_VERSION >= Version('2.0'): return 2 else: return 1 @@ -164,18 +163,17 @@ def get_supported_protocol_versions(): 3.X -> 4, 3 3.10 -> 5(beta),4,3 ` """ - version = Version(CASSANDRA_VERSION) - if version >= Version('4.0'): + if CASSANDRA_VERSION >= Version('4.0'): return (3, 4, 5) - elif version >= Version('3.10'): + elif CASSANDRA_VERSION >= Version('3.10'): return (3, 4) - elif version >= Version('3.0'): + elif CASSANDRA_VERSION >= Version('3.0'): return (3, 4) - elif version >= Version('2.2'): + elif CASSANDRA_VERSION >= Version('2.2'): return (1, 2, 3, 4) - elif version >= Version('2.1'): + elif CASSANDRA_VERSION >= Version('2.1'): return (1, 2, 3) - elif version >= Version('2.0'): + elif CASSANDRA_VERSION >= Version('2.0'): return (1, 2) else: return (1, ) @@ -187,7 +185,7 @@ def get_unsupported_lower_protocol(): supported by the version of C* running """ - if Version(CASSANDRA_VERSION) >= Version('3.0'): + if CASSANDRA_VERSION >= Version('3.0'): return 2 else: return None @@ -199,11 +197,11 @@ def get_unsupported_upper_protocol(): supported by the version of C* running """ - if Version(CASSANDRA_VERSION) >= Version('2.2'): + if CASSANDRA_VERSION >= Version('2.2'): return None - if Version(CASSANDRA_VERSION) >= Version('2.1'): + if CASSANDRA_VERSION >= Version('2.1'): return 4 - elif Version(CASSANDRA_VERSION) >= Version('2.0'): + elif CASSANDRA_VERSION >= Version('2.0'): return 3 else: return None @@ -219,20 +217,20 @@ def get_unsupported_upper_protocol(): greaterthanprotocolv3 = unittest.skipUnless(PROTOCOL_VERSION >= 4, 'Protocol versions less than 4 are not supported') protocolv5 = unittest.skipUnless(5 in get_supported_protocol_versions(), 'Protocol versions less than 5 are not supported') -greaterthancass20 = unittest.skipUnless(CASSANDRA_VERSION >= '2.1', 'Cassandra version 2.1 or greater required') -greaterthancass21 = unittest.skipUnless(CASSANDRA_VERSION >= '2.2', 'Cassandra version 2.2 or greater required') -greaterthanorequalcass30 = unittest.skipUnless(CASSANDRA_VERSION >= '3.0', 'Cassandra version 3.0 or greater required') -greaterthanorequalcass36 = unittest.skipUnless(CASSANDRA_VERSION >= '3.6', 'Cassandra version 3.6 or greater required') -greaterthanorequalcass3_10 = unittest.skipUnless(CASSANDRA_VERSION >= '3.10', 'Cassandra version 3.10 or greater required') -greaterthanorequalcass3_11 = unittest.skipUnless(CASSANDRA_VERSION >= '3.11', 'Cassandra version 3.10 or greater required') -greaterthanorequalcass40 = unittest.skipUnless(CASSANDRA_VERSION >= '4.0', 'Cassandra version 4.0 or greater required') -lessthanorequalcass40 = unittest.skipIf(CASSANDRA_VERSION >= '4.0', 'Cassandra version 4.0 or greater required') -lessthancass30 = unittest.skipUnless(CASSANDRA_VERSION < '3.0', 'Cassandra version less then 3.0 required') +greaterthancass20 = unittest.skipUnless(CASSANDRA_VERSION >= Version('2.1'), 'Cassandra version 2.1 or greater required') +greaterthancass21 = unittest.skipUnless(CASSANDRA_VERSION >= Version('2.2'), 'Cassandra version 2.2 or greater required') +greaterthanorequalcass30 = unittest.skipUnless(CASSANDRA_VERSION >= Version('3.0'), 'Cassandra version 3.0 or greater required') +greaterthanorequalcass36 = unittest.skipUnless(CASSANDRA_VERSION >= Version('3.6'), 'Cassandra version 3.6 or greater required') +greaterthanorequalcass3_10 = unittest.skipUnless(CASSANDRA_VERSION >= Version('3.10'), 'Cassandra version 3.10 or greater required') +greaterthanorequalcass3_11 = unittest.skipUnless(CASSANDRA_VERSION >= Version('3.11'), 'Cassandra version 3.10 or greater required') +greaterthanorequalcass40 = unittest.skipUnless(CASSANDRA_VERSION >= Version('4.0'), 'Cassandra version 4.0 or greater required') +lessthanorequalcass40 = unittest.skipIf(CASSANDRA_VERSION >= Version('4.0'), 'Cassandra version 4.0 or greater required') +lessthancass30 = unittest.skipUnless(CASSANDRA_VERSION < Version('3.0'), 'Cassandra version less then 3.0 required') pypy = unittest.skipUnless(platform.python_implementation() == "PyPy", "Test is skipped unless it's on PyPy") notpy3 = unittest.skipIf(sys.version_info >= (3, 0), "Test not applicable for Python 3.x runtime") requiresmallclockgranularity = unittest.skipIf("Windows" in platform.system() or "asyncore" in EVENT_LOOP_MANAGER, "This test is not suitible for environments with large clock granularity") -requiressimulacron = unittest.skipIf(SIMULACRON_JAR is None or CASSANDRA_VERSION < "2.1", "Simulacron jar hasn't been specified or C* version is 2.0") +requiressimulacron = unittest.skipIf(SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"), "Simulacron jar hasn't been specified or C* version is 2.0") def wait_for_node_socket(node, timeout): @@ -315,8 +313,11 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[], se set_default_cass_ip() if ccm_options is None: - ccm_options = CCM_KWARGS + ccm_options = CCM_KWARGS.copy() + cassandra_version = ccm_options.get('version', CASSANDRA_VERSION) + if 'version' in ccm_options: + ccm_options['version'] = ccm_options['version'].base_version global CCM_CLUSTER if USE_CASS_EXTERNAL: @@ -349,9 +350,9 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[], se log.debug("Creating new CCM cluster, {0}, with args {1}".format(cluster_name, ccm_options)) CCM_CLUSTER = CCMCluster(path, cluster_name, **ccm_options) CCM_CLUSTER.set_configuration_options({'start_native_transport': True}) - if cassandra_version >= '2.2': + if cassandra_version >= Version('2.2'): CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True}) - if cassandra_version >= '3.0': + if cassandra_version >= Version('3.0'): CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True}) common.switch_cluster(path, cluster_name) CCM_CLUSTER.set_configuration_options(configuration_options) diff --git a/tests/integration/cqlengine/columns/test_container_columns.py b/tests/integration/cqlengine/columns/test_container_columns.py index ad653e1db3..2acf36457b 100644 --- a/tests/integration/cqlengine/columns/test_container_columns.py +++ b/tests/integration/cqlengine/columns/test_container_columns.py @@ -19,6 +19,7 @@ import sys import traceback from uuid import uuid4 +from packaging.version import Version from cassandra import WriteTimeout, OperationTimedOut import cassandra.cqlengine.columns as columns @@ -559,7 +560,7 @@ class TestTupleColumn(BaseCassEngTestCase): @classmethod def setUpClass(cls): # Skip annotations don't seem to skip class level teradown and setup methods - if(CASSANDRA_VERSION >= '2.1'): + if CASSANDRA_VERSION >= Version('2.1'): drop_table(TestTupleModel) sync_table(TestTupleModel) @@ -759,7 +760,7 @@ class TestNestedType(BaseCassEngTestCase): @classmethod def setUpClass(cls): # Skip annotations don't seem to skip class level teradown and setup methods - if(CASSANDRA_VERSION >= '2.1'): + if CASSANDRA_VERSION >= Version('2.1'): drop_table(TestNestedModel) sync_table(TestNestedModel) diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index b8c432be3f..69682fd68d 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -21,6 +21,7 @@ from datetime import datetime, timedelta, date, tzinfo, time from decimal import Decimal as D from uuid import uuid4, uuid1 +from packaging.version import Version from cassandra import InvalidRequest from cassandra.cqlengine.columns import (TimeUUID, Ascii, Text, Integer, BigInt, @@ -194,7 +195,7 @@ def test_varint_io(self): class DataType(): @classmethod def setUpClass(cls): - if PROTOCOL_VERSION < 4 or CASSANDRA_VERSION < "3.0": + if PROTOCOL_VERSION < 4 or CASSANDRA_VERSION < Version("3.0"): return class DataTypeTest(Model): @@ -206,12 +207,12 @@ class DataTypeTest(Model): @classmethod def tearDownClass(cls): - if PROTOCOL_VERSION < 4 or CASSANDRA_VERSION < "3.0": + if PROTOCOL_VERSION < 4 or CASSANDRA_VERSION < Version("3.0"): return drop_table(cls.model_class) def setUp(self): - if PROTOCOL_VERSION < 4 or CASSANDRA_VERSION < "3.0": + if PROTOCOL_VERSION < 4 or CASSANDRA_VERSION < Version("3.0"): raise unittest.SkipTest("Protocol v4 datatypes " "require native protocol 4+ and C* version >=3.0, " "currently using protocol {0} and C* version {1}". @@ -349,7 +350,7 @@ class TestDuration(DataType, BaseCassEngTestCase): @classmethod def setUpClass(cls): # setUpClass is executed despite the whole class being skipped - if CASSANDRA_VERSION >= "3.10": + if CASSANDRA_VERSION >= Version("3.10"): cls.db_klass, cls.python_klass = ( Duration, util.Duration @@ -363,7 +364,7 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - if CASSANDRA_VERSION >= "3.10": + if CASSANDRA_VERSION >= Version("3.10"): super(TestDuration, cls).tearDownClass() @@ -386,7 +387,7 @@ class UserModel(Model): class TestUDT(DataType, BaseCassEngTestCase): @classmethod def setUpClass(cls): - if PROTOCOL_VERSION < 4 or CASSANDRA_VERSION < "3.0": + if PROTOCOL_VERSION < 4 or CASSANDRA_VERSION < Version("3.0"): return cls.db_klass, cls.python_klass = UserDefinedType, User diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index b4c7a6140f..6b91760fdc 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -18,6 +18,8 @@ import mock import logging +from packaging.version import Version + from cassandra.cqlengine.connection import get_session, get_cluster from cassandra.cqlengine import CQLEngineException from cassandra.cqlengine import management @@ -358,7 +360,7 @@ def test_sync_warnings(self): sync_table(BaseInconsistent) sync_table(ChangedInconsistent) self.assertTrue('differing from the model type' in mock_handler.messages.get('warning')[0]) - if CASSANDRA_VERSION >= '2.1': + if CASSANDRA_VERSION >= Version('2.1'): sync_type(DEFAULT_KEYSPACE, BaseInconsistentType) mock_handler.reset() sync_type(DEFAULT_KEYSPACE, ChangedInconsistentType) diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 83586d1c30..e0bfb2fa7a 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -19,8 +19,8 @@ import unittest # noqa from datetime import datetime -import time -from uuid import uuid1, uuid4 +from uuid import uuid4 +from packaging.version import Version import uuid from cassandra.cluster import Session @@ -41,7 +41,8 @@ from cassandra.cqlengine import operators from cassandra.util import uuid_from_time from cassandra.cqlengine.connection import get_session -from tests.integration import PROTOCOL_VERSION, CASSANDRA_VERSION, greaterthancass20, greaterthancass21 +from tests.integration import PROTOCOL_VERSION, CASSANDRA_VERSION, greaterthancass20, greaterthancass21, \ + greaterthanorequalcass30 from tests.integration.cqlengine import execute_count @@ -351,7 +352,7 @@ def setUpClass(cls): IndexedTestModel.objects.create(test_id=11, attempt_id=3, description='try12', expected_result=75, test_result=45) - if(CASSANDRA_VERSION >= '2.1'): + if CASSANDRA_VERSION >= Version('2.1'): drop_table(IndexedCollectionsTestModel) sync_table(IndexedCollectionsTestModel) IndexedCollectionsTestModel.objects.create(test_id=12, attempt_id=3, description='list12', expected_result=75, @@ -763,7 +764,7 @@ def test_delete_without_any_where_args(self): with self.assertRaises(query.QueryException): TestModel.objects(attempt_id=0).delete() - @unittest.skipIf(CASSANDRA_VERSION < '3.0', "range deletion was introduce in C* 3.0, currently running {0}".format(CASSANDRA_VERSION)) + @greaterthanorequalcass30 @execute_count(18) def test_range_deletion(self): """ diff --git a/tests/integration/cqlengine/test_lwt_conditional.py b/tests/integration/cqlengine/test_lwt_conditional.py index efd3b33603..accaf7a9aa 100644 --- a/tests/integration/cqlengine/test_lwt_conditional.py +++ b/tests/integration/cqlengine/test_lwt_conditional.py @@ -27,7 +27,7 @@ from cassandra.cqlengine.statements import ConditionalClause from tests.integration.cqlengine.base import BaseCassEngTestCase -from tests.integration import CASSANDRA_VERSION, greaterthancass20 +from tests.integration import greaterthancass20 class TestConditionalModel(Model): @@ -36,7 +36,7 @@ class TestConditionalModel(Model): text = columns.Text(required=False) -@unittest.skipUnless(CASSANDRA_VERSION >= '2.0.0', "conditionals only supported on cassandra 2.0 or higher") +@greaterthancass20 class TestConditional(BaseCassEngTestCase): @classmethod @@ -154,7 +154,6 @@ def test_delete_conditional(self): TestConditionalModel.objects(id=t.id).iff(count=5).delete() self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 0) - @greaterthancass20 def test_delete_lwt_ne(self): """ Test to ensure that deletes using IF and not equals are honored correctly @@ -182,7 +181,6 @@ def test_delete_lwt_ne(self): TestConditionalModel.objects(id=t.id).iff(count__ne=2).delete() self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 0) - @greaterthancass20 def test_update_lwt_ne(self): """ Test to ensure that update using IF and not equals are honored correctly diff --git a/tests/integration/cqlengine/test_ttl.py b/tests/integration/cqlengine/test_ttl.py index fd6a01440b..a9aa32db94 100644 --- a/tests/integration/cqlengine/test_ttl.py +++ b/tests/integration/cqlengine/test_ttl.py @@ -18,6 +18,8 @@ except ImportError: import unittest # noqa +from packaging.version import Version + from cassandra import InvalidRequest from cassandra.cqlengine.management import sync_table, drop_table from tests.integration.cqlengine.base import BaseCassEngTestCase @@ -26,7 +28,7 @@ from cassandra.cqlengine import columns import mock from cassandra.cqlengine.connection import get_session -from tests.integration import CASSANDRA_VERSION +from tests.integration import CASSANDRA_VERSION, greaterthancass20 class TestTTLModel(Model): @@ -59,14 +61,14 @@ class BaseDefaultTTLTest(BaseCassEngTestCase): @classmethod def setUpClass(cls): - if CASSANDRA_VERSION >= '2.0': + if CASSANDRA_VERSION >= Version('2.0'): super(BaseDefaultTTLTest, cls).setUpClass() sync_table(TestDefaultTTLModel) sync_table(TestTTLModel) @classmethod def tearDownClass(cls): - if CASSANDRA_VERSION >= '2.0': + if CASSANDRA_VERSION >= Version('2.0'): super(BaseDefaultTTLTest, cls).tearDownClass() drop_table(TestDefaultTTLModel) drop_table(TestTTLModel) @@ -157,7 +159,6 @@ def test_ttl_included_with_blind_update(self): self.assertIn("USING TTL", query) -@unittest.skipIf(CASSANDRA_VERSION < '2.0', "default_time_to_Live was introduce in C* 2.0, currently running {0}".format(CASSANDRA_VERSION)) class TTLDefaultTest(BaseDefaultTTLTest): def get_default_ttl(self, table_name): session = get_session() diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index c5059d6ff1..6fb6e53b04 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -28,8 +28,7 @@ _Scheduler, NoHostAvailable) from cassandra.policies import HostStateListener, RoundRobinPolicy from cassandra.io.asyncorereactor import AsyncoreConnection -from tests.integration import (CASSANDRA_VERSION, PROTOCOL_VERSION, - requiressimulacron) +from tests.integration import (PROTOCOL_VERSION, requiressimulacron) from tests.integration.util import assert_quiescent_pool_state from tests.integration.simulacron import SimulacronBase from tests.integration.simulacron.utils import (NO_THEN, PrimeOptions, @@ -97,7 +96,7 @@ def test_heart_beat_timeout(self): idle_heartbeat_timeout = 5 idle_heartbeat_interval = 1 - start_and_prime_cluster_defaults(number_of_dcs, nodes_per_dc, CASSANDRA_VERSION) + start_and_prime_cluster_defaults(number_of_dcs, nodes_per_dc) listener = TrackDownListener() executor = ThreadTracker(max_workers=8) @@ -231,7 +230,7 @@ def test_retry_after_defunct(self): idle_heartbeat_timeout = 1 idle_heartbeat_interval = 5 - simulacron_cluster = start_and_prime_cluster_defaults(number_of_dcs, nodes_per_dc, CASSANDRA_VERSION) + simulacron_cluster = start_and_prime_cluster_defaults(number_of_dcs, nodes_per_dc) dc_ids = sorted(simulacron_cluster.data_center_ids) last_host = dc_ids.pop() diff --git a/tests/integration/simulacron/test_policies.py b/tests/integration/simulacron/test_policies.py index bd52574910..26bd437ffc 100644 --- a/tests/integration/simulacron/test_policies.py +++ b/tests/integration/simulacron/test_policies.py @@ -27,7 +27,7 @@ stop_simulacron, NO_THEN, clear_queries from itertools import count - +from packaging.version import Version class BadRoundRobinPolicy(RoundRobinPolicy): @@ -48,7 +48,7 @@ class SpecExecTest(unittest.TestCase): @classmethod def setUpClass(cls): - if SIMULACRON_JAR is None or CASSANDRA_VERSION < "2.1": + if SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"): return start_and_prime_singledc() @@ -73,7 +73,7 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - if SIMULACRON_JAR is None or CASSANDRA_VERSION < "2.1": + if SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"): return cls.cluster.shutdown() @@ -249,13 +249,13 @@ def reset_counters(self): class RetryPolicyTests(unittest.TestCase): @classmethod def setUpClass(cls): - if SIMULACRON_JAR is None or CASSANDRA_VERSION < "2.1": + if SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"): return start_and_prime_singledc() @classmethod def tearDownClass(cls): - if SIMULACRON_JAR is None or CASSANDRA_VERSION < "2.1": + if SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"): return stop_simulacron() diff --git a/tests/integration/simulacron/utils.py b/tests/integration/simulacron/utils.py index 4ee793831e..5ec53831fe 100644 --- a/tests/integration/simulacron/utils.py +++ b/tests/integration/simulacron/utils.py @@ -115,8 +115,8 @@ def prime_server_versions(self): This information has to be primed for the test harness to run """ system_local_row = {} - system_local_row["cql_version"] = CASSANDRA_VERSION - system_local_row["release_version"] = CASSANDRA_VERSION + "-SNAPSHOT" + system_local_row["cql_version"] = CASSANDRA_VERSION.base_version + system_local_row["release_version"] = CASSANDRA_VERSION.base_version + "-SNAPSHOT" column_types = {"cql_version": "ascii", "release_version": "ascii"} system_local = PrimeQuery("SELECT cql_version, release_version FROM system.local", rows=[system_local_row], @@ -308,7 +308,7 @@ def prime_driver_defaults(): client_simulacron.prime_server_versions() -def prime_cluster(data_centers="3", version=None, cluster_name=DEFAULT_CLUSTER): +def prime_cluster(data_centers="3", version=CASSANDRA_VERSION, cluster_name=DEFAULT_CLUSTER): """ Creates a new cluster in the simulacron server :param cluster_name: name of the cluster @@ -316,7 +316,7 @@ def prime_cluster(data_centers="3", version=None, cluster_name=DEFAULT_CLUSTER): datacenters of 2 nodes and three nodes :param version: C* version """ - version = version or CASSANDRA_VERSION + version = version or CASSANDRA_VERSION.base_version cluster_query = ClusterQuery(cluster_name, version, data_centers) client_simulacron = SimulacronClient() response = client_simulacron.submit_request(cluster_query) @@ -332,7 +332,7 @@ def start_and_prime_singledc(cluster_name=DEFAULT_CLUSTER): return start_and_prime_cluster_defaults(number_of_dc=1, nodes_per_dc=3, cluster_name=cluster_name) -def start_and_prime_cluster_defaults(number_of_dc=1, nodes_per_dc=3, version=None, cluster_name=DEFAULT_CLUSTER): +def start_and_prime_cluster_defaults(number_of_dc=1, nodes_per_dc=3, version=CASSANDRA_VERSION, cluster_name=DEFAULT_CLUSTER): """ :param number_of_dc: number of datacentes :param nodes_per_dc: number of nodes per datacenter diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 8857f94389..0dc33c6862 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -24,6 +24,7 @@ from uuid import uuid4 import logging import warnings +from packaging.version import Version import cassandra from cassandra.cluster import Cluster, Session, NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT @@ -237,13 +238,13 @@ def test_protocol_negotiation(self): updated_protocol_version = session._protocol_version updated_cluster_version = cluster.protocol_version # Make sure the correct protocol was selected by default - if CASSANDRA_VERSION >= '2.2': + if CASSANDRA_VERSION >= Version('2.2'): self.assertEqual(updated_protocol_version, 4) self.assertEqual(updated_cluster_version, 4) - elif CASSANDRA_VERSION >= '2.1': + elif CASSANDRA_VERSION >= Version('2.1'): self.assertEqual(updated_protocol_version, 3) self.assertEqual(updated_cluster_version, 3) - elif CASSANDRA_VERSION >= '2.0': + elif CASSANDRA_VERSION >= Version('2.0'): self.assertEqual(updated_protocol_version, 2) self.assertEqual(updated_cluster_version, 2) else: diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 5d51fd1c3e..5057fc2fff 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -21,6 +21,7 @@ import six import sys import time +from packaging.version import Version from mock import Mock, patch from cassandra import AlreadyExists, SignatureDescriptor, UserFunctionDescriptor, UserAggregateDescriptor @@ -76,7 +77,7 @@ def test_host_release_version(self): @test_category metadata """ for host in self.cluster.metadata.all_hosts(): - self.assertTrue(host.release_version.startswith(CASSANDRA_VERSION)) + self.assertTrue(host.release_version.startswith(CASSANDRA_VERSION.base_version)) @local class MetaDataRemovalTest(unittest.TestCase): @@ -206,7 +207,7 @@ def test_basic_table_meta_properties(self): self.assertEqual([u'a', u'b', u'c'], sorted(tablemeta.columns.keys())) cc = self.cluster.control_connection._connection - parser = get_schema_parser(cc, CASSANDRA_VERSION, 1) + parser = get_schema_parser(cc, CASSANDRA_VERSION.base_version, 1) for option in tablemeta.options: self.assertIn(option, parser.recognized_table_options) @@ -426,11 +427,11 @@ def test_collection_indexes(self): % (self.keyspace_name, self.function_table_name)) tablemeta = self.get_table_metadata() - target = ' (b)' if CASSANDRA_VERSION < "3.0" else 'values(b))' # explicit values in C* 3+ + target = ' (b)' if CASSANDRA_VERSION < Version("3.0") else 'values(b))' # explicit values in C* 3+ self.assertIn(target, tablemeta.export_as_string()) # test full indexes on frozen collections, if available - if CASSANDRA_VERSION >= "2.1.3": + if CASSANDRA_VERSION >= Version("2.1.3"): self.session.execute("DROP TABLE %s.%s" % (self.keyspace_name, self.function_table_name)) self.session.execute("CREATE TABLE %s.%s (a int PRIMARY KEY, b frozen>)" % (self.keyspace_name, self.function_table_name)) @@ -445,7 +446,7 @@ def test_compression_disabled(self): create_statement += " WITH compression = {}" self.session.execute(create_statement) tablemeta = self.get_table_metadata() - expected = "compression = {}" if CASSANDRA_VERSION < "3.0" else "compression = {'enabled': 'false'}" + expected = "compression = {}" if CASSANDRA_VERSION < Version("3.0") else "compression = {'enabled': 'false'}" self.assertIn(expected, tablemeta.export_as_string()) def test_non_size_tiered_compaction(self): @@ -706,6 +707,7 @@ def test_refresh_user_type_metadata(self): cluster2.shutdown() + @greaterthancass20 def test_refresh_user_type_metadata_proto_2(self): """ Test to insure that protocol v1/v2 surface UDT metadata changes @@ -717,7 +719,7 @@ def test_refresh_user_type_metadata_proto_2(self): @test_category metadata """ supported_versions = get_supported_protocol_versions() - if 2 not in supported_versions or CASSANDRA_VERSION < "2.1": # 1 and 2 were dropped in the same version + if 2 not in supported_versions: # 1 and 2 were dropped in the same version raise unittest.SkipTest("Protocol versions 1 and 2 are not supported in Cassandra version ".format(CASSANDRA_VERSION)) for protocol_version in (1, 2): @@ -990,14 +992,12 @@ def assert_startswith_diff(self, received, prefix): lineterm='')) self.fail(diff_string) + @greaterthancass20 def test_export_keyspace_schema_udts(self): """ Test udt exports """ - if CASSANDRA_VERSION < "2.1": - raise unittest.SkipTest('UDTs were introduced in Cassandra 2.1') - if PROTOCOL_VERSION < 3: raise unittest.SkipTest( "Protocol 3.0+ is required for UDT change events, currently testing against %r" @@ -1820,7 +1820,7 @@ def setup_class(cls): cls.session.execute("CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}" % cls.keyspace_name) cls.session.set_keyspace(cls.keyspace_name) connection = cls.cluster.control_connection._connection - cls.parser_class = get_schema_parser(connection, CASSANDRA_VERSION, timeout=20).__class__ + cls.parser_class = get_schema_parser(connection, CASSANDRA_VERSION.base_version, timeout=20).__class__ cls.cluster.control_connection.reconnect = Mock() @classmethod From 8a7a4b4e2c6bad9c83ac0bd62fc800a93a3482a4 Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Fri, 9 Feb 2018 09:48:29 -0500 Subject: [PATCH 1347/2005] Move VERIFY_CYTHON from integration test level to all the tests level --- tests/__init__.py | 10 ++++++++++ tests/integration/__init__.py | 11 +---------- .../standard/test_cython_protocol_handlers.py | 3 ++- tests/unit/cython/utils.py | 2 +- 4 files changed, 14 insertions(+), 12 deletions(-) diff --git a/tests/__init__.py b/tests/__init__.py index b95d7f1ca7..0fdc646e66 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -89,5 +89,15 @@ def is_monkey_patched(): connection_class = None +# If set to to true this will force the Cython tests to run regardless of whether they are installed +cython_env = os.getenv('VERIFY_CYTHON', "False") + + +VERIFY_CYTHON = False + +if(cython_env == 'True'): + VERIFY_CYTHON = True + + notwindows = unittest.skipUnless(not "Windows" in platform.system(), "This test is not adequate for windows") notpypy = unittest.skipUnless(not platform.python_implementation() == 'PyPy', "This tests is not suitable for pypy") diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 9f2f4be59b..de8f11e30f 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -95,16 +95,7 @@ def _tuple_version(version_string): KEEP_TEST_CLUSTER = bool(os.getenv('KEEP_TEST_CLUSTER', False)) SIMULACRON_JAR = os.getenv('SIMULACRON_JAR', None) -# If set to to true this will force the Cython tests to run regardless of whether they are installed -cython_env = os.getenv('VERIFY_CYTHON', "False") - - -VERIFY_CYTHON = False - -if(cython_env == 'True'): - VERIFY_CYTHON = True - -default_cassandra_version = Version('2.2.0') +default_cassandra_version = Version('3.11') CASSANDRA_IP = os.getenv('CASSANDRA_IP', '127.0.0.1') CASSANDRA_DIR = os.getenv('CASSANDRA_DIR', None) diff --git a/tests/integration/standard/test_cython_protocol_handlers.py b/tests/integration/standard/test_cython_protocol_handlers.py index 28c3b0f2d8..593dcba50e 100644 --- a/tests/integration/standard/test_cython_protocol_handlers.py +++ b/tests/integration/standard/test_cython_protocol_handlers.py @@ -14,7 +14,8 @@ from cassandra.concurrent import execute_concurrent_with_args from cassandra.protocol import ProtocolHandler, LazyProtocolHandler, NumpyProtocolHandler from cassandra.cython_deps import HAVE_CYTHON, HAVE_NUMPY -from tests.integration import use_singledc, PROTOCOL_VERSION, notprotocolv1, drop_keyspace_shutdown_cluster, VERIFY_CYTHON, BasicSharedKeyspaceUnitTestCase, execute_with_retry_tolerant, greaterthancass21 +from tests import VERIFY_CYTHON +from tests.integration import use_singledc, PROTOCOL_VERSION, notprotocolv1, drop_keyspace_shutdown_cluster, BasicSharedKeyspaceUnitTestCase, execute_with_retry_tolerant, greaterthancass21 from tests.integration.datatype_utils import update_datatypes from tests.integration.standard.utils import ( create_table_with_all_types, get_all_primitive_params, get_primitive_datatypes) diff --git a/tests/unit/cython/utils.py b/tests/unit/cython/utils.py index ee5cb1a9c5..7f8be22ce0 100644 --- a/tests/unit/cython/utils.py +++ b/tests/unit/cython/utils.py @@ -14,7 +14,7 @@ from cassandra.cython_deps import HAVE_CYTHON, HAVE_NUMPY try: - from tests.integration import VERIFY_CYTHON + from tests import VERIFY_CYTHON except ImportError: VERIFY_CYTHON = False From 3bf46dd9a6136ff025a044e4921a482c5a2e1154 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 13 Feb 2018 11:55:25 -0500 Subject: [PATCH 1348/2005] docs fixes for group_keys_by_replica --- cassandra/metadata.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index ccbc571c20..bb4626ad9e 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2576,15 +2576,17 @@ def _cql_from_cass_type(cass_type): def group_keys_by_replica(session, keyspace, table, keys): """ - Returns a :class:`collections.defaultdict` with the keys grouped per host. This can be + Returns a :class:`dict` with the keys grouped per host. This can be used to more accurately group by IN clause or to batch the keys per host. If a valid replica is not found for a particular key it will be grouped under :class:`~.NO_VALID_REPLICA` Example usage:: - result = group_keys_by_host(session, "system", "peers", - ("peer", "data_center"), (("127.0.0.1", ), ("127.0.0.2", ))) + result = group_keys_by_host( + session, "system", "peers", + (("127.0.0.1", ), ("127.0.0.2", )) + ) """ cluster = session.cluster From 37cbeb8e2b557b1be1d36ed46fead80c56d77d80 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 13 Feb 2018 11:56:23 -0500 Subject: [PATCH 1349/2005] cache distance function for speed & readability --- cassandra/metadata.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index bb4626ad9e..b8f33efa08 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2594,6 +2594,8 @@ def group_keys_by_replica(session, keyspace, table, keys): serializers = list(types._cqltypes[partition_key.cql_type] for partition_key in partition_keys) keys_per_host = defaultdict(list) + distance = cluster._default_load_balancing_policy.distance + for key in keys: serialized_key = [serializer.serialize(pk, cluster.protocol_version) for serializer, pk in zip(serializers, key)] @@ -2604,8 +2606,7 @@ def group_keys_by_replica(session, keyspace, table, keys): all_replicas = cluster.metadata.get_replicas(keyspace, routing_key) # First check if there are local replicas valid_replicas = [host for host in all_replicas if - host.is_up and cluster._default_load_balancing_policy.distance( - host) == HostDistance.LOCAL] + host.is_up and distance(host) == HostDistance.LOCAL] if not valid_replicas: valid_replicas = [host for host in all_replicas if host.is_up] From a30a095c455e0ca15f47f3bec8101fdf48e80282 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 13 Feb 2018 11:57:44 -0500 Subject: [PATCH 1350/2005] indentation cleanup --- cassandra/metadata.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index b8f33efa08..eddf2b9033 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2598,7 +2598,7 @@ def group_keys_by_replica(session, keyspace, table, keys): for key in keys: serialized_key = [serializer.serialize(pk, cluster.protocol_version) - for serializer, pk in zip(serializers, key)] + for serializer, pk in zip(serializers, key)] if len(serialized_key) == 1: routing_key = serialized_key[0] else: @@ -2606,7 +2606,7 @@ def group_keys_by_replica(session, keyspace, table, keys): all_replicas = cluster.metadata.get_replicas(keyspace, routing_key) # First check if there are local replicas valid_replicas = [host for host in all_replicas if - host.is_up and distance(host) == HostDistance.LOCAL] + host.is_up and distance(host) == HostDistance.LOCAL] if not valid_replicas: valid_replicas = [host for host in all_replicas if host.is_up] From ecd0376eda31d1c267e57851751f3201cf68696b Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 13 Feb 2018 11:59:18 -0500 Subject: [PATCH 1351/2005] always add to a random replica Tested with `ccm create test-endpoints -n 5 -v 3.11.1` and the script below. Against the previous implementation resulted in length of each key looking something like this: {: 60188, : 19878, : 19934} The new implementation's results look more like this: {: 19866, : 20224, : 19869, : 20009, : 20032} What happens in the test (5 nodes, RF=3) is this: group_keys_by_replica checks C* keys and adds them, preferring hosts that are already there, until there are 3 hosts in keys_per_host. Once those 3 hosts are chosen, no new hosts will be added -- with RF=3 across 5 nodes, every key is guaranteed to map to >= 1 of the 3 hosts already in the map. The new implementation distributes values more evenly. ``` from cassandra.cluster import Cluster from cassandra.concurrent import execute_concurrent_with_args from cassandra.metadata import group_keys_by_replica import contextlib import logging import pprint from random import randint from textwrap import indent import time log = logging.getLogger(__name__) log.setLevel('DEBUG') handler = logging.StreamHandler() handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(name)s: %(message)s")) log.addHandler(handler) @contextlib.contextmanager def debug_time(message): log.debug('START: {}'.format(message)) start_time = time.time() yield elapsed = time.time() - start_time log.debug( 'END ({elapsed:.2f}s): {message}'.format( elapsed=elapsed, message=message ) ) LOAD = True MIN, MAX = -2 ** 31, (2 ** 31) - 1 c = Cluster() s = c.connect() s.execute("CREATE KEYSPACE IF NOT EXISTS test_ks " "WITH replication = {" " 'class': 'SimpleStrategy', " " 'replication_factor': '3'" "}") s.execute("CREATE TABLE IF NOT EXISTS test_ks.test_tab (" " partition int, " " cluster int, " " PRIMARY KEY (partition, cluster)" ")") params = tuple(((randint(MIN, MAX), randint(MIN, MAX)) for _ in range(100000))) p = s.prepare("INSERT INTO test_ks.test_tab (partition, cluster) " "VALUES (?, ?)") with debug_time('insertion'): execute_concurrent_with_args( session=s, statement=p, parameters=params ) with debug_time('get replicas'): gkbr = group_keys_by_replica( s, "test_ks", "test_tab", params ) print('length of each key:') print(indent(pprint.pformat({k: len(v) for k, v in gkbr.items()}), ' ')) print('example values per key:') print(indent(pprint.pformat(({k: v[0:3] for k, v in gkbr.items()})), ' ')) ``` --- cassandra/metadata.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index eddf2b9033..ea49dca4a1 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2611,12 +2611,7 @@ def group_keys_by_replica(session, keyspace, table, keys): valid_replicas = [host for host in all_replicas if host.is_up] if valid_replicas: - for replica in valid_replicas: - if replica in keys_per_host: - keys_per_host[replica].append(key) - break - else: - keys_per_host[random.choice(valid_replicas)].append(key) + keys_per_host[random.choice(valid_replicas)].append(key) else: # We will group under this statement all the keys for which # we haven't found a valid replica From 86ff3e02e8fcef123e0dc97dbca5cac226dc32ca Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 13 Feb 2018 12:04:49 -0500 Subject: [PATCH 1352/2005] return a dict, not a defaultdict --- cassandra/metadata.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index ea49dca4a1..292b051a45 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2616,4 +2616,5 @@ def group_keys_by_replica(session, keyspace, table, keys): # We will group under this statement all the keys for which # we haven't found a valid replica keys_per_host[NO_VALID_REPLICA].append(key) - return keys_per_host + + return dict(keys_per_host) From 4ed4b8a1103c79ffdb389a384e908e90f4fab81a Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Thu, 25 Jan 2018 10:26:27 -0500 Subject: [PATCH 1353/2005] PYTHON-848 ResultSet.was_applied doesn't support batch with LWT statements --- CHANGELOG.rst | 1 + cassandra/cluster.py | 13 ++- .../cqlengine/test_lwt_conditional.py | 30 ++++- tests/integration/standard/test_query.py | 104 ++++++++++++++++++ 4 files changed, 144 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index e04820fe52..03c6b6f8cd 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -13,6 +13,7 @@ Bug Fixes * __del__ method in Session is throwing an exception (PYTHON-813) * LZ4 import issue with recent versions (PYTHON-897) * ResponseFuture._connection can be None when returning request_id (PYTHON-853) +* ResultSet.was_applied doesn't support batch with LWT statements (PYTHON-848) Other ----- diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 8e0582bb13..16c9a8c4eb 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -4321,13 +4321,20 @@ def was_applied(self): """ For LWT results, returns whether the transaction was applied. - Result is indeterminate if called on a result that was not an LWT request. + Result is indeterminate if called on a result that was not an LWT request or on + a :class:`.query.BatchStatement` containing LWT. In the latter case either all the batch + succeeds or fails. - Only valid when one of tne of the internal row factories is in use. + Only valid when one of the of the internal row factories is in use. """ if self.response_future.row_factory not in (named_tuple_factory, dict_factory, tuple_factory): raise RuntimeError("Cannot determine LWT result with row factory %s" % (self.response_future.row_factsory,)) - if len(self.current_rows) != 1: + + is_batch_statement = isinstance(self.response_future.query, BatchStatement) + if is_batch_statement and self.column_names[0] != "[applied]": + raise RuntimeError("No LWT were present in the BatchStatement") + + if not is_batch_statement and len(self.current_rows) != 1: raise RuntimeError("LWT result should have exactly one row. This has %d." % (len(self.current_rows))) row = self.current_rows[0] diff --git a/tests/integration/cqlengine/test_lwt_conditional.py b/tests/integration/cqlengine/test_lwt_conditional.py index efd3b33603..6d97eef823 100644 --- a/tests/integration/cqlengine/test_lwt_conditional.py +++ b/tests/integration/cqlengine/test_lwt_conditional.py @@ -36,6 +36,13 @@ class TestConditionalModel(Model): text = columns.Text(required=False) +class TestUpdateModel(Model): + partition = columns.Integer(primary_key=True) + cluster = columns.Integer(primary_key=True) + value = columns.Integer(required=False) + text = columns.Text(required=False, index=True) + + @unittest.skipUnless(CASSANDRA_VERSION >= '2.0.0', "conditionals only supported on cassandra 2.0 or higher") class TestConditional(BaseCassEngTestCase): @@ -116,7 +123,7 @@ def test_batch_update_conditional(self): t = TestConditionalModel.create(text='something', count=5) id = t.id with BatchQuery() as b: - t.batch(b).iff(count=5).update(text='something else') + t.batch(b).if_not_exists(count=5).update(text='something else') updated = TestConditionalModel.objects(id=id).first() self.assertEqual(updated.text, 'something else') @@ -135,6 +142,27 @@ def test_batch_update_conditional(self): updated = TestConditionalModel.objects(id=id).first() self.assertEqual(updated.text, 'something else') + def test_batch_update_conditional_several_rows(self): + sync_table(TestUpdateModel) + self.addCleanup(drop_table, TestUpdateModel) + + first_row = TestUpdateModel.create(partition=1, cluster=1, value=5, text="something") + second_row = TestUpdateModel.create(partition=1, cluster=2, value=5, text="something") + + b = BatchQuery() + TestUpdateModel.batch(b).if_not_exists().create(partition=1, cluster=1, value=5, text='something else') + TestUpdateModel.batch(b).if_not_exists().create(partition=1, cluster=2, value=5, text='something else') + TestUpdateModel.batch(b).if_not_exists().create(partition=1, cluster=3, value=5, text='something else') + + # The response will be more than two rows because two of the inserts will fail + with self.assertRaises(LWTException): + b.execute() + + first_row.delete() + second_row.delete() + b.execute() + + def test_delete_conditional(self): # DML path t = TestConditionalModel.create(text='something', count=5) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 6cc70055f1..3c5464bba8 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -844,11 +844,20 @@ def setUp(self): v int )''' self.session.execute(ddl) + ddl = ''' + CREATE TABLE test3rf.lwt_clustering ( + k int, + c int, + v int, + PRIMARY KEY (k, c))''' + self.session.execute(ddl) + def tearDown(self): """ Shutdown cluster """ self.session.execute("DROP TABLE test3rf.lwt") + self.session.execute("DROP TABLE test3rf.lwt_clustering") self.cluster.shutdown() def test_no_connection_refused_on_timeout(self): @@ -894,6 +903,101 @@ def test_no_connection_refused_on_timeout(self): # Make sure test passed self.assertTrue(received_timeout) + def test_was_applied_batch_stmt(self): + """ + Test to ensure `:attr:cassandra.cluster.ResultSet.was_applied` works as expected + with Batchstatements. + + For both type of batches verify was_applied has the correct result + under different scenarios: + - If on LWT fails the rest of the statements fail including normal UPSERTS + - If on LWT fails the rest of the statements fail + - All the queries succeed + + @since 3.14 + @jira_ticket PYTHON-399 + @expected_result `:attr:cassandra.cluster.ResultSet.was_applied` is updated as + expected + + @test_category query + """ + for batch_type in (BatchType.UNLOGGED, BatchType.LOGGED): + batch_statement = BatchStatement(batch_type) + batch_statement.add_all(["INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 0, 10);", + "INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 1, 10);", + "INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 2, 10);"], [None] * 3) + result = self.session.execute(batch_statement) + #self.assertTrue(result.was_applied) + + # Should fail since (0, 0, 10) have already been written + # The non conditional insert shouldn't be written as well + batch_statement = BatchStatement(batch_type) + batch_statement.add_all(["INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 0, 10) IF NOT EXISTS;", + "INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 3, 10) IF NOT EXISTS;", + "INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 4, 10);", + "INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 5, 10) IF NOT EXISTS;"], [None] * 4) + result = self.session.execute(batch_statement) + self.assertFalse(result.was_applied) + + all_rows = self.session.execute("SELECT * from test3rf.lwt_clustering") + # Verify the non conditional insert hasn't been inserted + self.assertEqual(len(all_rows.current_rows), 3) + + # Should fail since (0, 0, 10) have already been written + batch_statement = BatchStatement(batch_type) + batch_statement.add_all(["INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 0, 10) IF NOT EXISTS;", + "INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 3, 10) IF NOT EXISTS;", + "INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 5, 10) IF NOT EXISTS;"], [None] * 3) + result = self.session.execute(batch_statement) + self.assertFalse(result.was_applied) + + # Should fail since (0, 0, 10) have already been written + batch_statement.add("INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 0, 10) IF NOT EXISTS;") + result = self.session.execute(batch_statement) + self.assertFalse(result.was_applied) + + # Should succeed + batch_statement = BatchStatement(batch_type) + batch_statement.add_all(["INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 3, 10) IF NOT EXISTS;", + "INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 4, 10) IF NOT EXISTS;", + "INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 5, 10) IF NOT EXISTS;"], [None] * 3) + + result = self.session.execute(batch_statement) + self.assertTrue(result.was_applied) + + all_rows = self.session.execute("SELECT * from test3rf.lwt_clustering") + for i, row in enumerate(all_rows): + self.assertEqual((0, i, 10), (row[0], row[1], row[2])) + + self.session.execute("TRUNCATE TABLE test3rf.lwt_clustering") + + def test_was_applied_batch_string(self): + batch_statement = BatchStatement(BatchType.LOGGED) + batch_statement.add_all(["INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 0, 10);", + "INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 1, 10);", + "INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 2, 10);"], [None] * 3) + self.session.execute(batch_statement) + + batch_str = """ + BEGIN unlogged batch + INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 0, 10) IF NOT EXISTS; + INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 1, 10) IF NOT EXISTS; + INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 2, 10) IF NOT EXISTS; + APPLY batch; + """ + result = self.session.execute(batch_str) + self.assertFalse(result.was_applied) + + batch_str = """ + BEGIN unlogged batch + INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 3, 10) IF NOT EXISTS; + INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 4, 10) IF NOT EXISTS; + INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 5, 10) IF NOT EXISTS; + APPLY batch; + """ + result = self.session.execute(batch_str) + self.assertTrue(result.was_applied) + class BatchStatementDefaultRoutingKeyTests(unittest.TestCase): # Test for PYTHON-126: BatchStatement.add() should set the routing key of the first added prepared statement From 5bad3548ab5ee19f3ecff453b6b221316632f3ac Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Fri, 16 Feb 2018 14:28:19 -0500 Subject: [PATCH 1354/2005] Add warning to cqlengine docs about LWT and batch statements --- cassandra/cqlengine/query.py | 6 ++++-- tests/integration/cqlengine/test_lwt_conditional.py | 1 + 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 93becd7275..1118aba86a 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -19,7 +19,7 @@ import six from warnings import warn -from cassandra.query import SimpleStatement, BatchType as CBatchType +from cassandra.query import SimpleStatement, BatchType as CBatchType, BatchStatement from cassandra.cqlengine import columns, CQLEngineException, ValidationError, UnicodeMixin from cassandra.cqlengine import connection as conn from cassandra.cqlengine.functions import Token, BaseQueryFunction, QueryValue @@ -67,7 +67,9 @@ class MultipleObjectsReturned(QueryException): def check_applied(result): """ - Raises LWTException if it looks like a failed LWT request. + Raises LWTException if it looks like a failed LWT request. A LWTException + won't be raised in the special case in which there are several failed LWT + in a :class:`~cqlengine.query.BatchQuery`. """ try: applied = result.was_applied diff --git a/tests/integration/cqlengine/test_lwt_conditional.py b/tests/integration/cqlengine/test_lwt_conditional.py index 6d97eef823..70987985d9 100644 --- a/tests/integration/cqlengine/test_lwt_conditional.py +++ b/tests/integration/cqlengine/test_lwt_conditional.py @@ -142,6 +142,7 @@ def test_batch_update_conditional(self): updated = TestConditionalModel.objects(id=id).first() self.assertEqual(updated.text, 'something else') + @unittest.skip("Skipping until PYTHON-943 is resolved") def test_batch_update_conditional_several_rows(self): sync_table(TestUpdateModel) self.addCleanup(drop_table, TestUpdateModel) From 947b521e49124118172bf2c587f67dbdfae9c895 Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Thu, 22 Feb 2018 13:02:32 -0500 Subject: [PATCH 1355/2005] Added logic for when batch statement is empty --- cassandra/cluster.py | 4 ++-- .../cqlengine/test_lwt_conditional.py | 2 +- tests/integration/standard/test_query.py | 20 ++++++++++++++++++- 3 files changed, 22 insertions(+), 4 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 16c9a8c4eb..12552b626b 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -4328,10 +4328,10 @@ def was_applied(self): Only valid when one of the of the internal row factories is in use. """ if self.response_future.row_factory not in (named_tuple_factory, dict_factory, tuple_factory): - raise RuntimeError("Cannot determine LWT result with row factory %s" % (self.response_future.row_factsory,)) + raise RuntimeError("Cannot determine LWT result with row factory %s" % (self.response_future.row_factory,)) is_batch_statement = isinstance(self.response_future.query, BatchStatement) - if is_batch_statement and self.column_names[0] != "[applied]": + if is_batch_statement and (not self.column_names or self.column_names[0] != "[applied]"): raise RuntimeError("No LWT were present in the BatchStatement") if not is_batch_statement and len(self.current_rows) != 1: diff --git a/tests/integration/cqlengine/test_lwt_conditional.py b/tests/integration/cqlengine/test_lwt_conditional.py index 70987985d9..61f2a8d8ac 100644 --- a/tests/integration/cqlengine/test_lwt_conditional.py +++ b/tests/integration/cqlengine/test_lwt_conditional.py @@ -123,7 +123,7 @@ def test_batch_update_conditional(self): t = TestConditionalModel.create(text='something', count=5) id = t.id with BatchQuery() as b: - t.batch(b).if_not_exists(count=5).update(text='something else') + t.batch(b).iff(count=5).update(text='something else') updated = TestConditionalModel.objects(id=id).first() self.assertEqual(updated.text, 'something else') diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 3c5464bba8..68b21e5708 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -915,7 +915,7 @@ def test_was_applied_batch_stmt(self): - All the queries succeed @since 3.14 - @jira_ticket PYTHON-399 + @jira_ticket PYTHON-848 @expected_result `:attr:cassandra.cluster.ResultSet.was_applied` is updated as expected @@ -971,6 +971,24 @@ def test_was_applied_batch_stmt(self): self.session.execute("TRUNCATE TABLE test3rf.lwt_clustering") + def test_empty_batch_statement(self): + """ + Test to ensure `:attr:cassandra.cluster.ResultSet.was_applied` works as expected + with empty Batchstatements. + + @since 3.14 + @jira_ticket PYTHON-848 + @expected_result an Exception is raised + expected + + @test_category query + """ + batch_statement = BatchStatement() + results = self.session.execute(batch_statement) + with self.assertRaises(RuntimeError): + results.was_applied + + @unittest.skip("Skipping until PYTHON-943 is resolved") def test_was_applied_batch_string(self): batch_statement = BatchStatement(BatchType.LOGGED) batch_statement.add_all(["INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 0, 10);", From 2f80aa64a26151ff68c37d4f9ce80630a709c05b Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 27 Feb 2018 14:42:15 -0500 Subject: [PATCH 1356/2005] Add one() function to the ResultSet API --- CHANGELOG.rst | 4 ++++ cassandra/cluster.py | 12 ++++++++++++ 2 files changed, 16 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 75a64c2c68..d923db1365 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,10 @@ 3.14.0 ====== +Features +-------- +* Add one() function to the ResultSet API (PYTHON-947) + Other ----- * Fix Broken Links in Docs (PYTHON-916) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 12552b626b..774fdb0a1d 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -4230,6 +4230,15 @@ def current_rows(self): """ return self._current_rows or [] + def one(self): + """ + Return a single row of the results or None if empty. This is basically + a shortcut to `result_set.current_rows[0]` and should only be used when + you know a query returns a single row. Consider using an iterator if the + ResultSet contains more than one row. + """ + return self._current_rows[0] if self._current_rows else None + def __iter__(self): if self._list_mode: return iter(self._current_rows) @@ -4294,6 +4303,9 @@ def __eq__(self, other): return self._current_rows == other def __getitem__(self, i): + if i is 0: + warn("ResultSet indexing support will be removed in 4.0. Consider using " + "ResultSet.one() to get a single row.", DeprecationWarning) self._enter_list_mode("index operator") return self._current_rows[i] From bff974fa7a90bd819bcb32a246e934b34b5fd20a Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 28 Feb 2018 16:33:47 -0500 Subject: [PATCH 1357/2005] add test for .one() --- tests/unit/test_resultset.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/unit/test_resultset.py b/tests/unit/test_resultset.py index f30b9f6f9c..20579bc1dd 100644 --- a/tests/unit/test_resultset.py +++ b/tests/unit/test_resultset.py @@ -187,3 +187,10 @@ def test_was_applied(self): for applied in (True, False): rs = ResultSet(Mock(row_factory=row_factory), [{'[applied]': applied}]) self.assertEqual(rs.was_applied, applied) + + def test_one(self): + # no pages + first, second = Mock(), Mock() + rs = ResultSet(Mock(has_more_pages=False), [first, second]) + + self.assertEqual(rs.one(), first) From 42000c26145729d169f4df21ad58a997e5957006 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 28 Feb 2018 17:58:30 -0500 Subject: [PATCH 1358/2005] add test for indexing deprecation --- tests/unit/test_resultset.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tests/unit/test_resultset.py b/tests/unit/test_resultset.py index 20579bc1dd..d8f3398953 100644 --- a/tests/unit/test_resultset.py +++ b/tests/unit/test_resultset.py @@ -194,3 +194,20 @@ def test_one(self): rs = ResultSet(Mock(has_more_pages=False), [first, second]) self.assertEqual(rs.one(), first) + + def test_indexing_deprecation(self): + import warnings + + first, second = Mock(), Mock() + rs = ResultSet(Mock(has_more_pages=False), [first, second]) + self.assertEqual(rs[0], first) + + with warnings.catch_warnings(record=True) as ws: + # catch_warnings restores original filter on close + warnings.simplefilter('always') + rs[0] + self.assertEqual(len(ws), 1) + index_warning = ws[0] + self.assertIs(index_warning.category, DeprecationWarning) + self.assertIn('indexing support will be removed in 4.0', + str(index_warning.message)) From 74a3c2a276d7d6f3d9395895f0c0956420d2c094 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 28 Feb 2018 17:58:35 -0500 Subject: [PATCH 1359/2005] use equality, not identity Integer identity for small integers works in CPython, but isn't guaranteed in other interpreters. --- cassandra/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 774fdb0a1d..08141692ad 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -4303,7 +4303,7 @@ def __eq__(self, other): return self._current_rows == other def __getitem__(self, i): - if i is 0: + if i == 0: warn("ResultSet indexing support will be removed in 4.0. Consider using " "ResultSet.one() to get a single row.", DeprecationWarning) self._enter_list_mode("index operator") From 5e60c79041e25941ddcb36df313203861100e858 Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Fri, 9 Mar 2018 15:54:52 -0500 Subject: [PATCH 1360/2005] Added test for PYTHON-946 --- tests/integration/standard/test_cluster.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 0dc33c6862..2ec613e743 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -639,6 +639,20 @@ def test_trace_unavailable(self): else: raise Exception("get_query_trace didn't raise TraceUnavailable after {} tries".format(max_retry_count)) + def test_one_returns_none(self): + """ + Test ResulSet.one returns None if no rows where found + + @since 3.14 + @jira_ticket PYTHON-947 + @expected_result ResulSet.one is None + + @test_category query + """ + with Cluster() as cluster: + session = cluster.connect() + self.assertIsNone(session.execute("SELECT * from system.local WHERE key='madeup_key'").one()) + def test_string_coverage(self): """ Ensure str(future) returns without error From 939212f9b9dfcbd4731f2417a02e7032d1f77063 Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Mon, 19 Mar 2018 10:20:14 +0100 Subject: [PATCH 1361/2005] Fix build.yaml --- build.yaml | 14 +++++++++----- test-requirements.txt | 2 +- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/build.yaml b/build.yaml index 3739be0121..bc3685d140 100644 --- a/build.yaml +++ b/build.yaml @@ -127,12 +127,16 @@ build: echo "Installing simulacron" pushd ~ - git clone git@github.com:datastax/simulacron.git + + if [ ! -d "simulacron" ] ; then + git clone git@github.com:datastax/simulacron.git + cd simulacron + git clone git@github.com:datastax/native-protocol.git + cd native-protocol + mvn clean install + cd ../.. + fi cd simulacron - git clone git@github.com:datastax/native-protocol.git - cd native-protocol - mvn clean install - cd .. mvn clean install -DskipTests=true ls standalone/target SIMULACRON_JAR=`find \`pwd\` -name "simulacron-standalone-*.jar"` diff --git a/test-requirements.txt b/test-requirements.txt index ed78256317..ca2d3309d6 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -13,4 +13,4 @@ gevent>=1.0 eventlet cython>=0.20,<0.28 packaging -asynctest +asynctest; python_version > '3.4' From 6d6433e404ff956c2f6a1315f931a2712c2dc24d Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 9 Feb 2018 14:34:31 -0500 Subject: [PATCH 1362/2005] add new virtualenv docs-building logic --- docs.yaml | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/docs.yaml b/docs.yaml index db6831e4d5..9ee320740f 100644 --- a/docs.yaml +++ b/docs.yaml @@ -10,8 +10,16 @@ sections: prefix: / type: sphinx directory: docs -prebuilds: - - python setup.py build_ext --inplace --force --no-murmur3 --no-cython + virtualenv_init: | + set -x + CASS_DRIVER_NO_CYTHON=1 pip install -r test-requirements.txt + # for newer versions this is redundant, but in older versions we need to + # install, e.g., the cassandra driver, and those versions don't specify + # the cassandra driver version in requirements files + CASS_DRIVER_NO_CYTHON=1 python setup.py develop + pip install "jinja2==2.8.1;python_version<'3.6'" "sphinx>=1.3,<2" geomet + # build extensions like libev + CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - name: '3.13' ref: '3.13.0' From 698bdfa625c0296dcb4b877925cbf64cc68a24f3 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 22 Mar 2018 11:30:40 -0400 Subject: [PATCH 1363/2005] fix test_indexing_deprecation pre-3.0 (#930) --- tests/unit/test_resultset.py | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/tests/unit/test_resultset.py b/tests/unit/test_resultset.py index d8f3398953..541ef6fe32 100644 --- a/tests/unit/test_resultset.py +++ b/tests/unit/test_resultset.py @@ -18,7 +18,7 @@ except ImportError: import unittest # noqa -from mock import Mock, PropertyMock +from mock import Mock, PropertyMock, patch from cassandra.cluster import ResultSet @@ -195,19 +195,15 @@ def test_one(self): self.assertEqual(rs.one(), first) - def test_indexing_deprecation(self): - import warnings - + @patch('cassandra.cluster.warn') + def test_indexing_deprecation(self, mocked_warn): + # normally we'd use catch_warnings to test this, but that doesn't work + # pre-Py3.0 for some reason first, second = Mock(), Mock() rs = ResultSet(Mock(has_more_pages=False), [first, second]) self.assertEqual(rs[0], first) - - with warnings.catch_warnings(record=True) as ws: - # catch_warnings restores original filter on close - warnings.simplefilter('always') - rs[0] - self.assertEqual(len(ws), 1) - index_warning = ws[0] - self.assertIs(index_warning.category, DeprecationWarning) + self.assertEqual(len(mocked_warn.mock_calls), 1) + index_warning_args = tuple(mocked_warn.mock_calls[0])[1] self.assertIn('indexing support will be removed in 4.0', - str(index_warning.message)) + str(index_warning_args[0])) + self.assertIs(index_warning_args[1], DeprecationWarning) From e7f6f9afbaf0a47b5f8a866284ae85e5f447977d Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Fri, 16 Mar 2018 12:34:15 +0100 Subject: [PATCH 1364/2005] PYTHON-953, warn when using Session.default_consistency_level, fix docs --- CHANGELOG.rst | 1 + cassandra/cluster.py | 7 +++++++ docs/getting_started.rst | 5 ++++- tests/integration/standard/test_cluster.py | 19 +++++++++++++++++++ 4 files changed, 31 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index d923db1365..75081e8d00 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -11,6 +11,7 @@ Other * Reevaluate MONKEY_PATCH_LOOP in test codebase (PYTHON-903) * Remove CASS_SERVER_VERSION and replace it for CASSANDRA_VERSION in tests (PYTHON-910) * Refactor CASSANDRA_VERSION to a some kind of version object (PYTHON-915) +* Warn users when using the deprecated Session.default_consistency_level (PYTHON-953) 3.13.0 diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 08141692ad..c6d9cad09e 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1936,6 +1936,7 @@ def default_timeout(self, timeout): @property def default_consistency_level(self): """ + *Deprecated:* use execution profiles instead The default :class:`~ConsistencyLevel` for operations executed through this session. This default may be overridden by setting the :attr:`~.Statement.consistency_level` on individual statements. @@ -1950,6 +1951,12 @@ def default_consistency_level(self): @default_consistency_level.setter def default_consistency_level(self, cl): + """ + *Deprecated:* use execution profiles instead + """ + warn("Setting the consistency level at the session level will be removed in 4.0. Consider using " + "execution profiles and setting the desired consitency level to the EXEC_PROFILE_DEFAULT profile." + , DeprecationWarning) self._validate_set_legacy_config('default_consistency_level', cl) _default_serial_consistency_level = None diff --git a/docs/getting_started.rst b/docs/getting_started.rst index c7cbc25970..2dc32e6499 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -328,7 +328,10 @@ replicas of the data you are interacting with need to respond for the query to be considered a success. By default, :attr:`.ConsistencyLevel.LOCAL_ONE` will be used for all queries. -You can specify a different default for the session on :attr:`.Session.default_consistency_level`. +You can specify a different default for the session on :attr:`.Session.default_consistency_level` +if the cluster is configured in legacy mode (not using execution profiles). Otherwise this can +be done by setting the :attr:`.ExecutionProfile.consistency_level` for the execution profile with key +:data:`~.cluster.EXEC_PROFILE_DEFAULT`. To specify a different consistency level per request, wrap queries in a :class:`~.SimpleStatement`: diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 2ec613e743..307c455dac 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -1482,3 +1482,22 @@ def test_deprecation_warnings_meta_refreshed(self): self.assertEqual(len(w), 1) self.assertIn("Cluster.set_meta_refresh_enabled is deprecated and will be removed in 4.0.", str(w[0].message)) + + def test_deprecation_warning_default_consistency_level(self): + """ + Tests the deprecation warning has been added when enabling + session the default consistency level to session + + @since 3.14 + @jira_ticket PYTHON-935 + @expected_result the deprecation warning is emitted + + @test_category logs + """ + with warnings.catch_warnings(record=True) as w: + cluster = Cluster() + session = cluster.connect() + session.default_consistency_level = ConsistencyLevel.ONE + self.assertEqual(len(w), 1) + self.assertIn("Setting the consistency level at the session level will be removed in 4.0", + str(w[0].message)) From 0319a89693c9f79328bfcf92034afd66b768884f Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Thu, 8 Mar 2018 09:07:11 -0500 Subject: [PATCH 1365/2005] PYTHON-894 Add DSE smoke test to OSS driver tests --- CHANGELOG.rst | 1 + tests/__init__.py | 6 +- tests/integration/__init__.py | 80 +++++++++++++++++++++----- tests/integration/standard/test_dse.py | 77 +++++++++++++++++++++++++ 4 files changed, 150 insertions(+), 14 deletions(-) create mode 100644 tests/integration/standard/test_dse.py diff --git a/CHANGELOG.rst b/CHANGELOG.rst index d923db1365..87bd08991d 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -11,6 +11,7 @@ Other * Reevaluate MONKEY_PATCH_LOOP in test codebase (PYTHON-903) * Remove CASS_SERVER_VERSION and replace it for CASSANDRA_VERSION in tests (PYTHON-910) * Refactor CASSANDRA_VERSION to a some kind of version object (PYTHON-915) +* Add DSE smoke test to OSS driver tests (PYTHON-894) 3.13.0 diff --git a/tests/__init__.py b/tests/__init__.py index 0fdc646e66..23c956ae03 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -99,5 +99,9 @@ def is_monkey_patched(): VERIFY_CYTHON = True -notwindows = unittest.skipUnless(not "Windows" in platform.system(), "This test is not adequate for windows") +def is_windows(): + return "Windows" in platform.system() + + +notwindows = unittest.skipUnless(not is_windows(), "This test is not adequate for windows") notpypy = unittest.skipUnless(not platform.python_implementation() == 'PyPy', "This tests is not suitable for pypy") diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index de8f11e30f..489d9735dc 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -40,6 +40,7 @@ from cassandra.protocol import ConfigurationException try: + from ccmlib.dse_cluster import DseCluster from ccmlib.cluster import Cluster as CCMCluster from ccmlib.cluster_factory import ClusterFactory as CCMClusterFactory from ccmlib import common @@ -91,16 +92,35 @@ def _tuple_version(version_string): return tuple([int(p) for p in version_string.split('.')]) +def _get_dse_version_from_cass(cass_version): + if cass_version.startswith('2.1'): + dse_ver = "4.8.15" + elif cass_version.startswith('3.0'): + dse_ver = "5.0.12" + elif cass_version.startswith('3.10') or cass_version.startswith('3.11'): + dse_ver = "5.1.7" + elif cass_version.startswith('4.0'): + dse_ver = "6.0" + else: + log.error("Unknown cassandra version found {0}, defaulting to 2.1".format(cass_version)) + dse_ver = "2.1" + return dse_ver + + USE_CASS_EXTERNAL = bool(os.getenv('USE_CASS_EXTERNAL', False)) KEEP_TEST_CLUSTER = bool(os.getenv('KEEP_TEST_CLUSTER', False)) SIMULACRON_JAR = os.getenv('SIMULACRON_JAR', None) -default_cassandra_version = Version('3.11') - CASSANDRA_IP = os.getenv('CASSANDRA_IP', '127.0.0.1') CASSANDRA_DIR = os.getenv('CASSANDRA_DIR', None) + +default_cassandra_version = '3.11' CASSANDRA_VERSION = Version(os.getenv('CASSANDRA_VERSION', default_cassandra_version)) +default_dse_version = _get_dse_version_from_cass(CASSANDRA_VERSION.base_version) + +DSE_VERSION = Version(os.getenv('DSE_VERSION', default_dse_version)) + CCM_KWARGS = {} if CASSANDRA_DIR: log.info("Using Cassandra dir: %s", CASSANDRA_DIR) @@ -197,12 +217,23 @@ def get_unsupported_upper_protocol(): else: return None + default_protocol_version = get_default_protocol() PROTOCOL_VERSION = int(os.getenv('PROTOCOL_VERSION', default_protocol_version)) -local = unittest.skipUnless(CASSANDRA_IP.startswith("127.0.0."), 'Tests only runs against local C*') + +def local_decorator_creator(): + if not CASSANDRA_IP.startswith("127.0.0."): + return unittest.skip('Tests only runs against local C*') + + def _id_and_mark(f): + f.local = True + + return _id_and_mark + +local = local_decorator_creator() notprotocolv1 = unittest.skipUnless(PROTOCOL_VERSION > 1, 'Protocol v1 not supported') lessthenprotocolv4 = unittest.skipUnless(PROTOCOL_VERSION < 4, 'Protocol versions 4 or greater not supported') greaterthanprotocolv3 = unittest.skipUnless(PROTOCOL_VERSION >= 4, 'Protocol versions less than 4 are not supported') @@ -300,13 +331,17 @@ def is_current_cluster(cluster_name, node_counts): def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[], set_keyspace=True, ccm_options=None, - configuration_options={}): + configuration_options={}, dse_cluster=False, dse_options={}): set_default_cass_ip() - if ccm_options is None: + if ccm_options is None and dse_cluster: + ccm_options = {"version": DSE_VERSION} + elif ccm_options is None: ccm_options = CCM_KWARGS.copy() cassandra_version = ccm_options.get('version', CASSANDRA_VERSION) + dse_version = ccm_options.get('version', DSE_VERSION) + if 'version' in ccm_options: ccm_options['version'] = ccm_options['version'].base_version @@ -339,15 +374,34 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[], se del tb log.debug("Creating new CCM cluster, {0}, with args {1}".format(cluster_name, ccm_options)) - CCM_CLUSTER = CCMCluster(path, cluster_name, **ccm_options) - CCM_CLUSTER.set_configuration_options({'start_native_transport': True}) - if cassandra_version >= Version('2.2'): - CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True}) - if cassandra_version >= Version('3.0'): + + if dse_cluster: + CCM_CLUSTER = DseCluster(path, cluster_name, **ccm_options) + CCM_CLUSTER.set_configuration_options({'start_native_transport': True}) + CCM_CLUSTER.set_configuration_options({'batch_size_warn_threshold_in_kb': 5}) + if dse_version >= Version('5.0'): + CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True}) CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True}) - common.switch_cluster(path, cluster_name) - CCM_CLUSTER.set_configuration_options(configuration_options) - CCM_CLUSTER.populate(nodes, ipformat=ipformat) + if 'spark' in workloads: + config_options = {"initial_spark_worker_resources": 0.1} + CCM_CLUSTER.set_dse_configuration_options(config_options) + common.switch_cluster(path, cluster_name) + CCM_CLUSTER.set_configuration_options(configuration_options) + CCM_CLUSTER.populate(nodes, ipformat=ipformat) + + CCM_CLUSTER.set_dse_configuration_options(dse_options) + else: + log.debug("Creating new CCM cluster, {0}, with args {1}".format(cluster_name, ccm_options)) + CCM_CLUSTER = CCMCluster(path, cluster_name, **ccm_options) + CCM_CLUSTER.set_configuration_options({'start_native_transport': True}) + if cassandra_version >= Version('2.2'): + CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True}) + if cassandra_version >= Version('3.0'): + CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True}) + common.switch_cluster(path, cluster_name) + CCM_CLUSTER.set_configuration_options(configuration_options) + CCM_CLUSTER.populate(nodes, ipformat=ipformat) + try: jvm_args = [] # This will enable the Mirroring query handler which will echo our custom payload k,v pairs back diff --git a/tests/integration/standard/test_dse.py b/tests/integration/standard/test_dse.py new file mode 100644 index 0000000000..a4a365406a --- /dev/null +++ b/tests/integration/standard/test_dse.py @@ -0,0 +1,77 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + + +from cassandra.cluster import Cluster +from tests import notwindows, is_windows +from tests.integration import use_cluster, CLUSTER_NAME, PROTOCOL_VERSION, execute_until_pass, \ + execute_with_long_wait_retry + + +def setup_module(): + if is_windows(): + return + use_cluster(CLUSTER_NAME, [3], dse_cluster=True, dse_options={}) + + +@notwindows +class DseCCMClusterTest(unittest.TestCase): + """ + This class can be executed setting the DSE_VERSION variable, for example: + DSE_VERSION=5.1.4 python2.7 -m nose tests/integration/standard/test_dse.py + If CASSANDRA_VERSION is set instead, it will be converted to the corresponding DSE_VERSION + """ + + def test_basic(self): + """ + Test basic connection and usage + """ + + cluster = Cluster(protocol_version=PROTOCOL_VERSION) + session = cluster.connect() + result = execute_until_pass(session, + """ + CREATE KEYSPACE clustertests + WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} + """) + self.assertFalse(result) + + result = execute_with_long_wait_retry(session, + """ + CREATE TABLE clustertests.cf0 ( + a text, + b text, + c text, + PRIMARY KEY (a, b) + ) + """) + self.assertFalse(result) + + result = session.execute( + """ + INSERT INTO clustertests.cf0 (a, b, c) VALUES ('a', 'b', 'c') + """) + self.assertFalse(result) + + result = session.execute("SELECT * FROM clustertests.cf0") + self.assertEqual([('a', 'b', 'c')], result) + + execute_with_long_wait_retry(session, "DROP KEYSPACE clustertests") + + cluster.shutdown() From 56c1884ad4eb999005bf2dc474934d6b8e355c5b Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 26 Mar 2018 13:00:56 -0400 Subject: [PATCH 1366/2005] Python skip long (#934) * skip long tests by default * empty test commit to trigger job-creator * don't do fancy yaml stuff * remove unnecessary nose-exclude install --- build.yaml | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/build.yaml b/build.yaml index bc3685d140..a975c3d8fb 100644 --- a/build.yaml +++ b/build.yaml @@ -1,6 +1,17 @@ schedules: commit_master: schedule: per_commit + branches: + include: [master] + env_vars: | + EVENT_LOOP_MANAGER='libev' + EXCLUDE_LONG=1 + matrix: + exclude: + - python: [3.4, 3.6] + + nightly_master: + schedule: nightly branches: include: [master] env_vars: | @@ -15,6 +26,7 @@ schedules: include: [/python.*/] env_vars: | EVENT_LOOP_MANAGER='libev' + EXCLUDE_LONG=1 matrix: exclude: - python: [3.4, 3.6] @@ -172,7 +184,9 @@ build: echo "==========RUNNING INTEGRATION TESTS==========" EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true - echo "==========RUNNING LONG INTEGRATION TESTS==========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true + if [ -z "$EXCLUDE_LONG" ]; then + echo "==========RUNNING LONG INTEGRATION TESTS==========" + EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true + fi - xunit: - "*_results.xml" From 4ff50f63c579446a4702db29f0d8deaa41e2834a Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Mon, 19 Mar 2018 12:17:07 +0100 Subject: [PATCH 1367/2005] PYTHON-940 Log warning when driver configures an authenticator, but server does not request authentication --- CHANGELOG.rst | 1 + cassandra/connection.py | 6 +++ tests/integration/__init__.py | 16 +++++++ tests/integration/standard/test_cluster.py | 53 ++++++++++++++++++++++ 4 files changed, 76 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index d923db1365..771fc302a0 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -11,6 +11,7 @@ Other * Reevaluate MONKEY_PATCH_LOOP in test codebase (PYTHON-903) * Remove CASS_SERVER_VERSION and replace it for CASSANDRA_VERSION in tests (PYTHON-910) * Refactor CASSANDRA_VERSION to a some kind of version object (PYTHON-915) +* Log warning when driver configures an authenticator, but server does not request authentication (PYTHON-940) 3.13.0 diff --git a/cassandra/connection.py b/cassandra/connection.py index 3d13cbd4a0..f017bf55a3 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -724,6 +724,12 @@ def _handle_startup_response(self, startup_response, did_authenticate=False): if self.is_defunct: return if isinstance(startup_response, ReadyMessage): + if self.authenticator: + log.warning("An authentication challenge was not sent, " + "this is suspicious because the driver expects " + "authentication (configured authenticator = %s)", + self.authenticator.__class__.__name__) + log.debug("Got ReadyMessage on new connection (%s) from %s", id(self), self.host) if self._compressor: self.compressor = self._compressor diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index de8f11e30f..cdfa4d17a9 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -619,6 +619,22 @@ def get_message_count(self, level, sub_string): count+=1 return count + def set_module_name(self, module_name): + """ + This is intended to be used doing: + with MockLoggingHandler().set_module_name(connection.__name__) as mock_handler: + """ + self.module_name = module_name + return self + + def __enter__(self): + self.logger = logging.getLogger(self.module_name) + self.logger.addHandler(self) + return self + + def __exit__(self, *args): + pass + class BasicExistingKeyspaceUnitTestCase(BasicKeyspaceUnitTestCase): """ diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 2ec613e743..8c8540dfd5 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -36,6 +36,8 @@ from cassandra.pool import Host from cassandra.query import SimpleStatement, TraceUnavailable, tuple_factory +from cassandra.auth import PlainTextAuthProvider, SaslAuthProvider +from cassandra import connection from tests import notwindows from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, CASSANDRA_VERSION, \ @@ -672,6 +674,57 @@ def test_string_coverage(self): self.assertIn('result', str(future)) cluster.shutdown() + def test_can_connect_with_plainauth(self): + """ + Verify that we can connect setting PlainTextAuthProvider against a + C* server without authentication set. We also verify a warning is + issued per connection. This test is here instead of in test_authentication.py + because the C* server running in that module has auth set. + + @since 3.14 + @jira_ticket PYTHON-940 + @expected_result we can connect, query C* and warning are issued + + @test_category auth + """ + auth_provider = PlainTextAuthProvider( + username="made_up_username", + password="made_up_password" + ) + self._warning_are_issued_when_auth(auth_provider) + + def test_can_connect_with_sslauth(self): + """ + Verify that we can connect setting SaslAuthProvider against a + C* server without authentication set. We also verify a warning is + issued per connection. This test is here instead of in test_authentication.py + because the C* server running in that module has auth set. + + @since 3.14 + @jira_ticket PYTHON-940 + @expected_result we can connect, query C* and warning are issued + + @test_category auth + """ + sasl_kwargs = {'service': 'cassandra', + 'mechanism': 'PLAIN', + 'qops': ['auth'], + 'username': "made_up_username", + 'password': "made_up_password"} + + auth_provider = SaslAuthProvider(**sasl_kwargs) + self._warning_are_issued_when_auth(auth_provider) + + def _warning_are_issued_when_auth(self, auth_provider): + with MockLoggingHandler().set_module_name(connection.__name__) as mock_handler: + with Cluster(auth_provider=auth_provider) as cluster: + session = cluster.connect() + self.assertIsNotNone(session.execute("SELECT * from system.local")) + + # Three conenctions to nodes plus the control connection + self.assertEqual(4, mock_handler.get_message_count('warning', + "An authentication challenge was not sent")) + def test_idle_heartbeat(self): interval = 2 cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=interval) From 73616e8f6f662c3fb20361c03db0cee561ae6929 Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Tue, 27 Mar 2018 11:17:57 +0200 Subject: [PATCH 1368/2005] Update cassandra version to 3.11.2 in appveyor --- appveyor.yml | 4 ++-- appveyor/run_test.ps1 | 10 ---------- 2 files changed, 2 insertions(+), 12 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index dd3adaa770..113f8d308c 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -1,10 +1,10 @@ environment: matrix: - PYTHON: "C:\\Python27-x64" - cassandra_version: 3.11.0 + cassandra_version: 3.11.2 ci_type: standard - PYTHON: "C:\\Python34" - cassandra_version: 3.11.0 + cassandra_version: 3.11.2 ci_type: unit os: Visual Studio 2015 platform: diff --git a/appveyor/run_test.ps1 b/appveyor/run_test.ps1 index bdd0501ac6..fc95ec7e52 100644 --- a/appveyor/run_test.ps1 +++ b/appveyor/run_test.ps1 @@ -29,11 +29,6 @@ if($env:ci_type -eq 'unit'){ } if($env:ci_type -eq 'standard'){ - echo "Running simulacron tests" - nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=simulacron_results.xml .\tests\integration\simulacron - $simulacron_tests_result = $lastexitcode - $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\simulacron_results.xml)) - echo "uploading Simulacron test results" echo "Running CQLEngine integration tests" nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=cqlengine_results.xml .\tests\integration\cqlengine @@ -48,11 +43,6 @@ if($env:ci_type -eq 'standard'){ echo "uploading standard integration test results" } -if($env:ci_type -eq 'long'){ - nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=cqlengine_results.xml .\tests\integration\cqlengine - $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\cqlengine_results.xml)) - echo "uploading standard integration test results" -} $exit_result = $unit_tests_result + $cqlengine_tests_result + $integration_tests_result + $simulacron_tests_result echo "Exit result: $exit_result" From 8aaa4c8ed2189213fb4a397aec64f9f64066a9c5 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 30 Mar 2018 13:16:16 -0400 Subject: [PATCH 1369/2005] Fix PYTHON-647 mention in the changelog --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index aa75f4a622..dce3e6f785 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,6 +4,7 @@ Features -------- * Add one() function to the ResultSet API (PYTHON-947) +* Create an utility function to fetch concurrently many keys from the same replica (PYTHON-647) Other ----- @@ -61,7 +62,6 @@ Features * Add new exception type for CDC (PYTHON-837) * Allow 0ms in ConstantSpeculativeExecutionPolicy (PYTHON-836) * Add asyncio reactor (PYTHON-507) -* Create an utility function to fetch concurrently many keys from the same replica (PYTHON-647) Bug Fixes --------- From 844f04da9b284f0bc7138ef71c544bf44071bb4f Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 22 Mar 2018 17:48:22 -0400 Subject: [PATCH 1370/2005] add docs for long compilation --- CHANGELOG.rst | 1 + docs/installation.rst | 40 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index aa75f4a622..7f455a8340 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -13,6 +13,7 @@ Other * Refactor CASSANDRA_VERSION to a some kind of version object (PYTHON-915) * Warn users when using the deprecated Session.default_consistency_level (PYTHON-953) * Add DSE smoke test to OSS driver tests (PYTHON-894) +* Document long compilation times and workarounds (PYTHON-868) 3.13.0 ====== diff --git a/docs/installation.rst b/docs/installation.rst index 9c5e52ff61..7a3f83fba0 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -20,6 +20,44 @@ You can use ``pip install --pre cassandra-driver`` if you need to install a beta ***Note**: if intending to use optional extensions, install the `dependencies <#optional-non-python-dependencies>`_ first. The driver may need to be reinstalled if dependencies are added after the initial installation. +Speeding Up Installation +^^^^^^^^^^^^^^^^^^^^^^^^ + +By default, installing the driver through ``pip`` uses Cython to compile +certain parts of the driver. +This makes those hot paths faster at runtime, but the Cython compilation +process can take a long time -- as long as 10 minutes in some environments. + +In environments where performance is less important, it may be worth it to +:ref:`disable Cython as documented below `. +You can also use ``CASS_DRIVER_BUILD_CONCURRENCY`` to increase the number of +threads used to build the driver and any C extensions: + + $ # installing from source + $ CASS_DRIVER_BUILD_CONCURRENCY=8 python setup.py install + $ # installing from pip + $ CASS_DRIVER_BUILD_CONCURRENCY=8 pip install cassandra-driver + +Finally, you can `build a wheel `_ from the driver's source and distribute that to computers +that depend on it. For example: + + $ git clone https://github.com/datastax/python-driver.git + $ cd python-driver + $ git checkout 3.14.0 # or other desired tag + $ pip install wheel + $ python setup.py bdist_wheel + $ # build wheel with optional concurrency settings + $ CASS_DRIVER_BUILD_CONCURRENCY=8 python setup.py bdist_wheel + $ scp ./dist/cassandra_driver-3.14.0-cp27-cp27mu-linux_x86_64.whl user@host:/remote_dir + +Then, on the remote machine or machines, simply + + $ pip install /remote_dir/cassandra_driver-3.14.0-cp27-cp27mu-linux_x86_64.whl + +Note that the wheel created this way is a `platform wheel +`_ +and as such will not work across platforms or architectures. + OSX Installation Error ^^^^^^^^^^^^^^^^^^^^^^ If you're installing on OSX and have XCode 5.1 installed, you may see an error like this:: @@ -123,6 +161,8 @@ On OS X, homebrew installations of Python should provide the necessary headers. See :ref:`windows_build` for notes on configuring the build environment on Windows. +.. _cython-extensions: + Cython-based Extensions ~~~~~~~~~~~~~~~~~~~~~~~ By default, this package uses `Cython `_ to optimize core modules and build custom extensions. From b822e7ff7e9f192b890348b03954314affd75ec2 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 26 Mar 2018 11:02:43 -0400 Subject: [PATCH 1371/2005] use code blocks where appropriate --- docs/installation.rst | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/docs/installation.rst b/docs/installation.rst index 7a3f83fba0..ecca2891b7 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -33,6 +33,8 @@ In environments where performance is less important, it may be worth it to You can also use ``CASS_DRIVER_BUILD_CONCURRENCY`` to increase the number of threads used to build the driver and any C extensions: +.. code-block:: bash + $ # installing from source $ CASS_DRIVER_BUILD_CONCURRENCY=8 python setup.py install $ # installing from pip @@ -41,6 +43,8 @@ threads used to build the driver and any C extensions: Finally, you can `build a wheel `_ from the driver's source and distribute that to computers that depend on it. For example: +.. code-block:: bash + $ git clone https://github.com/datastax/python-driver.git $ cd python-driver $ git checkout 3.14.0 # or other desired tag @@ -52,6 +56,8 @@ that depend on it. For example: Then, on the remote machine or machines, simply +.. code-block:: bash + $ pip install /remote_dir/cassandra_driver-3.14.0-cp27-cp27mu-linux_x86_64.whl Note that the wheel created this way is a `platform wheel @@ -64,7 +70,9 @@ If you're installing on OSX and have XCode 5.1 installed, you may see an error l clang: error: unknown argument: '-mno-fused-madd' [-Wunused-command-line-argument-hard-error-in-future] -To fix this, re-run the installation with an extra compilation flag:: +To fix this, re-run the installation with an extra compilation flag: + +.. code-block:: bash ARCHFLAGS=-Wno-error=unused-command-line-argument-hard-error-in-future pip install cassandra-driver From 491eeadc567c0383ed24a774f3e5230a43d158d1 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 26 Mar 2018 12:08:26 -0400 Subject: [PATCH 1372/2005] fix unclosed quoted string --- docs/cqlengine/connections.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/cqlengine/connections.rst b/docs/cqlengine/connections.rst index f5c06b3df6..0d4a3c0393 100644 --- a/docs/cqlengine/connections.rst +++ b/docs/cqlengine/connections.rst @@ -110,7 +110,7 @@ You can use the :attr:`using() <.query.ModelQuerySet.using>` method to select a Automobile.objects.using(connection='cluster1').create(manufacturer='honda', year=2010, model='civic') q = Automobile.objects.filter(manufacturer='Tesla') - autos = q.using(keyspace='ks2, connection='cluster2').all() + autos = q.using(keyspace='ks2', connection='cluster2').all() for auto in autos: auto.using(connection='cluster1').save() From 15efb0914c5ea31875f0dc64a7622a5a279fb9b4 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 26 Mar 2018 11:58:00 -0400 Subject: [PATCH 1373/2005] remove unnecessary indentation --- cassandra/cluster.py | 16 +-- cassandra/metrics.py | 22 ++-- cassandra/util.py | 12 +-- docs/api/cassandra/cqlengine/columns.rst | 8 +- docs/cqlengine/batches.rst | 126 +++++++++++------------ docs/cqlengine/connections.rst | 98 +++++++++--------- docs/cqlengine/faq.rst | 6 +- docs/object_mapper.rst | 114 ++++++++++---------- 8 files changed, 201 insertions(+), 201 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index c6d9cad09e..22ea6211d8 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -230,10 +230,10 @@ class ExecutionProfile(object): Some example implementations: - - :func:`cassandra.query.tuple_factory` - return a result row as a tuple - - :func:`cassandra.query.named_tuple_factory` - return a result row as a named tuple - - :func:`cassandra.query.dict_factory` - return a result row as a dict - - :func:`cassandra.query.ordered_dict_factory` - return a result row as an OrderedDict + - :func:`cassandra.query.tuple_factory` - return a result row as a tuple + - :func:`cassandra.query.named_tuple_factory` - return a result row as a named tuple + - :func:`cassandra.query.dict_factory` - return a result row as a dict + - :func:`cassandra.query.ordered_dict_factory` - return a result row as an OrderedDict """ speculative_execution_policy = None @@ -1897,10 +1897,10 @@ def row_factory(self): returned row will be a named tuple. You can alternatively use any of the following: - - :func:`cassandra.query.tuple_factory` - return a result row as a tuple - - :func:`cassandra.query.named_tuple_factory` - return a result row as a named tuple - - :func:`cassandra.query.dict_factory` - return a result row as a dict - - :func:`cassandra.query.ordered_dict_factory` - return a result row as an OrderedDict + - :func:`cassandra.query.tuple_factory` - return a result row as a tuple + - :func:`cassandra.query.named_tuple_factory` - return a result row as a named tuple + - :func:`cassandra.query.dict_factory` - return a result row as a dict + - :func:`cassandra.query.ordered_dict_factory` - return a result row as an OrderedDict """ return self._row_factory diff --git a/cassandra/metrics.py b/cassandra/metrics.py index dce3de2973..473e52767f 100644 --- a/cassandra/metrics.py +++ b/cassandra/metrics.py @@ -37,17 +37,17 @@ class Metrics(object): A :class:`greplin.scales.PmfStat` timer for requests. This is a dict-like object with the following keys: - * count - number of requests that have been timed - * min - min latency - * max - max latency - * mean - mean latency - * stddev - standard deviation for latencies - * median - median latency - * 75percentile - 75th percentile latencies - * 95percentile - 95th percentile latencies - * 98percentile - 98th percentile latencies - * 99percentile - 99th percentile latencies - * 999percentile - 99.9th percentile latencies + * count - number of requests that have been timed + * min - min latency + * max - max latency + * mean - mean latency + * stddev - standard deviation for latencies + * median - median latency + * 75percentile - 75th percentile latencies + * 95percentile - 95th percentile latencies + * 98percentile - 98th percentile latencies + * 99percentile - 99th percentile latencies + * 999percentile - 99.9th percentile latencies """ connection_errors = None diff --git a/cassandra/util.py b/cassandra/util.py index 694b1bc032..28d2c729d3 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -883,9 +883,9 @@ def __init__(self, value): """ Initializer value can be: - - integer_type: absolute nanoseconds in the day - - datetime.time: built-in time - - string_type: a string time of the form "HH:MM:SS[.mmmuuunnn]" + - integer_type: absolute nanoseconds in the day + - datetime.time: built-in time + - string_type: a string time of the form "HH:MM:SS[.mmmuuunnn]" """ if isinstance(value, six.integer_types): self._from_timestamp(value) @@ -1012,9 +1012,9 @@ def __init__(self, value): """ Initializer value can be: - - integer_type: absolute days from epoch (1970, 1, 1). Can be negative. - - datetime.date: built-in date - - string_type: a string time of the form "yyyy-mm-dd" + - integer_type: absolute days from epoch (1970, 1, 1). Can be negative. + - datetime.date: built-in date + - string_type: a string time of the form "yyyy-mm-dd" """ if isinstance(value, six.integer_types): self.days_from_epoch = value diff --git a/docs/api/cassandra/cqlengine/columns.rst b/docs/api/cassandra/cqlengine/columns.rst index 670633f73a..3530078015 100644 --- a/docs/api/cassandra/cqlengine/columns.rst +++ b/docs/api/cassandra/cqlengine/columns.rst @@ -6,12 +6,12 @@ Columns ------- - Columns in your models map to columns in your CQL table. You define CQL columns by defining column attributes on your model classes. - For a model to be valid it needs at least one primary key column and one non-primary key column. +Columns in your models map to columns in your CQL table. You define CQL columns by defining column attributes on your model classes. +For a model to be valid it needs at least one primary key column and one non-primary key column. - Just as in CQL, the order you define your columns in is important, and is the same order they are defined in on a model's corresponding table. +Just as in CQL, the order you define your columns in is important, and is the same order they are defined in on a model's corresponding table. - Each column on your model definitions needs to be an instance of a Column class. +Each column on your model definitions needs to be an instance of a Column class. .. autoclass:: Column(**kwargs) diff --git a/docs/cqlengine/batches.rst b/docs/cqlengine/batches.rst index c2b711a51f..306e7d01a6 100644 --- a/docs/cqlengine/batches.rst +++ b/docs/cqlengine/batches.rst @@ -8,101 +8,101 @@ cqlengine supports batch queries using the BatchQuery class. Batch queries can b Batch Query General Use Pattern =============================== - You can only create, update, and delete rows with a batch query, attempting to read rows out of the database with a batch query will fail. +You can only create, update, and delete rows with a batch query, attempting to read rows out of the database with a batch query will fail. - .. code-block:: python +.. code-block:: python - from cassandra.cqlengine.query import BatchQuery + from cassandra.cqlengine.query import BatchQuery - #using a context manager - with BatchQuery() as b: - now = datetime.now() - em1 = ExampleModel.batch(b).create(example_type=0, description="1", created_at=now) - em2 = ExampleModel.batch(b).create(example_type=0, description="2", created_at=now) - em3 = ExampleModel.batch(b).create(example_type=0, description="3", created_at=now) - - # -- or -- - - #manually - b = BatchQuery() + #using a context manager + with BatchQuery() as b: now = datetime.now() em1 = ExampleModel.batch(b).create(example_type=0, description="1", created_at=now) em2 = ExampleModel.batch(b).create(example_type=0, description="2", created_at=now) em3 = ExampleModel.batch(b).create(example_type=0, description="3", created_at=now) - b.execute() - # updating in a batch + # -- or -- + + #manually + b = BatchQuery() + now = datetime.now() + em1 = ExampleModel.batch(b).create(example_type=0, description="1", created_at=now) + em2 = ExampleModel.batch(b).create(example_type=0, description="2", created_at=now) + em3 = ExampleModel.batch(b).create(example_type=0, description="3", created_at=now) + b.execute() + + # updating in a batch - b = BatchQuery() - em1.description = "new description" - em1.batch(b).save() - em2.description = "another new description" - em2.batch(b).save() - b.execute() + b = BatchQuery() + em1.description = "new description" + em1.batch(b).save() + em2.description = "another new description" + em2.batch(b).save() + b.execute() - # deleting in a batch - b = BatchQuery() - ExampleModel.objects(id=some_id).batch(b).delete() - ExampleModel.objects(id=some_id2).batch(b).delete() - b.execute() + # deleting in a batch + b = BatchQuery() + ExampleModel.objects(id=some_id).batch(b).delete() + ExampleModel.objects(id=some_id2).batch(b).delete() + b.execute() - Typically you will not want the block to execute if an exception occurs inside the `with` block. However, in the case that this is desirable, it's achievable by using the following syntax: +Typically you will not want the block to execute if an exception occurs inside the `with` block. However, in the case that this is desirable, it's achievable by using the following syntax: - .. code-block:: python +.. code-block:: python - with BatchQuery(execute_on_exception=True) as b: - LogEntry.batch(b).create(k=1, v=1) - mystery_function() # exception thrown in here - LogEntry.batch(b).create(k=1, v=2) # this code is never reached due to the exception, but anything leading up to here will execute in the batch. + with BatchQuery(execute_on_exception=True) as b: + LogEntry.batch(b).create(k=1, v=1) + mystery_function() # exception thrown in here + LogEntry.batch(b).create(k=1, v=2) # this code is never reached due to the exception, but anything leading up to here will execute in the batch. - If an exception is thrown somewhere in the block, any statements that have been added to the batch will still be executed. This is useful for some logging situations. +If an exception is thrown somewhere in the block, any statements that have been added to the batch will still be executed. This is useful for some logging situations. Batch Query Execution Callbacks =============================== - In order to allow secondary tasks to be chained to the end of batch, BatchQuery instances allow callbacks to be - registered with the batch, to be executed immediately after the batch executes. +In order to allow secondary tasks to be chained to the end of batch, BatchQuery instances allow callbacks to be +registered with the batch, to be executed immediately after the batch executes. - Multiple callbacks can be attached to same BatchQuery instance, they are executed in the same order that they - are added to the batch. +Multiple callbacks can be attached to same BatchQuery instance, they are executed in the same order that they +are added to the batch. - The callbacks attached to a given batch instance are executed only if the batch executes. If the batch is used as a - context manager and an exception is raised, the queued up callbacks will not be run. +The callbacks attached to a given batch instance are executed only if the batch executes. If the batch is used as a +context manager and an exception is raised, the queued up callbacks will not be run. - .. code-block:: python +.. code-block:: python - def my_callback(*args, **kwargs): - pass + def my_callback(*args, **kwargs): + pass - batch = BatchQuery() + batch = BatchQuery() - batch.add_callback(my_callback) - batch.add_callback(my_callback, 'positional arg', named_arg='named arg value') + batch.add_callback(my_callback) + batch.add_callback(my_callback, 'positional arg', named_arg='named arg value') - # if you need reference to the batch within the callback, - # just trap it in the arguments to be passed to the callback: - batch.add_callback(my_callback, cqlengine_batch=batch) + # if you need reference to the batch within the callback, + # just trap it in the arguments to be passed to the callback: + batch.add_callback(my_callback, cqlengine_batch=batch) - # once the batch executes... - batch.execute() + # once the batch executes... + batch.execute() - # the effect of the above scheduled callbacks will be similar to - my_callback() - my_callback('positional arg', named_arg='named arg value') - my_callback(cqlengine_batch=batch) + # the effect of the above scheduled callbacks will be similar to + my_callback() + my_callback('positional arg', named_arg='named arg value') + my_callback(cqlengine_batch=batch) - Failure in any of the callbacks does not affect the batch's execution, as the callbacks are started after the execution - of the batch is complete. +Failure in any of the callbacks does not affect the batch's execution, as the callbacks are started after the execution +of the batch is complete. Logged vs Unlogged Batches --------------------------- - By default, queries in cqlengine are LOGGED, which carries additional overhead from UNLOGGED. To explicitly state which batch type to use, simply: +By default, queries in cqlengine are LOGGED, which carries additional overhead from UNLOGGED. To explicitly state which batch type to use, simply: - .. code-block:: python +.. code-block:: python - from cassandra.cqlengine.query import BatchType - with BatchQuery(batch_type=BatchType.Unlogged) as b: - LogEntry.batch(b).create(k=1, v=1) - LogEntry.batch(b).create(k=1, v=2) + from cassandra.cqlengine.query import BatchType + with BatchQuery(batch_type=BatchType.Unlogged) as b: + LogEntry.batch(b).create(k=1, v=1) + LogEntry.batch(b).create(k=1, v=2) diff --git a/docs/cqlengine/connections.rst b/docs/cqlengine/connections.rst index 0d4a3c0393..03ade27521 100644 --- a/docs/cqlengine/connections.rst +++ b/docs/cqlengine/connections.rst @@ -10,22 +10,22 @@ Register a new connection To use cqlengine, you need at least a default connection. If you initialize cqlengine's connections with with :func:`connection.setup <.connection.setup>`, a connection will be created automatically. If you want to use another cluster/session, you need to register a new cqlengine connection. You register a connection with :func:`~.connection.register_connection`: - .. code-block:: python +.. code-block:: python - from cassandra.cqlengine import connection + from cassandra.cqlengine import connection - connection.setup(['127.0.0.1') - connection.register_connection('cluster2', ['127.0.0.2']) + connection.setup(['127.0.0.1') + connection.register_connection('cluster2', ['127.0.0.2']) :func:`~.connection.register_connection` can take a list of hosts, as shown above, in which case it will create a connection with a new session. It can also take a `session` argument if you've already created a session: - .. code-block:: python +.. code-block:: python - from cassandra.cqlengine import connection - from cassandra.cluster import Cluster + from cassandra.cqlengine import connection + from cassandra.cluster import Cluster - session = Cluster(['127.0.0.1']).connect() - connection.register_connection('cluster3', session=session) + session = Cluster(['127.0.0.1']).connect() + connection.register_connection('cluster3', session=session) Change the default connection @@ -33,51 +33,51 @@ Change the default connection You can change the default cqlengine connection on registration: - .. code-block:: python +.. code-block:: python - from cassandra.cqlengine import connection + from cassandra.cqlengine import connection - connection.register_connection('cluster2', ['127.0.0.2'] default=True) + connection.register_connection('cluster2', ['127.0.0.2'] default=True) or on the fly using :func:`~.connection.set_default_connection` - .. code-block:: python +.. code-block:: python - connection.set_default_connection('cluster2') + connection.set_default_connection('cluster2') Unregister a connection ======================= You can unregister a connection using :func:`~.connection.unregister_connection`: - .. code-block:: python +.. code-block:: python - connection.unregister_connection('cluster2') + connection.unregister_connection('cluster2') Management ========== When using multiples connections, you also need to sync your models on all connections (and keyspaces) that you need operate on. Management commands have been improved to ease this part. Here is an example: - .. code-block:: python +.. code-block:: python - from cassandra.cqlengine import management + from cassandra.cqlengine import management - keyspaces = ['ks1', 'ks2'] - conns = ['cluster1', 'cluster2'] + keyspaces = ['ks1', 'ks2'] + conns = ['cluster1', 'cluster2'] - # registers your connections - # ... + # registers your connections + # ... - # create all keyspaces on all connections - for ks in keyspaces: - management.create_simple_keyspace(ks, connections=conns) + # create all keyspaces on all connections + for ks in keyspaces: + management.create_simple_keyspace(ks, connections=conns) - # define your Automobile model - # ... + # define your Automobile model + # ... - # sync your models - management.sync_table(Automobile, keyspaces=keyspaces, connections=conns) + # sync your models + management.sync_table(Automobile, keyspaces=keyspaces, connections=conns) Connection Selection @@ -90,40 +90,40 @@ Default Model Connection You can specify a default connection per model: - .. code-block:: python +.. code-block:: python - class Automobile(Model): - __keyspace__ = 'test' - __connection__ = 'cluster2' - manufacturer = columns.Text(primary_key=True) - year = columns.Integer(primary_key=True) - model = columns.Text(primary_key=True) + class Automobile(Model): + __keyspace__ = 'test' + __connection__ = 'cluster2' + manufacturer = columns.Text(primary_key=True) + year = columns.Integer(primary_key=True) + model = columns.Text(primary_key=True) - print len(Automobile.objects.all()) # executed on the connection 'cluster2' + print len(Automobile.objects.all()) # executed on the connection 'cluster2' QuerySet and model instance --------------------------- You can use the :attr:`using() <.query.ModelQuerySet.using>` method to select a connection (or keyspace): - .. code-block:: python +.. code-block:: python - Automobile.objects.using(connection='cluster1').create(manufacturer='honda', year=2010, model='civic') - q = Automobile.objects.filter(manufacturer='Tesla') - autos = q.using(keyspace='ks2', connection='cluster2').all() + Automobile.objects.using(connection='cluster1').create(manufacturer='honda', year=2010, model='civic') + q = Automobile.objects.filter(manufacturer='Tesla') + autos = q.using(keyspace='ks2', connection='cluster2').all() - for auto in autos: - auto.using(connection='cluster1').save() + for auto in autos: + auto.using(connection='cluster1').save() Context Manager --------------- You can use the ContextQuery as well to select a connection: - .. code-block:: python +.. code-block:: python - with ContextQuery(Automobile, connection='cluster1') as A: - A.objects.filter(manufacturer='honda').all() # executed on 'cluster1' + with ContextQuery(Automobile, connection='cluster1') as A: + A.objects.filter(manufacturer='honda').all() # executed on 'cluster1' BatchQuery @@ -131,7 +131,7 @@ BatchQuery With a BatchQuery, you can select the connection with the context manager. Note that all operations in the batch need to use the same connection. - .. code-block:: python +.. code-block:: python - with BatchQuery(connection='cluster1') as b: - Automobile.objects.batch(b).create(manufacturer='honda', year=2010, model='civic') + with BatchQuery(connection='cluster1') as b: + Automobile.objects.batch(b).create(manufacturer='honda', year=2010, model='civic') diff --git a/docs/cqlengine/faq.rst b/docs/cqlengine/faq.rst index fe269e3b03..6c056d02ea 100644 --- a/docs/cqlengine/faq.rst +++ b/docs/cqlengine/faq.rst @@ -14,9 +14,9 @@ Statement Ordering is not supported by CQL3 batches. Therefore, once cassandra needs resolving conflict(Updating the same column in one batch), The algorithm below would be used. - * If timestamps are different, pick the column with the largest timestamp (the value being a regular column or a tombstone) - * If timestamps are the same, and one of the columns in a tombstone ('null') - pick the tombstone - * If timestamps are the same, and none of the columns are tombstones, pick the column with the largest value +* If timestamps are different, pick the column with the largest timestamp (the value being a regular column or a tombstone) +* If timestamps are the same, and one of the columns in a tombstone ('null') - pick the tombstone +* If timestamps are the same, and none of the columns are tombstones, pick the column with the largest value Below is an example to show this scenario. diff --git a/docs/object_mapper.rst b/docs/object_mapper.rst index 4379434f31..50d3cbf320 100644 --- a/docs/object_mapper.rst +++ b/docs/object_mapper.rst @@ -46,60 +46,60 @@ Contents Getting Started --------------- - .. code-block:: python - - import uuid - from cassandra.cqlengine import columns - from cassandra.cqlengine import connection - from datetime import datetime - from cassandra.cqlengine.management import sync_table - from cassandra.cqlengine.models import Model - - #first, define a model - class ExampleModel(Model): - example_id = columns.UUID(primary_key=True, default=uuid.uuid4) - example_type = columns.Integer(index=True) - created_at = columns.DateTime() - description = columns.Text(required=False) - - #next, setup the connection to your cassandra server(s)... - # see http://datastax.github.io/python-driver/api/cassandra/cluster.html for options - # the list of hosts will be passed to create a Cluster() instance - connection.setup(['127.0.0.1'], "cqlengine", protocol_version=3) - - #...and create your CQL table - >>> sync_table(ExampleModel) - - #now we can create some rows: - >>> em1 = ExampleModel.create(example_type=0, description="example1", created_at=datetime.now()) - >>> em2 = ExampleModel.create(example_type=0, description="example2", created_at=datetime.now()) - >>> em3 = ExampleModel.create(example_type=0, description="example3", created_at=datetime.now()) - >>> em4 = ExampleModel.create(example_type=0, description="example4", created_at=datetime.now()) - >>> em5 = ExampleModel.create(example_type=1, description="example5", created_at=datetime.now()) - >>> em6 = ExampleModel.create(example_type=1, description="example6", created_at=datetime.now()) - >>> em7 = ExampleModel.create(example_type=1, description="example7", created_at=datetime.now()) - >>> em8 = ExampleModel.create(example_type=1, description="example8", created_at=datetime.now()) - - #and now we can run some queries against our table - >>> ExampleModel.objects.count() - 8 - >>> q = ExampleModel.objects(example_type=1) - >>> q.count() - 4 - >>> for instance in q: - >>> print instance.description - example5 - example6 - example7 - example8 - - #here we are applying additional filtering to an existing query - #query objects are immutable, so calling filter returns a new - #query object - >>> q2 = q.filter(example_id=em5.example_id) - - >>> q2.count() - 1 - >>> for instance in q2: - >>> print instance.description - example5 +.. code-block:: python + + import uuid + from cassandra.cqlengine import columns + from cassandra.cqlengine import connection + from datetime import datetime + from cassandra.cqlengine.management import sync_table + from cassandra.cqlengine.models import Model + + #first, define a model + class ExampleModel(Model): + example_id = columns.UUID(primary_key=True, default=uuid.uuid4) + example_type = columns.Integer(index=True) + created_at = columns.DateTime() + description = columns.Text(required=False) + + #next, setup the connection to your cassandra server(s)... + # see http://datastax.github.io/python-driver/api/cassandra/cluster.html for options + # the list of hosts will be passed to create a Cluster() instance + connection.setup(['127.0.0.1'], "cqlengine", protocol_version=3) + + #...and create your CQL table + >>> sync_table(ExampleModel) + + #now we can create some rows: + >>> em1 = ExampleModel.create(example_type=0, description="example1", created_at=datetime.now()) + >>> em2 = ExampleModel.create(example_type=0, description="example2", created_at=datetime.now()) + >>> em3 = ExampleModel.create(example_type=0, description="example3", created_at=datetime.now()) + >>> em4 = ExampleModel.create(example_type=0, description="example4", created_at=datetime.now()) + >>> em5 = ExampleModel.create(example_type=1, description="example5", created_at=datetime.now()) + >>> em6 = ExampleModel.create(example_type=1, description="example6", created_at=datetime.now()) + >>> em7 = ExampleModel.create(example_type=1, description="example7", created_at=datetime.now()) + >>> em8 = ExampleModel.create(example_type=1, description="example8", created_at=datetime.now()) + + #and now we can run some queries against our table + >>> ExampleModel.objects.count() + 8 + >>> q = ExampleModel.objects(example_type=1) + >>> q.count() + 4 + >>> for instance in q: + >>> print instance.description + example5 + example6 + example7 + example8 + + #here we are applying additional filtering to an existing query + #query objects are immutable, so calling filter returns a new + #query object + >>> q2 = q.filter(example_id=em5.example_id) + + >>> q2.count() + 1 + >>> for instance in q2: + >>> print instance.description + example5 From 99aab7ef4e91bfdb2239ad37f375ec3e05734622 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 26 Mar 2018 12:21:18 -0400 Subject: [PATCH 1374/2005] remove top-level indentation in queryset.rst We're not removing all the indentation because the way it's used cleanly denotes the docs for different postfixes. --- docs/cqlengine/queryset.rst | 98 ++++++++++++++++++------------------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/docs/cqlengine/queryset.rst b/docs/cqlengine/queryset.rst index 5920cbb093..bd47e1067e 100644 --- a/docs/cqlengine/queryset.rst +++ b/docs/cqlengine/queryset.rst @@ -119,92 +119,92 @@ Accessing objects in a QuerySet Filtering Operators =================== - :attr:`Equal To ` +:attr:`Equal To ` - The default filtering operator. + The default filtering operator. - .. code-block:: python + .. code-block:: python - q = Automobile.objects.filter(manufacturer='Tesla') - q = q.filter(year=2012) #year == 2012 + q = Automobile.objects.filter(manufacturer='Tesla') + q = q.filter(year=2012) #year == 2012 - In addition to simple equal to queries, cqlengine also supports querying with other operators by appending a ``__`` to the field name on the filtering call +In addition to simple equal to queries, cqlengine also supports querying with other operators by appending a ``__`` to the field name on the filtering call - :attr:`in (__in) ` +:attr:`in (__in) ` - .. code-block:: python + .. code-block:: python - q = Automobile.objects.filter(manufacturer='Tesla') - q = q.filter(year__in=[2011, 2012]) + q = Automobile.objects.filter(manufacturer='Tesla') + q = q.filter(year__in=[2011, 2012]) - :attr:`> (__gt) ` +:attr:`> (__gt) ` - .. code-block:: python + .. code-block:: python - q = Automobile.objects.filter(manufacturer='Tesla') - q = q.filter(year__gt=2010) # year > 2010 + q = Automobile.objects.filter(manufacturer='Tesla') + q = q.filter(year__gt=2010) # year > 2010 - # or the nicer syntax + # or the nicer syntax - q.filter(Automobile.year > 2010) + q.filter(Automobile.year > 2010) - :attr:`>= (__gte) ` +:attr:`>= (__gte) ` - .. code-block:: python + .. code-block:: python - q = Automobile.objects.filter(manufacturer='Tesla') - q = q.filter(year__gte=2010) # year >= 2010 + q = Automobile.objects.filter(manufacturer='Tesla') + q = q.filter(year__gte=2010) # year >= 2010 - # or the nicer syntax + # or the nicer syntax - q.filter(Automobile.year >= 2010) + q.filter(Automobile.year >= 2010) - :attr:`< (__lt) ` +:attr:`< (__lt) ` - .. code-block:: python + .. code-block:: python - q = Automobile.objects.filter(manufacturer='Tesla') - q = q.filter(year__lt=2012) # year < 2012 + q = Automobile.objects.filter(manufacturer='Tesla') + q = q.filter(year__lt=2012) # year < 2012 - # or... + # or... - q.filter(Automobile.year < 2012) + q.filter(Automobile.year < 2012) - :attr:`<= (__lte) ` +:attr:`<= (__lte) ` - .. code-block:: python + .. code-block:: python - q = Automobile.objects.filter(manufacturer='Tesla') - q = q.filter(year__lte=2012) # year <= 2012 + q = Automobile.objects.filter(manufacturer='Tesla') + q = q.filter(year__lte=2012) # year <= 2012 - q.filter(Automobile.year <= 2012) + q.filter(Automobile.year <= 2012) - :attr:`CONTAINS (__contains) ` +:attr:`CONTAINS (__contains) ` - The CONTAINS operator is available for all collection types (List, Set, Map). + The CONTAINS operator is available for all collection types (List, Set, Map). - .. code-block:: python + .. code-block:: python - q = Automobile.objects.filter(manufacturer='Tesla') - q.filter(options__contains='backup camera').allow_filtering() + q = Automobile.objects.filter(manufacturer='Tesla') + q.filter(options__contains='backup camera').allow_filtering() - Note that we need to use allow_filtering() since the *options* column has no secondary index. + Note that we need to use allow_filtering() since the *options* column has no secondary index. - :attr:`LIKE (__like) ` +:attr:`LIKE (__like) ` - The LIKE operator is available for text columns that have a SASI secondary index. + The LIKE operator is available for text columns that have a SASI secondary index. - .. code-block:: python + .. code-block:: python - q = Automobile.objects.filter(model__like='%Civic%').allow_filtering() + q = Automobile.objects.filter(model__like='%Civic%').allow_filtering() - Limitations: - - Currently, cqlengine does not support SASI index creation. To use this feature, - you need to create the SASI index using the core driver. - - Queries using LIKE must use allow_filtering() since the *model* column has no - standard secondary index. Note that the server will use the SASI index properly - when executing the query. + Limitations: + - Currently, cqlengine does not support SASI index creation. To use this feature, + you need to create the SASI index using the core driver. + - Queries using LIKE must use allow_filtering() since the *model* column has no + standard secondary index. Note that the server will use the SASI index properly + when executing the query. TimeUUID Functions ================== From 64804d29bba6662693ab93eca6a9c55afa01a0f5 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 26 Mar 2018 12:34:34 -0400 Subject: [PATCH 1375/2005] use list instead of indentation --- cassandra/concurrent.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index cb9fd1bf48..a8bddcbdab 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -49,13 +49,12 @@ def execute_concurrent(session, statements_and_parameters, concurrency=100, rais `results_generator` controls how the results are returned. - If :const:`False`, the results are returned only after all requests have completed. - - If :const:`True`, a generator expression is returned. Using a generator results in a constrained - memory footprint when the results set will be large -- results are yielded - as they return instead of materializing the entire list at once. The trade for lower memory - footprint is marginal CPU overhead (more thread coordination and sorting out-of-order results - on-the-fly). + * If :const:`False`, the results are returned only after all requests have completed. + * If :const:`True`, a generator expression is returned. Using a generator results in a constrained + memory footprint when the results set will be large -- results are yielded + as they return instead of materializing the entire list at once. The trade for lower memory + footprint is marginal CPU overhead (more thread coordination and sorting out-of-order results + on-the-fly). A sequence of ``ExecutionResult(success, result_or_exc)`` namedtuples is returned in the same order that the statements were passed in. If ``success`` is :const:`False`, From 7fbaa361b5a2c03d59ded977a4bceda0ff55f32f Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 26 Mar 2018 12:40:42 -0400 Subject: [PATCH 1376/2005] fix protocol.rst lists remove unnecessary indentation and fix items to be single paragraphs --- docs/api/cassandra/protocol.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/api/cassandra/protocol.rst b/docs/api/cassandra/protocol.rst index f3a3cd5ab5..f615ab1a70 100644 --- a/docs/api/cassandra/protocol.rst +++ b/docs/api/cassandra/protocol.rst @@ -45,11 +45,11 @@ and ``NumpyProtocolHandler``. They can be used as follows: These protocol handlers comprise different parsers, and return results as described below: - - ProtocolHandler: this default implementation is a drop-in replacement for the pure-Python version. - The rows are all parsed upfront, before results are returned. +- ProtocolHandler: this default implementation is a drop-in replacement for the pure-Python version. + The rows are all parsed upfront, before results are returned. - - LazyProtocolHandler: near drop-in replacement for the above, except that it returns an iterator over rows, - lazily decoded into the default row format (this is more efficient since all decoded results are not materialized at once) +- LazyProtocolHandler: near drop-in replacement for the above, except that it returns an iterator over rows, + lazily decoded into the default row format (this is more efficient since all decoded results are not materialized at once) - - NumpyProtocolHander: deserializes results directly into NumPy arrays. This facilitates efficient integration with - analysis toolkits such as Pandas. +- NumpyProtocolHander: deserializes results directly into NumPy arrays. This facilitates efficient integration with + analysis toolkits such as Pandas. From 3b88b63e53b3780c21d47211a94ccc834d6b2dea Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 9 Apr 2018 13:57:36 -0400 Subject: [PATCH 1377/2005] PYTHON-966: Allow filter queries with fields that have an index managed outside of cqlengine (#938) * Allow filter queries with fields that have an index managed outside of cqlengine --- CHANGELOG.rst | 1 + cassandra/cqlengine/columns.py | 15 +++++++++- cassandra/cqlengine/query.py | 28 ++++++++++++++----- docs/api/cassandra/cqlengine/columns.rst | 2 ++ .../cqlengine/query/test_queryset.py | 27 ++++++++++++++++++ .../integration/cqlengine/test_connections.py | 1 + 6 files changed, 66 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 89d18bac2a..e2a884e814 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,6 +5,7 @@ Features -------- * Add one() function to the ResultSet API (PYTHON-947) * Create an utility function to fetch concurrently many keys from the same replica (PYTHON-647) +* Allow filter queries with fields that have an index managed outside of cqlengine (PYTHON-966) Other ----- diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index cfceea32ce..5808f60613 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -114,6 +114,13 @@ class Column(object): bool flag, indicates an index should be created for this column """ + custom_index = False + """ + bool flag, indicates an index is managed outside of cqlengine. This is + useful if you want to do filter queries on fields that have custom + indexes. + """ + db_field = None """ the fieldname this field will map to in the database @@ -161,10 +168,12 @@ def __init__(self, required=False, clustering_order=None, discriminator_column=False, - static=False): + static=False, + custom_index=False): self.partition_key = partition_key self.primary_key = partition_key or primary_key self.index = index + self.custom_index = custom_index self.db_field = db_field self.default = default self.required = required @@ -286,6 +295,10 @@ def db_index_name(self): """ Returns the name of the cql index """ return 'index_{0}'.format(self.db_field_name) + @property + def has_index(self): + return self.index or self.custom_index + @property def cql(self): return self.get_cql() diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 1118aba86a..41a8e2fb8c 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -1071,19 +1071,33 @@ class ModelQuerySet(AbstractQuerySet): """ def _validate_select_where(self): """ Checks that a filterset will not create invalid select statement """ - # check that there's either a =, a IN or a CONTAINS (collection) relationship with a primary key or indexed field + # check that there's either a =, a IN or a CONTAINS (collection) + # relationship with a primary key or indexed field. We also allow + # custom indexes to be queried with any operator (a difference + # between a secondary index) equal_ops = [self.model._get_column_by_db_name(w.field) \ - for w in self._where if isinstance(w.operator, EqualsOperator) and not isinstance(w.value, Token)] + for w in self._where if not isinstance(w.value, Token) + and (isinstance(w.operator, EqualsOperator) + or self.model._get_column_by_db_name(w.field).custom_index)] token_comparison = any([w for w in self._where if isinstance(w.value, Token)]) - if not any(w.primary_key or w.index for w in equal_ops) and not token_comparison and not self._allow_filtering: - raise QueryException(('Where clauses require either =, a IN or a CONTAINS (collection) ' - 'comparison with either a primary key or indexed field')) + if not any(w.primary_key or w.has_index for w in equal_ops) and not token_comparison and not self._allow_filtering: + raise QueryException( + ('Where clauses require either =, a IN or a CONTAINS ' + '(collection) comparison with either a primary key or ' + 'indexed field. You might want to consider setting ' + 'custom_index on fields that you manage index outside ' + 'cqlengine.')) if not self._allow_filtering: # if the query is not on an indexed field - if not any(w.index for w in equal_ops): + if not any(w.has_index for w in equal_ops): if not any([w.partition_key for w in equal_ops]) and not token_comparison: - raise QueryException('Filtering on a clustering key without a partition key is not allowed unless allow_filtering() is called on the querset') + raise QueryException( + ('Filtering on a clustering key without a partition ' + 'key is not allowed unless allow_filtering() is ' + 'called on the queryset. You might want to consider ' + 'setting custom_index on fields that you manage ' + 'index outside cqlengine.')) def _select_fields(self): if self._defer_fields or self._only_fields: diff --git a/docs/api/cassandra/cqlengine/columns.rst b/docs/api/cassandra/cqlengine/columns.rst index 3530078015..d44be8adb8 100644 --- a/docs/api/cassandra/cqlengine/columns.rst +++ b/docs/api/cassandra/cqlengine/columns.rst @@ -21,6 +21,8 @@ Each column on your model definitions needs to be an instance of a Column class. .. autoattribute:: index + .. autoattribute:: custom_index + .. autoattribute:: db_field .. autoattribute:: default diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index e0bfb2fa7a..46a65dd8c5 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -82,6 +82,13 @@ class IndexedTestModel(Model): test_result = columns.Integer(index=True) +class CustomIndexedTestModel(Model): + + test_id = columns.Integer(primary_key=True) + description = columns.Text(custom_index=True) + data = columns.Text() + + class IndexedCollectionsTestModel(Model): test_id = columns.Integer(primary_key=True) @@ -314,9 +321,11 @@ def setUpClass(cls): super(BaseQuerySetUsage, cls).setUpClass() drop_table(TestModel) drop_table(IndexedTestModel) + drop_table(CustomIndexedTestModel) sync_table(TestModel) sync_table(IndexedTestModel) + sync_table(CustomIndexedTestModel) sync_table(TestMultiClusteringModel) TestModel.objects.create(test_id=0, attempt_id=0, description='try1', expected_result=5, test_result=30) @@ -374,6 +383,7 @@ def tearDownClass(cls): super(BaseQuerySetUsage, cls).tearDownClass() drop_table(TestModel) drop_table(IndexedTestModel) + drop_table(CustomIndexedTestModel) drop_table(TestMultiClusteringModel) @@ -736,6 +746,23 @@ def test_indexed_field_can_be_queried(self): q = IndexedCollectionsTestModel.objects.filter(test_map__contains=13) self.assertEqual(q.count(), 0) + def test_custom_indexed_field_can_be_queried(self): + """ + Tests that queries on an custom indexed field will work without any primary key relations specified + """ + + with self.assertRaises(query.QueryException): + list(CustomIndexedTestModel.objects.filter(data='test')) # not custom indexed + + # equals operator, server error since there is no real index, but it passes + with self.assertRaises(InvalidRequest): + list(CustomIndexedTestModel.objects.filter(description='test')) + + # gte operator, server error since there is no real index, but it passes + # this can't work with a secondary index + with self.assertRaises(InvalidRequest): + list(CustomIndexedTestModel.objects.filter(description__gte='test')) + class TestQuerySetDelete(BaseQuerySetUsage): diff --git a/tests/integration/cqlengine/test_connections.py b/tests/integration/cqlengine/test_connections.py index ce4f4cc631..10dee66ddc 100644 --- a/tests/integration/cqlengine/test_connections.py +++ b/tests/integration/cqlengine/test_connections.py @@ -536,6 +536,7 @@ def setUpClass(cls): conn.register_connection('cluster', [CASSANDRA_IP]) test_queryset.TestModel.__queryset__ = ModelQuerySetNew test_queryset.IndexedTestModel.__queryset__ = ModelQuerySetNew + test_queryset.CustomIndexedTestModel.__queryset__ = ModelQuerySetNew test_queryset.IndexedCollectionsTestModel.__queryset__ = ModelQuerySetNew test_queryset.TestMultiClusteringModel.__queryset__ = ModelQuerySetNew From 710b6f75de74785d3abc66e72f37c72fc4aae313 Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Tue, 10 Apr 2018 14:00:47 -0400 Subject: [PATCH 1378/2005] Added tests for PYTHON-966 --- .../cqlengine/query/test_queryset.py | 23 +++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 46a65dd8c5..e5a15b7c4b 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -23,7 +23,7 @@ from packaging.version import Version import uuid -from cassandra.cluster import Session +from cassandra.cluster import Cluster, Session from cassandra import InvalidRequest from tests.integration.cqlengine.base import BaseCassEngTestCase from cassandra.cqlengine.connection import NOT_SET @@ -43,7 +43,7 @@ from cassandra.cqlengine.connection import get_session from tests.integration import PROTOCOL_VERSION, CASSANDRA_VERSION, greaterthancass20, greaterthancass21, \ greaterthanorequalcass30 -from tests.integration.cqlengine import execute_count +from tests.integration.cqlengine import execute_count, DEFAULT_KEYSPACE class TzOffset(tzinfo): @@ -86,6 +86,7 @@ class CustomIndexedTestModel(Model): test_id = columns.Integer(primary_key=True) description = columns.Text(custom_index=True) + indexed = columns.Text(index=True) data = columns.Text() @@ -754,15 +755,33 @@ def test_custom_indexed_field_can_be_queried(self): with self.assertRaises(query.QueryException): list(CustomIndexedTestModel.objects.filter(data='test')) # not custom indexed + # It should return InvalidRequest if target an indexed columns + with self.assertRaises(InvalidRequest): + list(CustomIndexedTestModel.objects.filter(indexed='test', data='test')) + + # It should return InvalidRequest if target an indexed columns + with self.assertRaises(InvalidRequest): + list(CustomIndexedTestModel.objects.filter(description='test', data='test')) + # equals operator, server error since there is no real index, but it passes with self.assertRaises(InvalidRequest): list(CustomIndexedTestModel.objects.filter(description='test')) + with self.assertRaises(InvalidRequest): + list(CustomIndexedTestModel.objects.filter(test_id=1, description='test')) + # gte operator, server error since there is no real index, but it passes # this can't work with a secondary index with self.assertRaises(InvalidRequest): list(CustomIndexedTestModel.objects.filter(description__gte='test')) + with Cluster().connect() as session: + session.execute("CREATE INDEX custom_index_cqlengine ON {}.{} (description)". + format(DEFAULT_KEYSPACE, CustomIndexedTestModel._table_name)) + + list(CustomIndexedTestModel.objects.filter(description='test')) + list(CustomIndexedTestModel.objects.filter(test_id=1, description='test')) + class TestQuerySetDelete(BaseQuerySetUsage): From 6bf44169ef7eb3ebc182a9a22287848c976c6d25 Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Tue, 20 Mar 2018 15:56:45 +0100 Subject: [PATCH 1379/2005] PYTHON-941 Improve error for batch WriteTimeouts --- cassandra/__init__.py | 89 +++++++++++++++++-- cassandra/policies.py | 74 ++------------- cassandra/protocol.py | 2 +- tests/integration/simulacron/__init__.py | 25 ++++++ tests/integration/simulacron/test_cluster.py | 55 ++++++++++++ tests/integration/simulacron/test_policies.py | 3 +- 6 files changed, 172 insertions(+), 76 deletions(-) create mode 100644 tests/integration/simulacron/test_cluster.py diff --git a/cassandra/__init__.py b/cassandra/__init__.py index df34f9191f..422ded847e 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -13,6 +13,7 @@ # limitations under the License. import logging +import six class NullHandler(logging.Handler): @@ -213,6 +214,75 @@ def uses_keyspace_flag(cls, version): return version >= cls.V5 +class WriteType(object): + """ + For usage with :class:`.RetryPolicy`, this describe a type + of write operation. + """ + + SIMPLE = 0 + """ + A write to a single partition key. Such writes are guaranteed to be atomic + and isolated. + """ + + BATCH = 1 + """ + A write to multiple partition keys that used the distributed batch log to + ensure atomicity. + """ + + UNLOGGED_BATCH = 2 + """ + A write to multiple partition keys that did not use the distributed batch + log. Atomicity for such writes is not guaranteed. + """ + + COUNTER = 3 + """ + A counter write (for one or multiple partition keys). Such writes should + not be replayed in order to avoid overcount. + """ + + BATCH_LOG = 4 + """ + The initial write to the distributed batch log that Cassandra performs + internally before a BATCH write. + """ + + CAS = 5 + """ + A lighweight-transaction write, such as "DELETE ... IF EXISTS". + """ + + VIEW = 6 + """ + This WriteType is only seen in results for requests that were unable to + complete MV operations. + """ + + CDC = 7 + """ + This WriteType is only seen in results for requests that were unable to + complete CDC operations. + """ + + +WriteType.name_to_value = { + 'SIMPLE': WriteType.SIMPLE, + 'BATCH': WriteType.BATCH, + 'UNLOGGED_BATCH': WriteType.UNLOGGED_BATCH, + 'COUNTER': WriteType.COUNTER, + 'BATCH_LOG': WriteType.BATCH_LOG, + 'CAS': WriteType.CAS, + 'VIEW': WriteType.VIEW, + 'CDC': WriteType.CDC +} + + +WriteType.value_to_name = {v: k for k, v in six.iteritems(WriteType.name_to_value)} + + class SchemaChangeType(object): DROPPED = 'DROPPED' CREATED = 'CREATED' @@ -339,14 +409,16 @@ class Timeout(RequestExecutionException): the operation """ - def __init__(self, summary_message, consistency=None, required_responses=None, received_responses=None): - self.consistency = consistency - self.required_responses = required_responses - self.received_responses = received_responses - Exception.__init__(self, summary_message + ' info=' + - repr({'consistency': consistency_value_to_name(consistency), - 'required_responses': required_responses, - 'received_responses': received_responses})) + def __init__(self, summary_message, **kwargs): + self.consistency = kwargs.get("consistency") + self.required_responses = kwargs.get("required_responses") + self.received_responses = kwargs.get("received_responses") + if "consistency" in kwargs: + kwargs["consistency"] = consistency_value_to_name(kwargs["consistency"]) + if "write_type" in kwargs: + kwargs["write_type"] = WriteType.value_to_name[kwargs["write_type"]] + + Exception.__init__(self, summary_message + ' info=' + repr(kwargs)) class ReadTimeout(Timeout): @@ -387,6 +459,7 @@ class WriteTimeout(Timeout): """ def __init__(self, message, write_type=None, **kwargs): + kwargs["write_type"] = write_type Timeout.__init__(self, message, **kwargs) self.write_type = write_type diff --git a/cassandra/policies.py b/cassandra/policies.py index 13c112d0d1..fdd96d596e 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -17,6 +17,14 @@ from random import randint, shuffle from threading import Lock import socket +from cassandra import WriteType as WT + + +# This is done this way because WriteType was originally +# defined here and in order not to break the API. +# It may removed in the next mayor. +WriteType = WT + from cassandra import ConsistencyLevel, OperationTimedOut @@ -690,72 +698,6 @@ def new_schedule(self): i += 1 -class WriteType(object): - """ - For usage with :class:`.RetryPolicy`, this describe a type - of write operation. - """ - - SIMPLE = 0 - """ - A write to a single partition key. Such writes are guaranteed to be atomic - and isolated. - """ - - BATCH = 1 - """ - A write to multiple partition keys that used the distributed batch log to - ensure atomicity. - """ - - UNLOGGED_BATCH = 2 - """ - A write to multiple partition keys that did not use the distributed batch - log. Atomicity for such writes is not guaranteed. - """ - - COUNTER = 3 - """ - A counter write (for one or multiple partition keys). Such writes should - not be replayed in order to avoid overcount. - """ - - BATCH_LOG = 4 - """ - The initial write to the distributed batch log that Cassandra performs - internally before a BATCH write. - """ - - CAS = 5 - """ - A lighweight-transaction write, such as "DELETE ... IF EXISTS". - """ - - VIEW = 6 - """ - This WriteType is only seen in results for requests that were unable to - complete MV operations. - """ - - CDC = 7 - """ - This WriteType is only seen in results for requests that were unable to - complete CDC operations. - """ - - -WriteType.name_to_value = { - 'SIMPLE': WriteType.SIMPLE, - 'BATCH': WriteType.BATCH, - 'UNLOGGED_BATCH': WriteType.UNLOGGED_BATCH, - 'COUNTER': WriteType.COUNTER, - 'BATCH_LOG': WriteType.BATCH_LOG, - 'CAS': WriteType.CAS, - 'VIEW': WriteType.VIEW, - 'CDC': WriteType.CDC -} - - class RetryPolicy(object): """ A policy that describes whether to retry, rethrow, or ignore coordinator diff --git a/cassandra/protocol.py b/cassandra/protocol.py index e9b6dab5c1..7e117792af 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -40,7 +40,7 @@ UTF8Type, VarcharType, UUIDType, UserType, TupleType, lookup_casstype, SimpleDateType, TimeType, ByteType, ShortType, DurationType) -from cassandra.policies import WriteType +from cassandra import WriteType from cassandra.cython_deps import HAVE_CYTHON, HAVE_NUMPY from cassandra import util diff --git a/tests/integration/simulacron/__init__.py b/tests/integration/simulacron/__init__.py index 79868c38bf..9a831d08f7 100644 --- a/tests/integration/simulacron/__init__.py +++ b/tests/integration/simulacron/__init__.py @@ -17,7 +17,12 @@ import unittest # noqa from tests.integration.simulacron.utils import stop_simulacron, clear_queries +from tests.integration import PROTOCOL_VERSION, SIMULACRON_JAR, CASSANDRA_VERSION +from tests.integration.simulacron.utils import start_and_prime_singledc +from cassandra.cluster import Cluster + +from packaging.version import Version def teardown_package(): stop_simulacron() @@ -27,3 +32,23 @@ class SimulacronBase(unittest.TestCase): def tearDown(self): clear_queries() stop_simulacron() + + +class SimulacronCluster(SimulacronBase): + @classmethod + def setUpClass(cls): + if SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"): + return + + start_and_prime_singledc() + cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False) + cls.session = cls.cluster.connect(wait_for_all_pools=True) + + @classmethod + def tearDownClass(cls): + if SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"): + return + + cls.cluster.shutdown() + stop_simulacron() + diff --git a/tests/integration/simulacron/test_cluster.py b/tests/integration/simulacron/test_cluster.py new file mode 100644 index 0000000000..93b3a50078 --- /dev/null +++ b/tests/integration/simulacron/test_cluster.py @@ -0,0 +1,55 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +from tests.integration.simulacron import SimulacronCluster +from tests.integration import requiressimulacron +from tests.integration.simulacron.utils import prime_query + +from cassandra import WriteTimeout, WriteType, ConsistencyLevel + +@requiressimulacron +class ClusterTests(SimulacronCluster): + def test_writetimeout(self): + write_type = "UNLOGGED_BATCH" + consistency = "LOCAL_QUORUM" + received_responses = 1 + required_responses = 4 + + query_to_prime_simple = "SELECT * from simulacron_keyspace.simple" + then = { + "result": "write_timeout", + "delay_in_ms": 0, + "consistency_level": consistency, + "received": received_responses, + "block_for": required_responses, + "write_type": write_type, + "ignore_on_prepare": True + } + prime_query(query_to_prime_simple, then=then, rows=None, column_types=None) + + try: + self.session.execute(query_to_prime_simple) + except WriteTimeout as wt: + self.assertEqual(wt.write_type, WriteType.name_to_value[write_type]) + self.assertEqual(wt.consistency, ConsistencyLevel.name_to_value[consistency]) + self.assertEqual(wt.received_responses, received_responses) + self.assertEqual(wt.required_responses, required_responses) + self.assertIn(write_type, str(wt)) + self.assertIn(consistency, str(wt)) + self.assertIn(str(received_responses), str(wt)) + self.assertIn(str(required_responses), str(wt)) diff --git a/tests/integration/simulacron/test_policies.py b/tests/integration/simulacron/test_policies.py index 26bd437ffc..a934ccb117 100644 --- a/tests/integration/simulacron/test_policies.py +++ b/tests/integration/simulacron/test_policies.py @@ -29,8 +29,8 @@ from itertools import count from packaging.version import Version -class BadRoundRobinPolicy(RoundRobinPolicy): +class BadRoundRobinPolicy(RoundRobinPolicy): def make_query_plan(self, working_keyspace=None, query=None): pos = self._position self._position += 1 @@ -293,6 +293,7 @@ def test_retry_policy_ignores_and_rethrows(self): with self.assertRaises(WriteTimeout): self.session.execute(query_to_prime_simple) + #CDC should be ignored self.session.execute(query_to_prime_cdc) From 6e8cd075edf0f69ebd5d20b2339ec2e861ef7566 Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Tue, 10 Apr 2018 13:17:48 -0400 Subject: [PATCH 1380/2005] Adress review comments --- cassandra/__init__.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 422ded847e..cac9693b9e 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -409,16 +409,21 @@ class Timeout(RequestExecutionException): the operation """ - def __init__(self, summary_message, **kwargs): - self.consistency = kwargs.get("consistency") - self.required_responses = kwargs.get("required_responses") - self.received_responses = kwargs.get("received_responses") - if "consistency" in kwargs: - kwargs["consistency"] = consistency_value_to_name(kwargs["consistency"]) + def __init__(self, summary_message, consistency=None, required_responses=None, + received_responses=None, **kwargs): + self.consistency = consistency + self.required_responses = required_responses + self.received_responses = received_responses + if "write_type" in kwargs: kwargs["write_type"] = WriteType.value_to_name[kwargs["write_type"]] - Exception.__init__(self, summary_message + ' info=' + repr(kwargs)) + info = {'consistency': consistency_value_to_name(consistency), + 'required_responses': required_responses, + 'received_responses': received_responses} + info.update(kwargs) + + Exception.__init__(self, summary_message + ' info=' + repr(info)) class ReadTimeout(Timeout): From 6e9214d246496c7ca392824974e436eb39ae8c01 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 11 Apr 2018 17:06:02 -0400 Subject: [PATCH 1381/2005] assert exception raised in test --- tests/integration/simulacron/test_cluster.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/integration/simulacron/test_cluster.py b/tests/integration/simulacron/test_cluster.py index 93b3a50078..ff2fb59743 100644 --- a/tests/integration/simulacron/test_cluster.py +++ b/tests/integration/simulacron/test_cluster.py @@ -42,14 +42,14 @@ def test_writetimeout(self): } prime_query(query_to_prime_simple, then=then, rows=None, column_types=None) - try: + with self.assertRaises(WriteTimeout) as assert_raised_context: self.session.execute(query_to_prime_simple) - except WriteTimeout as wt: - self.assertEqual(wt.write_type, WriteType.name_to_value[write_type]) - self.assertEqual(wt.consistency, ConsistencyLevel.name_to_value[consistency]) - self.assertEqual(wt.received_responses, received_responses) - self.assertEqual(wt.required_responses, required_responses) - self.assertIn(write_type, str(wt)) - self.assertIn(consistency, str(wt)) - self.assertIn(str(received_responses), str(wt)) - self.assertIn(str(required_responses), str(wt)) + wt = assert_raised_context.exception + self.assertEqual(wt.write_type, WriteType.name_to_value[write_type]) + self.assertEqual(wt.consistency, ConsistencyLevel.name_to_value[consistency]) + self.assertEqual(wt.received_responses, received_responses) + self.assertEqual(wt.required_responses, required_responses) + self.assertIn(write_type, str(wt)) + self.assertIn(consistency, str(wt)) + self.assertIn(str(received_responses), str(wt)) + self.assertIn(str(required_responses), str(wt)) From 3a881b07ed480681755109024775b7e20b731bc1 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 11 Apr 2018 17:12:36 -0400 Subject: [PATCH 1382/2005] CHANGELOG entry for PYTHON-941 --- CHANGELOG.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index e2a884e814..6572fdf296 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -17,6 +17,7 @@ Other * Warn users when using the deprecated Session.default_consistency_level (PYTHON-953) * Add DSE smoke test to OSS driver tests (PYTHON-894) * Document long compilation times and workarounds (PYTHON-868) +* Improve error for batch WriteTimeouts (PYTHON-941) 3.13.0 ====== From c2fc640d4d2d9f06c31130237e64811a6afab051 Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Wed, 11 Apr 2018 17:36:28 -0400 Subject: [PATCH 1383/2005] PYTHON-968 Support IS NOT NULL operator in cqlengine --- cassandra/cqlengine/operators.py | 5 + cassandra/cqlengine/statements.py | 20 +++- .../cqlengine/operators/__init__.py | 7 ++ .../operators/test_where_operators.py | 92 ++++++++++++++++--- 4 files changed, 108 insertions(+), 16 deletions(-) diff --git a/cassandra/cqlengine/operators.py b/cassandra/cqlengine/operators.py index 8585b9c619..bba505583c 100644 --- a/cassandra/cqlengine/operators.py +++ b/cassandra/cqlengine/operators.py @@ -98,3 +98,8 @@ class ContainsOperator(EqualsOperator): class LikeOperator(EqualsOperator): symbol = "LIKE" cql_symbol = 'LIKE' + + +class IsNotNullOperator(EqualsOperator): + symbol = "IS NOT NULL" + cql_symbol = 'IS NOT NULL' diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index a138e755ef..57aac56e59 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -21,7 +21,7 @@ from cassandra.cqlengine import columns from cassandra.cqlengine import UnicodeMixin from cassandra.cqlengine.functions import QueryValue -from cassandra.cqlengine.operators import BaseWhereOperator, InOperator, EqualsOperator +from cassandra.cqlengine.operators import BaseWhereOperator, InOperator, EqualsOperator, IsNotNullOperator class StatementException(Exception): @@ -138,6 +138,24 @@ def update_context(self, ctx): self.query_value.update_context(ctx) +class IsNotNullClause(WhereClause): + def __init__(self, field): + super(IsNotNullClause, self).__init__(field, IsNotNullOperator(), '') + + def __unicode__(self): + field = ('"{0}"' if self.quote_field else '{0}').format(self.field) + return u'{0} {1}'.format(field, self.operator) + + def update_context(self, ctx): + pass + + def get_context_size(self): + return 0 + +# alias for convenience +IsNotNull = IsNotNullClause + + class AssignmentClause(BaseClause): """ a single variable st statement """ diff --git a/tests/integration/cqlengine/operators/__init__.py b/tests/integration/cqlengine/operators/__init__.py index 2c9ca172f8..05a41c46fd 100644 --- a/tests/integration/cqlengine/operators/__init__.py +++ b/tests/integration/cqlengine/operators/__init__.py @@ -11,3 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +from cassandra.cqlengine.operators import BaseWhereOperator + + +def check_lookup(test_case, symbol, expected): + op = BaseWhereOperator.get_operator(symbol) + test_case.assertEqual(op, expected) diff --git a/tests/integration/cqlengine/operators/test_where_operators.py b/tests/integration/cqlengine/operators/test_where_operators.py index da4e9c5b30..536c85d3e1 100644 --- a/tests/integration/cqlengine/operators/test_where_operators.py +++ b/tests/integration/cqlengine/operators/test_where_operators.py @@ -12,30 +12,40 @@ # See the License for the specific language governing permissions and # limitations under the License. -from unittest import TestCase +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + from cassandra.cqlengine.operators import * +from uuid import uuid4 + +from cassandra.cqlengine.management import sync_table, drop_table +from cassandra.cqlengine.operators import IsNotNullOperator +from cassandra.cqlengine.statements import IsNotNull +from cassandra import InvalidRequest + +from tests.integration.cqlengine.base import TestQueryUpdateModel, BaseCassEngTestCase +from tests.integration.cqlengine.operators import check_lookup + import six -class TestWhereOperators(TestCase): +class TestWhereOperators(unittest.TestCase): def test_symbol_lookup(self): """ tests where symbols are looked up properly """ - def check_lookup(symbol, expected): - op = BaseWhereOperator.get_operator(symbol) - self.assertEqual(op, expected) - - check_lookup('EQ', EqualsOperator) - check_lookup('NE', NotEqualsOperator) - check_lookup('IN', InOperator) - check_lookup('GT', GreaterThanOperator) - check_lookup('GTE', GreaterThanOrEqualOperator) - check_lookup('LT', LessThanOperator) - check_lookup('LTE', LessThanOrEqualOperator) - check_lookup('CONTAINS', ContainsOperator) - check_lookup('LIKE', LikeOperator) + check_lookup(self, 'EQ', EqualsOperator) + check_lookup(self, 'NE', NotEqualsOperator) + check_lookup(self, 'IN', InOperator) + check_lookup(self, 'GT', GreaterThanOperator) + check_lookup(self, 'GTE', GreaterThanOrEqualOperator) + check_lookup(self, 'LT', LessThanOperator) + check_lookup(self, 'LTE', LessThanOrEqualOperator) + check_lookup(self, 'CONTAINS', ContainsOperator) + check_lookup(self, 'LIKE', LikeOperator) def test_operator_rendering(self): """ tests symbols are rendered properly """ @@ -48,3 +58,55 @@ def test_operator_rendering(self): self.assertEqual("<=", six.text_type(LessThanOrEqualOperator())) self.assertEqual("CONTAINS", six.text_type(ContainsOperator())) self.assertEqual("LIKE", six.text_type(LikeOperator())) + + +class TestIsNotNull(BaseCassEngTestCase): + def test_is_not_nul_to_cql(self): + """ + Verify that IsNotNull is converted correctly to CQL + + @since 2.5 + @jira_ticket PYTHON-968 + @expected_result the strings match + + @test_category cqlengine + """ + + check_lookup(self, 'IS NOT NULL', IsNotNullOperator) + + # The * is not expanded because there are no referred fields + self.assertEqual( + str(TestQueryUpdateModel.filter(IsNotNull("text")).limit(2)), + "SELECT * FROM cqlengine_test.test_query_update_model WHERE \"text\" IS NOT NULL LIMIT 2" + ) + + # We already know partition so cqlengine doesn't query for it + self.assertEqual( + str(TestQueryUpdateModel.filter(IsNotNull("text"), partition=uuid4())), + "SELECT \"cluster\", \"count\", \"text\", \"text_set\", " + "\"text_list\", \"text_map\" FROM cqlengine_test.test_query_update_model " + "WHERE \"text\" IS NOT NULL AND \"partition\" = %(0)s LIMIT 10000" + ) + + def test_is_not_null_execution(self): + """ + Verify that CQL statements have correct syntax when executed + If we wanted them to return something meaningful and not a InvalidRequest + we'd have to create an index in search for the column we are using + IsNotNull + + @since 2.5 + @jira_ticket PYTHON-968 + @expected_result InvalidRequest is arisen + + @test_category cqlengine + """ + sync_table(TestQueryUpdateModel) + self.addCleanup(drop_table, TestQueryUpdateModel) + + # Raises InvalidRequest instead of dse.protocol.SyntaxException + with self.assertRaises(InvalidRequest): + list(TestQueryUpdateModel.filter(IsNotNull("text"))) + + with self.assertRaises(InvalidRequest): + list(TestQueryUpdateModel.filter(IsNotNull("text"), partition=uuid4())) From 5629319565b66a2237585eb2868b2293ccde08bd Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Thu, 12 Apr 2018 11:10:58 -0400 Subject: [PATCH 1384/2005] Added changelog --- CHANGELOG.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 6572fdf296..5d1311548c 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -6,6 +6,7 @@ Features * Add one() function to the ResultSet API (PYTHON-947) * Create an utility function to fetch concurrently many keys from the same replica (PYTHON-647) * Allow filter queries with fields that have an index managed outside of cqlengine (PYTHON-966) +* Support IS NOT NULL operator in cqlengine (PYTHON-968) Other ----- From 7f717d38d1e284f13a8de334f50e7fb24048eee6 Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Thu, 12 Apr 2018 11:58:16 -0400 Subject: [PATCH 1385/2005] Run test_is_not_null_execution for C* greater than 3.0 --- tests/integration/cqlengine/operators/test_where_operators.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/cqlengine/operators/test_where_operators.py b/tests/integration/cqlengine/operators/test_where_operators.py index 536c85d3e1..1545771280 100644 --- a/tests/integration/cqlengine/operators/test_where_operators.py +++ b/tests/integration/cqlengine/operators/test_where_operators.py @@ -28,6 +28,7 @@ from tests.integration.cqlengine.base import TestQueryUpdateModel, BaseCassEngTestCase from tests.integration.cqlengine.operators import check_lookup +from tests.integration import greaterthanorequalcass30 import six @@ -88,6 +89,7 @@ def test_is_not_nul_to_cql(self): "WHERE \"text\" IS NOT NULL AND \"partition\" = %(0)s LIMIT 10000" ) + @greaterthanorequalcass30 def test_is_not_null_execution(self): """ Verify that CQL statements have correct syntax when executed From bfee87d4e4e7ba8b944481e0bf5abde2f7697c1f Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 12 Apr 2018 11:55:05 -0400 Subject: [PATCH 1386/2005] fix method name spelling --- tests/integration/cqlengine/operators/test_where_operators.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/cqlengine/operators/test_where_operators.py b/tests/integration/cqlengine/operators/test_where_operators.py index 1545771280..7fc177de5e 100644 --- a/tests/integration/cqlengine/operators/test_where_operators.py +++ b/tests/integration/cqlengine/operators/test_where_operators.py @@ -62,7 +62,7 @@ def test_operator_rendering(self): class TestIsNotNull(BaseCassEngTestCase): - def test_is_not_nul_to_cql(self): + def test_is_not_null_to_cql(self): """ Verify that IsNotNull is converted correctly to CQL From 8311a09d5482389745136d019b5cafbe011aaebc Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 12 Apr 2018 12:00:27 -0400 Subject: [PATCH 1387/2005] use single quotes --- .../cqlengine/operators/test_where_operators.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/integration/cqlengine/operators/test_where_operators.py b/tests/integration/cqlengine/operators/test_where_operators.py index 7fc177de5e..1428846a90 100644 --- a/tests/integration/cqlengine/operators/test_where_operators.py +++ b/tests/integration/cqlengine/operators/test_where_operators.py @@ -78,15 +78,15 @@ def test_is_not_null_to_cql(self): # The * is not expanded because there are no referred fields self.assertEqual( str(TestQueryUpdateModel.filter(IsNotNull("text")).limit(2)), - "SELECT * FROM cqlengine_test.test_query_update_model WHERE \"text\" IS NOT NULL LIMIT 2" + 'SELECT * FROM cqlengine_test.test_query_update_model WHERE "text" IS NOT NULL LIMIT 2' ) # We already know partition so cqlengine doesn't query for it self.assertEqual( str(TestQueryUpdateModel.filter(IsNotNull("text"), partition=uuid4())), - "SELECT \"cluster\", \"count\", \"text\", \"text_set\", " - "\"text_list\", \"text_map\" FROM cqlengine_test.test_query_update_model " - "WHERE \"text\" IS NOT NULL AND \"partition\" = %(0)s LIMIT 10000" + 'SELECT "cluster", "count", "text", "text_set", ' + '"text_list", "text_map" FROM cqlengine_test.test_query_update_model ' + 'WHERE "text" IS NOT NULL AND "partition" = %(0)s LIMIT 10000' ) @greaterthanorequalcass30 From 95e02333499a173243a54898f8b4b6da764a2374 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 12 Apr 2018 12:02:09 -0400 Subject: [PATCH 1388/2005] add parens for easier reading --- .../integration/cqlengine/operators/test_where_operators.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/integration/cqlengine/operators/test_where_operators.py b/tests/integration/cqlengine/operators/test_where_operators.py index 1428846a90..fdfce1f0b8 100644 --- a/tests/integration/cqlengine/operators/test_where_operators.py +++ b/tests/integration/cqlengine/operators/test_where_operators.py @@ -84,9 +84,9 @@ def test_is_not_null_to_cql(self): # We already know partition so cqlengine doesn't query for it self.assertEqual( str(TestQueryUpdateModel.filter(IsNotNull("text"), partition=uuid4())), - 'SELECT "cluster", "count", "text", "text_set", ' - '"text_list", "text_map" FROM cqlengine_test.test_query_update_model ' - 'WHERE "text" IS NOT NULL AND "partition" = %(0)s LIMIT 10000' + ('SELECT "cluster", "count", "text", "text_set", ' + '"text_list", "text_map" FROM cqlengine_test.test_query_update_model ' + 'WHERE "text" IS NOT NULL AND "partition" = %(0)s LIMIT 10000') ) @greaterthanorequalcass30 From ca0f26f42b607b605f2930763d8441d1fc82643b Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Thu, 12 Apr 2018 14:01:21 -0400 Subject: [PATCH 1389/2005] Added docs for IS NOT NULL --- docs/cqlengine/queryset.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/cqlengine/queryset.rst b/docs/cqlengine/queryset.rst index bd47e1067e..14bbef503f 100644 --- a/docs/cqlengine/queryset.rst +++ b/docs/cqlengine/queryset.rst @@ -199,6 +199,14 @@ In addition to simple equal to queries, cqlengine also supports querying with ot q = Automobile.objects.filter(model__like='%Civic%').allow_filtering() +:attr:`IS NOT NULL (IsNotNull(column_name)) ` + + The IS NOT NULL operator is not yet supported for C*. + + .. code-block:: python + + q = Automobile.objects.filter(IsNotNull('model')) + Limitations: - Currently, cqlengine does not support SASI index creation. To use this feature, you need to create the SASI index using the core driver. From a9a70f645c1e78a4c5f78d5a003f48d757f6c0ac Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Sat, 17 Mar 2018 19:11:46 +0100 Subject: [PATCH 1390/2005] PYTHON-343 SSL support for Twisted --- CHANGELOG.rst | 1 + build.yaml | 10 ++- cassandra/io/twistedreactor.py | 94 ++++++++++++++++++++++++---- test-requirements.txt | 1 + tests/integration/__init__.py | 2 + tests/integration/long/test_ssl.py | 35 +++++++---- tests/unit/io/test_twistedreactor.py | 8 ++- 7 files changed, 120 insertions(+), 31 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 6572fdf296..6c1f3b95a1 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -6,6 +6,7 @@ Features * Add one() function to the ResultSet API (PYTHON-947) * Create an utility function to fetch concurrently many keys from the same replica (PYTHON-647) * Allow filter queries with fields that have an index managed outside of cqlengine (PYTHON-966) +* Twisted SSL Support (PYTHON-343) Other ----- diff --git a/build.yaml b/build.yaml index a975c3d8fb..e4101f99dd 100644 --- a/build.yaml +++ b/build.yaml @@ -25,7 +25,7 @@ schedules: branches: include: [/python.*/] env_vars: | - EVENT_LOOP_MANAGER='libev' + EVENT_LOOP_MANAGER='twisted' EXCLUDE_LONG=1 matrix: exclude: @@ -118,13 +118,19 @@ build: export PATH=$JAVA_HOME/bin:$PATH export PYTHONPATH="" + # Install latest setuptools + pip install --upgrade pip + pip install -U setuptools + pip install git+https://github.com/pcmanus/ccm.git # Install dependencies - sudo apt-get install -y libev4 libev-dev + sudo apt-get install -y libev4 libev-dev libssl-dev pip install -r test-requirements.txt pip install nose-ignore-docstring pip install nose-exclude + pip install service_identity + FORCE_CYTHON=False if [[ $CYTHON == 'CYTHON' ]]; then FORCE_CYTHON=True diff --git a/cassandra/io/twistedreactor.py b/cassandra/io/twistedreactor.py index 337d7a4265..8121c784c4 100644 --- a/cassandra/io/twistedreactor.py +++ b/cassandra/io/twistedreactor.py @@ -42,6 +42,9 @@ class TwistedConnectionProtocol(protocol.Protocol): made events. """ + def __init__(self): + self.connection = None + def dataReceived(self, data): """ Callback function that is called when data has been received @@ -50,9 +53,8 @@ def dataReceived(self, data): Reaches back to the Connection object and queues the data for processing. """ - self.transport.connector.factory.conn._iobuf.write(data) - self.transport.connector.factory.conn.handle_read() - + self.connection._iobuf.write(data) + self.connection.handle_read() def connectionMade(self): """ Callback function that is called when a connection has succeeded. @@ -60,11 +62,18 @@ def connectionMade(self): Reaches back to the Connection object and confirms that the connection is ready. """ - self.transport.connector.factory.conn.client_connection_made() + try: + # Non SSL connection + self.connection = self.transport.connector.factory.conn + except AttributeError: + # SSL connection + self.connection = self.transport.connector.factory.wrappedFactory.conn + + self.connection.client_connection_made(self.transport) def connectionLost(self, reason): # reason is a Failure instance - self.transport.connector.factory.conn.defunct(reason.value) + self.connection.defunct(reason.value) class TwistedConnectionClientFactory(protocol.ClientFactory): @@ -157,6 +166,49 @@ def _on_loop_timer(self): self._schedule_timeout(self._timers.next_timeout) +try: + from twisted.internet import ssl + import OpenSSL.crypto + from OpenSSL.crypto import load_certificate, FILETYPE_PEM + + class _SSLContextFactory(ssl.ClientContextFactory): + def __init__(self, ssl_options, check_hostname, host): + self.ssl_options = ssl_options + self.check_hostname = check_hostname + self.host = host + + def getContext(self): + # This version has to be OpenSSL.SSL.DESIRED_VERSION + # instead of ssl.DESIRED_VERSION as in other loops + self.method = self.ssl_options["ssl_version"] + context = ssl.ClientContextFactory.getContext(self) + if "certfile" in self.ssl_options: + context.use_certificate_file(self.ssl_options["certfile"]) + if "keyfile" in self.ssl_options: + context.use_privatekey_file(self.ssl_options["keyfile"]) + if "ca_certs" in self.ssl_options: + x509 = load_certificate(FILETYPE_PEM, open(self.ssl_options["ca_certs"]).read()) + store = context.get_cert_store() + store.add_cert(x509) + if "cert_reqs" in self.ssl_options: + # This expects OpenSSL.SSL.VERIFY_NONE/OpenSSL.SSL.VERIFY_PEER + # or OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT + context.set_verify(self.ssl_options["cert_reqs"], + callback=self.verify_callback) + return context + + def verify_callback(self, connection, x509, errnum, errdepth, ok): + if ok: + if self.check_hostname and self.host != x509.get_subject().commonName: + return False + return ok + + _HAS_SSL = True + +except ImportError as e: + _HAS_SSL = False + + class TwistedConnection(Connection): """ An implementation of :class:`.Connection` that utilizes the @@ -189,6 +241,7 @@ def __init__(self, *args, **kwargs): self.is_closed = True self.connector = None + self.transport = None reactor.callFromThread(self.add_connection) self._loop.maybe_start() @@ -198,18 +251,33 @@ def add_connection(self): Convenience function to connect and store the resulting connector. """ - self.connector = reactor.connectTCP( - host=self.host, port=self.port, - factory=TwistedConnectionClientFactory(self), - timeout=self.connect_timeout) - - def client_connection_made(self): + if self.ssl_options: + + if not _HAS_SSL: + raise ImportError( + str(e) + + ', pyOpenSSL must be installed to enable SSL support with the Twisted event loop' + ) + + self.connector = reactor.connectSSL( + host=self.host, port=self.port, + factory=TwistedConnectionClientFactory(self), + contextFactory=_SSLContextFactory(self.ssl_options, self._check_hostname, self.host), + timeout=self.connect_timeout) + else: + self.connector = reactor.connectTCP( + host=self.host, port=self.port, + factory=TwistedConnectionClientFactory(self), + timeout=self.connect_timeout) + + def client_connection_made(self, transport): """ Called by twisted protocol when a connection attempt has succeeded. """ with self.lock: self.is_closed = False + self.transport = transport self._send_options_message() def close(self): @@ -222,7 +290,7 @@ def close(self): self.is_closed = True log.debug("Closing connection (%s) to %s", id(self), self.host) - self.connector.disconnect() + reactor.callFromThread(self.connector.disconnect) log.debug("Closed socket to %s", self.host) if not self.is_defunct: @@ -246,4 +314,4 @@ def push(self, data): it is not thread-safe, so we schedule it to run from within the event loop when it gets the chance. """ - reactor.callFromThread(self.connector.transport.write, data) + reactor.callFromThread(self.transport.write, data) diff --git a/test-requirements.txt b/test-requirements.txt index ca2d3309d6..e18fecdd16 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -14,3 +14,4 @@ eventlet cython>=0.20,<0.28 packaging asynctest; python_version > '3.4' +pyOpenSSL diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 6d506544e6..8c4cabb4f1 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -111,6 +111,8 @@ def _get_dse_version_from_cass(cass_version): KEEP_TEST_CLUSTER = bool(os.getenv('KEEP_TEST_CLUSTER', False)) SIMULACRON_JAR = os.getenv('SIMULACRON_JAR', None) +default_cassandra_version = '3.11' + CASSANDRA_IP = os.getenv('CASSANDRA_IP', '127.0.0.1') CASSANDRA_DIR = os.getenv('CASSANDRA_DIR', None) diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index ffaa76e9fe..bb53a30edc 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -21,7 +21,7 @@ from cassandra.cluster import Cluster, NoHostAvailable from cassandra import ConsistencyLevel from cassandra.query import SimpleStatement -from tests.integration import PROTOCOL_VERSION, get_cluster, remove_cluster, use_single_node +from tests.integration import PROTOCOL_VERSION, get_cluster, remove_cluster, use_single_node, EVENT_LOOP_MANAGER log = logging.getLogger(__name__) @@ -37,6 +37,16 @@ DRIVER_CERTFILE = "tests/integration/long/ssl/driver.pem" DRIVER_CERTFILE_BAD = "tests/integration/long/ssl/python_driver_bad.pem" +if "twisted" in EVENT_LOOP_MANAGER: + import OpenSSL + ssl_version = OpenSSL.SSL.TLSv1_METHOD + verify_certs = {'cert_reqs': OpenSSL.SSL.VERIFY_PEER, + 'check_hostname': True} + +else: + ssl_version = ssl.PROTOCOL_TLSv1 + verify_certs = {'cert_reqs': ssl.CERT_REQUIRED, + 'check_hostname': True} def setup_cluster_ssl(client_auth=False): """ @@ -130,7 +140,7 @@ def test_can_connect_with_ssl_ca(self): # find absolute path to client CA_CERTS abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS) - ssl_options = {'ca_certs': abs_path_ca_cert_path,'ssl_version': ssl.PROTOCOL_TLSv1} + ssl_options = {'ca_certs': abs_path_ca_cert_path,'ssl_version': ssl_version} validate_ssl_options(ssl_options=ssl_options) def test_can_connect_with_ssl_long_running(self): @@ -147,7 +157,7 @@ def test_can_connect_with_ssl_long_running(self): # find absolute path to client CA_CERTS abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS) ssl_options = {'ca_certs': abs_path_ca_cert_path, - 'ssl_version': ssl.PROTOCOL_TLSv1} + 'ssl_version': ssl_version} tries = 0 while True: if tries > 5: @@ -187,9 +197,8 @@ def test_can_connect_with_ssl_ca_host_match(self): # find absolute path to client CA_CERTS abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS) ssl_options = {'ca_certs': abs_path_ca_cert_path, - 'ssl_version': ssl.PROTOCOL_TLSv1, - 'cert_reqs': ssl.CERT_REQUIRED, - 'check_hostname': True} + 'ssl_version': ssl_version} + ssl_options.update(verify_certs) validate_ssl_options(ssl_options=ssl_options) @@ -225,7 +234,7 @@ def test_can_connect_with_ssl_client_auth(self): abs_driver_keyfile = os.path.abspath(DRIVER_KEYFILE) abs_driver_certfile = os.path.abspath(DRIVER_CERTFILE) ssl_options = {'ca_certs': abs_path_ca_cert_path, - 'ssl_version': ssl.PROTOCOL_TLSv1, + 'ssl_version': ssl_version, 'keyfile': abs_driver_keyfile, 'certfile': abs_driver_certfile} validate_ssl_options(ssl_options) @@ -251,11 +260,11 @@ def test_can_connect_with_ssl_client_auth_host_name(self): abs_driver_certfile = os.path.abspath(DRIVER_CERTFILE) ssl_options = {'ca_certs': abs_path_ca_cert_path, - 'ssl_version': ssl.PROTOCOL_TLSv1, + 'ssl_version': ssl_version, 'keyfile': abs_driver_keyfile, - 'certfile': abs_driver_certfile, - 'cert_reqs': ssl.CERT_REQUIRED, - 'check_hostname': True} + 'certfile': abs_driver_certfile} + ssl_options.update(verify_certs) + validate_ssl_options(ssl_options) def test_cannot_connect_without_client_auth(self): @@ -273,7 +282,7 @@ def test_cannot_connect_without_client_auth(self): abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS) cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options={'ca_certs': abs_path_ca_cert_path, - 'ssl_version': ssl.PROTOCOL_TLSv1}) + 'ssl_version': ssl_version}) # attempt to connect and expect an exception with self.assertRaises(NoHostAvailable) as context: @@ -300,7 +309,7 @@ def test_cannot_connect_with_bad_client_auth(self): abs_driver_certfile = os.path.abspath(DRIVER_CERTFILE_BAD) cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options={'ca_certs': abs_path_ca_cert_path, - 'ssl_version': ssl.PROTOCOL_TLSv1, + 'ssl_version': ssl_version, 'keyfile': abs_driver_keyfile, 'certfile': abs_driver_certfile}) with self.assertRaises(NoHostAvailable) as context: diff --git a/tests/unit/io/test_twistedreactor.py b/tests/unit/io/test_twistedreactor.py index 982ce11b55..125ac62537 100644 --- a/tests/unit/io/test_twistedreactor.py +++ b/tests/unit/io/test_twistedreactor.py @@ -156,7 +156,7 @@ def test_client_connection_made(self): client_connection_made() """ self.obj_ut._send_options_message = Mock() - self.obj_ut.client_connection_made() + self.obj_ut.client_connection_made(Mock()) self.obj_ut._send_options_message.assert_called_with() @patch('twisted.internet.reactor.connectTCP') @@ -168,7 +168,7 @@ def test_close(self, mock_connectTCP): self.obj_ut.add_connection() self.obj_ut.is_closed = False self.obj_ut.close() - self.obj_ut.connector.disconnect.assert_called_with() + self.assertTrue(self.obj_ut.connected_event.is_set()) self.assertTrue(self.obj_ut.error_all_requests.called) @@ -217,6 +217,8 @@ def test_push(self, mock_connectTCP): Verifiy that push() calls transport.write(data). """ self.obj_ut.add_connection() + transport_mock = Mock() + self.obj_ut.transport = transport_mock self.obj_ut.push('123 pickup') self.mock_reactor_cft.assert_called_with( - self.obj_ut.connector.transport.write, '123 pickup') + transport_mock.write, '123 pickup') From ff32e6e3cf0cc8cc0d0096d0af89b317597762e1 Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Thu, 12 Apr 2018 14:19:06 -0400 Subject: [PATCH 1391/2005] Note on using SSL with twisted event loop --- docs/security.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/security.rst b/docs/security.rst index 989688e9bb..2196275432 100644 --- a/docs/security.rst +++ b/docs/security.rst @@ -87,3 +87,6 @@ This is only an example to show how to pass the ssl parameters. Consider reading the `python ssl documentation `_ for your configuration. For further reading, Andrew Mussey has published a thorough guide on `Using SSL with the DataStax Python driver `_. + +*Note*: In case the twisted event loop is used PyOpenSSL must be installed or an exception will be risen. Also +to set the ``ssl_version`` and ``cert_reqs`` in ``ssl_opts`` the appropriate constants from PyOpenSSL are expected. From d9eced84707cb3e22c8be632a359ec579250ea1d Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Thu, 12 Apr 2018 14:45:03 -0400 Subject: [PATCH 1392/2005] PyOpenSSL -> twsited[tls] in test-requirements --- build.yaml | 2 +- docs/security.rst | 4 ++-- test-requirements.txt | 3 +-- tests/integration/__init__.py | 2 -- 4 files changed, 4 insertions(+), 7 deletions(-) diff --git a/build.yaml b/build.yaml index e4101f99dd..6b36d23930 100644 --- a/build.yaml +++ b/build.yaml @@ -25,7 +25,7 @@ schedules: branches: include: [/python.*/] env_vars: | - EVENT_LOOP_MANAGER='twisted' + EVENT_LOOP_MANAGER='libev' EXCLUDE_LONG=1 matrix: exclude: diff --git a/docs/security.rst b/docs/security.rst index 2196275432..9036567413 100644 --- a/docs/security.rst +++ b/docs/security.rst @@ -88,5 +88,5 @@ the `python ssl documentation `_. -*Note*: In case the twisted event loop is used PyOpenSSL must be installed or an exception will be risen. Also -to set the ``ssl_version`` and ``cert_reqs`` in ``ssl_opts`` the appropriate constants from PyOpenSSL are expected. +*Note*: In case the twisted event loop is used pyOpenSSL must be installed or an exception will be risen. Also +to set the ``ssl_version`` and ``cert_reqs`` in ``ssl_opts`` the appropriate constants from pyOpenSSL are expected. diff --git a/test-requirements.txt b/test-requirements.txt index e18fecdd16..086576d36f 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -8,10 +8,9 @@ PyYAML pytz sure pure-sasl -twisted +twisted[tls] gevent>=1.0 eventlet cython>=0.20,<0.28 packaging asynctest; python_version > '3.4' -pyOpenSSL diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 8c4cabb4f1..6d506544e6 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -111,8 +111,6 @@ def _get_dse_version_from_cass(cass_version): KEEP_TEST_CLUSTER = bool(os.getenv('KEEP_TEST_CLUSTER', False)) SIMULACRON_JAR = os.getenv('SIMULACRON_JAR', None) -default_cassandra_version = '3.11' - CASSANDRA_IP = os.getenv('CASSANDRA_IP', '127.0.0.1') CASSANDRA_DIR = os.getenv('CASSANDRA_DIR', None) From bb31bb7e872209123abf829d946a69e1be743203 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 13 Apr 2018 10:23:05 -0400 Subject: [PATCH 1393/2005] prerelease changelog cleanup --- CHANGELOG.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 36f05b42d3..4ab927a5b2 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -20,6 +20,7 @@ Other * Add DSE smoke test to OSS driver tests (PYTHON-894) * Document long compilation times and workarounds (PYTHON-868) * Improve error for batch WriteTimeouts (PYTHON-941) +* Deprecate ResultSet indexing (PYTHON-945) 3.13.0 ====== From 58abbbf51bd9deca969cc8f3359489ecc8694d00 Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Fri, 13 Apr 2018 12:26:41 -0400 Subject: [PATCH 1394/2005] Wait when checking the pool status in tests --- .../standard/test_authentication.py | 6 +++--- tests/integration/util.py | 19 +++++++++++++++++++ 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index bb6fedfa1c..1a5f16dec4 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -20,7 +20,7 @@ from tests.integration import use_singledc, get_cluster, remove_cluster, PROTOCOL_VERSION, CASSANDRA_IP, \ set_default_cass_ip -from tests.integration.util import assert_quiescent_pool_state +from tests.integration.util import assert_quiescent_pool_state, assert_quiescent_pool_state_with_wait try: import unittest2 as unittest @@ -93,7 +93,7 @@ def test_auth_connect(self): session = cluster.connect() try: self.assertTrue(session.execute('SELECT release_version FROM system.local')) - assert_quiescent_pool_state(self, cluster) + assert_quiescent_pool_state_with_wait(self, cluster, wait=1) for pool in session.get_pools(): connection, _ = pool.borrow_connection(timeout=0) self.assertEqual(connection.authenticator.server_authenticator_class, 'org.apache.cassandra.auth.PasswordAuthenticator') @@ -102,7 +102,7 @@ def test_auth_connect(self): cluster.shutdown() finally: root_session.execute('DROP USER %s', user) - assert_quiescent_pool_state(self, root_session.cluster) + assert_quiescent_pool_state_with_wait(self, root_session.cluster, wait=1) root_session.cluster.shutdown() def test_connect_wrong_pwd(self): diff --git a/tests/integration/util.py b/tests/integration/util.py index 0a95d81dfb..1778defd6e 100644 --- a/tests/integration/util.py +++ b/tests/integration/util.py @@ -13,6 +13,25 @@ # limitations under the License. from tests.integration import PROTOCOL_VERSION +import time + + +def assert_quiescent_pool_state_with_wait(test_case, cluster, wait): + """ + This function is useful for waiting before checking the pool state. + assert_quiescent_pool_state checks that none of the requests ids have got lost. + However the callback corresponding to a request_id is called before the request_id + is returned back to the pool, therfore + + session.execute("SELECT * from system.local") + assert_quiescent_pool_state(self, session.cluster) + + might fail because when execute comes back the request_id hasn't yet been + returned to the pool, therefore the wait. + """ + time.sleep(wait) + assert_quiescent_pool_state(test_case, cluster) + def assert_quiescent_pool_state(test_case, cluster): From e4028765b137376c5ae7a16117b34406210d75c1 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 16 Apr 2018 10:14:05 -0400 Subject: [PATCH 1395/2005] version 3.14.0 --- cassandra/__init__.py | 2 +- docs.yaml | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index cac9693b9e..2dc3c1aa13 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 13, 0, 'post0') +__version_info__ = (3, 14, 0) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs.yaml b/docs.yaml index 9ee320740f..4fb3c09f15 100644 --- a/docs.yaml +++ b/docs.yaml @@ -21,6 +21,8 @@ sections: # build extensions like libev CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: + - name: '3.14' + ref: 'bb31bb7e' - name: '3.13' ref: '3.13.0' - name: '3.12' From 599db6844551e926c7d0c71f1ffc1dde93b659be Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 16 Apr 2018 11:01:32 -0400 Subject: [PATCH 1396/2005] Add group_keys_by_replica in api docs --- cassandra/metadata.py | 2 +- docs/api/cassandra/metadata.rst | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 292b051a45..b8f5f11f13 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2583,7 +2583,7 @@ def group_keys_by_replica(session, keyspace, table, keys): :class:`~.NO_VALID_REPLICA` Example usage:: - result = group_keys_by_host( + result = group_keys_by_replica( session, "system", "peers", (("127.0.0.1", ), ("127.0.0.2", )) ) diff --git a/docs/api/cassandra/metadata.rst b/docs/api/cassandra/metadata.rst index d797f739de..7a95141722 100644 --- a/docs/api/cassandra/metadata.rst +++ b/docs/api/cassandra/metadata.rst @@ -72,3 +72,5 @@ Tokens and Ring Topology .. autoclass:: LocalStrategy :members: + +.. autofunction:: group_keys_by_replica From 9f3ceb16840c89c04c7a5496c05ac031402c9c99 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 16 Apr 2018 11:38:06 -0400 Subject: [PATCH 1397/2005] update docs ref (3.14.0) --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index 4fb3c09f15..6eca2cb4e4 100644 --- a/docs.yaml +++ b/docs.yaml @@ -22,7 +22,7 @@ sections: CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - name: '3.14' - ref: 'bb31bb7e' + ref: '599db684' - name: '3.13' ref: '3.13.0' - name: '3.12' From 9af8bd19cfc257266a54ce47571b62fbdb690c7a Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 16 Apr 2018 12:38:48 -0400 Subject: [PATCH 1398/2005] document correct IsNotNull class --- docs/cqlengine/queryset.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/cqlengine/queryset.rst b/docs/cqlengine/queryset.rst index 14bbef503f..69574c85e4 100644 --- a/docs/cqlengine/queryset.rst +++ b/docs/cqlengine/queryset.rst @@ -199,7 +199,7 @@ In addition to simple equal to queries, cqlengine also supports querying with ot q = Automobile.objects.filter(model__like='%Civic%').allow_filtering() -:attr:`IS NOT NULL (IsNotNull(column_name)) ` +:attr:`IS NOT NULL (IsNotNull(column_name)) ` The IS NOT NULL operator is not yet supported for C*. From a799c5e1e4a483c32e7fd7f429519ca8eaaa7bdf Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 16 Apr 2018 13:14:12 -0400 Subject: [PATCH 1399/2005] update docs ref --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index 6eca2cb4e4..938cdebab2 100644 --- a/docs.yaml +++ b/docs.yaml @@ -22,7 +22,7 @@ sections: CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - name: '3.14' - ref: '599db684' + ref: '9af8bd19' - name: '3.13' ref: '3.13.0' - name: '3.12' From 9819499c8a5c735fa7ca7f18a0f9b1c20def498a Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 16 Apr 2018 17:21:22 -0400 Subject: [PATCH 1400/2005] remove six import in __init__ This fixes installation on pip 10 in environments that don't already have six installed. --- cassandra/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 2dc3c1aa13..a3936b1999 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -13,7 +13,6 @@ # limitations under the License. import logging -import six class NullHandler(logging.Handler): @@ -280,7 +279,7 @@ class WriteType(object): } -WriteType.value_to_name = {v: k for k, v in six.iteritems(WriteType.name_to_value)} +WriteType.value_to_name = {v: k for k, v in WriteType.name_to_value.items()} class SchemaChangeType(object): From baf085d8bd2349f5b63a216a4b93b3663fcf3854 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 20 Apr 2018 12:36:05 -0400 Subject: [PATCH 1401/2005] use optional kwarg, not a separate function --- .../standard/test_authentication.py | 6 +++--- tests/integration/util.py | 20 ++++++++----------- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index 1a5f16dec4..6c389f9282 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -20,7 +20,7 @@ from tests.integration import use_singledc, get_cluster, remove_cluster, PROTOCOL_VERSION, CASSANDRA_IP, \ set_default_cass_ip -from tests.integration.util import assert_quiescent_pool_state, assert_quiescent_pool_state_with_wait +from tests.integration.util import assert_quiescent_pool_state try: import unittest2 as unittest @@ -93,7 +93,7 @@ def test_auth_connect(self): session = cluster.connect() try: self.assertTrue(session.execute('SELECT release_version FROM system.local')) - assert_quiescent_pool_state_with_wait(self, cluster, wait=1) + assert_quiescent_pool_state(self, cluster, wait=1) for pool in session.get_pools(): connection, _ = pool.borrow_connection(timeout=0) self.assertEqual(connection.authenticator.server_authenticator_class, 'org.apache.cassandra.auth.PasswordAuthenticator') @@ -102,7 +102,7 @@ def test_auth_connect(self): cluster.shutdown() finally: root_session.execute('DROP USER %s', user) - assert_quiescent_pool_state_with_wait(self, root_session.cluster, wait=1) + assert_quiescent_pool_state(self, root_session.cluster, wait=1) root_session.cluster.shutdown() def test_connect_wrong_pwd(self): diff --git a/tests/integration/util.py b/tests/integration/util.py index 1778defd6e..ccc4aea7f8 100644 --- a/tests/integration/util.py +++ b/tests/integration/util.py @@ -16,24 +16,20 @@ import time -def assert_quiescent_pool_state_with_wait(test_case, cluster, wait): +def assert_quiescent_pool_state(test_case, cluster, wait=None): """ - This function is useful for waiting before checking the pool state. - assert_quiescent_pool_state checks that none of the requests ids have got lost. - However the callback corresponding to a request_id is called before the request_id - is returned back to the pool, therfore + Checking the quiescent pool state checks that none of the requests ids have + been lost. However, the callback corresponding to a request_id is called + before the request_id is returned back to the pool, therefore session.execute("SELECT * from system.local") assert_quiescent_pool_state(self, session.cluster) - might fail because when execute comes back the request_id hasn't yet been - returned to the pool, therefore the wait. + (with no wait) might fail because when execute comes back the request_id + hasn't yet been returned to the pool, therefore the wait. """ - time.sleep(wait) - assert_quiescent_pool_state(test_case, cluster) - - -def assert_quiescent_pool_state(test_case, cluster): + if wait is not None: + time.sleep(wait) for session in cluster.sessions: pool_states = session.get_pool_state().values() From 4b0de5e9bd7afc5e422b503bbd4cb9bb01c45c0e Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 20 Apr 2018 12:37:29 -0400 Subject: [PATCH 1402/2005] remove unnecessary trailing newline --- tests/integration/util.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/integration/util.py b/tests/integration/util.py index ccc4aea7f8..6215449d1f 100644 --- a/tests/integration/util.py +++ b/tests/integration/util.py @@ -49,4 +49,3 @@ def assert_quiescent_pool_state(test_case, cluster, wait=None): test_case.assertEqual(connection.highest_request_id, max(req_ids)) if PROTOCOL_VERSION < 3: test_case.assertEqual(connection.highest_request_id, connection.max_request_id) - From 94df983ff93c0370574c5c5a91fd1891c6c0c650 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 25 Apr 2018 16:51:40 -0400 Subject: [PATCH 1403/2005] remove simulacron download --- build.yaml | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/build.yaml b/build.yaml index 6b36d23930..d5c114e1e2 100644 --- a/build.yaml +++ b/build.yaml @@ -146,17 +146,6 @@ build: echo "Installing simulacron" pushd ~ - if [ ! -d "simulacron" ] ; then - git clone git@github.com:datastax/simulacron.git - cd simulacron - git clone git@github.com:datastax/native-protocol.git - cd native-protocol - mvn clean install - cd ../.. - fi - cd simulacron - mvn clean install -DskipTests=true - ls standalone/target SIMULACRON_JAR=`find \`pwd\` -name "simulacron-standalone-*.jar"` echo "SIMULACRON_JAR: $SIMULACRON_JAR" From 7d5a624bfb9c9f243f85869119d93fc8c7aa53e3 Mon Sep 17 00:00:00 2001 From: Damien Stevenson Date: Wed, 18 Apr 2018 14:36:06 +1000 Subject: [PATCH 1404/2005] Update token range ownership (#944) --- CHANGELOG.rst | 7 +++++++ cassandra/metadata.py | 9 ++++----- tests/integration/standard/test_metadata.py | 4 ++-- tests/unit/test_metadata.py | 3 +-- 4 files changed, 14 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 4ab927a5b2..790aea99a1 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,5 +1,12 @@ +3.15.0 +====== +Bug Fixes +--------- +* Tokenmap.get_replicas returns the wrong value if token coincides with the end of the range (PYTHON-978) + 3.14.0 ====== +April 17, 2018 Features -------- diff --git a/cassandra/metadata.py b/cassandra/metadata.py index b8f5f11f13..6fb6a95ece 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -13,7 +13,7 @@ # limitations under the License. from binascii import unhexlify -from bisect import bisect_right +from bisect import bisect_left from collections import defaultdict, Mapping from functools import total_ordering from hashlib import md5 @@ -1486,10 +1486,9 @@ def get_replicas(self, keyspace, token): tokens_to_hosts = self.tokens_to_hosts_by_ks.get(keyspace, None) if tokens_to_hosts: - # token range ownership is exclusive on the LHS (the start token), so - # we use bisect_right, which, in the case of a tie/exact match, - # picks an insertion point to the right of the existing match - point = bisect_right(self.ring, token) + # The values in self.ring correspond to the end of the + # token range up to and including the value listed. + point = bisect_left(self.ring, token) if point == len(self.ring): return tokens_to_hosts[self.ring[0]] else: diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 5057fc2fff..1a58438c9f 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1172,8 +1172,8 @@ def test_token_map(self): for i, token in enumerate(ring): self.assertEqual(set(get_replicas('test3rf', token)), set(owners)) - self.assertEqual(set(get_replicas('test2rf', token)), set([owners[(i + 1) % 3], owners[(i + 2) % 3]])) - self.assertEqual(set(get_replicas('test1rf', token)), set([owners[(i + 1) % 3]])) + self.assertEqual(set(get_replicas('test2rf', token)), set([owners[i], owners[(i + 1) % 3]])) + self.assertEqual(set(get_replicas('test1rf', token)), set([owners[i]])) cluster.shutdown() diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 3cc810e89a..96a512792c 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -312,8 +312,7 @@ def _get_replicas(self, token_klass): token_map = TokenMap(token_klass, token_to_primary_replica, tokens, metadata) # tokens match node tokens exactly - for i, token in enumerate(tokens): - expected_host = hosts[(i + 1) % len(hosts)] + for token, expected_host in zip(tokens, hosts): replicas = token_map.get_replicas("ks", token) self.assertEqual(set(replicas), {expected_host}) From 6bce25c918f442d7231d9d1721313f2c82faf33a Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Thu, 5 Apr 2018 12:23:10 -0400 Subject: [PATCH 1405/2005] PYTHON-697 Asyncore reactor should use a global variable instead of a class variable for the event loop --- CHANGELOG.rst | 1 + cassandra/io/asyncorereactor.py | 43 +++++++++---------- tests/integration/standard/test_connection.py | 25 ++++++++++- tests/unit/io/test_asyncorereactor.py | 3 +- 4 files changed, 47 insertions(+), 25 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 790aea99a1..82f044817b 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -15,6 +15,7 @@ Features * Allow filter queries with fields that have an index managed outside of cqlengine (PYTHON-966) * Twisted SSL Support (PYTHON-343) * Support IS NOT NULL operator in cqlengine (PYTHON-968) +* Asyncore reactors should use a global variable instead of a class variable for the event loop (PYTHON-697) Other ----- diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index f7b7cac421..67744e5afe 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -43,13 +43,9 @@ _dispatcher_map = {} -def _cleanup(loop_weakref): - try: - loop = loop_weakref() - except ReferenceError: - return - - loop._cleanup() +def _cleanup(loop): + if loop: + loop._cleanup() class WaitableTimer(Timer): @@ -228,8 +224,6 @@ def __init__(self): dispatcher = _BusyWaitDispatcher() self._loop_dispatcher = dispatcher - atexit.register(partial(_cleanup, weakref.ref(self))) - def maybe_start(self): should_start = False did_acquire = False @@ -299,40 +293,43 @@ def _cleanup(self): log.debug("Dispatchers were closed") +_global_loop = None +atexit.register(partial(_cleanup, _global_loop)) + + class AsyncoreConnection(Connection, asyncore.dispatcher): """ An implementation of :class:`.Connection` that uses the ``asyncore`` module in the Python standard library for its event loop. """ - _loop = None - _writable = False _readable = False @classmethod def initialize_reactor(cls): - if not cls._loop: - cls._loop = AsyncoreLoop() + global _global_loop + if not _global_loop: + _global_loop = AsyncoreLoop() else: current_pid = os.getpid() - if cls._loop._pid != current_pid: + if _global_loop._pid != current_pid: log.debug("Detected fork, clearing and reinitializing reactor state") cls.handle_fork() - cls._loop = AsyncoreLoop() + _global_loop = AsyncoreLoop() @classmethod def handle_fork(cls): - global _dispatcher_map + global _dispatcher_map, _global_loop _dispatcher_map = {} - if cls._loop: - cls._loop._cleanup() - cls._loop = None + if _global_loop: + _global_loop._cleanup() + _global_loop = None @classmethod def create_timer(cls, timeout, callback): timer = Timer(timeout, callback) - cls._loop.add_timer(timer) + _global_loop.add_timer(timer) return timer def __init__(self, *args, **kwargs): @@ -344,14 +341,14 @@ def __init__(self, *args, **kwargs): self._connect_socket() # start the event loop if needed - self._loop.maybe_start() + _global_loop.maybe_start() init_handler = WaitableTimer( timeout=0, callback=partial(asyncore.dispatcher.__init__, self, self._socket, _dispatcher_map) ) - self._loop.add_timer(init_handler) + _global_loop.add_timer(init_handler) init_handler.wait(kwargs["connect_timeout"]) self._writable = True @@ -451,7 +448,7 @@ def push(self, data): with self.deque_lock: self.deque.extend(chunks) self._writable = True - self._loop.wake_loop() + _global_loop.wake_loop() def writable(self): return self._writable diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 81238a2e5e..853adc4bb5 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -20,12 +20,13 @@ from functools import partial from six.moves import range import sys +import threading from threading import Thread, Event import time -import weakref from cassandra import ConsistencyLevel, OperationTimedOut from cassandra.cluster import NoHostAvailable, ConnectionShutdown, Cluster +import cassandra.io.asyncorereactor from cassandra.io.asyncorereactor import AsyncoreConnection from cassandra.protocol import QueryMessage from cassandra.connection import Connection @@ -401,6 +402,28 @@ def setUp(self): raise unittest.SkipTest("Can't test asyncore with monkey patching") ConnectionTests.setUp(self) + def test_subclasses_share_loop(self): + class C1(AsyncoreConnection): + pass + + class C2(AsyncoreConnection): + pass + + cassandra.io.asyncorereactor._global_loop._cleanup() + cassandra.io.asyncorereactor._global_loop = None + + clusterC1 = Cluster(connection_class=C1) + clusterC1.connect(wait_for_all_pools=True) + + clusterC2 = Cluster(connection_class=C2) + clusterC2.connect(wait_for_all_pools=True) + self.addCleanup(clusterC1.shutdown) + self.addCleanup(clusterC2.shutdown) + + event_loops_threads = [thread for thread in threading.enumerate() if + thread.name == "cassandra_driver_event_loop"] + self.assertEqual(len(event_loops_threads), 1) + class LibevConnectionTests(ConnectionTests, unittest.TestCase): diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index 8e9dc553ae..7e55059daa 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -18,6 +18,7 @@ from mock import patch import socket +import cassandra.io.asyncorereactor from cassandra.io.asyncorereactor import AsyncoreConnection from tests import is_monkey_patched from tests.unit.io.utils import ReactorTestMixin, TimerTestMixin, noop_if_monkey_patched @@ -76,7 +77,7 @@ def create_timer(self): @property def _timers(self): - return self.connection._loop._timers + return cassandra.io.asyncorereactor._global_loop._timers def setUp(self): if is_monkey_patched(): From fdfae47d0ee87b42690d5d232b8d11a30ef13768 Mon Sep 17 00:00:00 2001 From: Yoan Blanc Date: Mon, 21 May 2018 20:35:50 +0200 Subject: [PATCH 1406/2005] gevent: requires ares.h (libc-ares) Signed-off-by: Yoan Blanc --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 276b2297ba..0eb6b1eafe 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,6 +17,7 @@ addons: - build-essential - python-dev - pypy-dev + - libc-ares-dev - libev4 - libev-dev From 901acab916ca7be791f5443d8e74ce12c335e164 Mon Sep 17 00:00:00 2001 From: Michael Shuler Date: Wed, 30 May 2018 17:31:43 -0500 Subject: [PATCH 1407/2005] Update PyPI URLs for new Python Package Index --- cassandra/cluster.py | 2 +- cassandra/metrics.py | 2 +- docs/installation.rst | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 22ea6211d8..63eb54e3ae 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -559,7 +559,7 @@ def default_retry_policy(self, policy): as outlined in RFC 2818 and RFC 6125. Note that this requires the certificate to be transferred, so should almost always require the option ``'cert_reqs': ssl.CERT_REQUIRED``. Note also that this functionality was not built into Python standard library until (2.7.9, 3.2). To enable this mechanism in earlier versions, patch ``ssl.match_hostname`` - with a custom or `back-ported function `_. + with a custom or `back-ported function `_. """ sockopts = None diff --git a/cassandra/metrics.py b/cassandra/metrics.py index 473e52767f..223b0c7c6e 100644 --- a/cassandra/metrics.py +++ b/cassandra/metrics.py @@ -20,7 +20,7 @@ except ImportError: raise ImportError( "The scales library is required for metrics support: " - "https://pypi.python.org/pypi/scales") + "https://pypi.org/project/scales/") log = logging.getLogger(__name__) diff --git a/docs/installation.rst b/docs/installation.rst index ecca2891b7..80104c314a 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -10,7 +10,7 @@ Linux, OSX, and Windows are supported. Installation through pip ------------------------ -`pip `_ is the suggested tool for installing +`pip `_ is the suggested tool for installing packages. It will handle installing all Python dependencies for the driver at the same time as the driver itself. To install the driver*:: From 97cbab09ac763f0d5d587be721e06c6ef7f668ad Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Mon, 14 May 2018 10:35:00 +0100 Subject: [PATCH 1408/2005] PYTHON-973: Use global variable for libev loops so it can be subclassed --- CHANGELOG.rst | 4 +++ cassandra/io/libevreactor.py | 49 ++++++++++++++++++------------------ 2 files changed, 29 insertions(+), 24 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 82f044817b..e543d37204 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,6 +4,10 @@ Bug Fixes --------- * Tokenmap.get_replicas returns the wrong value if token coincides with the end of the range (PYTHON-978) +Other +----- +* Use global variable for libev loops so it can be subclassed (PYTHON-973) + 3.14.0 ====== April 17, 2018 diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 90bd76158a..fe3827eb6a 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -41,12 +41,9 @@ log = logging.getLogger(__name__) -def _cleanup(loop_weakref): - try: - loop = loop_weakref() - except ReferenceError: - return - loop._cleanup() +def _cleanup(loop): + if loop: + loop._cleanup() class LibevLoop(object): @@ -84,8 +81,6 @@ def __init__(self): self._timers = TimerManager() self._loop_timer = libev.Timer(self._loop, self._on_loop_timer) - atexit.register(partial(_cleanup, weakref.ref(self))) - def maybe_start(self): should_start = False with self._lock: @@ -228,11 +223,14 @@ def _loop_will_run(self, prepare): self._notifier.send() +_global_loop = None +atexit.register(partial(_cleanup, _global_loop)) + + class LibevConnection(Connection): """ An implementation of :class:`.Connection` that uses libev for its event loop. """ - _libevloop = None _write_watcher_is_active = False _read_watcher = None _write_watcher = None @@ -240,24 +238,26 @@ class LibevConnection(Connection): @classmethod def initialize_reactor(cls): - if not cls._libevloop: - cls._libevloop = LibevLoop() + global _global_loop + if not _global_loop: + _global_loop = LibevLoop() else: - if cls._libevloop._pid != os.getpid(): + if _global_loop._pid != os.getpid(): log.debug("Detected fork, clearing and reinitializing reactor state") cls.handle_fork() - cls._libevloop = LibevLoop() + _global_loop = LibevLoop() @classmethod def handle_fork(cls): - if cls._libevloop: - cls._libevloop._cleanup() - cls._libevloop = None + global _global_loop + if _global_loop: + _global_loop._cleanup() + _global_loop = None @classmethod def create_timer(cls, timeout, callback): timer = Timer(timeout, callback) - cls._libevloop.add_timer(timer) + _global_loop.add_timer(timer) return timer def __init__(self, *args, **kwargs): @@ -268,16 +268,16 @@ def __init__(self, *args, **kwargs): self._connect_socket() self._socket.setblocking(0) - with self._libevloop._lock: - self._read_watcher = libev.IO(self._socket.fileno(), libev.EV_READ, self._libevloop._loop, self.handle_read) - self._write_watcher = libev.IO(self._socket.fileno(), libev.EV_WRITE, self._libevloop._loop, self.handle_write) + with _global_loop._lock: + self._read_watcher = libev.IO(self._socket.fileno(), libev.EV_READ, _global_loop._loop, self.handle_read) + self._write_watcher = libev.IO(self._socket.fileno(), libev.EV_WRITE, _global_loop._loop, self.handle_write) self._send_options_message() - self._libevloop.connection_created(self) + _global_loop.connection_created(self) # start the global event loop if needed - self._libevloop.maybe_start() + _global_loop.maybe_start() def close(self): with self.lock: @@ -286,7 +286,8 @@ def close(self): self.is_closed = True log.debug("Closing connection (%s) to %s", id(self), self.host) - self._libevloop.connection_destroyed(self) + + _global_loop.connection_destroyed(self) self._socket.close() log.debug("Closed socket to %s", self.host) @@ -367,4 +368,4 @@ def push(self, data): with self._deque_lock: self.deque.extend(chunks) - self._libevloop.notify() + _global_loop.notify() From 662e12f08ab068ea32bcc7ca6a9076df9006de94 Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Mon, 14 May 2018 12:16:55 -0400 Subject: [PATCH 1409/2005] Use _global_loop in tests --- tests/unit/io/test_libevreactor.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index d22bc14ed6..a02458edc8 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -35,7 +35,6 @@ class LibevConnectionTest(ReactorTestMixin, unittest.TestCase): connection_class = LibevConnection socket_attr_name = '_socket' - loop_attr_name = '_libevloop' null_handle_function_args = None, 0 def setUp(self): @@ -62,7 +61,7 @@ def test_watchers_are_finished(self): Test for asserting that watchers are closed in LibevConnection This test simulates a process termination without calling cluster.shutdown(), which would trigger - LibevConnection._libevloop._cleanup. It will check the watchers have been closed + _global_loop._cleanup. It will check the watchers have been closed Finally it will restore the LibevConnection reactor so it doesn't affect the rest of the tests @@ -72,24 +71,25 @@ def test_watchers_are_finished(self): @test_category connection """ - with patch.object(LibevConnection._libevloop, "_thread"),\ - patch.object(LibevConnection._libevloop, "notify"): + from cassandra.io.libevreactor import _global_loop + with patch.object(_global_loop, "_thread"),\ + patch.object(_global_loop, "notify"): self.make_connection() # We have to make a copy because the connections shouldn't # be alive when we verify them - live_connections = set(LibevConnection._libevloop._live_conns) + live_connections = set(_global_loop._live_conns) # This simulates the process ending without cluster.shutdown() # being called, then with atexit _cleanup for libevreactor would # be called - libev__cleanup(weakref.ref(LibevConnection._libevloop)) + libev__cleanup(_global_loop) for conn in live_connections: self.assertTrue(conn._write_watcher.stop.mock_calls) self.assertTrue(conn._read_watcher.stop.mock_calls) - LibevConnection._libevloop._shutdown = False + _global_loop._shutdown = False class LibevTimerPatcher(unittest.TestCase): @@ -125,7 +125,8 @@ def create_timer(self): @property def _timers(self): - return self.connection._libevloop._timers + from cassandra.io.libevreactor import _global_loop + return _global_loop._timers def make_connection(self): c = LibevConnection('1.2.3.4', cql_version='3.0.1') From 5452e1e519127c8c50d8d82cfb6bc73cde85a352 Mon Sep 17 00:00:00 2001 From: Jaume Marhuenda Date: Tue, 15 May 2018 15:06:38 -0400 Subject: [PATCH 1410/2005] Add test PYTHON-973 --- tests/integration/standard/test_connection.py | 45 ++++++++++++------- 1 file changed, 29 insertions(+), 16 deletions(-) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 853adc4bb5..8e57f5758b 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -38,6 +38,7 @@ requiresmallclockgranularity, greaterthancass20 try: from cassandra.io.libevreactor import LibevConnection + import cassandra.io.libevreactor except ImportError: LibevConnection = None @@ -392,16 +393,6 @@ def test_connect_timeout(self): break self.assertTrue(exception_thrown) - -class AsyncoreConnectionTests(ConnectionTests, unittest.TestCase): - - klass = AsyncoreConnection - - def setUp(self): - if is_monkey_patched(): - raise unittest.SkipTest("Can't test asyncore with monkey patching") - ConnectionTests.setUp(self) - def test_subclasses_share_loop(self): class C1(AsyncoreConnection): pass @@ -409,9 +400,6 @@ class C1(AsyncoreConnection): class C2(AsyncoreConnection): pass - cassandra.io.asyncorereactor._global_loop._cleanup() - cassandra.io.asyncorereactor._global_loop = None - clusterC1 = Cluster(connection_class=C1) clusterC1.connect(wait_for_all_pools=True) @@ -420,14 +408,35 @@ class C2(AsyncoreConnection): self.addCleanup(clusterC1.shutdown) self.addCleanup(clusterC2.shutdown) - event_loops_threads = [thread for thread in threading.enumerate() if - thread.name == "cassandra_driver_event_loop"] - self.assertEqual(len(event_loops_threads), 1) + self.assertEqual(len(get_eventloop_threads(self.event_loop_name)), 1) + + +def get_eventloop_threads(name): + import threading + event_loops_threads = [thread for thread in threading.enumerate() if name == thread.name] + + return event_loops_threads + + +class AsyncoreConnectionTests(ConnectionTests, unittest.TestCase): + + klass = AsyncoreConnection + event_loop_name = "cassandra_driver_event_loop" + + def setUp(self): + if is_monkey_patched(): + raise unittest.SkipTest("Can't test asyncore with monkey patching") + ConnectionTests.setUp(self) + + def clean_global_loop(self): + cassandra.io.asyncorereactor._global_loop._cleanup() + cassandra.io.asyncorereactor._global_loop = None class LibevConnectionTests(ConnectionTests, unittest.TestCase): klass = LibevConnection + event_loop_name = "event_loop" def setUp(self): if is_monkey_patched(): @@ -436,3 +445,7 @@ def setUp(self): raise unittest.SkipTest( 'libev does not appear to be installed properly') ConnectionTests.setUp(self) + + def clean_global_loop(self): + cassandra.io.libevreactor._global_loop._cleanup() + cassandra.io.libevreactor._global_loop = None From 16ae36b52cad2bd9e96cd60c5ab5ce3ffb47f809 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 19 Apr 2018 16:39:33 -0400 Subject: [PATCH 1411/2005] PYTHON-893: fallback impl for nt row factory --- CHANGELOG.rst | 1 + cassandra/query.py | 48 +++++++++++++++++ tests/unit/test_row_factories.py | 90 ++++++++++++++++++++++++++++++++ 3 files changed, 139 insertions(+) create mode 100644 tests/unit/test_row_factories.py diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 790aea99a1..abdddaf9b7 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -3,6 +3,7 @@ Bug Fixes --------- * Tokenmap.get_replicas returns the wrong value if token coincides with the end of the range (PYTHON-978) +* Python Driver fails with "more than 255 arguments" python exception when > 255 columns specified in query response (PYTHON-893) 3.14.0 ====== diff --git a/cassandra/query.py b/cassandra/query.py index 56b470db74..b2193d61a0 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -25,6 +25,7 @@ import time import six from six.moves import range, zip +import warnings from cassandra import ConsistencyLevel, OperationTimedOut from cassandra.util import unix_time_from_uuid1 @@ -83,6 +84,39 @@ def tuple_factory(colnames, rows): """ return rows +class PseudoNamedTupleRow(object): + """ + Helper class for pseudo_named_tuple_factory. These objects provide an + __iter__ interface, as well as index- and attribute-based access to values, + but otherwise do not attempt to implement the full namedtuple or iterable + interface. + """ + def __init__(self, ordered_dict): + self._dict = ordered_dict + self._tuple = tuple(ordered_dict.values()) + + def __getattr__(self, name): + return self._dict[name] + + def __getitem__(self, idx): + return self._tuple[idx] + + def __iter__(self): + return iter(self._tuple) + + def __repr__(self): + return '{t}({od})'.format(t=self.__class__.__name__, + od=self._dict) + + +def pseudo_namedtuple_factory(colnames, rows): + """ + Returns each row as a :class:`.PseudoNamedTupleRow`. This is the fallback + factory for cases where :meth:`.named_tuple_factory` fails to create rows. + """ + return [PseudoNamedTupleRow(od) + for od in ordered_dict_factory(colnames, rows)] + def named_tuple_factory(colnames, rows): """ @@ -116,6 +150,20 @@ def named_tuple_factory(colnames, rows): clean_column_names = map(_clean_column_name, colnames) try: Row = namedtuple('Row', clean_column_names) + except SyntaxError: + warnings.warn( + "Failed creating namedtuple for a result because there were too " + "many columns. This is due to a Python limitation that affects " + "namedtuple in Python 3.0-3.6 (see issue18896). The row will be " + "created with {substitute_factory_name}, which lacks some namedtuple " + "features and is slower. To avoid slower performance accessing " + "values on row objects, Upgrade to Python 3.7, or use a different " + "row factory. (column names: {colnames})".format( + substitute_factory_name=pseudo_namedtuple_factory.__name__, + colnames=colnames + ) + ) + return pseudo_namedtuple_factory(colnames, rows) except Exception: clean_column_names = list(map(_clean_column_name, colnames)) # create list because py3 map object will be consumed by first attempt log.warning("Failed creating named tuple for results with column names %s (cleaned: %s) " diff --git a/tests/unit/test_row_factories.py b/tests/unit/test_row_factories.py new file mode 100644 index 0000000000..13049ba034 --- /dev/null +++ b/tests/unit/test_row_factories.py @@ -0,0 +1,90 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from cassandra.query import named_tuple_factory + +import logging +import warnings + +import sys + +try: + from unittest import TestCase +except ImportError: + from unittest2 import TestCase + + +log = logging.getLogger(__name__) + + +NAMEDTUPLE_CREATION_BUG = sys.version_info >= (3,) and sys.version_info < (3, 7) + +class TestNamedTupleFactory(TestCase): + + long_colnames, long_rows = ( + ['col{}'.format(x) for x in range(300)], + [ + ['value{}'.format(x) for x in range(300)] + for _ in range(100) + ] + ) + short_colnames, short_rows = ( + ['col{}'.format(x) for x in range(200)], + [ + ['value{}'.format(x) for x in range(200)] + for _ in range(100) + ] + ) + + def test_creation_warning_on_long_column_list(self): + """ + Reproduces the failure described in PYTHON-893 + + @since 3.15 + @jira_ticket PYTHON-893 + @expected_result creation fails on Python > 3 and < 3.7 + + @test_category row_factory + """ + if not NAMEDTUPLE_CREATION_BUG: + named_tuple_factory(self.long_colnames, self.long_rows) + return + + with warnings.catch_warnings(record=True) as w: + rows = named_tuple_factory(self.long_colnames, self.long_rows) + self.assertEqual(len(w), 1) + warning = w[0] + self.assertIn('pseudo_namedtuple_factory', str(warning)) + self.assertIn('3.7', str(warning)) + + for r in rows: + self.assertEqual(r.col0, self.long_rows[0][0]) + + def test_creation_no_warning_on_short_column_list(self): + """ + Tests that normal namedtuple row creation still works after PYTHON-893 fix + + @since 3.15 + @jira_ticket PYTHON-893 + @expected_result creates namedtuple-based Rows + + @test_category row_factory + """ + with warnings.catch_warnings(record=True) as w: + rows = named_tuple_factory(self.short_colnames, self.short_rows) + self.assertEqual(len(w), 0) + # check that this is a real namedtuple + self.assertTrue(hasattr(rows[0], '_fields')) + self.assertIsInstance(rows[0], tuple) From 2f90da5c592f5aede38c383084f47913e98f52eb Mon Sep 17 00:00:00 2001 From: Yoan Blanc Date: Sat, 19 May 2018 18:34:13 +0200 Subject: [PATCH 1412/2005] tox: remove python 3.3 Signed-off-by: Yoan Blanc --- .travis.yml | 1 - README-dev.rst | 2 +- README.rst | 2 +- docs/index.rst | 2 +- docs/installation.rst | 2 +- setup.py | 1 - tox.ini | 2 +- 7 files changed, 5 insertions(+), 7 deletions(-) diff --git a/.travis.yml b/.travis.yml index 0eb6b1eafe..0f3c01b200 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,7 +1,6 @@ language: python python: - "2.7" - - "3.3" - "3.4" - "3.5" - "3.6" diff --git a/README-dev.rst b/README-dev.rst index 8c8b71bcca..815fe0aadd 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -124,7 +124,7 @@ it with the ``PROTOCOL_VERSION`` environment variable:: Testing Multiple Python Versions -------------------------------- -If you want to test all of python 2.7, 3.3, 3.4 and pypy, use tox (this is what +If you want to test all of python 2.7, 3.4, 3.5, 3.6 and pypy, use tox (this is what TravisCI runs):: tox diff --git a/README.rst b/README.rst index f30a916854..f14cc77963 100644 --- a/README.rst +++ b/README.rst @@ -6,7 +6,7 @@ DataStax Python Driver for Apache Cassandra A modern, `feature-rich `_ and highly-tunable Python client library for Apache Cassandra (2.1+) using exclusively Cassandra's binary protocol and Cassandra Query Language v3. -The driver supports Python 2.7, 3.3, 3.4, 3.5, and 3.6. +The driver supports Python 2.7, 3.4, 3.5, and 3.6. If you require compatibility with DataStax Enterprise, use the `DataStax Enterprise Python Driver `_. diff --git a/docs/index.rst b/docs/index.rst index 3c9b3adb69..0b578a845e 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -4,7 +4,7 @@ A Python client driver for `Apache Cassandra `_. This driver works exclusively with the Cassandra Query Language v3 (CQL3) and Cassandra's native protocol. Cassandra 2.1+ is supported. -The driver supports Python 2.7, 3.3, 3.4, 3.5, and 3.6. +The driver supports Python 2.7, 3.4, 3.5, and 3.6. This driver is open source under the `Apache v2 License `_. diff --git a/docs/installation.rst b/docs/installation.rst index 80104c314a..1ef08546a0 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -3,7 +3,7 @@ Installation Supported Platforms ------------------- -Python 2.6, 2.7, 3.3, and 3.4 are supported. Both CPython (the standard Python +Python 2.7, 3.4, 3.5 and 3.6 are supported. Both CPython (the standard Python implementation) and `PyPy `_ are supported and tested. Linux, OSX, and Windows are supported. diff --git a/setup.py b/setup.py index a49bb1e21b..9d0f12b4f7 100644 --- a/setup.py +++ b/setup.py @@ -429,7 +429,6 @@ def run_setup(extensions): 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', diff --git a/tox.ini b/tox.ini index 86c0eafebe..3645bef4b8 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py{27,33,34,35,36},pypy +envlist = py{27,34,35,36},pypy [base] deps = nose From 75584d72c1fc8fd3129f41f4a34667bdb76f592c Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 20 Jun 2018 11:42:29 -0400 Subject: [PATCH 1413/2005] guarantee cluster cleanup --- tests/integration/standard/test_cluster.py | 28 ++++++++++++++-------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 59b58d6712..a39d22f0fb 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -200,23 +200,31 @@ def test_session_host_parameter(self): @test_category connection """ + def cleanup(): + """ + When this test fails, the inline .shutdown() calls don't get + called, so we register this as a cleanup. + """ + self.cluster_to_shutdown.shutdown() + self.addCleanup(cleanup) + # Test with empty list - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.cluster_to_shutdown = Cluster(protocol_version=PROTOCOL_VERSION) with self.assertRaises(NoHostAvailable): - Session(cluster, []) - cluster.shutdown() + Session(self.cluster_to_shutdown, []) + self.cluster_to_shutdown.shutdown() # Test with only invalid - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.cluster_to_shutdown = Cluster(protocol_version=PROTOCOL_VERSION) with self.assertRaises(NoHostAvailable): - Session(cluster, [Host("1.2.3.4", SimpleConvictionPolicy)]) - cluster.shutdown() + Session(self.cluster_to_shutdown, [Host("1.2.3.4", SimpleConvictionPolicy)]) + self.cluster_to_shutdown.shutdown() # Test with valid and invalid hosts - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - Session(cluster, [Host(x, SimpleConvictionPolicy) for x in - ("127.0.0.1", "127.0.0.2", "1.2.3.4")]) - cluster.shutdown() + self.cluster_to_shutdown = Cluster(protocol_version=PROTOCOL_VERSION) + Session(self.cluster_to_shutdown, [Host(x, SimpleConvictionPolicy) for x in + ("127.0.0.1", "127.0.0.2", "1.2.3.4")]) + self.cluster_to_shutdown.shutdown() def test_protocol_negotiation(self): """ From 99b3234077f9c7672c1d9a8647412c10655bf413 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 20 Jun 2018 15:01:44 -0400 Subject: [PATCH 1414/2005] Use cluster.connect(), not Session() The old behavior led to Session objects not being registered with the cluster, and thus not cleaned up on Cluster.shutdown(). --- tests/integration/standard/test_cluster.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index a39d22f0fb..9df8a3586c 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -27,14 +27,13 @@ from packaging.version import Version import cassandra -from cassandra.cluster import Cluster, Session, NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT +from cassandra.cluster import Cluster, NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.concurrent import execute_concurrent from cassandra.policies import (RoundRobinPolicy, ExponentialReconnectionPolicy, RetryPolicy, SimpleConvictionPolicy, HostDistance, AddressTranslator, TokenAwarePolicy, HostFilterPolicy) from cassandra import ConsistencyLevel -from cassandra.pool import Host from cassandra.query import SimpleStatement, TraceUnavailable, tuple_factory from cassandra.auth import PlainTextAuthProvider, SaslAuthProvider from cassandra import connection @@ -209,21 +208,21 @@ def cleanup(): self.addCleanup(cleanup) # Test with empty list - self.cluster_to_shutdown = Cluster(protocol_version=PROTOCOL_VERSION) + self.cluster_to_shutdown = Cluster([], protocol_version=PROTOCOL_VERSION) with self.assertRaises(NoHostAvailable): - Session(self.cluster_to_shutdown, []) + self.cluster_to_shutdown.connect() self.cluster_to_shutdown.shutdown() # Test with only invalid - self.cluster_to_shutdown = Cluster(protocol_version=PROTOCOL_VERSION) + self.cluster_to_shutdown = Cluster(('1.2.3.4',), protocol_version=PROTOCOL_VERSION) with self.assertRaises(NoHostAvailable): - Session(self.cluster_to_shutdown, [Host("1.2.3.4", SimpleConvictionPolicy)]) + self.cluster_to_shutdown.connect() self.cluster_to_shutdown.shutdown() # Test with valid and invalid hosts - self.cluster_to_shutdown = Cluster(protocol_version=PROTOCOL_VERSION) - Session(self.cluster_to_shutdown, [Host(x, SimpleConvictionPolicy) for x in - ("127.0.0.1", "127.0.0.2", "1.2.3.4")]) + self.cluster_to_shutdown = Cluster(("127.0.0.1", "127.0.0.2", "1.2.3.4"), + protocol_version=PROTOCOL_VERSION) + self.cluster_to_shutdown.connect() self.cluster_to_shutdown.shutdown() def test_protocol_negotiation(self): From 17c55e6daecf2a4674d9d22730b792d67f1230f4 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 20 Jun 2018 15:21:41 -0400 Subject: [PATCH 1415/2005] changelog entry --- CHANGELOG.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 7da9ced7cf..30f488020e 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,6 +4,7 @@ Bug Fixes --------- * Tokenmap.get_replicas returns the wrong value if token coincides with the end of the range (PYTHON-978) * Python Driver fails with "more than 255 arguments" python exception when > 255 columns specified in query response (PYTHON-893) +* Hang in integration.standard.test_cluster.ClusterTests.test_set_keyspace_twice (PYTHON-998) Other ----- From 4561427668f68957051ea046a80086b5f8ad7033 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 5 Jul 2018 11:53:51 -0400 Subject: [PATCH 1416/2005] style cleanup before PYTHON-922 --- cassandra/metadata.py | 76 ++++++++++++++++++++++++------------------- 1 file changed, 43 insertions(+), 33 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 6fb6a95ece..a7b7b6266b 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -39,7 +39,7 @@ from cassandra.encoder import Encoder from cassandra.marshal import varint_unpack from cassandra.protocol import QueryMessage -from cassandra.query import dict_factory, bind_params, Statement +from cassandra.query import dict_factory, bind_params from cassandra.util import OrderedDict from cassandra.pool import HostDistance @@ -408,9 +408,9 @@ def __init__(self, name, options_map): self.options_map['class'] = self.name def __eq__(self, other): - return (isinstance(other, _UnknownStrategy) - and self.name == other.name - and self.options_map == other.options_map) + return (isinstance(other, _UnknownStrategy) and + self.name == other.name and + self.options_map == other.options_map) def export_for_schema(self): """ @@ -658,11 +658,11 @@ def export_as_string(self): Returns a CQL query string that can be used to recreate the entire keyspace, including user-defined types and tables. """ - cql = "\n\n".join([self.as_cql_query() + ';'] - + self.user_type_strings() - + [f.export_as_string() for f in self.functions.values()] - + [a.export_as_string() for a in self.aggregates.values()] - + [t.export_as_string() for t in self.tables.values()]) + cql = "\n\n".join([self.as_cql_query() + ';'] + + self.user_type_strings() + + [f.export_as_string() for f in self.functions.values()] + + [a.export_as_string() for a in self.aggregates.values()] + + [t.export_as_string() for t in self.tables.values()]) if self._exc_info: import traceback ret = "/*\nWarning: Keyspace %s is incomplete because of an error processing metadata.\n" % \ @@ -1530,6 +1530,7 @@ def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self.value) __str__ = __repr__ + MIN_LONG = -(2 ** 63) MAX_LONG = (2 ** 63) - 1 @@ -1870,8 +1871,8 @@ def _build_table_metadata(self, row, col_rows=None, trigger_rows=None): is_compact = False has_value = False clustering_size = num_column_name_components - 2 - elif (len(column_aliases) == num_column_name_components - 1 - and issubclass(last_col, types.UTF8Type)): + elif (len(column_aliases) == num_column_name_components - 1 and + issubclass(last_col, types.UTF8Type)): # aliases? is_compact = False has_value = False @@ -2064,11 +2065,11 @@ def _query_all(self): ] responses = self.connection.wait_for_responses(*queries, timeout=self.timeout, fail_on_error=False) - (ks_success, ks_result), (table_success, table_result), \ - (col_success, col_result), (types_success, types_result), \ - (functions_success, functions_result), \ - (aggregates_success, aggregates_result), \ - (triggers_success, triggers_result) = responses + ((ks_success, ks_result), (table_success, table_result), + (col_success, col_result), (types_success, types_result), + (functions_success, functions_result), + (aggregates_success, aggregates_result), + (triggers_success, triggers_result)) = responses self.keyspaces_result = self._handle_results(ks_success, ks_result) self.tables_result = self._handle_results(table_success, table_result) @@ -2208,10 +2209,13 @@ def get_table(self, keyspaces, keyspace, table): where_clause = bind_params(" WHERE keyspace_name = %s AND view_name = %s", (keyspace, table), _encoder) view_query = QueryMessage(query=self._SELECT_VIEWS + where_clause, consistency_level=cl) - (cf_success, cf_result), (col_success, col_result), (indexes_sucess, indexes_result), \ - (triggers_success, triggers_result), (view_success, view_result) \ - = self.connection.wait_for_responses(cf_query, col_query, indexes_query, triggers_query, view_query, - timeout=self.timeout, fail_on_error=False) + ((cf_success, cf_result), (col_success, col_result), + (indexes_sucess, indexes_result), (triggers_success, triggers_result), + (view_success, view_result)) = ( + self.connection.wait_for_responses( + cf_query, col_query, indexes_query, triggers_query, + view_query, timeout=self.timeout, fail_on_error=False) + ) table_result = self._handle_results(cf_success, cf_result) col_result = self._handle_results(col_success, col_result) if table_result: @@ -2372,13 +2376,13 @@ def _query_all(self): ] responses = self.connection.wait_for_responses(*queries, timeout=self.timeout, fail_on_error=False) - (ks_success, ks_result), (table_success, table_result), \ - (col_success, col_result), (types_success, types_result), \ - (functions_success, functions_result), \ - (aggregates_success, aggregates_result), \ - (triggers_success, triggers_result), \ - (indexes_success, indexes_result), \ - (views_success, views_result) = responses + ((ks_success, ks_result), (table_success, table_result), + (col_success, col_result), (types_success, types_result), + (functions_success, functions_result), + (aggregates_success, aggregates_result), + (triggers_success, triggers_result), + (indexes_success, indexes_result), + (views_success, views_result)) = responses self.keyspaces_result = self._handle_results(ks_success, ks_result) self.tables_result = self._handle_results(table_success, table_result) @@ -2410,6 +2414,10 @@ def _schema_type_to_cql(type_string): return type_string +class SchemaParserV4(SchemaParserV3): + pass + + class TableMetadataV3(TableMetadata): compaction_options = {} @@ -2529,12 +2537,12 @@ def as_cql_query(self, formatted=False): properties = TableMetadataV3._property_string(formatted, self.clustering_key, self.options) - ret = "CREATE MATERIALIZED VIEW %(keyspace)s.%(name)s AS%(sep)s" \ - "SELECT %(selected_cols)s%(sep)s" \ - "FROM %(keyspace)s.%(base_table)s%(sep)s" \ - "WHERE %(where_clause)s%(sep)s" \ - "PRIMARY KEY %(pk)s%(sep)s" \ - "WITH %(properties)s" % locals() + ret = ("CREATE MATERIALIZED VIEW %(keyspace)s.%(name)s AS%(sep)s" + "SELECT %(selected_cols)s%(sep)s" + "FROM %(keyspace)s.%(base_table)s%(sep)s" + "WHERE %(where_clause)s%(sep)s" + "PRIMARY KEY %(pk)s%(sep)s" + "WITH %(properties)s") % locals() if self.extensions: registry = _RegisteredExtensionType._extension_registry @@ -2551,6 +2559,8 @@ def export_as_string(self): def get_schema_parser(connection, server_version, timeout): server_major_version = int(server_version.split('.')[0]) + if server_major_version >= 4: + return SchemaParserV4(connection, timeout) if server_major_version >= 3: return SchemaParserV3(connection, timeout) else: From 53c7fd61c45f4a73e8b6a0ff15b27da83091b293 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 12 Jul 2018 16:01:41 -0400 Subject: [PATCH 1417/2005] SchemaParserV4 fixes --- CHANGELOG.rst | 1 + cassandra/metadata.py | 10 +++++++++- tests/integration/standard/test_metadata.py | 6 ++++-- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 30f488020e..d7e0f5ece3 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -9,6 +9,7 @@ Bug Fixes Other ----- * Use global variable for libev loops so it can be subclassed (PYTHON-973) +* Update SchemaParser for V4 (PYTHON-1006) 3.14.0 ====== diff --git a/cassandra/metadata.py b/cassandra/metadata.py index a7b7b6266b..9f15faffb6 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2415,7 +2415,15 @@ def _schema_type_to_cql(type_string): class SchemaParserV4(SchemaParserV3): - pass + + recognized_table_options = tuple( + opt for opt in + SchemaParserV3.recognized_table_options + if opt not in ( + # removed in V4: CASSANDRA-13910 + 'dclocal_read_repair_chance', 'read_repair_chance' + ) + ) class TableMetadataV3(TableMetadata): diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 1a58438c9f..fe42411ef7 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -470,8 +470,10 @@ def test_non_size_tiered_compaction(self): cql = table_meta.export_as_string() self.assertIn("'tombstone_threshold': '0.3'", cql) self.assertIn("LeveledCompactionStrategy", cql) - self.assertNotIn("min_threshold", cql) - self.assertNotIn("max_threshold", cql) + # formerly legacy options; reintroduced in 4.0 + if CASSANDRA_VERSION < Version('4.0'): + self.assertNotIn("min_threshold", cql) + self.assertNotIn("max_threshold", cql) def test_refresh_schema_metadata(self): """ From fc0803ad4697cf83828e9af077d7062c347930aa Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 19 Jul 2018 11:03:59 -0400 Subject: [PATCH 1418/2005] change loop names to fix checks --- cassandra/io/asyncorereactor.py | 2 +- cassandra/io/twistedreactor.py | 2 +- tests/integration/standard/test_connection.py | 21 ++++++++++++++----- 3 files changed, 18 insertions(+), 7 deletions(-) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 67744e5afe..d2ea96d80b 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -237,7 +237,7 @@ def maybe_start(self): self._loop_lock.release() if should_start: - self._thread = Thread(target=self._run_loop, name="cassandra_driver_event_loop") + self._thread = Thread(target=self._run_loop, name="asyncore_cassandra_driver_event_loop") self._thread.daemon = True self._thread.start() diff --git a/cassandra/io/twistedreactor.py b/cassandra/io/twistedreactor.py index 8121c784c4..3611cdf851 100644 --- a/cassandra/io/twistedreactor.py +++ b/cassandra/io/twistedreactor.py @@ -128,7 +128,7 @@ def maybe_start(self): with self._lock: if not reactor.running: self._thread = Thread(target=reactor.run, - name="cassandra_driver_event_loop", + name="cassandra_driver_twisted_event_loop", kwargs={'installSignalHandlers': False}) self._thread.daemon = True self._thread.start() diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 8e57f5758b..3ce3f5e2b6 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -18,11 +18,13 @@ import unittest # noqa from functools import partial +import logging from six.moves import range import sys import threading from threading import Thread, Event import time +from unittest import SkipTest from cassandra import ConsistencyLevel, OperationTimedOut from cassandra.cluster import NoHostAvailable, ConnectionShutdown, Cluster @@ -43,6 +45,9 @@ LibevConnection = None +log = logging.getLogger(__name__) + + def setup_module(): use_singledc() @@ -394,10 +399,14 @@ def test_connect_timeout(self): self.assertTrue(exception_thrown) def test_subclasses_share_loop(self): - class C1(AsyncoreConnection): + + if self.klass not in (AsyncoreConnection, LibevConnection): + raise SkipTest + + class C1(self.klass): pass - class C2(AsyncoreConnection): + class C2(self.klass): pass clusterC1 = Cluster(connection_class=C1) @@ -412,8 +421,10 @@ class C2(AsyncoreConnection): def get_eventloop_threads(name): - import threading - event_loops_threads = [thread for thread in threading.enumerate() if name == thread.name] + all_threads = list(threading.enumerate()) + log.debug('all threads: {}'.format(all_threads)) + log.debug('all names: {}'.format([thread.name for thread in all_threads])) + event_loops_threads = [thread for thread in all_threads if name == thread.name] return event_loops_threads @@ -421,7 +432,7 @@ def get_eventloop_threads(name): class AsyncoreConnectionTests(ConnectionTests, unittest.TestCase): klass = AsyncoreConnection - event_loop_name = "cassandra_driver_event_loop" + event_loop_name = "asyncore_cassandra_driver_event_loop" def setUp(self): if is_monkey_patched(): From 1dcc85a235a6c0734cd18191680af0931d56117a Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 19 Jul 2018 13:23:02 -0400 Subject: [PATCH 1419/2005] fix asyncio reactor tests --- tests/unit/io/test_asyncioreactor.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/tests/unit/io/test_asyncioreactor.py b/tests/unit/io/test_asyncioreactor.py index fb7965a5e3..3458ecbcbc 100644 --- a/tests/unit/io/test_asyncioreactor.py +++ b/tests/unit/io/test_asyncioreactor.py @@ -1,12 +1,12 @@ - try: from cassandra.io.asyncioreactor import AsyncioConnection import asynctest ASYNCIO_AVAILABLE = True except (ImportError, SyntaxError): + AysncioConnection = None ASYNCIO_AVAILABLE = False -from tests import is_monkey_patched +from tests import is_monkey_patched, connection_class from tests.unit.io.utils import TimerCallback, TimerTestMixin from mock import patch @@ -14,20 +14,29 @@ import unittest import time +skip_me = (is_monkey_patched() or + (not ASYNCIO_AVAILABLE) or + (connection_class is not AsyncioConnection)) + +@unittest.skipIf(is_monkey_patched(), 'runtime is monkey patched for another reactor') +@unittest.skipIf(connection_class is not AsyncioConnection, + 'not running asyncio tests; current connection_class is {}'.format(connection_class)) @unittest.skipUnless(ASYNCIO_AVAILABLE, "asyncio is not available for this runtime") class AsyncioTimerTests(TimerTestMixin, unittest.TestCase): @classmethod def setUpClass(cls): - if is_monkey_patched() or not ASYNCIO_AVAILABLE: + if skip_me: return cls.connection_class = AsyncioConnection AsyncioConnection.initialize_reactor() @classmethod def tearDownClass(cls): - if ASYNCIO_AVAILABLE: + if skip_me: + return + if ASYNCIO_AVAILABLE and AsyncioConnection._loop: AsyncioConnection._loop.stop() @property @@ -39,6 +48,8 @@ def _timers(self): raise RuntimeError('no TimerManager for AsyncioConnection') def setUp(self): + if skip_me: + return socket_patcher = patch('socket.socket') self.addCleanup(socket_patcher.stop) socket_patcher.start() From 8d9fb3979d50af4305802bdb2ba1ca9ca07fe79a Mon Sep 17 00:00:00 2001 From: Andrew Tolbert Date: Tue, 7 Aug 2018 08:38:26 -0500 Subject: [PATCH 1420/2005] Use ubuntu/bionic64/python-driver bionic image on jenkins (#974) --- build.yaml | 16 +++------------- tests/unit/io/test_asyncioreactor.py | 2 +- 2 files changed, 4 insertions(+), 14 deletions(-) diff --git a/build.yaml b/build.yaml index d5c114e1e2..c38d370c71 100644 --- a/build.yaml +++ b/build.yaml @@ -99,7 +99,7 @@ python: - 3.6 os: - - ubuntu/trusty64 + - ubuntu/bionic64/python-driver cassandra: - '2.1' @@ -122,9 +122,7 @@ build: pip install --upgrade pip pip install -U setuptools - pip install git+https://github.com/pcmanus/ccm.git - # Install dependencies - sudo apt-get install -y libev4 libev-dev libssl-dev + pip install git+https://github.com/riptano/ccm.git pip install -r test-requirements.txt pip install nose-ignore-docstring @@ -143,15 +141,6 @@ build: python setup.py build_ext --inplace --no-cython fi - echo "Installing simulacron" - pushd ~ - - SIMULACRON_JAR=`find \`pwd\` -name "simulacron-standalone-*.jar"` - echo "SIMULACRON_JAR: $SIMULACRON_JAR" - - popd - - echo "JUST_UPGRADE: $JUST_UPGRADE" if [[ $JUST_UPGRADE == 'True' ]]; then EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=upgrade_results.xml tests/integration/upgrade || true @@ -170,6 +159,7 @@ build: echo "Running with event loop manager: $EVENT_LOOP_MANAGER" echo "==========RUNNING SIMULACRON TESTS==========" + SIMULACRON_JAR="$HOME/simulacron.jar" SIMULACRON_JAR=$SIMULACRON_JAR EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_DIR=$CCM_INSTALL_DIR DSE_VERSION=$CCM_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true echo "Running with event loop manager: $EVENT_LOOP_MANAGER" diff --git a/tests/unit/io/test_asyncioreactor.py b/tests/unit/io/test_asyncioreactor.py index 3458ecbcbc..be3c2bcd2e 100644 --- a/tests/unit/io/test_asyncioreactor.py +++ b/tests/unit/io/test_asyncioreactor.py @@ -3,7 +3,7 @@ import asynctest ASYNCIO_AVAILABLE = True except (ImportError, SyntaxError): - AysncioConnection = None + AsyncioConnection = None ASYNCIO_AVAILABLE = False from tests import is_monkey_patched, connection_class From a35e549679d7abb48824bbfa36dd07708ea52752 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 13 Jul 2018 12:47:48 -0400 Subject: [PATCH 1421/2005] style fixes in metadata integration tests --- tests/integration/standard/test_metadata.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index fe42411ef7..3982d6dec2 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -28,7 +28,7 @@ from cassandra.cluster import Cluster from cassandra.encoder import Encoder -from cassandra.metadata import (IndexMetadata, Token, murmur3, Function, Aggregate, protect_name, protect_names, +from cassandra.metadata import (IndexMetadata, Token, murmur3, Function, Aggregate, protect_name, protect_names, RegisteredTableExtension, _RegisteredExtensionType, get_schema_parser, group_keys_by_replica, NO_VALID_REPLICA) @@ -40,6 +40,7 @@ from tests.integration import greaterthancass21 + def setup_module(): use_singledc() @@ -79,11 +80,12 @@ def test_host_release_version(self): for host in self.cluster.metadata.all_hosts(): self.assertTrue(host.release_version.startswith(CASSANDRA_VERSION.base_version)) + @local class MetaDataRemovalTest(unittest.TestCase): def setUp(self): - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, contact_points=['127.0.0.1','127.0.0.2', '127.0.0.3', '126.0.0.186']) + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, contact_points=['127.0.0.1', '127.0.0.2', '127.0.0.3', '126.0.0.186']) self.cluster.connect() def tearDown(self): @@ -1912,7 +1914,10 @@ def test_dct_alias(self): dct_table = self.cluster.metadata.keyspaces.get(self.ks_name).tables.get(self.function_table_name) # Format can very slightly between versions, strip out whitespace for consistency sake - self.assertTrue("c1'org.apache.cassandra.db.marshal.DynamicCompositeType(s=>org.apache.cassandra.db.marshal.UTF8Type,i=>org.apache.cassandra.db.marshal.Int32Type)'" in dct_table.as_cql_query().replace(" ", "")) + self.assertTrue("c1'org.apache.cassandra.db.marshal.DynamicCompositeType(" + "s=>org.apache.cassandra.db.marshal.UTF8Type," + "i=>org.apache.cassandra.db.marshal.Int32Type)'" + in dct_table.as_cql_query().replace(" ", "")) @greaterthanorequalcass30 @@ -1963,7 +1968,7 @@ def test_materialized_view_metadata_alter(self): @test_category metadata """ - self.assertIn("SizeTieredCompactionStrategy", self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views["mv1"].options["compaction"]["class"] ) + self.assertIn("SizeTieredCompactionStrategy", self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views["mv1"].options["compaction"]["class"]) self.session.execute("ALTER MATERIALIZED VIEW {0}.mv1 WITH compaction = {{ 'class' : 'LeveledCompactionStrategy' }}".format(self.keyspace_name)) self.assertIn("LeveledCompactionStrategy", self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views["mv1"].options["compaction"]["class"]) @@ -2295,6 +2300,7 @@ def test_metadata_with_quoted_identifiers(self): self.assertIsNotNone(value_column) self.assertEqual(value_column.name, 'the Value') + class GroupPerHost(BasicSharedKeyspaceUnitTestCase): @classmethod def setUpClass(cls): @@ -2339,5 +2345,5 @@ def _assert_group_keys_by_host(self, keys, table_name, stmt): for key in keys: routing_key = prepared_stmt.bind(key).routing_key hosts = self.cluster.metadata.get_replicas(self.ks_name, routing_key) - self.assertEqual(1, len(hosts)) # RF is 1 for this keyspace + self.assertEqual(1, len(hosts)) # RF is 1 for this keyspace self.assertIn(key, keys_per_host[hosts[0]]) From 0a901130e43020eb90fccfa121792fd15ea33f35 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 10 Jul 2018 11:16:53 -0400 Subject: [PATCH 1422/2005] improve tuple unpacking readability and remove intermediate variables --- cassandra/metadata.py | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 9f15faffb6..b122478773 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2064,12 +2064,16 @@ def _query_all(self): QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl) ] - responses = self.connection.wait_for_responses(*queries, timeout=self.timeout, fail_on_error=False) - ((ks_success, ks_result), (table_success, table_result), - (col_success, col_result), (types_success, types_result), + ((ks_success, ks_result), + (table_success, table_result), + (col_success, col_result), + (types_success, types_result), (functions_success, functions_result), (aggregates_success, aggregates_result), - (triggers_success, triggers_result)) = responses + (triggers_success, triggers_result)) = ( + self.connection.wait_for_responses(*queries, timeout=self.timeout, + fail_on_error=False) + ) self.keyspaces_result = self._handle_results(ks_success, ks_result) self.tables_result = self._handle_results(table_success, table_result) @@ -2375,14 +2379,17 @@ def _query_all(self): QueryMessage(query=self._SELECT_VIEWS, consistency_level=cl) ] - responses = self.connection.wait_for_responses(*queries, timeout=self.timeout, fail_on_error=False) - ((ks_success, ks_result), (table_success, table_result), - (col_success, col_result), (types_success, types_result), + ((ks_success, ks_result), + (table_success, table_result), + (col_success, col_result), + (types_success, types_result), (functions_success, functions_result), (aggregates_success, aggregates_result), (triggers_success, triggers_result), (indexes_success, indexes_result), - (views_success, views_result)) = responses + (views_success, views_result)) = self.connection.wait_for_responses( + *queries, timeout=self.timeout, fail_on_error=False + ) self.keyspaces_result = self._handle_results(ks_success, ks_result) self.tables_result = self._handle_results(table_success, table_result) From a6d0626205d940180d7f9c950130ddb2a2ca0ab2 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 13 Jul 2018 12:38:05 -0400 Subject: [PATCH 1423/2005] add virtual metadata to parser --- CHANGELOG.rst | 5 + cassandra/metadata.py | 158 +++++++++++++++++++- tests/integration/standard/test_metadata.py | 78 +++++++++- 3 files changed, 233 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index d7e0f5ece3..779f60575f 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,5 +1,10 @@ 3.15.0 ====== + +Features +-------- +* Parse Virtual Keyspace Metadata (PYTHON-992) + Bug Fixes --------- * Tokenmap.get_replicas returns the wrong value if token coincides with the end of the range (PYTHON-978) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index b122478773..f50fbdedfe 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -639,6 +639,14 @@ class KeyspaceMetadata(object): A dict mapping view names to :class:`.MaterializedViewMetadata` instances. """ + virtual = False + """ + A boolean indicating if this is a virtual keyspace or not. Always ``False`` + for clusters running pre-4.0 versions of Cassandra. + + .. versionadded:: 3.15 + """ + _exc_info = None """ set if metadata parsing failed """ @@ -671,6 +679,11 @@ def export_as_string(self): ret += line ret += "\nApproximate structure, for reference:\n(this should not be used to reproduce this schema)\n\n%s\n*/" % cql return ret + if self.virtual: + return ("/*\nWarning: Keyspace {ks} is a virtual keyspace and cannot be recreated with CQL.\n" + "Structure, for reference:*/\n" + "{cql}\n" + "").format(ks=self.name, cql=cql) return cql def as_cql_query(self): @@ -678,6 +691,8 @@ def as_cql_query(self): Returns a CQL query string that can be used to recreate just this keyspace, not including user-defined types and tables. """ + if self.virtual: + return "// VIRTUAL KEYSPACE {}".format(protect_name(self.name)) ret = "CREATE KEYSPACE %s WITH replication = %s " % ( protect_name(self.name), self.replication_strategy.export_for_schema()) @@ -1065,11 +1080,21 @@ def primary_key(self): _exc_info = None """ set if metadata parsing failed """ + virtual = False + """ + A boolean indicating if this is a virtual table or not. Always ``False`` + for clusters running pre-4.0 versions of Cassandra. + + .. versionadded:: 3.15 + """ + @property def is_cql_compatible(self): """ A boolean indicating if this table can be represented as CQL in export """ + if self.virtual: + return False comparator = getattr(self, 'comparator', None) if comparator: # no compact storage with more than one column beyond PK if there @@ -1086,7 +1111,7 @@ def is_cql_compatible(self): Metadata describing configuration for table extensions """ - def __init__(self, keyspace_name, name, partition_key=None, clustering_key=None, columns=None, triggers=None, options=None): + def __init__(self, keyspace_name, name, partition_key=None, clustering_key=None, columns=None, triggers=None, options=None, virtual=False): self.keyspace_name = keyspace_name self.name = name self.partition_key = [] if partition_key is None else partition_key @@ -1097,6 +1122,7 @@ def __init__(self, keyspace_name, name, partition_key=None, clustering_key=None, self.comparator = None self.triggers = OrderedDict() if triggers is None else triggers self.views = {} + self.virtual = virtual def export_as_string(self): """ @@ -1116,6 +1142,11 @@ def export_as_string(self): ret = "/*\nWarning: Table %s.%s omitted because it has constructs not compatible with CQL (was created via legacy API).\n" % \ (self.keyspace_name, self.name) ret += "\nApproximate structure, for reference:\n(this should not be used to reproduce this schema)\n\n%s\n*/" % self._all_as_cql() + elif self.virtual: + ret = ('/*\nWarning: Table {ks}.{tab} is a virtual table and cannot be recreated with CQL.\n' + 'Structure, for reference:\n' + '{cql}\n*/').format(ks=self.keyspace_name, tab=self.name, cql=self._all_as_cql()) + else: ret = self._all_as_cql() @@ -1150,7 +1181,8 @@ def as_cql_query(self, formatted=False): creations are not included). If `formatted` is set to :const:`True`, extra whitespace will be added to make the query human readable. """ - ret = "CREATE TABLE %s.%s (%s" % ( + ret = "%s TABLE %s.%s (%s" % ( + ('VIRTUAL' if self.virtual else 'CREATE'), protect_name(self.keyspace_name), protect_name(self.name), "\n" if formatted else "") @@ -2245,7 +2277,7 @@ def _build_aggregate(aggregate_row): aggregate_row['argument_types'], aggregate_row['state_func'], aggregate_row['state_type'], aggregate_row['final_func'], aggregate_row['initcond'], aggregate_row['return_type']) - def _build_table_metadata(self, row, col_rows=None, trigger_rows=None, index_rows=None): + def _build_table_metadata(self, row, col_rows=None, trigger_rows=None, index_rows=None, virtual=False): keyspace_name = row["keyspace_name"] table_name = row[self._table_name_col] @@ -2253,7 +2285,7 @@ def _build_table_metadata(self, row, col_rows=None, trigger_rows=None, index_row trigger_rows = trigger_rows or self.keyspace_table_trigger_rows[keyspace_name][table_name] index_rows = index_rows or self.keyspace_table_index_rows[keyspace_name][table_name] - table_meta = TableMetadataV3(keyspace_name, table_name) + table_meta = TableMetadataV3(keyspace_name, table_name, virtual=virtual) try: table_meta.options = self._build_table_options(row) flags = row.get('flags', set()) @@ -2261,12 +2293,16 @@ def _build_table_metadata(self, row, col_rows=None, trigger_rows=None, index_row compact_static = False table_meta.is_compact_storage = 'dense' in flags or 'super' in flags or 'compound' not in flags is_dense = 'dense' in flags + elif virtual: + compact_static = False + table_meta.is_compact_storage = False + is_dense = False else: compact_static = True table_meta.is_compact_storage = True is_dense = False - self._build_table_columns(table_meta, col_rows, compact_static, is_dense) + self._build_table_columns(table_meta, col_rows, compact_static, is_dense, virtual) for trigger_row in trigger_rows: trigger_meta = self._build_trigger_metadata(table_meta, trigger_row) @@ -2288,7 +2324,7 @@ def _build_table_options(self, row): """ Setup the mostly-non-schema table options, like caching settings """ return dict((o, row.get(o)) for o in self.recognized_table_options if o in row) - def _build_table_columns(self, meta, col_rows, compact_static=False, is_dense=False): + def _build_table_columns(self, meta, col_rows, compact_static=False, is_dense=False, virtual=False): # partition key partition_rows = [r for r in col_rows if r.get('kind', None) == "partition_key"] @@ -2432,6 +2468,116 @@ class SchemaParserV4(SchemaParserV3): ) ) + _SELECT_VIRTUAL_KEYSPACES = 'SELECT * from system_virtual_schema.keyspaces' + _SELECT_VIRTUAL_TABLES = 'SELECT * from system_virtual_schema.tables' + _SELECT_VIRTUAL_COLUMNS = 'SELECT * from system_virtual_schema.columns' + + def __init__(self, connection, timeout): + super(SchemaParserV4, self).__init__(connection, timeout) + self.virtual_keyspaces_rows = defaultdict(list) + self.virtual_tables_rows = defaultdict(list) + self.virtual_columns_rows = defaultdict(lambda: defaultdict(list)) + + def _query_all(self): + cl = ConsistencyLevel.ONE + # todo: this duplicates V3; we should find a way for _query_all methods + # to extend each other. + queries = [ + # copied from V3 + QueryMessage(query=self._SELECT_KEYSPACES, consistency_level=cl), + QueryMessage(query=self._SELECT_TABLES, consistency_level=cl), + QueryMessage(query=self._SELECT_COLUMNS, consistency_level=cl), + QueryMessage(query=self._SELECT_TYPES, consistency_level=cl), + QueryMessage(query=self._SELECT_FUNCTIONS, consistency_level=cl), + QueryMessage(query=self._SELECT_AGGREGATES, consistency_level=cl), + QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl), + QueryMessage(query=self._SELECT_INDEXES, consistency_level=cl), + QueryMessage(query=self._SELECT_VIEWS, consistency_level=cl), + # V4-only queries + QueryMessage(query=self._SELECT_VIRTUAL_KEYSPACES, consistency_level=cl), + QueryMessage(query=self._SELECT_VIRTUAL_TABLES, consistency_level=cl), + QueryMessage(query=self._SELECT_VIRTUAL_COLUMNS, consistency_level=cl) + ] + + responses = self.connection.wait_for_responses( + *queries, timeout=self.timeout, fail_on_error=False) + ( + # copied from V3 + (ks_success, ks_result), + (table_success, table_result), + (col_success, col_result), + (types_success, types_result), + (functions_success, functions_result), + (aggregates_success, aggregates_result), + (triggers_success, triggers_result), + (indexes_success, indexes_result), + (views_success, views_result), + # V4-only responses + (virtual_ks_success, virtual_ks_result), + (virtual_table_success, virtual_table_result), + (virtual_column_success, virtual_column_result) + ) = responses + + # copied from V3 + self.keyspaces_result = self._handle_results(ks_success, ks_result) + self.tables_result = self._handle_results(table_success, table_result) + self.columns_result = self._handle_results(col_success, col_result) + self.triggers_result = self._handle_results(triggers_success, triggers_result) + self.types_result = self._handle_results(types_success, types_result) + self.functions_result = self._handle_results(functions_success, functions_result) + self.aggregates_result = self._handle_results(aggregates_success, aggregates_result) + self.indexes_result = self._handle_results(indexes_success, indexes_result) + self.views_result = self._handle_results(views_success, views_result) + # V4-only results + self.virtual_keyspaces_result = self._handle_results(virtual_ks_success, + virtual_ks_result) + self.virtual_tables_result = self._handle_results(virtual_table_success, + virtual_table_result) + self.virtual_columns_result = self._handle_results(virtual_column_success, + virtual_column_result) + self._aggregate_results() + + def _aggregate_results(self): + super(SchemaParserV4, self)._aggregate_results() + + m = self.virtual_tables_rows + for row in self.virtual_tables_result: + m[row["keyspace_name"]].append(row) + + m = self.virtual_columns_rows + for row in self.virtual_columns_result: + ks_name = row['keyspace_name'] + tab_name = row[self._table_name_col] + m[ks_name][tab_name].append(row) + + def get_all_keyspaces(self): + for x in super(SchemaParserV4, self).get_all_keyspaces(): + yield x + + for row in self.virtual_keyspaces_result: + ks_name = row['keyspace_name'] + keyspace_meta = self._build_keyspace_metadata(row) + keyspace_meta.virtual = True + + for table_row in self.virtual_tables_rows.get(ks_name, []): + table_name = table_row[self._table_name_col] + + col_rows = self.virtual_columns_rows[ks_name][table_name] + keyspace_meta._add_table_metadata( + self._build_table_metadata(table_row, + col_rows=col_rows, + virtual=True) + ) + yield keyspace_meta + + @staticmethod + def _build_keyspace_metadata_internal(row): + # necessary fields that aren't int virtual ks + row["durable_writes"] = row.get("durable_writes", None) + row["replication"] = row.get("replication", {}) + row["replication"]["class"] = row["replication"].get("class", None) + return super(SchemaParserV4, SchemaParserV4)._build_keyspace_metadata_internal(row) + class TableMetadataV3(TableMetadata): compaction_options = {} diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 3982d6dec2..d12a655fa1 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -17,7 +17,9 @@ except ImportError: import unittest # noqa +from collections import defaultdict import difflib +import logging import six import sys import time @@ -36,11 +38,14 @@ BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, BasicExistingKeyspaceUnitTestCase, drop_keyspace_shutdown_cluster, CASSANDRA_VERSION, get_supported_protocol_versions, greaterthanorequalcass30, lessthancass30, local, - greaterthancass20) + greaterthancass20, greaterthanorequalcass40) from tests.integration import greaterthancass21 +log = logging.getLogger(__name__) + + def setup_module(): use_singledc() @@ -2334,7 +2339,7 @@ def test_group_keys_by_host(self): stmt = """SELECT * FROM {}.{} WHERE k = ? """.format(self.ks_name, self.ks_name) - keys = ((1, ), (2, ), (2, ), (3, )) + keys = ((1,), (2,), (2,), (3,)) self._assert_group_keys_by_host(keys, self.ks_name, stmt) def _assert_group_keys_by_host(self, keys, table_name, stmt): @@ -2347,3 +2352,72 @@ def _assert_group_keys_by_host(self, keys, table_name, stmt): hosts = self.cluster.metadata.get_replicas(self.ks_name, routing_key) self.assertEqual(1, len(hosts)) # RF is 1 for this keyspace self.assertIn(key, keys_per_host[hosts[0]]) + + +class VirtualKeypaceTest(BasicSharedKeyspaceUnitTestCase): + virtual_ks_names = ('system_virtual_schema', 'system_views') + + virtual_ks_structure = { + 'system_views': { + # map from table names to sets of column names for unordered + # comparison + 'caches': {'capacity_bytes', 'entry_count', 'hit_count', + 'hit_ratio', 'name', 'recent_hit_rate_per_second', + 'recent_request_rate_per_second', 'request_count', + 'size_bytes'}, + 'clients': {'address', 'connection_stage', 'driver_name', + 'driver_version', 'hostname', 'port', + 'protocol_version', 'request_count', + 'ssl_cipher_suite', 'ssl_enabled', 'ssl_protocol', + 'username'}, + 'sstable_tasks': {'keyspace_name', 'kind', 'progress', + 'table_name', 'task_id', 'total', 'unit'}, + 'thread_pools': {'active_tasks', 'active_tasks_limit', + 'blocked_tasks', 'blocked_tasks_all_time', + 'completed_tasks', 'name', 'pending_tasks'} + }, + 'system_virtual_schema': { + 'columns': {'clustering_order', 'column_name', + 'column_name_bytes', 'keyspace_name', 'kind', + 'position', 'table_name', 'type'}, + 'keyspaces': {'keyspace_name'}, + 'tables': {'comment', 'keyspace_name', 'table_name'} + } + } + + def test_existing_keyspaces_have_correct_virtual_tags(self): + for name, ks in self.cluster.metadata.keyspaces.items(): + if name in self.virtual_ks_names: + self.assertTrue( + ks.virtual, + 'incorrect .virtual value for {}'.format(name) + ) + else: + self.assertFalse( + ks.virtual, + 'incorrect .virtual value for {}'.format(name) + ) + + @greaterthanorequalcass40 + def test_expected_keyspaces_exist_and_are_virtual(self): + for name in self.virtual_ks_names: + self.assertTrue( + self.cluster.metadata.keyspaces[name].virtual, + 'incorrect .virtual value for {}'.format(name) + ) + + @greaterthanorequalcass40 + def test_virtual_keyspaces_have_expected_schema_structure(self): + self.maxDiff = None + + ingested_virtual_ks_structure = defaultdict(dict) + for ks_name, ks in self.cluster.metadata.keyspaces.items(): + if not ks.virtual: + continue + for tab_name, tab in ks.tables.items(): + ingested_virtual_ks_structure[ks_name][tab_name] = set( + tab.columns.keys() + ) + + self.assertDictEqual(ingested_virtual_ks_structure, + self.virtual_ks_structure) From 580a122ea28c5d51541633184b3941d0d87c12bf Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 21 Aug 2018 15:58:44 -0400 Subject: [PATCH 1424/2005] Add test-dse and mapped_cassandra_version support --- build.yaml | 13 +++++++------ tests/integration/__init__.py | 31 ++++++++++++++++++++++++++----- 2 files changed, 33 insertions(+), 11 deletions(-) diff --git a/build.yaml b/build.yaml index c38d370c71..9c74192c8c 100644 --- a/build.yaml +++ b/build.yaml @@ -90,7 +90,7 @@ schedules: matrix: exclude: - python: [3.4, 3.6] - - cassandra: ['2.0', '2.1', '2.2', '3.0'] + - cassandra: ['2.0', '2.1', '2.2', '3.0', 'test-dse'] python: - 2.7 @@ -106,6 +106,7 @@ cassandra: - '2.2' - '3.0' - '3.11' + - 'test-dse' env: CYTHON: @@ -122,7 +123,7 @@ build: pip install --upgrade pip pip install -U setuptools - pip install git+https://github.com/riptano/ccm.git + pip install git+ssh://git@github.com/riptano/ccm-private.git pip install -r test-requirements.txt pip install nose-ignore-docstring @@ -160,18 +161,18 @@ build: echo "Running with event loop manager: $EVENT_LOOP_MANAGER" echo "==========RUNNING SIMULACRON TESTS==========" SIMULACRON_JAR="$HOME/simulacron.jar" - SIMULACRON_JAR=$SIMULACRON_JAR EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_DIR=$CCM_INSTALL_DIR DSE_VERSION=$CCM_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true + SIMULACRON_JAR=$SIMULACRON_JAR EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_DIR=$CCM_INSTALL_DIR CCM_ARGS="$CCM_ARGS" DSE_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true echo "Running with event loop manager: $EVENT_LOOP_MANAGER" echo "==========RUNNING CQLENGINE TESTS==========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true + EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true echo "==========RUNNING INTEGRATION TESTS==========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true + EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true if [ -z "$EXCLUDE_LONG" ]; then echo "==========RUNNING LONG INTEGRATION TESTS==========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true + EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true fi - xunit: - "*_results.xml" diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 6d506544e6..6bdb39ca9b 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -92,6 +92,18 @@ def _tuple_version(version_string): return tuple([int(p) for p in version_string.split('.')]) +def cmd_line_args_to_dict(env_var): + cmd_args_env = os.environ.get(env_var, None) + args = {} + if cmd_args_env: + cmd_args = cmd_args_env.strip().split(' ') + while cmd_args: + cmd_arg = cmd_args.pop(0) + cmd_arg_value = True if cmd_arg.startswith('--') else cmd_args.pop(0) + args[cmd_arg.lstrip('-')] = cmd_arg_value + return args + + def _get_dse_version_from_cass(cass_version): if cass_version.startswith('2.1'): dse_ver = "4.8.15" @@ -106,7 +118,6 @@ def _get_dse_version_from_cass(cass_version): dse_ver = "2.1" return dse_ver - USE_CASS_EXTERNAL = bool(os.getenv('USE_CASS_EXTERNAL', False)) KEEP_TEST_CLUSTER = bool(os.getenv('KEEP_TEST_CLUSTER', False)) SIMULACRON_JAR = os.getenv('SIMULACRON_JAR', None) @@ -115,7 +126,15 @@ def _get_dse_version_from_cass(cass_version): CASSANDRA_DIR = os.getenv('CASSANDRA_DIR', None) default_cassandra_version = '3.11' -CASSANDRA_VERSION = Version(os.getenv('CASSANDRA_VERSION', default_cassandra_version)) +cv_string = os.getenv('CASSANDRA_VERSION', default_cassandra_version) +mcv_string = os.getenv('MAPPED_CASSANDRA_VERSION', None) +try: + cassandra_version = Version(cv_string) # env var is set to test-dse +except: + # fallback to MAPPED_CASSANDRA_VERSION + cassandra_version = Version(mcv_string) +CASSANDRA_VERSION = Version(mcv_string) if mcv_string else cassandra_version +CCM_VERSION = cassandra_version if mcv_string else CASSANDRA_VERSION default_dse_version = _get_dse_version_from_cass(CASSANDRA_VERSION.base_version) @@ -128,7 +147,8 @@ def _get_dse_version_from_cass(cass_version): else: log.info('Using Cassandra version: %s', CASSANDRA_VERSION) - CCM_KWARGS['version'] = CASSANDRA_VERSION + log.info('Using CCM version: %s', CCM_VERSION) + CCM_KWARGS['version'] = CCM_VERSION #This changes the default contact_point parameter in Cluster def set_default_cass_ip(): @@ -339,7 +359,7 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[], se elif ccm_options is None: ccm_options = CCM_KWARGS.copy() - cassandra_version = ccm_options.get('version', CASSANDRA_VERSION) + cassandra_version = ccm_options.get('version', CCM_VERSION) dse_version = ccm_options.get('version', DSE_VERSION) if 'version' in ccm_options: @@ -373,6 +393,8 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[], se log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb + ccm_options.update(cmd_line_args_to_dict('CCM_ARGS')) + log.debug("Creating new CCM cluster, {0}, with args {1}".format(cluster_name, ccm_options)) if dse_cluster: @@ -391,7 +413,6 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[], se CCM_CLUSTER.set_dse_configuration_options(dse_options) else: - log.debug("Creating new CCM cluster, {0}, with args {1}".format(cluster_name, ccm_options)) CCM_CLUSTER = CCMCluster(path, cluster_name, **ccm_options) CCM_CLUSTER.set_configuration_options({'start_native_transport': True}) if cassandra_version >= Version('2.2'): From 1e6155b7bf3cc6be08afe7ee266e7eb64d9742ca Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 24 Aug 2018 13:56:28 -0400 Subject: [PATCH 1425/2005] bump cython dependency to 0.28 --- CHANGELOG.rst | 1 + setup.py | 2 +- test-requirements.txt | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 779f60575f..69521e1516 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -15,6 +15,7 @@ Other ----- * Use global variable for libev loops so it can be subclassed (PYTHON-973) * Update SchemaParser for V4 (PYTHON-1006) +* Bump Cython dependency version to 0.28 (PYTHON-1012) 3.14.0 ====== diff --git a/setup.py b/setup.py index 9d0f12b4f7..1b0ebf6d8b 100644 --- a/setup.py +++ b/setup.py @@ -395,7 +395,7 @@ def run_setup(extensions): # 1.) build_ext eats errors at compile time, letting the install complete while producing useful feedback # 2.) there could be a case where the python environment has cython installed but the system doesn't have build tools if pre_build_check(): - cython_dep = 'Cython>=0.20,!=0.25,<0.28' + cython_dep = 'Cython>=0.20,!=0.25,<0.29' user_specified_cython_version = os.environ.get('CASS_DRIVER_ALLOWED_CYTHON_VERSION') if user_specified_cython_version is not None: cython_dep = 'Cython==%s' % (user_specified_cython_version,) diff --git a/test-requirements.txt b/test-requirements.txt index 086576d36f..51636d4253 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -11,6 +11,6 @@ pure-sasl twisted[tls] gevent>=1.0 eventlet -cython>=0.20,<0.28 +cython>=0.20,<0.29 packaging asynctest; python_version > '3.4' From 26191f2f3d38721bdd8a2d006ea3327c7f82d135 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 27 Aug 2018 10:56:07 -0400 Subject: [PATCH 1426/2005] Bump version and update changelog --- CHANGELOG.rst | 3 ++- cassandra/__init__.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 69521e1516..ac40c4d33a 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,5 +1,6 @@ 3.15.0 ====== +August 29, 2018 Features -------- @@ -10,6 +11,7 @@ Bug Fixes * Tokenmap.get_replicas returns the wrong value if token coincides with the end of the range (PYTHON-978) * Python Driver fails with "more than 255 arguments" python exception when > 255 columns specified in query response (PYTHON-893) * Hang in integration.standard.test_cluster.ClusterTests.test_set_keyspace_twice (PYTHON-998) +* Asyncore reactors should use a global variable instead of a class variable for the event loop (PYTHON-697) Other ----- @@ -28,7 +30,6 @@ Features * Allow filter queries with fields that have an index managed outside of cqlengine (PYTHON-966) * Twisted SSL Support (PYTHON-343) * Support IS NOT NULL operator in cqlengine (PYTHON-968) -* Asyncore reactors should use a global variable instead of a class variable for the event loop (PYTHON-697) Other ----- diff --git a/cassandra/__init__.py b/cassandra/__init__.py index a3936b1999..59aaead849 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 14, 0) +__version_info__ = (3, 15, 0) __version__ = '.'.join(map(str, __version_info__)) From 830955a6645a4767598e5120e4b068f76d750366 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 27 Aug 2018 11:19:15 -0400 Subject: [PATCH 1427/2005] Update docs.yaml for 3.15 --- docs.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs.yaml b/docs.yaml index 938cdebab2..7cd372f12f 100644 --- a/docs.yaml +++ b/docs.yaml @@ -21,6 +21,8 @@ sections: # build extensions like libev CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: + - name: '3.15' + ref: '26191f2f' - name: '3.14' ref: '9af8bd19' - name: '3.13' From 2ce0bd972dc81f793aa2a671ec49108bca3e65cd Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 30 Aug 2018 12:14:17 -0400 Subject: [PATCH 1428/2005] Update the changelog release date --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index ac40c4d33a..d041a680f2 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,6 @@ 3.15.0 ====== -August 29, 2018 +August 30, 2018 Features -------- From a156924c8ccc1b7449161095882bcd99c35a1ab7 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 30 Aug 2018 12:15:37 -0400 Subject: [PATCH 1429/2005] Update docs.yaml --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index 7cd372f12f..1f626c20fa 100644 --- a/docs.yaml +++ b/docs.yaml @@ -22,7 +22,7 @@ sections: CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - name: '3.15' - ref: '26191f2f' + ref: '2ce0bd97' - name: '3.14' ref: '9af8bd19' - name: '3.13' From cea72fefea35dcbf84a0c89d5fbf9e47e829d8f0 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 30 Aug 2018 14:42:34 -0400 Subject: [PATCH 1430/2005] post version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 59aaead849..3515359519 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 15, 0) +__version_info__ = (3, 15, 0, 'post0') __version__ = '.'.join(map(str, __version_info__)) From 682be3c64f60955d594269442054d9bcc3e7152a Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 5 Sep 2018 11:46:49 -0400 Subject: [PATCH 1431/2005] linter/import fixes pre-1018 --- tests/integration/standard/test_dse.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/tests/integration/standard/test_dse.py b/tests/integration/standard/test_dse.py index a4a365406a..46eb51745f 100644 --- a/tests/integration/standard/test_dse.py +++ b/tests/integration/standard/test_dse.py @@ -19,9 +19,10 @@ from cassandra.cluster import Cluster -from tests import notwindows, is_windows -from tests.integration import use_cluster, CLUSTER_NAME, PROTOCOL_VERSION, execute_until_pass, \ - execute_with_long_wait_retry +from tests import is_windows, notwindows +from tests.integration import (CLUSTER_NAME, PROTOCOL_VERSION, + execute_until_pass, + execute_with_long_wait_retry, use_cluster) def setup_module(): @@ -45,14 +46,16 @@ def test_basic(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect() - result = execute_until_pass(session, + result = execute_until_pass( + session, """ CREATE KEYSPACE clustertests WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} """) self.assertFalse(result) - result = execute_with_long_wait_retry(session, + result = execute_with_long_wait_retry( + session, """ CREATE TABLE clustertests.cf0 ( a text, From 4199716361888d2a4b9028e8361eac6bc5fe19fb Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 5 Sep 2018 12:29:08 -0400 Subject: [PATCH 1432/2005] add PYTHON-1018 tests --- tests/integration/__init__.py | 7 +++-- tests/integration/standard/test_dse.py | 36 ++++++++++++++++---------- 2 files changed, 27 insertions(+), 16 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 6bdb39ca9b..f5f690d42c 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -351,11 +351,14 @@ def is_current_cluster(cluster_name, node_counts): def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[], set_keyspace=True, ccm_options=None, - configuration_options={}, dse_cluster=False, dse_options={}): + configuration_options={}, dse_cluster=False, dse_options={}, + dse_version=None): + if (dse_version and not dse_cluster): + raise ValueError('specified dse_version {} but not dse_cluster'.format(dse_version)) set_default_cass_ip() if ccm_options is None and dse_cluster: - ccm_options = {"version": DSE_VERSION} + ccm_options = {"version": dse_version or DSE_VERSION} elif ccm_options is None: ccm_options = CCM_KWARGS.copy() diff --git a/tests/integration/standard/test_dse.py b/tests/integration/standard/test_dse.py index 46eb51745f..32e313faf1 100644 --- a/tests/integration/standard/test_dse.py +++ b/tests/integration/standard/test_dse.py @@ -12,25 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import os +from packaging.version import Version from cassandra.cluster import Cluster -from tests import is_windows, notwindows -from tests.integration import (CLUSTER_NAME, PROTOCOL_VERSION, - execute_until_pass, +from tests import notwindows +from tests.integration import (execute_until_pass, execute_with_long_wait_retry, use_cluster) - -def setup_module(): - if is_windows(): - return - use_cluster(CLUSTER_NAME, [3], dse_cluster=True, dse_options={}) +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa +@unittest.skipIf(os.environ.get('CCM_ARGS', None), 'environment has custom CCM_ARGS; skipping') @notwindows class DseCCMClusterTest(unittest.TestCase): """ @@ -39,12 +36,23 @@ class DseCCMClusterTest(unittest.TestCase): If CASSANDRA_VERSION is set instead, it will be converted to the corresponding DSE_VERSION """ - def test_basic(self): + def test_dse_5x(self): + self._test_basic(Version('5.1.10')) + + def test_dse_60(self): + self._test_basic(Version('6.0.2')) + + def _test_basic(self, dse_version): """ Test basic connection and usage """ + cluster_name = '{}-{}'.format( + self.__class__.__name__, dse_version.base_version.replace('.', '_') + ) + use_cluster(cluster_name=cluster_name, nodes=[3], + dse_cluster=True, dse_options={}, dse_version=dse_version) - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = Cluster() session = cluster.connect() result = execute_until_pass( session, From 55d3e3245704cf11ce70fe551100b52747219811 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Wed, 5 Sep 2018 16:02:13 -0400 Subject: [PATCH 1433/2005] check for DSE in schema-parsing --- CHANGELOG.rst | 8 ++++++++ cassandra/metadata.py | 4 +++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index d041a680f2..78e2324f39 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,11 @@ +3.15.1 +====== +September 6, 2018 + +Bug Fixes +--------- +* C* 4.0 schema-parsing logic breaks running against DSE 6.0.X (PYTHON-1018) + 3.15.0 ====== August 30, 2018 diff --git a/cassandra/metadata.py b/cassandra/metadata.py index f50fbdedfe..750bf2a9b0 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2720,7 +2720,9 @@ def export_as_string(self): def get_schema_parser(connection, server_version, timeout): server_major_version = int(server_version.split('.')[0]) - if server_major_version >= 4: + # check for DSE version + has_build_version = len(server_version.split('.')) > 3 + if server_major_version >= 4 and not has_build_version: return SchemaParserV4(connection, timeout) if server_major_version >= 3: return SchemaParserV3(connection, timeout) From 4fcf41d40113f2615bbe9d22875966cb1b58723f Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 6 Sep 2018 15:55:48 -0400 Subject: [PATCH 1434/2005] version 3.15.1 --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 3515359519..76c8157a6c 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 15, 0, 'post0') +__version_info__ = (3, 15, 1) __version__ = '.'.join(map(str, __version_info__)) From 12b59b80cee69a1c41e57558787387b50eae1ef2 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 6 Sep 2018 16:05:23 -0400 Subject: [PATCH 1435/2005] 3.15.1.post0 version bump --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 76c8157a6c..3d7ef688c5 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 15, 1) +__version_info__ = (3, 15, 1, 'post0') __version__ = '.'.join(map(str, __version_info__)) From 3e46fd72cd224dba2e09da7d6927c4ee6c692bc6 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 18 Sep 2018 16:50:20 -0400 Subject: [PATCH 1436/2005] whitespace --- tests/integration/long/test_loadbalancingpolicies.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index dec8b5eedb..b26a0d572f 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -549,7 +549,7 @@ def test_token_aware_with_shuffle_rf2(self): self._check_query_order_changes(session=session, keyspace=keyspace) - #check TokenAwarePolicy still return the remaining replicas when one goes down + # check TokenAwarePolicy still return the remaining replicas when one goes down self.coordinator_stats.reset_counts() stop(2) self._wait_for_nodes_down([2], cluster) From 9745e0c86ba6fd9b9d9719f5bc8828f8761f327a Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 18 Sep 2018 16:36:59 -0400 Subject: [PATCH 1437/2005] add long test per-commit build spec --- build.yaml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/build.yaml b/build.yaml index 9c74192c8c..cdf87567d8 100644 --- a/build.yaml +++ b/build.yaml @@ -20,6 +20,17 @@ schedules: exclude: - python: [3.4, 3.6] + commit_long_test: + schedule: per_commit + branches: + include: [/long-python.*/] + env_vars: | + EVENT_LOOP_MANAGER='libev' + matrix: + exclude: + - python: [3.4, 3.6] + - cassandra: ['2.0', '2.1', '3.0'] + commit_branches: schedule: per_commit branches: From 542e94877e3caec95ec96e6f3afc53db0ec015fa Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 18 Sep 2018 16:41:26 -0400 Subject: [PATCH 1438/2005] fail correctly when bootstrap fails --- tests/integration/long/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/long/utils.py b/tests/integration/long/utils.py index e09cc349d2..7b5abe4677 100644 --- a/tests/integration/long/utils.py +++ b/tests/integration/long/utils.py @@ -124,6 +124,7 @@ def bootstrap(node, data_center=None, token=None): start(node) except: log.error('Added node failed to start twice.') + raise e1 def ring(node): From 9c1050fd07ce865726649257d491f42ba23c665d Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 18 Sep 2018 16:39:15 -0400 Subject: [PATCH 1439/2005] add debugging to long LoadBalancingPolicyTests --- .../long/test_loadbalancingpolicies.py | 20 +++++++++++++++++++ tests/integration/long/utils.py | 12 ++++++++--- 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index b26a0d572f..4f3396a47d 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -65,13 +65,20 @@ def _connect_probe_cluster(self): self.probe_session = self.probe_cluster.connect() def _wait_for_nodes_up(self, nodes, cluster=None): + log.debug('entered: _wait_for_nodes_up(nodes={ns}, ' + 'cluster={cs})'.format(ns=nodes, + cs=cluster)) if not cluster: + log.debug('connecting to cluster') self._connect_probe_cluster() cluster = self.probe_cluster for n in nodes: wait_for_up(cluster, n) def _wait_for_nodes_down(self, nodes, cluster=None): + log.debug('entered: _wait_for_nodes_down(nodes={ns}, ' + 'cluster={cs})'.format(ns=nodes, + cs=cluster)) if not cluster: self._connect_probe_cluster() cluster = self.probe_cluster @@ -87,6 +94,11 @@ def _cluster_session_with_lbp(self, lbp): def _insert(self, session, keyspace, count=12, consistency_level=ConsistencyLevel.ONE): + log.debug('entered _insert(' + 'session={session}, keyspace={keyspace}, ' + 'count={count}, consistency_level={consistency_level}' + ')'.format(session=session, keyspace=keyspace, count=count, + consistency_level=consistency_level)) session.execute('USE %s' % keyspace) ss = SimpleStatement('INSERT INTO cf(k, i) VALUES (0, 0)', consistency_level=consistency_level) @@ -94,6 +106,7 @@ def _insert(self, session, keyspace, count=12, while tries < 100: try: execute_concurrent_with_args(session, ss, [None] * count) + log.debug('Completed _insert on try #{}'.format(tries + 1)) return except (OperationTimedOut, WriteTimeout, WriteFailure): ex_type, ex, tb = sys.exc_info() @@ -105,6 +118,13 @@ def _insert(self, session, keyspace, count=12, def _query(self, session, keyspace, count=12, consistency_level=ConsistencyLevel.ONE, use_prepared=False): + log.debug('entered _query(' + 'session={session}, keyspace={keyspace}, ' + 'count={count}, consistency_level={consistency_level}, ' + 'use_prepared={use_prepared}' + ')'.format(session=session, keyspace=keyspace, count=count, + consistency_level=consistency_level, + use_prepared=use_prepared)) if use_prepared: query_string = 'SELECT * FROM %s.cf WHERE k = ?' % keyspace if not self.prepared or self.prepared.query_string != query_string: diff --git a/tests/integration/long/utils.py b/tests/integration/long/utils.py index 7b5abe4677..82980881ba 100644 --- a/tests/integration/long/utils.py +++ b/tests/integration/long/utils.py @@ -35,6 +35,7 @@ def __init__(self): self.coordinator_counts = defaultdict(int) def add_coordinator(self, future): + log.debug('adding coordinator from {}'.format(future)) future.result() coordinator = future._current_host.address self.coordinator_counts[coordinator] += 1 @@ -105,6 +106,9 @@ def decommission(node): def bootstrap(node, data_center=None, token=None): + log.debug('called bootstrap(' + 'node={node}, data_center={data_center}, ' + 'token={token})') node_instance = Node('node%s' % node, get_cluster(), auto_bootstrap=False, @@ -118,11 +122,13 @@ def bootstrap(node, data_center=None, token=None): try: start(node) - except: + except Exception as e0: + log.debug('failed 1st bootstrap attempt with: \n{}'.format(e0)) # Try only twice try: start(node) - except: + except Exception as e1: + log.debug('failed 2nd bootstrap attempt with: \n{}'.format(e1)) log.error('Added node failed to start twice.') raise e1 @@ -141,7 +147,7 @@ def wait_for_up(cluster, node): log.debug("Done waiting for node %s to be up", node) return else: - log.debug("Host is still marked down, waiting") + log.debug("Host {} is still marked down, waiting".format(addr)) tries += 1 time.sleep(1) From 91b91d8d05c9b3e4131628c22bd690f40d48017f Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 18 Sep 2018 16:41:10 -0400 Subject: [PATCH 1440/2005] PYTHON-1017: fix test failure --- CHANGELOG.rst | 7 +++++++ tests/integration/long/utils.py | 15 +++++++++++++-- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 78e2324f39..907255597d 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,10 @@ +3.15.2 +====== + +Other +----- +* Fix tests when RF is not maintained if we decomission a node (PYTHON-1017) + 3.15.1 ====== September 6, 2018 diff --git a/tests/integration/long/utils.py b/tests/integration/long/utils.py index 82980881ba..07652b6ac6 100644 --- a/tests/integration/long/utils.py +++ b/tests/integration/long/utils.py @@ -17,8 +17,9 @@ import time from collections import defaultdict -from ccmlib.node import Node +from ccmlib.node import Node, ToolError +from nose.tools import assert_in from cassandra.query import named_tuple_factory from cassandra.cluster import ConsistencyLevel @@ -101,7 +102,17 @@ def force_stop(node): def decommission(node): - get_node(node).decommission() + try: + get_node(node).decommission() + except ToolError as e: + expected_errs = (('Not enough live nodes to maintain replication ' + 'factor in keyspace system_distributed'), + 'Perform a forceful decommission to ignore.') + for err in expected_errs: + assert_in(err, e.stdout) + # in this case, we're running against a C* version with CASSANDRA-12510 + # applied and need to decommission with `--force` + get_node(node).decommission(force=True) get_node(node).stop() From 2fe7084a545c4d4a16329b57d9eb06524b050d89 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 5 Oct 2018 14:24:23 -0400 Subject: [PATCH 1441/2005] add tests that should retry for other classes of error --- tests/unit/io/utils.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index 8dae866f2c..d4967cdfaa 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -25,9 +25,11 @@ from mock import Mock import errno +import logging import math import os from socket import error as socket_error +import ssl try: import unittest2 as unittest @@ -37,6 +39,9 @@ import time +log = logging.getLogger(__name__) + + class TimerCallback(object): invoked = False @@ -247,18 +252,31 @@ def test_successful_connection(self): return c def test_eagain_on_buffer_size(self): + self._check_error_recovery_on_buffer_size(errno.EAGAIN) + + def test_ewouldblock_on_buffer_size(self): + self._check_error_recovery_on_buffer_size(errno.EWOULDBLOCK) + + def test_sslwantread_on_buffer_size(self): + self._check_error_recovery_on_buffer_size(ssl.SSL_ERROR_WANT_READ) + + def test_sslwantwrite_on_buffer_size(self): + self._check_error_recovery_on_buffer_size(ssl.SSL_ERROR_WANT_WRITE) + + def _check_error_recovery_on_buffer_size(self, error_code): c = self.test_successful_connection() header = six.b('\x00\x00\x00\x00') + int32_pack(20000) responses = [ header + (six.b('a') * (4096 - len(header))), six.b('a') * 4096, - socket_error(errno.EAGAIN), + socket_error(error_code), six.b('a') * 100, - socket_error(errno.EAGAIN)] + socket_error(error_code)] def side_effect(*args): response = responses.pop(0) + log.debug('about to mock return {}'.format(response)) if isinstance(response, socket_error): raise response else: From 281ac3e4a712bda143e68d99026267bcc1ef2acd Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 5 Oct 2018 14:26:40 -0400 Subject: [PATCH 1442/2005] PYTHON-1024: fix error handling for SSL errors This also removes a check that depends internal behavior that no longer holds, as `process_io_buffer` is no longer called after `handle_read` gets an error. --- CHANGELOG.rst | 4 ++++ cassandra/io/asyncorereactor.py | 8 ++++++-- cassandra/io/libevreactor.py | 8 ++++++-- tests/unit/io/utils.py | 1 - 4 files changed, 16 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 907255597d..015c8d2a7e 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,10 @@ 3.15.2 ====== +Bug Fixes +--------- +* Improve and fix socket error-catching code in nonblocking-socket reactors (PYTHON-1024) + Other ----- * Fix tests when RF is not maintained if we decomission a node (PYTHON-1017) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index d2ea96d80b..91431aa682 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -424,10 +424,14 @@ def handle_read(self): break except socket.error as err: if ssl and isinstance(err, ssl.SSLError): - if err.args[0] not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): + if err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): + return + else: self.defunct(err) return - elif err.args[0] not in NONBLOCKING: + elif err.args[0] in NONBLOCKING: + return + else: self.defunct(err) return diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index fe3827eb6a..21111b0e25 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -344,10 +344,14 @@ def handle_read(self, watcher, revents, errno=None): break except socket.error as err: if ssl and isinstance(err, ssl.SSLError): - if err.args[0] not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): + if err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): + return + else: self.defunct(err) return - elif err.args[0] not in NONBLOCKING: + elif err.args[0] in NONBLOCKING: + return + else: self.defunct(err) return diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index d4967cdfaa..f342ceddbe 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -284,7 +284,6 @@ def side_effect(*args): self.get_socket(c).recv.side_effect = side_effect c.handle_read(*self.null_handle_function_args) - self.assertEqual(c._current_frame.end_pos, 20000 + len(header)) # the EAGAIN prevents it from reading the last 100 bytes c._iobuf.seek(0, os.SEEK_END) pos = c._iobuf.tell() From 0f2b8ea6f7455891b2146b8c2f98c5e497924eb8 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 14 Sep 2018 14:26:09 -0400 Subject: [PATCH 1443/2005] add failing test case for PYTHON-1008 error occurs when there are unicode values in options map other at keys other than `'target'` and `'class_name'`. --- tests/unit/test_metadata.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 96a512792c..cc0fc61c7e 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -18,6 +18,7 @@ import unittest # noqa from binascii import unhexlify +import logging from mock import Mock import os import six @@ -38,6 +39,9 @@ from cassandra.pool import Host +log = logging.getLogger(__name__) + + class StrategiesTest(unittest.TestCase): @classmethod @@ -536,9 +540,12 @@ def test_column_name_multiple_partition(self): def test_index(self): im = IndexMetadata(self.name, self.name, self.name, kind='', index_options={'target': self.name}) - im.export_as_string() + log.debug(im.export_as_string()) im = IndexMetadata(self.name, self.name, self.name, kind='CUSTOM', index_options={'target': self.name, 'class_name': 'Class'}) - im.export_as_string() + log.debug(im.export_as_string()) + # PYTHON-1008 + im = IndexMetadata(self.name, self.name, self.name, kind='CUSTOM', index_options={'target': self.name, 'class_name': 'Class', 'delimiter': self.name}) + log.debug(im.export_as_string()) def test_function(self): fm = Function(self.name, self.name, (u'int', u'int'), (u'x', u'y'), u'int', u'language', self.name, False) From 9291b38444eedf4fdee625e7de8c6a6cec4f23c9 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 9 Oct 2018 16:15:28 -0400 Subject: [PATCH 1444/2005] PYTHON-1008 Addresses PYTHON-1008/CASSANDRA-14632. The CREATE CUSTOM INDEX statement initially generated in `IndexMetadata.as_cql_query` is always a `unicode` object under Python 2, because they are derived from CQL `text` fields in in `system_schema.indexes`, and we serialize the bytes we get from `text`-type values with `.encode('utf-8')`. The result of `cql_encode_all_types` is always a `six.binary_type`, i.e. a `str` under Python 2. Notably, Python 2 `str` objects cannot be cast to `unicode` objects if they contain non-ASCII characters, including those used to encode Unicode characters in UTF-8. These casting errors can occur during attempts to concatenate these objects: ``` >>> u"I'm a `unicode` object" + ", and I am a `str`." u"I'm a `unicode` object, and I am a `str`." >>> u"I'm a `unicode` object" + ", and I am a `str` with unicode code points \xf0\x9f\x98\xac" Traceback (most recent call last): File "", line 1, in UnicodeDecodeError: 'ascii' codec can't decode byte 0xf0 in position 45: ordinal not in range(128) ``` So, the error reported in CASSANDRA-14632 happens when we try to concatenate a `str` containing escaped UTF-8 bytes, as generated by `cql_encode_all_types`, onto `ret`, a `unicode`. Since we know `ret` is always a `six.text_type`, we can check if we need to convert the result of `cql_encode_all_types`, and do so. We should only ever need to under Python 2. --- CHANGELOG.rst | 1 + cassandra/metadata.py | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 015c8d2a7e..844606ca92 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,6 +4,7 @@ Bug Fixes --------- * Improve and fix socket error-catching code in nonblocking-socket reactors (PYTHON-1024) +* Non-ASCII characters in schema break CQL string generation (PYTHON-1008) Other ----- diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 750bf2a9b0..9209a67183 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1435,7 +1435,11 @@ def as_cql_query(self): index_target, class_name) if options: - ret += " WITH OPTIONS = %s" % Encoder().cql_encode_all_types(options) + opts_cql_encoded = Encoder().cql_encode_all_types(options) + # PYTHON-1008 + if isinstance(opts_cql_encoded, six.binary_type): + opts_cql_encoded = opts_cql_encoded.decode('utf-8') + ret += " WITH OPTIONS = %s" % opts_cql_encoded return ret def export_as_string(self): From a2b2508202a7e16a1723f82f83c761d0bfa2ac2e Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 14 Sep 2018 14:30:02 -0400 Subject: [PATCH 1445/2005] use existing encoder object --- cassandra/metadata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 9209a67183..6a49a0788c 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1435,7 +1435,7 @@ def as_cql_query(self): index_target, class_name) if options: - opts_cql_encoded = Encoder().cql_encode_all_types(options) + opts_cql_encoded = _encoder.cql_encode_all_types(options) # PYTHON-1008 if isinstance(opts_cql_encoded, six.binary_type): opts_cql_encoded = opts_cql_encoded.decode('utf-8') From bfb24afb3376acfbb3984e2421d1bd51d9a08a52 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 24 Sep 2018 11:49:55 -0400 Subject: [PATCH 1446/2005] use as_text_type kwarg for conversion --- cassandra/encoder.py | 7 +++++-- cassandra/metadata.py | 6 ++---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/cassandra/encoder.py b/cassandra/encoder.py index 1f02348a8d..00f7bf1b40 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -224,12 +224,15 @@ def cql_encode_set_collection(self, val): """ return '{%s}' % ', '.join(self.mapping.get(type(v), self.cql_encode_object)(v) for v in val) - def cql_encode_all_types(self, val): + def cql_encode_all_types(self, val, as_text_type=False): """ Converts any type into a CQL string, defaulting to ``cql_encode_object`` if :attr:`~Encoder.mapping` does not contain an entry for the type. """ - return self.mapping.get(type(val), self.cql_encode_object)(val) + encoded = self.mapping.get(type(val), self.cql_encode_object)(val) + if as_text_type and not isinstance(encoded, six.text_type): + return encoded.decode('utf-8') + return encoded if six.PY3: def cql_encode_ipaddress(self, val): diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 6a49a0788c..e591ddaf86 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1435,10 +1435,8 @@ def as_cql_query(self): index_target, class_name) if options: - opts_cql_encoded = _encoder.cql_encode_all_types(options) - # PYTHON-1008 - if isinstance(opts_cql_encoded, six.binary_type): - opts_cql_encoded = opts_cql_encoded.decode('utf-8') + # PYTHON-1008: `ret` will always be a unicode + opts_cql_encoded = _encoder.cql_encode_all_types(options, as_text_type=True) ret += " WITH OPTIONS = %s" % opts_cql_encoded return ret From f295e6e77d049b32874526e5def92a1b2cfd1ad5 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 20 Sep 2018 14:17:01 -0400 Subject: [PATCH 1447/2005] PYTHON-1020: skip virtual tables if not present --- CHANGELOG.rst | 1 + cassandra/metadata.py | 31 ++++++++++++++++++-------- tests/integration/standard/test_dse.py | 3 +++ 3 files changed, 26 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 844606ca92..e70d53864b 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,6 +5,7 @@ Bug Fixes --------- * Improve and fix socket error-catching code in nonblocking-socket reactors (PYTHON-1024) * Non-ASCII characters in schema break CQL string generation (PYTHON-1008) +* Fix OSS driver's virtual table support against DSE 6.0.X and future server releases (PYTHON-1020) Other ----- diff --git a/cassandra/metadata.py b/cassandra/metadata.py index e591ddaf86..8cf1e1d87a 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2531,12 +2531,27 @@ def _query_all(self): self.indexes_result = self._handle_results(indexes_success, indexes_result) self.views_result = self._handle_results(views_success, views_result) # V4-only results - self.virtual_keyspaces_result = self._handle_results(virtual_ks_success, - virtual_ks_result) - self.virtual_tables_result = self._handle_results(virtual_table_success, - virtual_table_result) - self.virtual_columns_result = self._handle_results(virtual_column_success, - virtual_column_result) + # These tables don't exist in some DSE versions reporting 4.X so we can + # ignore them if we got an error + if isinstance(virtual_ks_result, InvalidRequest): + self.virtual_keyspaces_result = [] + else: + self.virtual_keyspaces_result = self._handle_results( + virtual_ks_success, virtual_ks_result + ) + if isinstance(virtual_table_result, InvalidRequest): + self.virtual_tables_result = [] + else: + self.virtual_tables_result = self._handle_results( + virtual_table_success, virtual_table_result + ) + if isinstance(virtual_column_result, InvalidRequest): + self.virtual_columns_result = [] + else: + self.virtual_columns_result = self._handle_results( + virtual_column_success, virtual_column_result + ) + self._aggregate_results() def _aggregate_results(self): @@ -2722,9 +2737,7 @@ def export_as_string(self): def get_schema_parser(connection, server_version, timeout): server_major_version = int(server_version.split('.')[0]) - # check for DSE version - has_build_version = len(server_version.split('.')) > 3 - if server_major_version >= 4 and not has_build_version: + if server_major_version >= 4: return SchemaParserV4(connection, timeout) if server_major_version >= 3: return SchemaParserV3(connection, timeout) diff --git a/tests/integration/standard/test_dse.py b/tests/integration/standard/test_dse.py index 32e313faf1..0cf730a1b8 100644 --- a/tests/integration/standard/test_dse.py +++ b/tests/integration/standard/test_dse.py @@ -42,6 +42,9 @@ def test_dse_5x(self): def test_dse_60(self): self._test_basic(Version('6.0.2')) + def test_dse_67(self): + self._test_basic(Version('6.7.0')) + def _test_basic(self, dse_version): """ Test basic connection and usage From 1d5bffea35c92a4412b166a624971e95902a6f50 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 20 Sep 2018 16:27:23 -0400 Subject: [PATCH 1448/2005] refactor expected_failures into _handle_results --- cassandra/metadata.py | 64 ++++++++++++++++++++++++++----------------- 1 file changed, 39 insertions(+), 25 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 8cf1e1d87a..377ea4dc85 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1667,8 +1667,30 @@ def __init__(self, connection, timeout): self.connection = connection self.timeout = timeout - def _handle_results(self, success, result): - if success: + def _handle_results(self, success, result, expected_failures=tuple()): + """ + Given a bool and a ResultSet (the form returned per result from + Connection.wait_for_responses), return a dictionary containing the + results. Used to process results from asynchronous queries to system + tables. + + ``expected_failures`` will usually be used to allow callers to ignore + ``InvalidRequest`` errors caused by a missing system keyspace. For + example, some DSE versions report a 4.X server version, but do not have + virtual tables. Thus, running against 4.X servers, SchemaParserV4 uses + expected_failures to make a best-effort attempt to read those + keyspaces, but treat them as empty if they're not found. + + :param success: A boolean representing whether or not the query + succeeded + :param result: The resultset in question. + :expected_failures: An Exception class or an iterable thereof. If the + query failed, but raised an instance of an expected failure class, this + will ignore the failure and return an empty list. + """ + if not success and isinstance(result, expected_failures): + return [] + elif success: return dict_factory(*result.results) if result else [] else: raise result @@ -1784,11 +1806,9 @@ def get_table(self, keyspaces, keyspace, table): table_result = self._handle_results(cf_success, cf_result) col_result = self._handle_results(col_success, col_result) - # handle the triggers table not existing in Cassandra 1.2 - if not triggers_success and isinstance(triggers_result, InvalidRequest): - triggers_result = [] - else: - triggers_result = self._handle_results(triggers_success, triggers_result) + # the triggers table doesn't exist in C* 1.2 + triggers_result = self._handle_results(triggers_success, triggers_result, + expected_failures=InvalidRequest) if table_result: return self._build_table_metadata(table_result[0], col_result, triggers_result) @@ -2533,24 +2553,18 @@ def _query_all(self): # V4-only results # These tables don't exist in some DSE versions reporting 4.X so we can # ignore them if we got an error - if isinstance(virtual_ks_result, InvalidRequest): - self.virtual_keyspaces_result = [] - else: - self.virtual_keyspaces_result = self._handle_results( - virtual_ks_success, virtual_ks_result - ) - if isinstance(virtual_table_result, InvalidRequest): - self.virtual_tables_result = [] - else: - self.virtual_tables_result = self._handle_results( - virtual_table_success, virtual_table_result - ) - if isinstance(virtual_column_result, InvalidRequest): - self.virtual_columns_result = [] - else: - self.virtual_columns_result = self._handle_results( - virtual_column_success, virtual_column_result - ) + self.virtual_keyspaces_result = self._handle_results( + virtual_ks_success, virtual_ks_result, + expected_failures=InvalidRequest + ) + self.virtual_tables_result = self._handle_results( + virtual_table_success, virtual_table_result, + expected_failures=InvalidRequest + ) + self.virtual_columns_result = self._handle_results( + virtual_column_success, virtual_column_result, + expected_failures=InvalidRequest + ) self._aggregate_results() From efa4e84dec2879f3777af7cd1cbdfa27bb8913dd Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Thu, 4 Oct 2018 17:01:33 -0400 Subject: [PATCH 1449/2005] add new cell in test matrix for DSE 6.7 smoke test --- build.yaml | 8 ++++++++ tests/integration/standard/test_dse.py | 9 ++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/build.yaml b/build.yaml index cdf87567d8..948c4cad02 100644 --- a/build.yaml +++ b/build.yaml @@ -118,6 +118,7 @@ cassandra: - '3.0' - '3.11' - 'test-dse' + - 'dse-6.7' env: CYTHON: @@ -159,6 +160,13 @@ build: exit 0 fi + if [[ $CCM_IS_DSE == 'true' ]]; then + # We only use a DSE version for unreleased DSE versions, so we only need to run the smoke tests here + echo "CCM_IS_DSE: $CCM_IS_DSE" + echo "==========RUNNING SMOKE TESTS===========" + EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION DSE_VERSION='6.7.0' MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/test_dse.py || true + exit 0 + fi # Run the unit tests, this is not done in travis because # it takes too much time for the whole matrix to build with cython diff --git a/tests/integration/standard/test_dse.py b/tests/integration/standard/test_dse.py index 0cf730a1b8..a8a3d647bb 100644 --- a/tests/integration/standard/test_dse.py +++ b/tests/integration/standard/test_dse.py @@ -18,6 +18,7 @@ from cassandra.cluster import Cluster from tests import notwindows +from tests.unit.cython.utils import notcython from tests.integration import (execute_until_pass, execute_with_long_wait_retry, use_cluster) @@ -27,8 +28,12 @@ import unittest # noqa +CCM_IS_DSE = (os.environ.get('CCM_IS_DSE', None) == 'true') + + @unittest.skipIf(os.environ.get('CCM_ARGS', None), 'environment has custom CCM_ARGS; skipping') @notwindows +@notcython # no need to double up on this test; also __default__ setting doesn't work class DseCCMClusterTest(unittest.TestCase): """ This class can be executed setting the DSE_VERSION variable, for example: @@ -42,6 +47,7 @@ def test_dse_5x(self): def test_dse_60(self): self._test_basic(Version('6.0.2')) + @unittest.skipUnless(CCM_IS_DSE, 'DSE version unavailable') def test_dse_67(self): self._test_basic(Version('6.7.0')) @@ -55,7 +61,8 @@ def _test_basic(self, dse_version): use_cluster(cluster_name=cluster_name, nodes=[3], dse_cluster=True, dse_options={}, dse_version=dse_version) - cluster = Cluster() + cluster = Cluster( + allow_beta_protocol_version=(dse_version >= Version('6.7.0'))) session = cluster.connect() result = execute_until_pass( session, From b574524c6caec018815e1efdbf84a9c5aa507a3b Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 23 Oct 2018 15:24:20 -0400 Subject: [PATCH 1450/2005] Fix wrong resultset indexing use --- CHANGELOG.rst | 4 +++- cassandra/cqlengine/query.py | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index e70d53864b..4725d64caa 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,5 +1,6 @@ -3.15.2 +3.16.0 ====== +NOT RELEASED Bug Fixes --------- @@ -10,6 +11,7 @@ Bug Fixes Other ----- * Fix tests when RF is not maintained if we decomission a node (PYTHON-1017) +* Fix wrong use of ResultSet indexing (PYTHON-1015) 3.15.1 ====== diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 41a8e2fb8c..11f664ec02 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -76,7 +76,7 @@ def check_applied(result): except Exception: applied = True # result was not LWT form if not applied: - raise LWTException(result[0]) + raise LWTException(result.one()) class AbstractQueryableColumn(UnicodeMixin): @@ -841,7 +841,7 @@ def count(self): query = self._select_query() query.count = True result = self._execute(query) - count_row = result[0].popitem() + count_row = result.one().popitem() self._count = count_row[1] return self._count From 319f39ae7172b280fa9c4c296af478087687f429 Mon Sep 17 00:00:00 2001 From: Jim Witschey <3753769+mambocab@users.noreply.github.com> Date: Wed, 31 Oct 2018 09:25:20 -0400 Subject: [PATCH 1451/2005] add swiftype index name to docs.yaml --- docs.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/docs.yaml b/docs.yaml index 1f626c20fa..1f9a1eba6b 100644 --- a/docs.yaml +++ b/docs.yaml @@ -1,6 +1,7 @@ title: DataStax Python Driver for Apache Cassandra summary: DataStax Python Driver for Apache Cassandra Documentation output: docs/_build/ +swiftype_drivers: pythondrivers checks: external_links: exclude: From aaf67197e351343fdc779ff1da16097e1bec67bb Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 24 Oct 2018 09:03:38 -0400 Subject: [PATCH 1452/2005] ResultSet.one() fails if the row_factory is using a generator --- CHANGELOG.rst | 1 + cassandra/cluster.py | 9 ++++++- .../standard/test_row_factories.py | 26 +++++++++++++++++++ 3 files changed, 35 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index e70d53864b..4644ea27de 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -6,6 +6,7 @@ Bug Fixes * Improve and fix socket error-catching code in nonblocking-socket reactors (PYTHON-1024) * Non-ASCII characters in schema break CQL string generation (PYTHON-1008) * Fix OSS driver's virtual table support against DSE 6.0.X and future server releases (PYTHON-1020) +* ResultSet.one() fails if the row_factory is using a generator (PYTHON-1026) Other ----- diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 63eb54e3ae..c0d89c202c 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -4244,7 +4244,14 @@ def one(self): you know a query returns a single row. Consider using an iterator if the ResultSet contains more than one row. """ - return self._current_rows[0] if self._current_rows else None + row = None + if self._current_rows: + try: + row = self._current_rows[0] + except TypeError: # generator object is not subscriptable, PYTHON-1026 + row = next(iter(self._current_rows)) + + return row def __iter__(self): if self._list_mode: diff --git a/tests/integration/standard/test_row_factories.py b/tests/integration/standard/test_row_factories.py index dea7fb4933..df709c34bd 100644 --- a/tests/integration/standard/test_row_factories.py +++ b/tests/integration/standard/test_row_factories.py @@ -181,6 +181,32 @@ def test_ordered_dict_factory(self): self.assertEqual(result[1]['k'], result[1]['v']) self.assertEqual(result[1]['k'], 2) + def test_generator_row_factory(self): + """ + Test that ResultSet.one() works with a row_factory that contains a generator. + + @since 3.16 + @jira_ticket PYTHON-1026 + @expected_result one() returns the first row + + @test_category queries + """ + def generator_row_factory(column_names, rows): + return _gen_row_factory(rows) + + def _gen_row_factory(rows): + for r in rows: + yield r + + session = self.session + session.row_factory = generator_row_factory + + session.execute(self.insert1) + result = session.execute(self.select) + self.assertIsInstance(result, ResultSet) + first_row = result.one() + self.assertEqual(first_row[0], first_row[1]) + class NamedTupleFactoryAndNumericColNamesTests(unittest.TestCase): """ From 67db0da29750879d3fcb5b6e007e411b0d0d2b72 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 5 Nov 2018 12:25:54 -0500 Subject: [PATCH 1453/2005] PYTHON-988 fix nosetests docs in README-dev --- README-dev.rst | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/README-dev.rst b/README-dev.rst index 815fe0aadd..acf8c3ad58 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -79,35 +79,35 @@ Running the Tests ================= In order for the extensions to be built and used in the test, run:: - python setup.py nosetests + nosetests You can run a specific test module or package like so:: - python setup.py nosetests -w tests/unit/ + nosetests -w tests/unit/ You can run a specific test method like so:: - python setup.py nosetests -w tests/unit/test_connection.py:ConnectionTest.test_bad_protocol_version + nosetests -w tests/unit/test_connection.py:ConnectionTest.test_bad_protocol_version Seeing Test Logs in Real Time ----------------------------- Sometimes it's useful to output logs for the tests as they run:: - python setup.py nosetests -w tests/unit/ --nocapture --nologcapture + nosetests -w tests/unit/ --nocapture --nologcapture Use tee to capture logs and see them on your terminal:: - python setup.py nosetests -w tests/unit/ --nocapture --nologcapture 2>&1 | tee test.log + nosetests -w tests/unit/ --nocapture --nologcapture 2>&1 | tee test.log Specifying a Cassandra Version for Integration Tests ---------------------------------------------------- You can specify a cassandra version with the ``CASSANDRA_VERSION`` environment variable:: - CASSANDRA_VERSION=2.0.9 python setup.py nosetests -w tests/integration/standard + CASSANDRA_VERSION=2.0.9 nosetests -w tests/integration/standard You can also specify a cassandra directory (to test unreleased versions):: - CASSANDRA_DIR=/home/thobbs/cassandra python setup.py nosetests -w tests/integration/standard + CASSANDRA_DIR=/home/thobbs/cassandra nosetests -w tests/integration/standard Specifying the usage of an already running Cassandra cluster ---------------------------------------------------- @@ -120,7 +120,7 @@ Specify a Protocol Version for Tests The protocol version defaults to 1 for cassandra 1.2 and 2 otherwise. You can explicitly set it with the ``PROTOCOL_VERSION`` environment variable:: - PROTOCOL_VERSION=3 python setup.py nosetests -w tests/integration/standard + PROTOCOL_VERSION=3 nosetests -w tests/integration/standard Testing Multiple Python Versions -------------------------------- From a184ed6be084a10373ab02c00af57531c5df90b8 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 2 Nov 2018 14:46:22 -0400 Subject: [PATCH 1454/2005] PYTHON-944: add format to message --- CHANGELOG.rst | 1 + cassandra/cluster.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 08056bb94b..f99e644360 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -8,6 +8,7 @@ Bug Fixes * Non-ASCII characters in schema break CQL string generation (PYTHON-1008) * Fix OSS driver's virtual table support against DSE 6.0.X and future server releases (PYTHON-1020) * ResultSet.one() fails if the row_factory is using a generator (PYTHON-1026) +* Log profile name on attempt to create existing profile (PYTHON-944) Other ----- diff --git a/cassandra/cluster.py b/cassandra/cluster.py index c0d89c202c..395bd60cae 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1058,7 +1058,7 @@ def add_execution_profile(self, name, profile, pool_wait_timeout=5): if self._config_mode == _ConfigMode.LEGACY: raise ValueError("Cannot add execution profiles when legacy parameters are set explicitly.") if name in self.profile_manager.profiles: - raise ValueError("Profile %s already exists") + raise ValueError("Profile {} already exists".format(name)) contact_points_but_no_lbp = ( self._contact_points_explicit and not profile._load_balancing_policy_explicit) From 2afa635ac58f03ce8c3a8718053e264709908867 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 15 Jun 2018 12:30:32 -0400 Subject: [PATCH 1455/2005] pep8 fixes --- cassandra/cluster.py | 2 ++ tests/integration/simulacron/test_cluster.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 395bd60cae..e80317ed87 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -86,6 +86,7 @@ def _is_gevent_monkey_patched(): import gevent.socket return socket.socket is gevent.socket.socket + # default to gevent when we are monkey patched with gevent, eventlet when # monkey patched with eventlet, otherwise if libev is available, use that as # the default because it's fastest. Otherwise, use asyncore. @@ -181,6 +182,7 @@ def _shutdown_clusters(): for cluster in clusters: cluster.shutdown() + atexit.register(_shutdown_clusters) diff --git a/tests/integration/simulacron/test_cluster.py b/tests/integration/simulacron/test_cluster.py index ff2fb59743..57fcf21991 100644 --- a/tests/integration/simulacron/test_cluster.py +++ b/tests/integration/simulacron/test_cluster.py @@ -21,6 +21,8 @@ from tests.integration.simulacron.utils import prime_query from cassandra import WriteTimeout, WriteType, ConsistencyLevel +from cassandra.cluster import Cluster + @requiressimulacron class ClusterTests(SimulacronCluster): From 8b9dac03ad88a7827920f327976a8b07976b77ed Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 15 Jun 2018 12:44:42 -0400 Subject: [PATCH 1456/2005] add tests for dns resolution behavior --- tests/integration/simulacron/test_cluster.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/tests/integration/simulacron/test_cluster.py b/tests/integration/simulacron/test_cluster.py index 57fcf21991..091e1b3b5a 100644 --- a/tests/integration/simulacron/test_cluster.py +++ b/tests/integration/simulacron/test_cluster.py @@ -17,7 +17,7 @@ import unittest # noqa from tests.integration.simulacron import SimulacronCluster -from tests.integration import requiressimulacron +from tests.integration import (requiressimulacron, PROTOCOL_VERSION) from tests.integration.simulacron.utils import prime_query from cassandra import WriteTimeout, WriteType, ConsistencyLevel @@ -55,3 +55,19 @@ def test_writetimeout(self): self.assertIn(consistency, str(wt)) self.assertIn(str(received_responses), str(wt)) self.assertIn(str(required_responses), str(wt)) + + def test_connection_with_one_unresolvable_contact_point(self): + self.cluster.shutdown() + + # shouldn't raise anything due to name resolution failures + Cluster(['127.0.0.1', 'doesntresolve.becauseitcant'], + protocol_version=PROTOCOL_VERSION, + compression=False) + + def test_connection_with_only_unresolvable_contact_points(self): + self.cluster.shutdown() + + # shouldn't raise anything due to name resolution failures + Cluster(['doesntresolve.becauseitcant'], + protocol_version=PROTOCOL_VERSION, + compression=False) From fbda94023e46afe406037b6b150c838a01e9aab2 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 15 Jun 2018 12:32:19 -0400 Subject: [PATCH 1457/2005] fix contact resolution --- cassandra/cluster.py | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index e80317ed87..a151d945ae 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -192,6 +192,19 @@ def default_lbp_factory(): return DCAwareRoundRobinPolicy() +def _addrinfo_or_none(contact_point, port): + """ + A helper function that wraps socket.getaddrinfo and returns None + when it fails to, e.g. resolve one of the hostnames. Used to address + PYTHON-895. + """ + try: + return socket.getaddrinfo(contact_point, port, + socket.AF_UNSPEC, socket.SOCK_STREAM) + except (socket.error, socket.herror, socket.gaierror, socket.timeout): + return None + + class ExecutionProfile(object): load_balancing_policy = None """ @@ -824,8 +837,12 @@ def __init__(self, self.port = port - self.contact_points_resolved = [endpoint[4][0] for a in self.contact_points - for endpoint in socket.getaddrinfo(a, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM)] + self.contact_points_resolved = [ + endpoint[4][0] + for point in self.contact_points + for endpoint in _addrinfo_or_none(point, self.port) + if endpoint is not None + ] self.compression = compression From 094b447c7466014f393362b0ed37271c1910ed99 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 18 Jun 2018 16:46:21 -0400 Subject: [PATCH 1458/2005] tests and fix for PYTHON-895 --- cassandra/__init__.py | 10 +++++++ cassandra/cluster.py | 29 +++++++++++++----- tests/integration/simulacron/__init__.py | 11 +++++-- tests/integration/simulacron/test_cluster.py | 31 ++++++++++++-------- 4 files changed, 58 insertions(+), 23 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 3d7ef688c5..0d239a4753 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -686,3 +686,13 @@ class UnsupportedOperation(DriverException): for more details. """ pass + + +class UnresolvableContactPoints(DriverException): + """ + The driver was unable to resolve any provided hostnames. + + Note that this is *not* raised when a :class:`.Cluster` is created with no + contact points, only when lookup fails for all hosts + """ + pass diff --git a/cassandra/cluster.py b/cassandra/cluster.py index a151d945ae..01b7dde543 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -43,7 +43,8 @@ from cassandra import (ConsistencyLevel, AuthenticationFailed, OperationTimedOut, UnsupportedOperation, - SchemaTargetType, DriverException, ProtocolVersion) + SchemaTargetType, DriverException, ProtocolVersion, + UnresolvableContactPoints) from cassandra.connection import (ConnectionException, ConnectionShutdown, ConnectionHeartbeat, ProtocolVersionUnsupported) from cassandra.cqltypes import UserType @@ -201,10 +202,26 @@ def _addrinfo_or_none(contact_point, port): try: return socket.getaddrinfo(contact_point, port, socket.AF_UNSPEC, socket.SOCK_STREAM) - except (socket.error, socket.herror, socket.gaierror, socket.timeout): + except socket.gaierror: + log.debug('Could not resolve hostname "{}" ' + 'with port {}'.format(contact_point, port)) return None +def _resolve_contact_points(contact_points, port): + resolved = tuple(_addrinfo_or_none(p, port) + for p in contact_points) + + if resolved and all((x is None for x in resolved)): + raise UnresolvableContactPoints(contact_points, port) + + resolved = tuple(r for r in resolved if r is not None) + + return [endpoint[4][0] + for addrinfo in resolved + for endpoint in addrinfo] + + class ExecutionProfile(object): load_balancing_policy = None """ @@ -837,12 +854,8 @@ def __init__(self, self.port = port - self.contact_points_resolved = [ - endpoint[4][0] - for point in self.contact_points - for endpoint in _addrinfo_or_none(point, self.port) - if endpoint is not None - ] + self.contact_points_resolved = _resolve_contact_points(self.contact_points, + self.port) self.compression = compression diff --git a/tests/integration/simulacron/__init__.py b/tests/integration/simulacron/__init__.py index 9a831d08f7..8b3946ae7f 100644 --- a/tests/integration/simulacron/__init__.py +++ b/tests/integration/simulacron/__init__.py @@ -35,20 +35,25 @@ def tearDown(self): class SimulacronCluster(SimulacronBase): + + cluster, connect = None, True + @classmethod def setUpClass(cls): if SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"): return start_and_prime_singledc() - cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False) - cls.session = cls.cluster.connect(wait_for_all_pools=True) + if cls.connect: + cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False) + cls.session = cls.cluster.connect(wait_for_all_pools=True) @classmethod def tearDownClass(cls): if SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"): return - cls.cluster.shutdown() + if cls.cluster: + cls.cluster.shutdown() stop_simulacron() diff --git a/tests/integration/simulacron/test_cluster.py b/tests/integration/simulacron/test_cluster.py index 091e1b3b5a..ec20c106ba 100644 --- a/tests/integration/simulacron/test_cluster.py +++ b/tests/integration/simulacron/test_cluster.py @@ -20,7 +20,8 @@ from tests.integration import (requiressimulacron, PROTOCOL_VERSION) from tests.integration.simulacron.utils import prime_query -from cassandra import WriteTimeout, WriteType, ConsistencyLevel +from cassandra import (WriteTimeout, WriteType, + ConsistencyLevel, UnresolvableContactPoints) from cassandra.cluster import Cluster @@ -56,18 +57,24 @@ def test_writetimeout(self): self.assertIn(str(received_responses), str(wt)) self.assertIn(str(required_responses), str(wt)) - def test_connection_with_one_unresolvable_contact_point(self): - self.cluster.shutdown() - # shouldn't raise anything due to name resolution failures - Cluster(['127.0.0.1', 'doesntresolve.becauseitcant'], - protocol_version=PROTOCOL_VERSION, - compression=False) +@requiressimulacron +class ClusterDNSResolutionTests(SimulacronCluster): - def test_connection_with_only_unresolvable_contact_points(self): - self.cluster.shutdown() + connect = False + def tearDown(self): + if self.cluster: + self.cluster.shutdown() + + def test_connection_with_one_unresolvable_contact_point(self): # shouldn't raise anything due to name resolution failures - Cluster(['doesntresolve.becauseitcant'], - protocol_version=PROTOCOL_VERSION, - compression=False) + self.cluster = Cluster(['127.0.0.1', 'dns.invalid'], + protocol_version=PROTOCOL_VERSION, + compression=False) + + def test_connection_with_only_unresolvable_contact_points(self): + with self.assertRaises(UnresolvableContactPoints): + self.cluster = Cluster(['dns.invalid'], + protocol_version=PROTOCOL_VERSION, + compression=False) From 897af1a99868e33aee6f65cde164da71c0956c9c Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 18 Jun 2018 16:54:46 -0400 Subject: [PATCH 1459/2005] pep8 cleanup --- cassandra/cluster.py | 3 --- tests/integration/simulacron/__init__.py | 1 - 2 files changed, 4 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 01b7dde543..e119605c81 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1118,7 +1118,6 @@ def add_execution_profile(self, name, profile, pool_wait_timeout=5): if not_done: raise OperationTimedOut("Failed to create all new connection pools in the %ss timeout.") - def get_min_requests_per_connection(self, host_distance): return self._min_requests_per_connection[host_distance] @@ -2061,7 +2060,6 @@ def default_serial_consistency_level(self, cl): .. versionadded:: 3.8.0 """ - encoder = None """ A :class:`~cassandra.encoder.Encoder` instance that will be used when @@ -2251,7 +2249,6 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time load_balancing_policy = execution_profile.load_balancing_policy spec_exec_policy = execution_profile.speculative_execution_policy - fetch_size = query.fetch_size if fetch_size is FETCH_SIZE_UNSET and self._protocol_version >= 2: fetch_size = self.default_fetch_size diff --git a/tests/integration/simulacron/__init__.py b/tests/integration/simulacron/__init__.py index 8b3946ae7f..665d6b3a15 100644 --- a/tests/integration/simulacron/__init__.py +++ b/tests/integration/simulacron/__init__.py @@ -56,4 +56,3 @@ def tearDownClass(cls): if cls.cluster: cls.cluster.shutdown() stop_simulacron() - From 9c47bcced9f8afdfd8c87bcb726a6da57a023efb Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 5 Nov 2018 12:11:41 -0500 Subject: [PATCH 1460/2005] CHANGELOG --- CHANGELOG.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f99e644360..6d1ac9abf6 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -9,6 +9,7 @@ Bug Fixes * Fix OSS driver's virtual table support against DSE 6.0.X and future server releases (PYTHON-1020) * ResultSet.one() fails if the row_factory is using a generator (PYTHON-1026) * Log profile name on attempt to create existing profile (PYTHON-944) +* Cluster instantiation fails if any contact points' hostname resolution fails (PYTHON-895) Other ----- From 85466ddc640f1afb32a9d961c225f4c15d0ecacc Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 12 Nov 2018 13:19:38 -0500 Subject: [PATCH 1461/2005] version 3.16.0 --- CHANGELOG.rst | 2 +- cassandra/__init__.py | 2 +- docs.yaml | 2 ++ 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 6d1ac9abf6..a4e84eed24 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,6 @@ 3.16.0 ====== -NOT RELEASED +November 12, 2018 Bug Fixes --------- diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 0d239a4753..94a2bc9c6b 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 15, 1, 'post0') +__version_info__ = (3, 16, 0) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs.yaml b/docs.yaml index 1f9a1eba6b..9f4d182fca 100644 --- a/docs.yaml +++ b/docs.yaml @@ -22,6 +22,8 @@ sections: # build extensions like libev CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: + - name: '3.16' + ref: '3.16.0' - name: '3.15' ref: '2ce0bd97' - name: '3.14' From 039eb29c3d1a4feb0fa4fe428feed49d3a205730 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 21 May 2018 15:05:20 -0400 Subject: [PATCH 1462/2005] replacement for lz4 test - fails faster - allows us to remove test --- CHANGELOG.rst | 9 +++++++++ cassandra/connection.py | 11 +++++++++++ tests/unit/test_connection.py | 8 -------- 3 files changed, 20 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index a4e84eed24..4ee4d418b1 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,12 @@ +3.17.0 +====== +NOT RELEASED + +Other +----- +* Fail faster on incorrect lz4 import (PYTHON-1042) + + 3.16.0 ====== November 12, 2018 diff --git a/cassandra/connection.py b/cassandra/connection.py index f017bf55a3..a51fe18163 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -68,6 +68,17 @@ except ImportError: lz4_block = lz4 + try: + lz4_block.compress + lz4_block.decompress + except AttributeError: + raise ImportError( + 'lz4 not imported correctly. Imported object should have ' + '.compress and and .decompress attributes but does not. ' + 'Please file a bug report on JIRA. (Imported object was ' + '{lz4_block})'.format(lz4_block=repr(lz4_block)) + ) + # Cassandra writes the uncompressed message length in big endian order, # but the lz4 lib requires little endian order, so we wrap these # functions to handle that diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index 3611dd9662..e35d1e8a8b 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -411,14 +411,6 @@ def send_msg(msg, req_id, msg_callback): [call(connection)] * get_holders.call_count) -class LZ4Tests(unittest.TestCase): - def test_lz4_is_correctly_imported(self): - try: - import lz4 - except ImportError: - return - from lz4 import block as lz4_block - class TimerTest(unittest.TestCase): def test_timer_collision(self): From 7e3bc37b24f5fda05db754b06e47880f2269c8c1 Mon Sep 17 00:00:00 2001 From: Gert de Pagter Date: Mon, 3 Dec 2018 12:32:11 +0100 Subject: [PATCH 1463/2005] Fix python version in Contributing guide --- CONTRIBUTING.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index f16477c355..cdd742c063 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -26,7 +26,7 @@ To protect the community, all contributors are required to `sign the DataStax Co Design and Implementation Guidelines ------------------------------------ -- We support Python 2.6+, so any changes must work in any of these runtimes (we use ``six``, ``futures``, and some internal backports for compatability) +- We support Python 2.7+, so any changes must work in any of these runtimes (we use ``six``, ``futures``, and some internal backports for compatability) - We have integrations (notably Cassandra cqlsh) that require pure Python and minimal external dependencies. We try to avoid new external dependencies. Where compiled extensions are concerned, there should always be a pure Python fallback implementation. - This project follows `semantic versioning `_, so breaking API changes will only be introduced in major versions. - Legacy ``cqlengine`` has varying degrees of overreaching client-side validation. Going forward, we will avoid client validation where server feedback is adequate and not overly expensive. From d00aff7b0fa8b7fe366c6cc7c180847fe3a55dac Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Mon, 28 Jan 2019 11:09:36 -0500 Subject: [PATCH 1464/2005] PYTHON-1068: send driver name/version in startup --- CHANGELOG.rst | 4 ++++ cassandra/connection.py | 5 ++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 4ee4d418b1..2a4fcb5dae 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,10 @@ ====== NOT RELEASED +Features +-------- +* Send driver name and version in startup message (PYTHON-1068) + Other ----- * Fail faster on incorrect lz4 import (PYTHON-1042) diff --git a/cassandra/connection.py b/cassandra/connection.py index a51fe18163..1d4a5186ac 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -106,6 +106,8 @@ def decompress(byts): locally_supported_compressions['snappy'] = (snappy.compress, decompress) +DRIVER_NAME, DRIVER_VERSION = 'DataStax Python Driver', sys.modules['cassandra'].__version__ + PROTOCOL_VERSION_MASK = 0x7f HEADER_DIRECTION_FROM_CLIENT = 0x00 @@ -721,7 +723,8 @@ def _handle_options_response(self, options_response): @defunct_on_error def _send_startup_message(self, compression=None, no_compact=False): log.debug("Sending StartupMessage on %s", self) - opts = {} + opts = {'DRIVER_NAME': DRIVER_NAME, + 'DRIVER_VERSION': DRIVER_VERSION} if compression: opts['COMPRESSION'] = compression if no_compact: From 2bdde786c7a455c6923e59da74cc41215352eca2 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 22 Jan 2019 13:31:37 -0500 Subject: [PATCH 1465/2005] initial ssl_context support --- cassandra/cluster.py | 9 +++-- cassandra/connection.py | 19 ++++++++-- .../integration/long/ssl/driver_encrypted.key | 30 ++++++++++++++++ tests/integration/long/test_ssl.py | 36 +++++++++++++++++-- 4 files changed, 87 insertions(+), 7 deletions(-) create mode 100644 tests/integration/long/ssl/driver_encrypted.key diff --git a/cassandra/cluster.py b/cassandra/cluster.py index e119605c81..20ff37235b 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -576,7 +576,7 @@ def default_retry_policy(self, policy): ssl_options = None """ - A optional dict which will be used as kwargs for ``ssl.wrap_socket()`` + An optional dict which will be used as kwargs for ``ssl.wrap_socket()`` when new sockets are created. This should be used when client encryption is enabled in Cassandra. @@ -594,6 +594,8 @@ def default_retry_policy(self, policy): with a custom or `back-ported function `_. """ + ssl_context = None + sockopts = None """ An optional list of tuples which will be used as arguments to @@ -831,7 +833,8 @@ def __init__(self, allow_beta_protocol_version=False, timestamp_generator=None, idle_heartbeat_timeout=30, - no_compact=False): + no_compact=False, + ssl_context=None): """ ``executor_threads`` defines the number of threads in a pool for handling asynchronous tasks such as extablishing connection pools or refreshing metadata. @@ -952,6 +955,7 @@ def __init__(self, self.metrics_enabled = metrics_enabled self.ssl_options = ssl_options + self.ssl_context = ssl_context self.sockopts = sockopts self.cql_version = cql_version self.max_schema_agreement_wait = max_schema_agreement_wait @@ -1247,6 +1251,7 @@ def _make_connection_kwargs(self, address, kwargs_dict): kwargs_dict.setdefault('compression', self.compression) kwargs_dict.setdefault('sockopts', self.sockopts) kwargs_dict.setdefault('ssl_options', self.ssl_options) + kwargs_dict.setdefault('ssl_context', self.ssl_context) kwargs_dict.setdefault('cql_version', self.cql_version) kwargs_dict.setdefault('protocol_version', self.protocol_version) kwargs_dict.setdefault('user_type_map', self._user_types) diff --git a/cassandra/connection.py b/cassandra/connection.py index 1d4a5186ac..f3bea45bd0 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -26,6 +26,7 @@ import sys from threading import Thread, Event, RLock import time +import warnings try: import ssl @@ -225,6 +226,7 @@ class Connection(object): decompressor = None ssl_options = None + ssl_context = None last_error = None # The current number of operations that are in flight. More precisely, @@ -273,11 +275,13 @@ class Connection(object): def __init__(self, host='127.0.0.1', port=9042, authenticator=None, ssl_options=None, sockopts=None, compression=True, cql_version=None, protocol_version=ProtocolVersion.MAX_SUPPORTED, is_control_connection=False, - user_type_map=None, connect_timeout=None, allow_beta_protocol_version=False, no_compact=False): + user_type_map=None, connect_timeout=None, allow_beta_protocol_version=False, no_compact=False, + ssl_context=None): self.host = host self.port = port self.authenticator = authenticator self.ssl_options = ssl_options.copy() if ssl_options else None + self.ssl_context = ssl_context self.sockopts = sockopts self.compression = compression self.cql_version = cql_version @@ -291,7 +295,13 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self._requests = {} self._iobuf = io.BytesIO() - if ssl_options: + if ssl_options and not ssl_context: + warnings.warn('Using ssl_options without ssl_context is ' + 'deprecated and will result in an error in ' + 'the next major release. Please use ssl_context ' + 'to prepare for that release.', + DeprecationWarning) + self._check_hostname = bool(self.ssl_options.pop('check_hostname', False)) if self._check_hostname: if not getattr(ssl, 'match_hostname', None): @@ -363,7 +373,10 @@ def _connect_socket(self): for (af, socktype, proto, canonname, sockaddr) in addresses: try: self._socket = self._socket_impl.socket(af, socktype, proto) - if self.ssl_options: + if self.ssl_context: + self._socket = self.ssl_context.wrap_socket(self._socket, + **(self.ssl_options or {})) + elif self.ssl_options: if not self._ssl_impl: raise RuntimeError("This version of Python was not compiled with SSL support") self._socket = self._ssl_impl.wrap_socket(self._socket, **self.ssl_options) diff --git a/tests/integration/long/ssl/driver_encrypted.key b/tests/integration/long/ssl/driver_encrypted.key new file mode 100644 index 0000000000..91ae5efecb --- /dev/null +++ b/tests/integration/long/ssl/driver_encrypted.key @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: AES-256-CBC,4B3084A6FB74C60EF6AD51C4677FE509 + +PRmOOSwQNcBYCBbGuv/YgKgSep00GGqCeyhej2soLaGKgSjPhe/cE7sFvm6jk72+ +wDJNpuY6gLoKmizvxrgi5iHnoVdaPkRXKkcQHgaLt4TjB2EUfPp/+V5N4FA4DxN2 +/jqfWZ5Fw8ltHbW1hEkxQ58cYWd5OQB8EesQ865Pv364px5GIcDyr9FEgTiE/Ulc +wS5V1DSgytL/iwBP7FE/zejvZLiS0OoqNGq/G0nBSsv68CH7/lCs3i0Gn2E8YNW8 +CYdMnRzsxwPgbvWYYFhNeK1mrXC+RZa7FgLeeXWR6K28aSB8wc35PePBbZT6TViz +jZkmBFvbpLrLP7KNogLdzhFLqBzkP0NgkYtseYFFcuUAmIrWPVrsoA4xHWMkdcB6 +OLMz+zpb5u8rjpa3gWh5xQJbdVDxXGJNxCYzT7sc28wzr8vHFJFg6U2DJL9x+ufW +BXwBafb8biS1TBWGSDsc36NUwkyvv/2zjSytEIkdJ4lp4S69TGcU0Qvr1gA+8zLm +o6/1G5wUTt461mAAlvcy/9pHZlm8K1VShBvBCYouoVfUyIYAKhq0bBm64E/jdE3v +/Rxd/hom0d+m595wbH7f2AZCBVmII+hEyaagtKEO87qztq/WVMjN6h6BtXR5Xjsv +fXg71mMlQvqYuTB7hOhpyPdux0vvGzCdlGmvUsAC+urqCfkUseLAFfTnD7IrfYK8 +lzRyFJPABtrDiVqQpCBsOq/ACERjVpn/XcJgEBui2jhaTodwWIVD9DPlf4o93Fhf +oKE2o2W0TkaU/uaTLNt4TLJ0EXJ8S0XIqXnEvt3eRu0jOcC3ZWlFINdJISjm53LW +o1q/6mlvj2COLxCipUfGLVmIRuuMDybndSiXH+vddLQDZSw9mynYzJUvMcurBYyL +Lb9t+8hSRftRNqnM9ojDB+gmCJ0uQUlSAt47SZHGIi91S5pM7IELgP55SUG02mGn +Pr+BBtQCVNbd1szPXH7dlUfD9eUHZ34uL/2wOMtNT7da4ajviR6HIyLiz3lrI+e+ +2X62fMFD5tnxffMIZLVPhAHHlT8RPzvspA7QHdJzWCsdxZ3VVmWCFL0WHKuP3Hhh +0jIBOh0Y9eFCMcL7yP0rbjv/3MyBqqM4T5RfVkoRE3M/F8+kHObQzdUqy4JxApyb +cb0ipAU3JxgEy2rOkXDoeYeRoCKVNU3MZ3x3/+oTZXBhlP+oIbqMo/bTr8JJFC6y +y38YcWcVdUzYBI3KbH2aRCtUXNK404zm8GNs6hTuB95IrpI6APspNkQS9yhcxBdr +zOjZh8snZ52gVDtgrNCSfSgflPEs1CNeg33PpVfZWCai/zwSVFwJuL4iyQqkTVlL +wmbLUFdZN1AmYC2dJpHm30+cykXWJr/xFpAlZXmtgHlttD02pnTVl98G1rP3Oq8u +NplSV/TOXVc3keBnQ7N5BohnQ1NSLq4wusVd7n9UEoCE6HxCUbWWkAKQIHom+5HI +7PgRw0RJRUFbydBz+bDXz3KEFcTOT5ihtI/qq6tJpXMKz7uKGLAnzTIk2DecQYm4 +c40zTUKGVISky357ZIYB6uG9NRVffqsm9M1oIeprwYNpVzyqxz6Yyen+VCxdPq9w +-----END RSA PRIVATE KEY----- diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index bb53a30edc..08bd0462a1 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -34,6 +34,7 @@ # Client specific keys/certs CLIENT_CA_CERTS = 'tests/integration/long/ssl/cassandra.pem' DRIVER_KEYFILE = "tests/integration/long/ssl/driver.key" +DRIVER_KEYFILE_ENCRYPTED = "tests/integration/long/ssl/driver_encrypted.key" DRIVER_CERTFILE = "tests/integration/long/ssl/driver.pem" DRIVER_CERTFILE_BAD = "tests/integration/long/ssl/python_driver_bad.pem" @@ -78,14 +79,18 @@ def setup_cluster_ssl(client_auth=False): ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True) -def validate_ssl_options(ssl_options): +def validate_ssl_options(*args, **kwargs): + ssl_options = kwargs.get('ssl_options', None) + ssl_context = kwargs.get('ssl_context', None) + # find absolute path to client CA_CERTS tries = 0 while True: if tries > 5: raise RuntimeError("Failed to connect to SSL cluster after 5 attempts") try: - cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options=ssl_options) + cluster = Cluster(protocol_version=PROTOCOL_VERSION, + ssl_options=ssl_options, ssl_context=ssl_context) session = cluster.connect(wait_for_all_pools=True) break except Exception: @@ -239,6 +244,33 @@ def test_can_connect_with_ssl_client_auth(self): 'certfile': abs_driver_certfile} validate_ssl_options(ssl_options) + def test_can_connect_with_ssl_client_auth_password_private_key(self): + """ + Identical test to test_can_connect_with_ssl_client_auth, the only difference + is that the DRIVER_KEYFILE is encrypted with a password. + + @jira_ticket PYTHON-995. + + @since 2.8.0 + @expected_result The client can connect via SSL and preform some basic operations + @test_category connection:ssl + """ + + # Need to get absolute paths for certs/key + abs_driver_keyfile = os.path.abspath(DRIVER_KEYFILE_ENCRYPTED) + abs_driver_certfile = os.path.abspath(DRIVER_CERTFILE) + # ssl_options = {'ca_certs': abs_path_ca_cert_path, + # 'ssl_version': ssl_version, + # 'keyfile': abs_driver_keyfile, + # 'password': 'cassandra', + # 'certfile': abs_driver_certfile} + from ssl import SSLContext + ssl_context = SSLContext(ssl_version) + ssl_context.load_cert_chain(certfile=abs_driver_certfile, + keyfile=abs_driver_keyfile, + password='cassandra') + validate_ssl_options(ssl_context=ssl_context) + def test_can_connect_with_ssl_client_auth_host_name(self): """ Test to validate that we can connect to a C* cluster that has client_auth enabled, and hostmatching From fb6dc8250ec17ce683671707f8b45aead008de3b Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 22 Jan 2019 15:34:39 -0500 Subject: [PATCH 1466/2005] Add SSLConnectionWithSSLContextTests and an additional sslcontext test --- tests/integration/long/test_ssl.py | 77 +++++++++++++++++++----------- 1 file changed, 49 insertions(+), 28 deletions(-) diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index 08bd0462a1..2e78a2ce38 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -49,10 +49,11 @@ verify_certs = {'cert_reqs': ssl.CERT_REQUIRED, 'check_hostname': True} + def setup_cluster_ssl(client_auth=False): """ We need some custom setup for this module. This will start the ccm cluster with basic - ssl connectivity, and client authenticiation if needed. + ssl connectivity, and client authentication if needed. """ use_single_node(start=False) @@ -244,33 +245,6 @@ def test_can_connect_with_ssl_client_auth(self): 'certfile': abs_driver_certfile} validate_ssl_options(ssl_options) - def test_can_connect_with_ssl_client_auth_password_private_key(self): - """ - Identical test to test_can_connect_with_ssl_client_auth, the only difference - is that the DRIVER_KEYFILE is encrypted with a password. - - @jira_ticket PYTHON-995. - - @since 2.8.0 - @expected_result The client can connect via SSL and preform some basic operations - @test_category connection:ssl - """ - - # Need to get absolute paths for certs/key - abs_driver_keyfile = os.path.abspath(DRIVER_KEYFILE_ENCRYPTED) - abs_driver_certfile = os.path.abspath(DRIVER_CERTFILE) - # ssl_options = {'ca_certs': abs_path_ca_cert_path, - # 'ssl_version': ssl_version, - # 'keyfile': abs_driver_keyfile, - # 'password': 'cassandra', - # 'certfile': abs_driver_certfile} - from ssl import SSLContext - ssl_context = SSLContext(ssl_version) - ssl_context.load_cert_chain(certfile=abs_driver_certfile, - keyfile=abs_driver_keyfile, - password='cassandra') - validate_ssl_options(ssl_context=ssl_context) - def test_can_connect_with_ssl_client_auth_host_name(self): """ Test to validate that we can connect to a C* cluster that has client_auth enabled, and hostmatching @@ -347,3 +321,50 @@ def test_cannot_connect_with_bad_client_auth(self): with self.assertRaises(NoHostAvailable) as context: cluster.connect() cluster.shutdown() + + +class SSLConnectionWithSSLContextTests(unittest.TestCase): + + @classmethod + def setUpClass(cls): + setup_cluster_ssl() + + @classmethod + def tearDownClass(cls): + ccm_cluster = get_cluster() + ccm_cluster.stop() + remove_cluster() + + def test_can_connect_with_sslcontext_certificate(self): + """ + Test to validate that we are able to connect to a cluster using a SSLContext. + + @since 3.17.0 + @jira_ticket PYTHON-995 + @expected_result The client can connect via SSL and preform some basic operations + + @test_category connection:ssl + """ + abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS) + ssl_context = ssl.SSLContext(ssl_version) + ssl_context.load_verify_locations(abs_path_ca_cert_path) + validate_ssl_options(ssl_context=ssl_context) + + def test_can_connect_with_ssl_client_auth_password_private_key(self): + """ + Identical test to SSLConnectionAuthTests.test_can_connect_with_ssl_client_auth, + the only difference is that the DRIVER_KEYFILE is encrypted with a password. + + @jira_ticket PYTHON-995. + + @since 3.17.0 + @expected_result The client can connect via SSL and preform some basic operations + @test_category connection:ssl + """ + abs_driver_keyfile = os.path.abspath(DRIVER_KEYFILE_ENCRYPTED) + abs_driver_certfile = os.path.abspath(DRIVER_CERTFILE) + ssl_context = ssl.SSLContext(ssl_version) + ssl_context.load_cert_chain(certfile=abs_driver_certfile, + keyfile=abs_driver_keyfile, + password='cassandra') + validate_ssl_options(ssl_context=ssl_context) From 22624977763a8caae64aaa9474e6a103a62c86e9 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 22 Jan 2019 15:42:10 -0500 Subject: [PATCH 1467/2005] ssl_context documentation --- cassandra/cluster.py | 20 +++++++++++++--- docs/api/cassandra/cluster.rst | 2 ++ docs/security.rst | 42 +++++++++++++++++++++++++++++----- 3 files changed, 55 insertions(+), 9 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 20ff37235b..ae2a23db69 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -576,9 +576,14 @@ def default_retry_policy(self, policy): ssl_options = None """ - An optional dict which will be used as kwargs for ``ssl.wrap_socket()`` - when new sockets are created. This should be used when client encryption - is enabled in Cassandra. + Using ssl_options without ssl_context is deprecated and will be removed in the + next major release. + + An optional dict which will be used as kwargs for ``ssl.SSLContext.wrap_socket`` (or + ``ssl.wrap_socket()`` if used without ssl_context) when new sockets are created. + This should be used when client encryption is enabled in Cassandra. + + The following documentation only applies when ssl_options is used without ssl_context. By default, a ``ca_certs`` value should be supplied (the value should be a string pointing to the location of the CA certs file), and you probably @@ -595,6 +600,15 @@ def default_retry_policy(self, policy): """ ssl_context = None + """ + An optional ``ssl.SSLContext`` instance which will be used when new sockets are created. + This should be used when client encryption is enabled in Cassandra. + + ``wrap_socket`` options can be set using :attr:`~Cluster.ssl_options`. ssl_options will + be used as kwargs for ``ssl.SSLContext.wrap_socket``. + + .. versionadded:: 3.17.0 + """ sockopts = None """ diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index e8f26de3fa..e8cfa9380f 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -32,6 +32,8 @@ .. autoattribute:: metrics + .. autoattribute:: ssl_context + .. autoattribute:: ssl_options .. autoattribute:: sockopts diff --git a/docs/security.rst b/docs/security.rst index 9036567413..1a5a3b1a22 100644 --- a/docs/security.rst +++ b/docs/security.rst @@ -27,9 +27,6 @@ For example, suppose Cassandra is setup with its default cluster = Cluster(auth_provider=auth_provider, protocol_version=2) -When working with version 2 or higher of the driver, the protocol -version is set to 2 by default, but we've included it in the example -to be explicit. Custom Authenticators ^^^^^^^^^^^^^^^^^^^^^ @@ -59,10 +56,40 @@ a dict of credentials with a ``username`` and ``password`` key: SSL --- +SSL should be used when client encryption is enabled in Cassandra. + +Versions 3.17.0 and higher +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To enable SSL you will need to set :attr:`.Cluster.ssl_context` to a +``ssl.SSLContext`` instance. Optionally, you can also set :attr:`.Cluster.ssl_options` +to a dict of options. These will be passed as kwargs to ``ssl.SSLContext.wrap_socket()`` +when new sockets are created. + +For example: + +.. code-block:: python + + from cassandra.cluster import Cluster + from ssl import SSLContext, PROTOCOL_TLSv1, CERT_REQUIRED + + ssl_context = SSLContext(PROTOCOL_TLSv1) + ssl_context.verify_mode = CERT_REQUIRED + ssl_context.load_verify_locations('/path/to/my/ca.certs') + cluster = Cluster(ssl_context=ssl_context) + + +This is only an example to show how to pass the ssl parameters. Consider reading +the `python ssl documentation `_ for +your configuration. + +Versions 3.16.0 and lower +^^^^^^^^^^^^^^^^^^^^^^^^^ + To enable SSL you will need to set :attr:`.Cluster.ssl_options` to a dict of options. These will be passed as kwargs to ``ssl.wrap_socket()`` -when new sockets are created. This should be used when client encryption -is enabled in Cassandra. +when new sockets are created. Note that this use of ssl_options will be +deprecated in the next major release. By default, a ``ca_certs`` value should be supplied (the value should be a string pointing to the location of the CA certs file), and you probably @@ -88,5 +115,8 @@ the `python ssl documentation `_. -*Note*: In case the twisted event loop is used pyOpenSSL must be installed or an exception will be risen. Also +SSL with Twisted +++++++++++++++++ + +In case the twisted event loop is used pyOpenSSL must be installed or an exception will be risen. Also to set the ``ssl_version`` and ``cert_reqs`` in ``ssl_opts`` the appropriate constants from pyOpenSSL are expected. From 387774a6cc68c4772c9ade177d43fe288940e544 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 22 Jan 2019 15:44:04 -0500 Subject: [PATCH 1468/2005] minor changes and changelog entry --- CHANGELOG.rst | 7 +++++++ build.yaml | 1 - cassandra/cluster.py | 8 ++++++++ cassandra/connection.py | 9 +-------- tests/integration/long/test_ssl.py | 6 +++--- 5 files changed, 19 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 2a4fcb5dae..58e56af901 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,11 +5,18 @@ NOT RELEASED Features -------- * Send driver name and version in startup message (PYTHON-1068) +* Add Cluster ssl_context option to enable SSL (PYTHON-995) +* Allow encrypted private keys for 2-way SSL cluster connections (PYTHON-995) Other ----- * Fail faster on incorrect lz4 import (PYTHON-1042) +Deprecations +------------ + +* Using Cluster.ssl_options to enable SSL is deprecated and will be removed in + the next major release, use ssl_context. 3.16.0 ====== diff --git a/build.yaml b/build.yaml index 948c4cad02..890b614800 100644 --- a/build.yaml +++ b/build.yaml @@ -118,7 +118,6 @@ cassandra: - '3.0' - '3.11' - 'test-dse' - - 'dse-6.7' env: CYTHON: diff --git a/cassandra/cluster.py b/cassandra/cluster.py index ae2a23db69..e443b39e24 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -968,6 +968,14 @@ def __init__(self, ''.format(cp=contact_points, lbp=load_balancing_policy)) self.metrics_enabled = metrics_enabled + + if ssl_options and not ssl_context: + warn('Using ssl_options without ssl_context is ' + 'deprecated and will result in an error in ' + 'the next major release. Please use ssl_context ' + 'to prepare for that release.', + DeprecationWarning) + self.ssl_options = ssl_options self.ssl_context = ssl_context self.sockopts = sockopts diff --git a/cassandra/connection.py b/cassandra/connection.py index f3bea45bd0..18a9381389 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -26,7 +26,6 @@ import sys from threading import Thread, Event, RLock import time -import warnings try: import ssl @@ -295,13 +294,7 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self._requests = {} self._iobuf = io.BytesIO() - if ssl_options and not ssl_context: - warnings.warn('Using ssl_options without ssl_context is ' - 'deprecated and will result in an error in ' - 'the next major release. Please use ssl_context ' - 'to prepare for that release.', - DeprecationWarning) - + if ssl_options: self._check_hostname = bool(self.ssl_options.pop('check_hostname', False)) if self._check_hostname: if not getattr(ssl, 'match_hostname', None): diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index 2e78a2ce38..f109608cb7 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -80,7 +80,7 @@ def setup_cluster_ssl(client_auth=False): ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True) -def validate_ssl_options(*args, **kwargs): +def validate_ssl_options(**kwargs): ssl_options = kwargs.get('ssl_options', None) ssl_context = kwargs.get('ssl_context', None) @@ -243,7 +243,7 @@ def test_can_connect_with_ssl_client_auth(self): 'ssl_version': ssl_version, 'keyfile': abs_driver_keyfile, 'certfile': abs_driver_certfile} - validate_ssl_options(ssl_options) + validate_ssl_options(ssl_options=ssl_options) def test_can_connect_with_ssl_client_auth_host_name(self): """ @@ -271,7 +271,7 @@ def test_can_connect_with_ssl_client_auth_host_name(self): 'certfile': abs_driver_certfile} ssl_options.update(verify_certs) - validate_ssl_options(ssl_options) + validate_ssl_options(ssl_options=ssl_options) def test_cannot_connect_without_client_auth(self): """ From a432df9ea501dcb4cb0cab85d5b522e8df163410 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 21 Jan 2019 10:51:12 -0500 Subject: [PATCH 1469/2005] Improve handling of ssl errors when sending a message --- CHANGELOG.rst | 4 +++ cassandra/io/asyncorereactor.py | 3 +- cassandra/io/libevreactor.py | 3 +- tests/integration/long/test_ssl.py | 49 ++++++++++++++++++++++++++++-- 4 files changed, 54 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 58e56af901..25d9bb9f44 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -8,6 +8,10 @@ Features * Add Cluster ssl_context option to enable SSL (PYTHON-995) * Allow encrypted private keys for 2-way SSL cluster connections (PYTHON-995) +Bug Fixes +--------- +* NoHostAvailable when all hosts are up and connectable (PYTHON-891) + Other ----- * Fail faster on incorrect lz4 import (PYTHON-1042) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 91431aa682..2d30cdeedf 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -402,7 +402,8 @@ def handle_write(self): sent = self.send(next_msg) self._readable = True except socket.error as err: - if (err.args[0] in NONBLOCKING): + if (err.args[0] in NONBLOCKING or + err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE)): with self.deque_lock: self.deque.appendleft(next_msg) else: diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 21111b0e25..afdf3b8137 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -316,7 +316,8 @@ def handle_write(self, watcher, revents, errno=None): try: sent = self._socket.send(next_msg) except socket.error as err: - if (err.args[0] in NONBLOCKING): + if (err.args[0] in NONBLOCKING or + err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE)): with self._deque_lock: self.deque.appendleft(next_msg) else: diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index f109608cb7..7f0a870718 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -17,7 +17,7 @@ except ImportError: import unittest -import os, sys, traceback, logging, ssl, time +import os, sys, traceback, logging, ssl, time, math, uuid from cassandra.cluster import Cluster, NoHostAvailable from cassandra import ConsistencyLevel from cassandra.query import SimpleStatement @@ -323,6 +323,49 @@ def test_cannot_connect_with_bad_client_auth(self): cluster.shutdown() +class SSLSocketErrorTests(unittest.TestCase): + + @classmethod + def setUpClass(cls): + setup_cluster_ssl() + + @classmethod + def tearDownClass(cls): + ccm_cluster = get_cluster() + ccm_cluster.stop() + remove_cluster() + + def test_ssl_want_write_errors_are_retried(self): + """ + Test that when a socket receives a WANT_WRITE error, the message chunk sending is retried. + + @since 3.17.0 + @jira_ticket PYTHON-891 + @expected_result The query is executed successfully + + @test_category connection:ssl + """ + abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS) + ssl_options = {'ca_certs': abs_path_ca_cert_path, + 'ssl_version': ssl_version} + cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options=ssl_options) + session = cluster.connect(wait_for_all_pools=True) + try: + session.execute('drop keyspace ssl_error_test') + except: + pass + session.execute( + "CREATE KEYSPACE ssl_error_test WITH replication = {'class':'SimpleStrategy','replication_factor':1};") + session.execute("CREATE TABLE ssl_error_test.big_text (id uuid PRIMARY KEY, data text);") + + params = { + '0': uuid.uuid4(), + '1': "0" * int(math.pow(10, 7)) + } + + session.execute('INSERT INTO ssl_error_test.big_text ("id", "data") VALUES (%(0)s, %(1)s)', params) + + class SSLConnectionWithSSLContextTests(unittest.TestCase): @classmethod @@ -355,10 +398,10 @@ def test_can_connect_with_ssl_client_auth_password_private_key(self): Identical test to SSLConnectionAuthTests.test_can_connect_with_ssl_client_auth, the only difference is that the DRIVER_KEYFILE is encrypted with a password. - @jira_ticket PYTHON-995. - @since 3.17.0 + @jira_ticket PYTHON-995 @expected_result The client can connect via SSL and preform some basic operations + @test_category connection:ssl """ abs_driver_keyfile = os.path.abspath(DRIVER_KEYFILE_ENCRYPTED) From e45b050100043b7ab1b52466ebb816a84973c913 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 25 Jan 2019 15:46:11 -0500 Subject: [PATCH 1470/2005] Fix serial consistency level is not used --- CHANGELOG.rst | 2 + cassandra/__init__.py | 5 ++ cassandra/cluster.py | 13 +++++ cassandra/policies.py | 14 +++-- cassandra/query.py | 6 +-- tests/integration/long/test_policies.py | 70 +++++++++++++++++++++++++ tests/unit/test_cluster.py | 28 +++++++--- tests/unit/test_policies.py | 4 +- 8 files changed, 128 insertions(+), 14 deletions(-) create mode 100644 tests/integration/long/test_policies.py diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 25d9bb9f44..ec8bbe58cc 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -7,10 +7,12 @@ Features * Send driver name and version in startup message (PYTHON-1068) * Add Cluster ssl_context option to enable SSL (PYTHON-995) * Allow encrypted private keys for 2-way SSL cluster connections (PYTHON-995) +* Introduce new method ConsistencyLevel.is_serial (PYTHON-1067) Bug Fixes --------- * NoHostAvailable when all hosts are up and connectable (PYTHON-891) +* Serial consistency level is not used (PYTHON-1007) Other ----- diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 94a2bc9c6b..c00ac38fad 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -92,6 +92,11 @@ class ConsistencyLevel(object): one response. """ + @staticmethod + def is_serial(cl): + return cl == ConsistencyLevel.SERIAL or cl == ConsistencyLevel.LOCAL_SERIAL + + ConsistencyLevel.value_to_name = { ConsistencyLevel.ANY: 'ANY', ConsistencyLevel.ONE: 'ONE', diff --git a/cassandra/cluster.py b/cassandra/cluster.py index e443b39e24..37d7787e58 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -290,7 +290,14 @@ def __init__(self, load_balancing_policy=_NOT_SET, retry_policy=None, self.load_balancing_policy = load_balancing_policy self.retry_policy = retry_policy or RetryPolicy() self.consistency_level = consistency_level + + if (serial_consistency_level is not None and + not ConsistencyLevel.is_serial(serial_consistency_level)): + raise ValueError("serial_consistency_level must be either " + "ConsistencyLevel.SERIAL " + "or ConsistencyLevel.LOCAL_SERIAL.") self.serial_consistency_level = serial_consistency_level + self.request_timeout = request_timeout self.row_factory = row_factory self.speculative_execution_policy = speculative_execution_policy or NoSpeculativeExecutionPolicy() @@ -2032,6 +2039,12 @@ def default_serial_consistency_level(self): @default_serial_consistency_level.setter def default_serial_consistency_level(self, cl): + if (cl is not None and + not ConsistencyLevel.is_serial(cl)): + raise ValueError("default_serial_consistency_level must be either " + "ConsistencyLevel.SERIAL " + "or ConsistencyLevel.LOCAL_SERIAL.") + self._validate_set_legacy_config('default_serial_consistency_level', cl) max_trace_wait = 2.0 diff --git a/cassandra/policies.py b/cassandra/policies.py index fdd96d596e..67665d2788 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -814,7 +814,7 @@ def on_unavailable(self, query, consistency, required_replicas, alive_replicas, This is called when the coordinator node determines that a read or write operation cannot be successful because the number of live replicas are too low to meet the requested :class:`.ConsistencyLevel`. - This means that the read or write operation was never forwared to + This means that the read or write operation was never forwarded to any replicas. `query` is the :class:`.Statement` that failed. @@ -830,9 +830,11 @@ def on_unavailable(self, query, consistency, required_replicas, alive_replicas, `retry_num` counts how many times the operation has been retried, so the first time this method is called, `retry_num` will be 0. - By default, no retries will be attempted and the error will be re-raised. + By default, if this is the first retry, it triggers a retry on the next + host in the query plan with the same consistency level. If this is not the + first retry, no retries will be attempted and the error will be re-raised. """ - return (self.RETRY_NEXT_HOST, consistency) if retry_num == 0 else (self.RETHROW, None) + return (self.RETRY_NEXT_HOST, None) if retry_num == 0 else (self.RETHROW, None) class FallthroughRetryPolicy(RetryPolicy): @@ -912,6 +914,9 @@ def on_read_timeout(self, query, consistency, required_responses, received_responses, data_retrieved, retry_num): if retry_num != 0: return self.RETHROW, None + elif ConsistencyLevel.is_serial(consistency): + # Downgrading does not make sense for a CAS read query + return self.RETHROW, None elif received_responses < required_responses: return self._pick_consistency(received_responses) elif not data_retrieved: @@ -940,6 +945,9 @@ def on_write_timeout(self, query, consistency, write_type, def on_unavailable(self, query, consistency, required_replicas, alive_replicas, retry_num): if retry_num != 0: return self.RETHROW, None + elif ConsistencyLevel.is_serial(consistency): + # failed at the paxos phase of a LWT, retry on the next host + return self.RETRY_NEXT_HOST, None else: return self._pick_consistency(alive_replicas) diff --git a/cassandra/query.py b/cassandra/query.py index b2193d61a0..74a9896b9a 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -328,8 +328,8 @@ def _get_serial_consistency_level(self): return self._serial_consistency_level def _set_serial_consistency_level(self, serial_consistency_level): - acceptable = (None, ConsistencyLevel.SERIAL, ConsistencyLevel.LOCAL_SERIAL) - if serial_consistency_level not in acceptable: + if (serial_consistency_level is not None and + not ConsistencyLevel.is_serial(serial_consistency_level)): raise ValueError( "serial_consistency_level must be either ConsistencyLevel.SERIAL " "or ConsistencyLevel.LOCAL_SERIAL") @@ -445,7 +445,7 @@ class PreparedStatement(object): result_metadata_id = None routing_key_indexes = None _routing_key_index_set = None - serial_consistency_level = None + serial_consistency_level = None # TODO never used? def __init__(self, column_metadata, query_id, routing_key_indexes, query, keyspace, protocol_version, result_metadata, result_metadata_id): diff --git a/tests/integration/long/test_policies.py b/tests/integration/long/test_policies.py new file mode 100644 index 0000000000..f720548ea2 --- /dev/null +++ b/tests/integration/long/test_policies.py @@ -0,0 +1,70 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +from cassandra import ConsistencyLevel, Unavailable +from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT + +from tests.integration import use_cluster, get_cluster, get_node + + +def setup_module(): + use_cluster('test_cluster', [4]) + + +class RetryPolicyTests(unittest.TestCase): + + @classmethod + def tearDownClass(cls): + cluster = get_cluster() + cluster.start() # make sure other nodes are restarted + + def test_should_rethrow_on_unvailable_with_default_policy_if_cas(self): + """ + Tests for the default retry policy in combination with lightweight transactions. + + @since 3.17 + @jira_ticket PYTHON-1007 + @expected_result the query is retried with the default CL, not the serial one. + + @test_category policy + """ + ep = ExecutionProfile(consistency_level=ConsistencyLevel.ALL, + serial_consistency_level=ConsistencyLevel.SERIAL) + + cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: ep}) + session = cluster.connect() + + session.execute("CREATE KEYSPACE test_retry_policy_cas WITH replication = {'class':'SimpleStrategy','replication_factor': 3};") + session.execute("CREATE TABLE test_retry_policy_cas.t (id int PRIMARY KEY, data text);") + session.execute('INSERT INTO test_retry_policy_cas.t ("id", "data") VALUES (%(0)s, %(1)s)', {'0': 42, '1': 'testing'}) + + get_node(2).stop() + get_node(4).stop() + + # before fix: cassandra.InvalidRequest: Error from server: code=2200 [Invalid query] message="SERIAL is not + # supported as conditional update commit consistency. ...."" + + # after fix: cassandra.Unavailable (expected since replicas are down) + with self.assertRaises(Unavailable) as cm: + session.execute("update test_retry_policy_cas.t set data = 'staging' where id = 42 if data ='testing'") + + exception = cm.exception + self.assertEqual(exception.consistency, ConsistencyLevel.SERIAL) + self.assertEqual(exception.required_replicas, 2) + self.assertEqual(exception.alive_replicas, 1) diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index e52658b704..0865109666 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -147,8 +147,13 @@ def test_default_serial_consistency_level(self, *_): # default is None self.assertIsNone(s.default_serial_consistency_level) - sentinel = 1001 - for cl in (None, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL, sentinel): + # Should fail + with self.assertRaises(ValueError): + s.default_serial_consistency_level = ConsistencyLevel.ANY + with self.assertRaises(ValueError): + s.default_serial_consistency_level = 1001 + + for cl in (None, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL): s.default_serial_consistency_level = cl # default is passed through @@ -204,7 +209,7 @@ def test_default_legacy(self): @mock_session_pools def test_default_profile(self): - non_default_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(3)]) + non_default_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(2)]) cluster = Cluster(execution_profiles={'non-default': non_default_profile}) session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)]) @@ -217,6 +222,17 @@ def test_default_profile(self): rf = session.execute_async("query", execution_profile='non-default') self._verify_response_future_profile(rf, non_default_profile) + def test_serial_consistency_level_validation(self): + # should pass + ep = ExecutionProfile(RoundRobinPolicy(), serial_consistency_level=ConsistencyLevel.SERIAL) + ep = ExecutionProfile(RoundRobinPolicy(), serial_consistency_level=ConsistencyLevel.LOCAL_SERIAL) + + # should not pass + with self.assertRaises(ValueError): + ep = ExecutionProfile(RoundRobinPolicy(), serial_consistency_level=ConsistencyLevel.ANY) + with self.assertRaises(ValueError): + ep = ExecutionProfile(RoundRobinPolicy(), serial_consistency_level=42) + @mock_session_pools def test_statement_params_override_legacy(self): cluster = Cluster(load_balancing_policy=RoundRobinPolicy(), default_retry_policy=DowngradingConsistencyRetryPolicy()) @@ -240,7 +256,7 @@ def test_statement_params_override_legacy(self): @mock_session_pools def test_statement_params_override_profile(self): - non_default_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(3)]) + non_default_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(2)]) cluster = Cluster(execution_profiles={'non-default': non_default_profile}) session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)]) @@ -309,7 +325,7 @@ def test_no_legacy_with_profile(self): @mock_session_pools def test_profile_name_value(self): - internalized_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(3)]) + internalized_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(2)]) cluster = Cluster(execution_profiles={'by-name': internalized_profile}) session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)]) self.assertEqual(cluster._config_mode, _ConfigMode.PROFILES) @@ -317,7 +333,7 @@ def test_profile_name_value(self): rf = session.execute_async("query", execution_profile='by-name') self._verify_response_future_profile(rf, internalized_profile) - by_value = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(3)]) + by_value = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(2)]) rf = session.execute_async("query", execution_profile=by_value) self._verify_response_future_profile(rf, by_value) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index f1f4e15b74..6f6c7c33c9 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -1021,13 +1021,13 @@ def test_unavailable(self): query=None, consistency=ONE, required_replicas=1, alive_replicas=2, retry_num=0) self.assertEqual(retry, RetryPolicy.RETRY_NEXT_HOST) - self.assertEqual(consistency, ONE) + self.assertEqual(consistency, None) retry, consistency = policy.on_unavailable( query=None, consistency=ONE, required_replicas=10000, alive_replicas=1, retry_num=0) self.assertEqual(retry, RetryPolicy.RETRY_NEXT_HOST) - self.assertEqual(consistency, ONE) + self.assertEqual(consistency, None) class FallthroughRetryPolicyTest(unittest.TestCase): From 996f1392129ec97eb0f2a1fff5dd8217110a0b8e Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 23 Jan 2019 13:25:02 -0500 Subject: [PATCH 1471/2005] Deprecate DowngradingConsistencyRetryPolicy --- CHANGELOG.rst | 2 ++ cassandra/policies.py | 9 +++++++++ 2 files changed, 11 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index ec8bbe58cc..7d978bf9c9 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -23,6 +23,8 @@ Deprecations * Using Cluster.ssl_options to enable SSL is deprecated and will be removed in the next major release, use ssl_context. +* DowngradingConsistencyRetryPolicy is deprecated and will be + removed in the next major release. (PYTHON-937) 3.16.0 ====== diff --git a/cassandra/policies.py b/cassandra/policies.py index 67665d2788..540f5d3e2e 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -17,6 +17,7 @@ from random import randint, shuffle from threading import Lock import socket +import warnings from cassandra import WriteType as WT @@ -855,6 +856,8 @@ def on_unavailable(self, *args, **kwargs): class DowngradingConsistencyRetryPolicy(RetryPolicy): """ + *Deprecated:* This retry policy will be removed in the next major release. + A retry policy that sometimes retries with a lower consistency level than the one initially requested. @@ -900,6 +903,12 @@ class DowngradingConsistencyRetryPolicy(RetryPolicy): to make sure the data is persisted, and that reading something is better than reading nothing, even if there is a risk of reading stale data. """ + def __init__(self, *args, **kwargs): + super(DowngradingConsistencyRetryPolicy, self).__init__(*args, **kwargs) + warnings.warn('DowngradingConsistencyRetryPolicy is deprecated ' + 'and will be removed in the next major release.', + DeprecationWarning) + def _pick_consistency(self, num_responses): if num_responses >= 3: return self.RETRY, ConsistencyLevel.THREE From 82c84255ff463998f31e11f0db81e18aad0f08df Mon Sep 17 00:00:00 2001 From: wenheping Date: Thu, 7 Feb 2019 02:34:09 +0800 Subject: [PATCH 1472/2005] Update setup.py (#993) Bump cython dependency to 0.29 --- CHANGELOG.rst | 1 + setup.py | 2 +- test-requirements.txt | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 7d978bf9c9..db3732e77f 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -17,6 +17,7 @@ Bug Fixes Other ----- * Fail faster on incorrect lz4 import (PYTHON-1042) +* Bump Cython dependency version to 0.29 (PYTHON-1036) Deprecations ------------ diff --git a/setup.py b/setup.py index 1b0ebf6d8b..2c13639834 100644 --- a/setup.py +++ b/setup.py @@ -395,7 +395,7 @@ def run_setup(extensions): # 1.) build_ext eats errors at compile time, letting the install complete while producing useful feedback # 2.) there could be a case where the python environment has cython installed but the system doesn't have build tools if pre_build_check(): - cython_dep = 'Cython>=0.20,!=0.25,<0.29' + cython_dep = 'Cython>=0.20,!=0.25,<0.30' user_specified_cython_version = os.environ.get('CASS_DRIVER_ALLOWED_CYTHON_VERSION') if user_specified_cython_version is not None: cython_dep = 'Cython==%s' % (user_specified_cython_version,) diff --git a/test-requirements.txt b/test-requirements.txt index 51636d4253..126780cd3c 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -11,6 +11,6 @@ pure-sasl twisted[tls] gevent>=1.0 eventlet -cython>=0.20,<0.29 +cython>=0.20,<0.30 packaging asynctest; python_version > '3.4' From b885875adcca545d4524bbf6482e6ab338a5e65f Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 6 Feb 2019 15:24:13 -0500 Subject: [PATCH 1473/2005] Add Session.get_execution_profile --- CHANGELOG.rst | 1 + cassandra/__init__.py | 2 +- cassandra/cluster.py | 18 +++++++++++++----- docs/api/cassandra/cluster.rst | 2 ++ tests/unit/test_cluster.py | 12 ++++++++++-- 5 files changed, 27 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index db3732e77f..c34d2aef44 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -8,6 +8,7 @@ Features * Add Cluster ssl_context option to enable SSL (PYTHON-995) * Allow encrypted private keys for 2-way SSL cluster connections (PYTHON-995) * Introduce new method ConsistencyLevel.is_serial (PYTHON-1067) +* Add Session.get_execution_profile (PYTHON-932) Bug Fixes --------- diff --git a/cassandra/__init__.py b/cassandra/__init__.py index c00ac38fad..dd3edfd49d 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 16, 0) +__version_info__ = (3, 16, 0, 'post0') __version__ = '.'.join(map(str, __version_info__)) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 37d7787e58..15e5efae73 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2276,7 +2276,7 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time load_balancing_policy = self.cluster.load_balancing_policy spec_exec_policy = None else: - execution_profile = self._get_execution_profile(execution_profile) + execution_profile = self._maybe_get_execution_profile(execution_profile) if timeout is _NOT_SET: timeout = execution_profile.request_timeout @@ -2341,12 +2341,20 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time prepared_statement=prepared_statement, retry_policy=retry_policy, row_factory=row_factory, load_balancer=load_balancing_policy, start_time=start_time, speculative_execution_plan=spec_exec_plan) - def _get_execution_profile(self, ep): + def get_execution_profile(self, name): + """ + Returns the execution profile associated with the provided ``name``. + + :param name: The name (or key) of the execution profile. + """ profiles = self.cluster.profile_manager.profiles try: - return ep if isinstance(ep, ExecutionProfile) else profiles[ep] + return profiles[name] except KeyError: - raise ValueError("Invalid execution_profile: '%s'; valid profiles are %s" % (ep, profiles.keys())) + raise ValueError("Invalid execution_profile: '%s'; valid profiles are %s" % (name, profiles.keys())) + + def _maybe_get_execution_profile(self, ep): + return ep if isinstance(ep, ExecutionProfile) else self.get_execution_profile(ep) def execution_profile_clone_update(self, ep, **kwargs): """ @@ -2358,7 +2366,7 @@ def execution_profile_clone_update(self, ep, **kwargs): by the active profile. In cases where this is not desirable, be sure to replace the instance instead of manipulating the shared object. """ - clone = copy(self._get_execution_profile(ep)) + clone = copy(self._maybe_get_execution_profile(ep)) for attr, value in kwargs.items(): setattr(clone, attr, value) return clone diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index e8cfa9380f..105ce31fa0 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -160,6 +160,8 @@ .. automethod:: set_keyspace(keyspace) + .. automethod:: get_execution_profile + .. automethod:: execution_profile_clone_update .. automethod:: add_request_init_listener diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 0865109666..fd46731bf0 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -17,6 +17,7 @@ import unittest # noqa import logging +import six from mock import patch, Mock @@ -215,13 +216,20 @@ def test_default_profile(self): self.assertEqual(cluster._config_mode, _ConfigMode.PROFILES) - default_profile = cluster.profile_manager.profiles[EXEC_PROFILE_DEFAULT] + default_profile = session.get_execution_profile(EXEC_PROFILE_DEFAULT) rf = session.execute_async("query") self._verify_response_future_profile(rf, default_profile) rf = session.execute_async("query", execution_profile='non-default') self._verify_response_future_profile(rf, non_default_profile) + for name, ep in six.iteritems(cluster.profile_manager.profiles): + self.assertEqual(ep, session.get_execution_profile(name)) + + # invalid ep + with self.assertRaises(ValueError): + session.get_execution_profile('non-existent') + def test_serial_consistency_level_validation(self): # should pass ep = ExecutionProfile(RoundRobinPolicy(), serial_consistency_level=ConsistencyLevel.SERIAL) @@ -353,7 +361,7 @@ def test_exec_profile_clone(self): # default and one named for profile in (EXEC_PROFILE_DEFAULT, 'one'): - active = cluster.profile_manager.profiles[profile] + active = session.get_execution_profile(profile) clone = session.execution_profile_clone_update(profile) self.assertIsNot(clone, active) From 06c7c4df3fd3acf10b2eefa830176c9535d80bf3 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 13 Feb 2019 10:50:45 -0500 Subject: [PATCH 1474/2005] PYTHON-993: Add host kwarg to Session.execute/execute_async APIs to send a query to a specific node (#1010) * Add host kwarg to Session.execute/execute_async APIs --- CHANGELOG.rst | 1 + cassandra/cluster.py | 45 +++++++++++++++++------- tests/integration/__init__.py | 1 + tests/integration/standard/test_query.py | 38 ++++++++++++++++++-- 4 files changed, 71 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index c34d2aef44..a9f85bdfd7 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -9,6 +9,7 @@ Features * Allow encrypted private keys for 2-way SSL cluster connections (PYTHON-995) * Introduce new method ConsistencyLevel.is_serial (PYTHON-1067) * Add Session.get_execution_profile (PYTHON-932) +* Add host kwarg to Session.execute/execute_async APIs to send a query to a specific node (PYTHON-993) Bug Fixes --------- diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 15e5efae73..291df1e48b 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2174,7 +2174,9 @@ def __init__(self, cluster, hosts, keyspace=None): msg += " using keyspace '%s'" % self.keyspace raise NoHostAvailable(msg, [h.address for h in hosts]) - def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_payload=None, execution_profile=EXEC_PROFILE_DEFAULT, paging_state=None): + def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, + custom_payload=None, execution_profile=EXEC_PROFILE_DEFAULT, + paging_state=None, host=None): """ Execute the given query and synchronously wait for the response. @@ -2207,10 +2209,16 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_ for example `paging_state` is an optional paging state, reused from a previous :class:`ResultSet`. + + `host` is the :class:`pool.Host` that should handle the query. Using this is discouraged except in a few + cases, e.g., querying node-local tables and applying schema changes. """ - return self.execute_async(query, parameters, trace, custom_payload, timeout, execution_profile, paging_state).result() + return self.execute_async(query, parameters, trace, custom_payload, + timeout, execution_profile, paging_state, host).result() - def execute_async(self, query, parameters=None, trace=False, custom_payload=None, timeout=_NOT_SET, execution_profile=EXEC_PROFILE_DEFAULT, paging_state=None): + def execute_async(self, query, parameters=None, trace=False, custom_payload=None, + timeout=_NOT_SET, execution_profile=EXEC_PROFILE_DEFAULT, + paging_state=None, host=None): """ Execute the given query and return a :class:`~.ResponseFuture` object which callbacks may be attached to for asynchronous response @@ -2245,13 +2253,17 @@ def execute_async(self, query, parameters=None, trace=False, custom_payload=None ... log.exception("Operation failed:") """ - future = self._create_response_future(query, parameters, trace, custom_payload, timeout, execution_profile, paging_state) + future = self._create_response_future( + query, parameters, trace, custom_payload, timeout, + execution_profile, paging_state, host) future._protocol_handler = self.client_protocol_handler self._on_request(future) future.send_request() return future - def _create_response_future(self, query, parameters, trace, custom_payload, timeout, execution_profile=EXEC_PROFILE_DEFAULT, paging_state=None): + def _create_response_future(self, query, parameters, trace, custom_payload, + timeout, execution_profile=EXEC_PROFILE_DEFAULT, + paging_state=None, host=None): """ Returns the ResponseFuture before calling send_request() on it """ prepared_statement = None @@ -2339,7 +2351,8 @@ def _create_response_future(self, query, parameters, trace, custom_payload, time return ResponseFuture( self, message, query, timeout, metrics=self._metrics, prepared_statement=prepared_statement, retry_policy=retry_policy, row_factory=row_factory, - load_balancer=load_balancing_policy, start_time=start_time, speculative_execution_plan=spec_exec_plan) + load_balancer=load_balancing_policy, start_time=start_time, speculative_execution_plan=spec_exec_plan, + host=host) def get_execution_profile(self, name): """ @@ -3539,11 +3552,13 @@ class ResponseFuture(object): _timer = None _protocol_handler = ProtocolHandler _spec_execution_plan = NoSpeculativeExecutionPlan() + _host = None _warned_timeout = False def __init__(self, session, message, query, timeout, metrics=None, prepared_statement=None, - retry_policy=RetryPolicy(), row_factory=None, load_balancer=None, start_time=None, speculative_execution_plan=None): + retry_policy=RetryPolicy(), row_factory=None, load_balancer=None, start_time=None, + speculative_execution_plan=None, host=None): self.session = session # TODO: normalize handling of retry policy and row factory self.row_factory = row_factory or session.row_factory @@ -3556,6 +3571,7 @@ def __init__(self, session, message, query, timeout, metrics=None, prepared_stat self.prepared_statement = prepared_statement self._callback_lock = Lock() self._start_time = start_time or time.time() + self._host = host self._spec_execution_plan = speculative_execution_plan or self._spec_execution_plan self._make_query_plan() self._event = Event() @@ -3651,12 +3667,17 @@ def _on_speculative_execute(self): self.send_request(error_no_hosts=False) self._start_timer() - def _make_query_plan(self): - # convert the list/generator/etc to an iterator so that subsequent - # calls to send_request (which retries may do) will resume where - # they last left off - self.query_plan = iter(self._load_balancer.make_query_plan(self.session.keyspace, self.query)) + # set the query_plan according to the load balancing policy, + # or to the explicit host target if set + if self._host: + # returning a single value effectively disables retries + self.query_plan = [self._host] + else: + # convert the list/generator/etc to an iterator so that subsequent + # calls to send_request (which retries may do) will resume where + # they last left off + self.query_plan = iter(self._load_balancer.make_query_plan(self.session.keyspace, self.query)) def send_request(self, error_no_hosts=True): """ Internal """ diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index f5f690d42c..370c34116f 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -250,6 +250,7 @@ def local_decorator_creator(): def _id_and_mark(f): f.local = True + return f return _id_and_mark diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 3fedb0cd57..22b4130740 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -33,8 +33,15 @@ from tests.integration import greaterthanorequalcass30, get_node import time +import random import re +import mock + + +log = logging.getLogger(__name__) + + def setup_module(): if not USE_CASS_EXTERNAL: use_singledc(start=False) @@ -326,6 +333,33 @@ def test_basic_json_query(self): self.assertEqual(results.column_names, ["[json]"]) self.assertEqual(results[0][0], '{"k": 1, "v": 1}') + def test_host_targeting_query(self): + """ + Test to validate the the single host targeting works. + + @since 3.17.0 + @jira_ticket PYTHON-933 + @expected_result the coordinator host is always the one set + """ + + default_ep = self.cluster.profile_manager.default + # copy of default EP with checkable LBP + checkable_ep = self.session.execution_profile_clone_update( + ep=default_ep, + load_balancing_policy=mock.Mock(wraps=default_ep.load_balancing_policy) + ) + query = SimpleStatement("INSERT INTO test3rf.test(k, v) values (1, 1)") + + for i in range(10): + host = random.choice(self.cluster.metadata.all_hosts()) + log.debug('targeting {}'.format(host)) + future = self.session.execute_async(query, host=host, execution_profile=checkable_ep) + future.result() + # check we're using the selected host + self.assertEqual(host, future.coordinator_host) + # check that this bypasses the LBP + self.assertFalse(checkable_ep.load_balancing_policy.make_query_plan.called) + class PreparedStatementTests(unittest.TestCase): @@ -568,7 +602,7 @@ def test_prepare_batch_statement_after_alter(self): select_results = session.execute("SELECT * FROM %s" % table) expected_results = [(1, None, 2, None, 3), (2, None, 3, None, 4), (3, None, 4, None, 5), (4, None, 5, None, 6)] - + self.assertEqual(set(expected_results), set(select_results._current_rows)) @@ -1421,7 +1455,7 @@ def test_lower_protocol(self): # : ConnectionException('Host has been marked down or removed',)}) with self.assertRaises(NoHostAvailable): session.execute(simple_stmt) - + def _check_set_keyspace_in_statement(self, session): simple_stmt = SimpleStatement("SELECT * from {}".format(self.table_name), keyspace=self.ks_name) results = session.execute(simple_stmt) From 3d3aececf22c7a77b51ae4333269693d86c9d071 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 11 Feb 2019 15:44:02 -0500 Subject: [PATCH 1475/2005] Expand Driver SSL Documentation --- CHANGELOG.rst | 1 + docs/security.rst | 177 ++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 166 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index a9f85bdfd7..15a696bebd 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -20,6 +20,7 @@ Other ----- * Fail faster on incorrect lz4 import (PYTHON-1042) * Bump Cython dependency version to 0.29 (PYTHON-1036) +* Expand Driver SSL Documentation (PYTHON-740) Deprecations ------------ diff --git a/docs/security.rst b/docs/security.rst index 1a5a3b1a22..f0d7d0f85c 100644 --- a/docs/security.rst +++ b/docs/security.rst @@ -58,30 +58,183 @@ SSL --- SSL should be used when client encryption is enabled in Cassandra. -Versions 3.17.0 and higher -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To enable SSL you will need to set :attr:`.Cluster.ssl_context` to a -``ssl.SSLContext`` instance. Optionally, you can also set :attr:`.Cluster.ssl_options` +With version 3.17.0 and higher, you will need to set :attr:`.Cluster.ssl_context` to a +``ssl.SSLContext`` instance to enable SSL. Optionally, you can also set :attr:`.Cluster.ssl_options` to a dict of options. These will be passed as kwargs to ``ssl.SSLContext.wrap_socket()`` when new sockets are created. -For example: +The following examples assume you have generated your cassandra certificate and +keystore files with these intructions: + +* `Setup SSL Cert `_ + +It might be also useful to learn about the different levels of identity verification to understand the examples: + +* `Using SSL in DSE drivers `_ + +SSL Configuration Examples +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +No identity verification +++++++++++++++++++++++++ + +No identity verification at all. Note that this is not recommended for for production deployments. + +The cassandra configuration:: + + client_encryption_options: + enabled: true + keystore: /path/to/127.0.0.1.keystore + keystore_password: myStorePass + require_client_auth: false + +The driver configuration: .. code-block:: python - from cassandra.cluster import Cluster + from cassandra.cluster import Cluster, Session + from ssl import SSLContext, PROTOCOL_TLSv1 + + ssl_context = SSLContext(PROTOCOL_TLSv1) + + cluster = Cluster(['127.0.0.1'], ssl_context=ssl_context) + session = cluster.connect() + +Client verifies server +++++++++++++++++++++++ + +Ensure the python driver verifies the identity of the server. + +The cassandra configuration:: + + client_encryption_options: + enabled: true + keystore: /path/to/127.0.0.1.keystore + keystore_password: myStorePass + require_client_auth: false + +For the driver configuration, it's very important to set `ssl_context.verify_mode` +to `CERT_REQUIRED`. Otherwise, the loaded verify certificate will have no effect: + +.. code-block:: python + + from cassandra.cluster import Cluster, Session from ssl import SSLContext, PROTOCOL_TLSv1, CERT_REQUIRED ssl_context = SSLContext(PROTOCOL_TLSv1) + ssl_context.load_verify_locations('/path/to/rootca.crt') ssl_context.verify_mode = CERT_REQUIRED - ssl_context.load_verify_locations('/path/to/my/ca.certs') - cluster = Cluster(ssl_context=ssl_context) + cluster = Cluster(['127.0.0.1'], ssl_context=ssl_context) + session = cluster.connect() -This is only an example to show how to pass the ssl parameters. Consider reading -the `python ssl documentation `_ for -your configuration. +Additionally, you can also verify the `hostname` of the server: + +.. code-block:: python + + from cassandra.cluster import Cluster, Session + from ssl import SSLContext, PROTOCOL_TLSv1, CERT_REQUIRED + + ssl_context = SSLContext(PROTOCOL_TLSv1) + ssl_context.load_verify_locations('/path/to/rootca.crt') + ssl_context.verify_mode = CERT_REQUIRED + ssl_context.check_hostname = True + ssl_options = {'server_hostname': '127.0.0.1'} + + cluster = Cluster(['127.0.0.1'], ssl_context=ssl_context, ssl_options=ssl_options) + session = cluster.connect() + +Server verifies client +++++++++++++++++++++++ + +If cassandra is configured to verify clients (require_client_auth), you need to generate +SSL key and certificate files for them. + +The cassandra configuration:: + + client_encryption_options: + enabled: true + keystore: /path/to/127.0.0.1.keystore + keystore_password: myStorePass + require_client_auth: true + truststore: /path/to/dse-truststore.jks + truststore_password: myStorePass + +For Python, you need to generate certificate in PEM format. First, create a certificate +conf file: + +.. code-block:: bash + + cat > gen_client_cert.conf <`_ +for more details about the SSLContext configuration. Versions 3.16.0 and lower ^^^^^^^^^^^^^^^^^^^^^^^^^ From 3b305d5b8fb75ad210d1dcd454e5e7aba9fd0644 Mon Sep 17 00:00:00 2001 From: Jim Witschey <3753769+mambocab@users.noreply.github.com> Date: Wed, 13 Feb 2019 13:22:39 -0500 Subject: [PATCH 1476/2005] Docs improvements and corrections Co-Authored-By: aboudreault --- docs/security.rst | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/docs/security.rst b/docs/security.rst index f0d7d0f85c..2d0a3830a5 100644 --- a/docs/security.rst +++ b/docs/security.rst @@ -58,12 +58,18 @@ SSL --- SSL should be used when client encryption is enabled in Cassandra. -With version 3.17.0 and higher, you will need to set :attr:`.Cluster.ssl_context` to a +To give you as much control as possible over your SSL configuration, our SSL +API takes a user-created `SSLContext` instance from the Python standard library. +These docs will include some examples for how to achieve common configurations, +but the `ssl.SSLContext` documentation gives a more complete description of +what is possible. + +To enable SSL with version 3.17.0 and higher, you will need to set :attr:`.Cluster.ssl_context` to a ``ssl.SSLContext`` instance to enable SSL. Optionally, you can also set :attr:`.Cluster.ssl_options` to a dict of options. These will be passed as kwargs to ``ssl.SSLContext.wrap_socket()`` when new sockets are created. -The following examples assume you have generated your cassandra certificate and +The following examples assume you have generated your Cassandra certificate and keystore files with these intructions: * `Setup SSL Cert `_ @@ -74,13 +80,13 @@ It might be also useful to learn about the different levels of identity verifica SSL Configuration Examples ^^^^^^^^^^^^^^^^^^^^^^^^^^ - +Here, we'll describe the server and driver configuration necessary to set up SSL to meet various goals, such as the client verifying the server and the server verifying the client. We'll also include Python code demonstrating how to use servers and drivers configured in these ways. No identity verification ++++++++++++++++++++++++ No identity verification at all. Note that this is not recommended for for production deployments. -The cassandra configuration:: +The Cassandra configuration:: client_encryption_options: enabled: true @@ -105,7 +111,7 @@ Client verifies server Ensure the python driver verifies the identity of the server. -The cassandra configuration:: +The Cassandra configuration:: client_encryption_options: enabled: true @@ -128,7 +134,7 @@ to `CERT_REQUIRED`. Otherwise, the loaded verify certificate will have no effect cluster = Cluster(['127.0.0.1'], ssl_context=ssl_context) session = cluster.connect() -Additionally, you can also verify the `hostname` of the server: +Additionally, you can also force the driver to verify the `hostname` of the server by passing additional options to `ssl_context.wrap_socket` via the `ssl_options` kwarg: .. code-block:: python @@ -147,8 +153,8 @@ Additionally, you can also verify the `hostname` of the server: Server verifies client ++++++++++++++++++++++ -If cassandra is configured to verify clients (require_client_auth), you need to generate -SSL key and certificate files for them. +If Cassandra is configured to verify clients (``require_client_auth``), you need to generate +SSL key and certificate files. The cassandra configuration:: @@ -160,7 +166,7 @@ The cassandra configuration:: truststore: /path/to/dse-truststore.jks truststore_password: myStorePass -For Python, you need to generate certificate in PEM format. First, create a certificate +The Python ``ssl`` APIs require the certificate in PEM format. First, create a certificate conf file: .. code-block:: bash @@ -193,7 +199,7 @@ And generate the client signed certificate: openssl x509 -req -CA ${ROOT_CA_BASE_NAME}.crt -CAkey ${ROOT_CA_BASE_NAME}.key -passin pass:${ROOT_CERT_PASS} \ -in client.csr -out client.crt_signed -days ${CERT_VALIDITY} -CAcreateserial -The driver configuration: +Finally, you can use that configuration with the following driver code: .. code-block:: python @@ -212,10 +218,10 @@ The driver configuration: Server verifies client and client verifies server +++++++++++++++++++++++++++++++++++++++++++++++++ -See the previous section for the cassandra configuration and preparing +See the previous section for examples of Cassandra configuration and preparing the client certificates. -The driver configuration: +The following driver code specifies that the connection should use two-way verification: .. code-block:: python @@ -233,8 +239,8 @@ The driver configuration: session = cluster.connect() -Consider reading the `python ssl documentation `_ -for more details about the SSLContext configuration. +The driver uses ``SSLContext`` directly to give you many other options in configuring SSL. Consider reading the `Python SSL documentation `_ +for more details about ``SSLContext`` configuration. Versions 3.16.0 and lower ^^^^^^^^^^^^^^^^^^^^^^^^^ From d78d2b0b81523bd2d22722fbe2bdd71aa16213b9 Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 15 Feb 2019 16:05:24 -0500 Subject: [PATCH 1477/2005] fix whitespace in SSL docs --- docs/security.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/security.rst b/docs/security.rst index 2d0a3830a5..275cde4395 100644 --- a/docs/security.rst +++ b/docs/security.rst @@ -81,6 +81,7 @@ It might be also useful to learn about the different levels of identity verifica SSL Configuration Examples ^^^^^^^^^^^^^^^^^^^^^^^^^^ Here, we'll describe the server and driver configuration necessary to set up SSL to meet various goals, such as the client verifying the server and the server verifying the client. We'll also include Python code demonstrating how to use servers and drivers configured in these ways. + No identity verification ++++++++++++++++++++++++ From b57269ce6bfa509127ae538a17007befcb96624b Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 15 Feb 2019 14:08:06 -0500 Subject: [PATCH 1478/2005] version 3.17.0: changelog & version --- CHANGELOG.rst | 2 +- cassandra/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 15a696bebd..730d96163f 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,6 @@ 3.17.0 ====== -NOT RELEASED +February 19, 2019 Features -------- diff --git a/cassandra/__init__.py b/cassandra/__init__.py index dd3edfd49d..ead66b576a 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 16, 0, 'post0') +__version_info__ = (3, 17, 0) __version__ = '.'.join(map(str, __version_info__)) From 66bdb8e5a35f1af9c2966dad6eda6e5f5b7b303f Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Fri, 15 Feb 2019 14:08:59 -0500 Subject: [PATCH 1479/2005] version 3.17.0: docs --- docs.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs.yaml b/docs.yaml index 9f4d182fca..2cf56dd88b 100644 --- a/docs.yaml +++ b/docs.yaml @@ -22,6 +22,8 @@ sections: # build extensions like libev CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: + - name: '3.17' + ref: 38e359e1 - name: '3.16' ref: '3.16.0' - name: '3.15' From bc4ccaa65a7e2de56fcd3e026fa5e069e3009baf Mon Sep 17 00:00:00 2001 From: Jim Witschey Date: Tue, 19 Feb 2019 16:15:40 -0500 Subject: [PATCH 1480/2005] add post0 --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index ead66b576a..b3397994b4 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 17, 0) +__version_info__ = (3, 17, 0, 'post0') __version__ = '.'.join(map(str, __version_info__)) From c1993fb0c04d8a59621f958c1a493e1752e7e1ac Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 21 Feb 2019 08:49:53 -0500 Subject: [PATCH 1481/2005] change some h4 for bold titles --- docs/security.rst | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/docs/security.rst b/docs/security.rst index 275cde4395..0353091eb7 100644 --- a/docs/security.rst +++ b/docs/security.rst @@ -82,8 +82,7 @@ SSL Configuration Examples ^^^^^^^^^^^^^^^^^^^^^^^^^^ Here, we'll describe the server and driver configuration necessary to set up SSL to meet various goals, such as the client verifying the server and the server verifying the client. We'll also include Python code demonstrating how to use servers and drivers configured in these ways. -No identity verification -++++++++++++++++++++++++ +**No identity verification** No identity verification at all. Note that this is not recommended for for production deployments. @@ -107,8 +106,7 @@ The driver configuration: cluster = Cluster(['127.0.0.1'], ssl_context=ssl_context) session = cluster.connect() -Client verifies server -++++++++++++++++++++++ +**Client verifies server** Ensure the python driver verifies the identity of the server. @@ -151,8 +149,7 @@ Additionally, you can also force the driver to verify the `hostname` of the serv cluster = Cluster(['127.0.0.1'], ssl_context=ssl_context, ssl_options=ssl_options) session = cluster.connect() -Server verifies client -++++++++++++++++++++++ +**Server verifies client** If Cassandra is configured to verify clients (``require_client_auth``), you need to generate SSL key and certificate files. @@ -216,8 +213,7 @@ Finally, you can use that configuration with the following driver code: session = cluster.connect() -Server verifies client and client verifies server -+++++++++++++++++++++++++++++++++++++++++++++++++ +**Server verifies client and client verifies server** See the previous section for examples of Cassandra configuration and preparing the client certificates. From 777e1b1095db4df59365301084a4a4f5705a7de4 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 22 Feb 2019 09:41:10 -0500 Subject: [PATCH 1482/2005] Abstract node connection information --- CHANGELOG.rst | 9 + build.yaml | 12 + cassandra/cluster.py | 153 +++++++------ cassandra/connection.py | 214 ++++++++++++++---- cassandra/io/asyncioreactor.py | 6 +- cassandra/io/asyncorereactor.py | 8 +- cassandra/io/eventletreactor.py | 6 +- cassandra/io/geventreactor.py | 6 +- cassandra/io/libevreactor.py | 7 +- cassandra/io/twistedreactor.py | 12 +- cassandra/metadata.py | 27 ++- cassandra/policies.py | 12 +- cassandra/pool.py | 52 +++-- docs/api/cassandra/cluster.rst | 2 + docs/api/cassandra/connection.rst | 6 + docs/api/cassandra/metadata.rst | 2 +- tests/integration/simulacron/test_endpoint.py | 119 ++++++++++ tests/integration/standard/test_cluster.py | 11 +- tests/integration/standard/test_connection.py | 4 +- tests/integration/standard/test_metadata.py | 11 +- tests/integration/standard/test_policies.py | 7 +- tests/unit/io/test_twistedreactor.py | 4 +- tests/unit/io/utils.py | 5 +- tests/unit/test_connection.py | 70 +++++- tests/unit/test_control_connection.py | 41 ++-- tests/unit/test_host_connection_pool.py | 14 +- tests/unit/test_policies.py | 75 +++--- 27 files changed, 655 insertions(+), 240 deletions(-) create mode 100644 tests/integration/simulacron/test_endpoint.py diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 730d96163f..e77d8249f0 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,12 @@ +3.18.0 +====== +NOT RELEASED + +Features +-------- + +* Abstract Host Connection information (PYTHON-1079) + 3.17.0 ====== February 19, 2019 diff --git a/build.yaml b/build.yaml index 890b614800..373ba08fc3 100644 --- a/build.yaml +++ b/build.yaml @@ -43,6 +43,18 @@ schedules: - python: [3.4, 3.6] - cassandra: ['2.0', '2.1', '3.0'] + commit_branches_dev: + schedule: per_commit + branches: + include: [/dev.*/] + env_vars: | + EVENT_LOOP_MANAGER='libev' + EXCLUDE_LONG=1 + matrix: + exclude: + - python: [2.7, 3.4, 3.6] + - cassandra: ['2.0', '2.1', '2.2', '3.0', 'test-dse'] + weekly_libev: schedule: 0 10 * * 6 branches: diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 291df1e48b..9f71b6f295 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -46,7 +46,8 @@ SchemaTargetType, DriverException, ProtocolVersion, UnresolvableContactPoints) from cassandra.connection import (ConnectionException, ConnectionShutdown, - ConnectionHeartbeat, ProtocolVersionUnsupported) + ConnectionHeartbeat, ProtocolVersionUnsupported, + EndPoint, DefaultEndPoint, DefaultEndPointFactory) from cassandra.cqltypes import UserType from cassandra.encoder import Encoder from cassandra.protocol import (QueryMessage, ResultMessage, @@ -799,6 +800,13 @@ def token_metadata_enabled(self): def token_metadata_enabled(self, enabled): self.control_connection._token_meta_enabled = bool(enabled) + endpoint_factory = None + """ + An :class:`~.connection.EndPointFactory` instance to use internally when creating + a socket connection to a node. You can ignore this unless you need a special + connection mechanism. + """ + profile_manager = None _config_mode = _ConfigMode.UNCOMMITTED @@ -855,7 +863,8 @@ def __init__(self, timestamp_generator=None, idle_heartbeat_timeout=30, no_compact=False, - ssl_context=None): + ssl_context=None, + endpoint_factory=None): """ ``executor_threads`` defines the number of threads in a pool for handling asynchronous tasks such as extablishing connection pools or refreshing metadata. @@ -878,8 +887,19 @@ def __init__(self, self.port = port - self.contact_points_resolved = _resolve_contact_points(self.contact_points, - self.port) + self.endpoint_factory = endpoint_factory or DefaultEndPointFactory() + self.endpoint_factory.configure(self) + + raw_contact_points = [cp for cp in self.contact_points if not isinstance(cp, EndPoint)] + self.endpoints_resolved = [cp for cp in self.contact_points if isinstance(cp, EndPoint)] + + try: + self.endpoints_resolved += [DefaultEndPoint(address, self.port) + for address in _resolve_contact_points(raw_contact_points, self.port)] + except UnresolvableContactPoints: + # rethrow if no EndPoint was provided + if not self.endpoints_resolved: + raise self.compression = compression @@ -1260,21 +1280,21 @@ def set_max_connections_per_host(self, host_distance, max_connections): "when using protocol_version 1 or 2.") self._max_connections_per_host[host_distance] = max_connections - def connection_factory(self, address, *args, **kwargs): + def connection_factory(self, endpoint, *args, **kwargs): """ Called to create a new connection with proper configuration. Intended for internal use only. """ - kwargs = self._make_connection_kwargs(address, kwargs) - return self.connection_class.factory(address, self.connect_timeout, *args, **kwargs) + kwargs = self._make_connection_kwargs(endpoint, kwargs) + return self.connection_class.factory(endpoint, self.connect_timeout, *args, **kwargs) def _make_connection_factory(self, host, *args, **kwargs): - kwargs = self._make_connection_kwargs(host.address, kwargs) - return partial(self.connection_class.factory, host.address, self.connect_timeout, *args, **kwargs) + kwargs = self._make_connection_kwargs(host.endpoint, kwargs) + return partial(self.connection_class.factory, host.endpoint, self.connect_timeout, *args, **kwargs) - def _make_connection_kwargs(self, address, kwargs_dict): + def _make_connection_kwargs(self, endpoint, kwargs_dict): if self._auth_provider_callable: - kwargs_dict.setdefault('authenticator', self._auth_provider_callable(address)) + kwargs_dict.setdefault('authenticator', self._auth_provider_callable(endpoint.address)) kwargs_dict.setdefault('port', self.port) kwargs_dict.setdefault('compression', self.compression) @@ -1289,7 +1309,7 @@ def _make_connection_kwargs(self, address, kwargs_dict): return kwargs_dict - def protocol_downgrade(self, host_addr, previous_version): + def protocol_downgrade(self, host_endpoint, previous_version): if self._protocol_version_explicit: raise DriverException("ProtocolError returned from server while using explicitly set client protocol_version %d" % (previous_version,)) @@ -1300,7 +1320,7 @@ def protocol_downgrade(self, host_addr, previous_version): log.warning("Downgrading core protocol version from %d to %d for %s. " "To avoid this, it is best practice to explicitly set Cluster(protocol_version) to the version supported by your cluster. " - "http://datastax.github.io/python-driver/api/cassandra/cluster.html#cassandra.cluster.Cluster.protocol_version", self.protocol_version, new_version, host_addr) + "http://datastax.github.io/python-driver/api/cassandra/cluster.html#cassandra.cluster.Cluster.protocol_version", self.protocol_version, new_version, host_endpoint) self.protocol_version = new_version def connect(self, keyspace=None, wait_for_all_pools=False): @@ -1318,8 +1338,8 @@ def connect(self, keyspace=None, wait_for_all_pools=False): self.contact_points, self.protocol_version) self.connection_class.initialize_reactor() _register_cluster_shutdown(self) - for address in self.contact_points_resolved: - host, new = self.add_host(address, signal=False) + for endpoint in self.endpoints_resolved: + host, new = self.add_host(endpoint, signal=False) if new: host.set_up() for listener in self.listeners: @@ -1335,8 +1355,8 @@ def connect(self, keyspace=None, wait_for_all_pools=False): self.control_connection.connect() # we set all contact points up for connecting, but we won't infer state after this - for address in self.contact_points_resolved: - h = self.metadata.get_host(address) + for endpoint in self.endpoints_resolved: + h = self.metadata.get_host(endpoint) if h and self.profile_manager.distance(h) == HostDistance.IGNORED: h.is_up = None @@ -1683,7 +1703,7 @@ def signal_connection_failure(self, host, connection_exc, is_host_addition, expe self.on_down(host, is_host_addition, expect_host_to_be_down) return is_down - def add_host(self, address, datacenter=None, rack=None, signal=True, refresh_nodes=True): + def add_host(self, endpoint, datacenter=None, rack=None, signal=True, refresh_nodes=True): """ Called when adding initial contact points and when the control connection subsequently discovers a new node. @@ -1691,7 +1711,7 @@ def add_host(self, address, datacenter=None, rack=None, signal=True, refresh_nod the metadata. Intended for internal use only. """ - host, new = self.metadata.add_or_return_host(Host(address, self.conviction_policy_factory, datacenter, rack)) + host, new = self.metadata.add_or_return_host(Host(endpoint, self.conviction_policy_factory, datacenter, rack)) if new and signal: log.info("New Cassandra host %r discovered", host) self.on_add(host, refresh_nodes) @@ -1762,8 +1782,8 @@ def get_control_connection_host(self): Returns the control connection host metadata. """ connection = self.control_connection._connection - host = connection.host if connection else None - return self.metadata.get_host(host) if host else None + endpoint = connection.endpoint if connection else None + return self.metadata.get_host(endpoint) if endpoint else None def refresh_schema_metadata(self, max_schema_agreement_wait=None): """ @@ -1895,7 +1915,7 @@ def _prepare_all_queries(self, host): log.debug("Preparing all known prepared statements against host %s", host) connection = None try: - connection = self.connection_factory(host.address) + connection = self.connection_factory(host.endpoint) statements = self._prepared_statements.values() if ProtocolVersion.uses_keyspace_flag(self.protocol_version): # V5 protocol and higher, no need to set the keyspace @@ -2565,6 +2585,7 @@ def run_add_or_renew_pool(): if self._protocol_version >= 3: new_pool = HostConnection(host, distance, self) else: + # TODO remove host pool again ??? new_pool = HostConnectionPool(host, distance, self) except AuthenticationFailed as auth_exc: conn_exc = ConnectionException(str(auth_exc), host=host) @@ -2808,9 +2829,9 @@ class ControlConnection(object): """ _SELECT_PEERS = "SELECT * FROM system.peers" - _SELECT_PEERS_NO_TOKENS = "SELECT peer, data_center, rack, rpc_address, release_version, schema_version FROM system.peers" + _SELECT_PEERS_NO_TOKENS = "SELECT host_id, peer, data_center, rack, rpc_address, release_version, schema_version FROM system.peers" _SELECT_LOCAL = "SELECT * FROM system.local WHERE key='local'" - _SELECT_LOCAL_NO_TOKENS = "SELECT cluster_name, data_center, rack, partitioner, release_version, schema_version FROM system.local WHERE key='local'" + _SELECT_LOCAL_NO_TOKENS = "SELECT host_id, rpc_address, cluster_name, data_center, rack, partitioner, release_version, schema_version FROM system.local WHERE key='local'" _SELECT_SCHEMA_PEERS = "SELECT peer, rpc_address, schema_version FROM system.peers" _SELECT_SCHEMA_LOCAL = "SELECT schema_version FROM system.local WHERE key='local'" @@ -2894,11 +2915,11 @@ def _reconnect_internal(self): try: return self._try_connect(host) except ConnectionException as exc: - errors[host.address] = exc + errors[host.endpoint] = exc log.warning("[control connection] Error connecting to %s:", host, exc_info=True) self._cluster.signal_connection_failure(host, exc, is_host_addition=False) except Exception as exc: - errors[host.address] = exc + errors[host.endpoint] = exc log.warning("[control connection] Error connecting to %s:", host, exc_info=True) if self._is_shutdown: raise DriverException("[control connection] Reconnection in progress during shutdown") @@ -2914,13 +2935,13 @@ def _try_connect(self, host): while True: try: - connection = self._cluster.connection_factory(host.address, is_control_connection=True) + connection = self._cluster.connection_factory(host.endpoint, is_control_connection=True) if self._is_shutdown: connection.close() raise DriverException("Reconnecting during shutdown") break except ProtocolVersionUnsupported as e: - self._cluster.protocol_downgrade(host.address, e.startup_version) + self._cluster.protocol_downgrade(host.endpoint, e.startup_version) log.debug("[control connection] Established new connection %r, " "registering watchers and refreshing schema and topology", @@ -3093,7 +3114,7 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, found_hosts = set() if local_result.results: - found_hosts.add(connection.host) + found_hosts.add(connection.endpoint) local_rows = dict_factory(*(local_result.results)) local_row = local_rows[0] cluster_name = local_row["cluster_name"] @@ -3102,13 +3123,15 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, partitioner = local_row.get("partitioner") tokens = local_row.get("tokens") - host = self._cluster.metadata.get_host(connection.host) + host = self._cluster.metadata.get_host(connection.endpoint) if host: datacenter = local_row.get("data_center") rack = local_row.get("rack") self._update_location_info(host, datacenter, rack) + host.host_id = local_row.get("host_id") host.listen_address = local_row.get("listen_address") host.broadcast_address = local_row.get("broadcast_address") + host.broadcast_rpc_address = self._address_from_row(local_row) host.release_version = local_row.get("release_version") host.dse_version = local_row.get("dse_version") host.dse_workload = local_row.get("workload") @@ -3121,29 +3144,31 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, # any new nodes, so we need this additional check. (See PYTHON-90) should_rebuild_token_map = force_token_rebuild or self._cluster.metadata.partitioner is None for row in peers_result: - addr = self._rpc_from_peer_row(row) + endpoint = self._cluster.endpoint_factory.create(row) tokens = row.get("tokens", None) if 'tokens' in row and not tokens: # it was selected, but empty - log.warning("Excluding host (%s) with no tokens in system.peers table of %s." % (addr, connection.host)) + log.warning("Excluding host (%s) with no tokens in system.peers table of %s." % (endpoint, connection.endpoint)) continue - if addr in found_hosts: - log.warning("Found multiple hosts with the same rpc_address (%s). Excluding peer %s", addr, row.get("peer")) + if endpoint in found_hosts: + log.warning("Found multiple hosts with the same endpoint (%s). Excluding peer %s", endpoint, row.get("peer")) continue - found_hosts.add(addr) + found_hosts.add(endpoint) - host = self._cluster.metadata.get_host(addr) + host = self._cluster.metadata.get_host(endpoint) datacenter = row.get("data_center") rack = row.get("rack") if host is None: - log.debug("[control connection] Found new host to connect to: %s", addr) - host, _ = self._cluster.add_host(addr, datacenter, rack, signal=True, refresh_nodes=False) + log.debug("[control connection] Found new host to connect to: %s", endpoint) + host, _ = self._cluster.add_host(endpoint, datacenter, rack, signal=True, refresh_nodes=False) should_rebuild_token_map = True else: should_rebuild_token_map |= self._update_location_info(host, datacenter, rack) + host.host_id = row.get("host_id") host.broadcast_address = row.get("peer") + host.broadcast_rpc_address = self._address_from_row(row) host.release_version = row.get("release_version") host.dse_version = row.get("dse_version") host.dse_workload = row.get("workload") @@ -3152,7 +3177,7 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, token_map[host] = tokens for old_host in self._cluster.metadata.all_hosts(): - if old_host.address != connection.host and old_host.address not in found_hosts: + if old_host.endpoint.address != connection.endpoint and old_host.endpoint not in found_hosts: should_rebuild_token_map = True log.debug("[control connection] Removing host not found in peers metadata: %r", old_host) self._cluster.remove_host(old_host) @@ -3188,30 +3213,27 @@ def _delay_for_event_type(self, event_type, delay_window): self._event_schedule_times[event_type] = this_time return delay - def _refresh_nodes_if_not_up(self, addr): + def _refresh_nodes_if_not_up(self, host): """ Used to mitigate refreshes for nodes that are already known. Some versions of the server send superfluous NEW_NODE messages in addition to UP events. """ - host = self._cluster.metadata.get_host(addr) if not host or not host.is_up: self.refresh_node_list_and_token_map() def _handle_topology_change(self, event): change_type = event["change_type"] - addr = self._translate_address(event["address"][0]) + host = self._cluster.metadata.get_host(event["address"][0]) if change_type == "NEW_NODE" or change_type == "MOVED_NODE": if self._topology_event_refresh_window >= 0: delay = self._delay_for_event_type('topology_change', self._topology_event_refresh_window) - self._cluster.scheduler.schedule_unique(delay, self._refresh_nodes_if_not_up, addr) + self._cluster.scheduler.schedule_unique(delay, self._refresh_nodes_if_not_up, host) elif change_type == "REMOVED_NODE": - host = self._cluster.metadata.get_host(addr) self._cluster.scheduler.schedule_unique(0, self._cluster.remove_host, host) def _handle_status_change(self, event): change_type = event["change_type"] - addr = self._translate_address(event["address"][0]) - host = self._cluster.metadata.get_host(addr) + host = self._cluster.metadata.get_host(event["address"][0]) if change_type == "UP": delay = self._delay_for_event_type('status_change', self._status_event_refresh_window) if host is None: @@ -3228,9 +3250,6 @@ def _handle_status_change(self, event): # this will be run by the scheduler self._cluster.on_down(host, is_host_addition=False) - def _translate_address(self, addr): - return self._cluster.address_translator.translate(addr) - def _handle_schema_change(self, event): if self._schema_event_refresh_window < 0: return @@ -3259,7 +3278,7 @@ def wait_for_schema_agreement(self, connection=None, preloaded_results=None, wai peers_result = preloaded_results[0] local_result = preloaded_results[1] - schema_mismatches = self._get_schema_mismatches(peers_result, local_result, connection.host) + schema_mismatches = self._get_schema_mismatches(peers_result, local_result, connection.endpoint) if schema_mismatches is None: return True @@ -3287,7 +3306,7 @@ def wait_for_schema_agreement(self, connection=None, preloaded_results=None, wai else: raise - schema_mismatches = self._get_schema_mismatches(peers_result, local_result, connection.host) + schema_mismatches = self._get_schema_mismatches(peers_result, local_result, connection.endpoint) if schema_mismatches is None: return True @@ -3296,7 +3315,7 @@ def wait_for_schema_agreement(self, connection=None, preloaded_results=None, wai elapsed = self._time.time() - start log.warning("Node %s is reporting a schema disagreement: %s", - connection.host, schema_mismatches) + connection.endpoint, schema_mismatches) return False def _get_schema_mismatches(self, peers_result, local_result, local_address): @@ -3312,10 +3331,10 @@ def _get_schema_mismatches(self, peers_result, local_result, local_address): schema_ver = row.get('schema_version') if not schema_ver: continue - addr = self._rpc_from_peer_row(row) - peer = self._cluster.metadata.get_host(addr) + endpoint = self._cluster.endpoint_factory.create(row) + peer = self._cluster.metadata.get_host(endpoint) if peer and peer.is_up is not False: - versions[schema_ver].add(addr) + versions[schema_ver].add(endpoint) if len(versions) == 1: log.debug("[control connection] Schemas match") @@ -3323,12 +3342,20 @@ def _get_schema_mismatches(self, peers_result, local_result, local_address): return dict((version, list(nodes)) for version, nodes in six.iteritems(versions)) - def _rpc_from_peer_row(self, row): - addr = row.get("rpc_address") + def _address_from_row(self, row): + """ + Parse the broadcast rpc address from a row and return it untranslated. + """ + addr = None + if "rpc_address" in row: + addr = row.get("rpc_address") # peers and local + if "native_transport_address" in row: + addr = row.get("native_transport_address") if not addr or addr in ["0.0.0.0", "::"]: addr = row.get("peer") - return self._translate_address(addr) + return addr + def _signal_error(self): with self._lock: if self._is_shutdown: @@ -3337,7 +3364,7 @@ def _signal_error(self): # try just signaling the cluster, as this will trigger a reconnect # as part of marking the host down if self._connection and self._connection.is_defunct: - host = self._cluster.metadata.get_host(self._connection.host) + host = self._cluster.metadata.get_host(self._connection.endpoint) # host may be None if it's already been removed, but that indicates # that errors have already been reported, so we're fine if host: @@ -3355,7 +3382,7 @@ def on_up(self, host): def on_down(self, host): conn = self._connection - if conn and conn.host == host.address and \ + if conn and conn.endpoint == host.endpoint and \ self._reconnection_handler is None: log.debug("[control connection] Control connection host (%s) is " "considered down, starting reconnection", host) @@ -3368,7 +3395,7 @@ def on_add(self, host, refresh_nodes=True): def on_remove(self, host): c = self._connection - if c and c.host == host.address: + if c and c.endpoint == host.endpoint: log.debug("[control connection] Control connection host (%s) is being removed. Reconnecting", host) # refresh will be done on reconnect self.reconnect() @@ -3636,11 +3663,11 @@ def _on_timeout(self, _attempts=0): errors = self._errors if not errors: if self.is_schema_agreed: - key = self._current_host.address if self._current_host else 'no host queried before timeout' + key = self._current_host.endpoint if self._current_host else 'no host queried before timeout' errors = {key: "Client request timeout. See Session.execute[_async](timeout)"} else: connection = self.session.cluster.control_connection._connection - host = connection.host if connection else 'unknown' + host = connection.endpoint if connection else 'unknown' errors = {host: "Request timed out while waiting for schema agreement. See Session.execute[_async](timeout) and Cluster.max_schema_agreement_wait."} self._set_final_exception(OperationTimedOut(errors, self._current_host)) diff --git a/cassandra/connection.py b/cassandra/connection.py index 18a9381389..2de0797cb9 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -15,7 +15,7 @@ from __future__ import absolute_import # to enable import io from stdlib from collections import defaultdict, deque import errno -from functools import wraps, partial +from functools import wraps, partial, total_ordering from heapq import heappush, heappop import io import logging @@ -118,6 +118,111 @@ def decompress(byts): frame_header_v3 = struct.Struct('>BhBi') +class EndPoint(object): + """ + Represents the information to connect to a cassandra node. + """ + + @property + def address(self): + """ + The IP address of the node. This is the RPC address the driver uses when connecting to the node + """ + raise NotImplementedError() + + @property + def port(self): + """ + The port of the node. + """ + raise NotImplementedError() + + @property + def ssl_options(self): + """ + SSL options specific to this endpoint. + """ + return None + + def resolve(self): + """ + Resolve the endpoint to an address/port. This is called + only on socket connection. + """ + raise NotImplementedError() + + +class EndPointFactory(object): + + cluster = None + + def configure(self, cluster): + """ + This is called by the cluster during its initialization. + """ + self.cluster = cluster + return self + + def create(self, row): + """ + Create an EndPoint from a system.peers row. + """ + raise NotImplementedError() + + +@total_ordering +class DefaultEndPoint(EndPoint): + """ + Default EndPoint implementation, basically just an address and port. + """ + + def __init__(self, address, port=9042): + self._address = address + self._port = port + + @property + def address(self): + return self._address + + @property + def port(self): + return self._port + + def resolve(self): + return self._address, self._port + + def __eq__(self, other): + return isinstance(other, DefaultEndPoint) and \ + self.address == other.address and self.port == other.port + + def __hash__(self): + return hash((self.address, self.port)) + + def __lt__(self, other): + return (self.address, self.port) < (other.address, other.port) + + def __str__(self): + return str("%s:%d" % (self.address, self.port)) + + def __repr__(self): + return "<%s: %s:%d>" % (self.__class__.__name__, self.address, self.port) + + +class DefaultEndPointFactory(EndPointFactory): + + def create(self, row): + addr = None + if "rpc_address" in row: + addr = row.get("rpc_address") + if "native_transport_address" in row: + addr = row.get("native_transport_address") + if not addr or addr in ["0.0.0.0", "::"]: + addr = row.get("peer") + + # create the endpoint with the translated address + return DefaultEndPoint(self.cluster.address_translator.translate(addr), 9042) # will eventually support port + + class _Frame(object): def __init__(self, version, flags, stream, opcode, body_offset, end_pos): self.version = version @@ -141,7 +246,6 @@ def __str__(self): return "ver({0}); flags({1:04b}); stream({2}); op({3}); offset({4}); len({5})".format(self.version, self.flags, self.stream, self.opcode, self.body_offset, self.end_pos - self.body_offset) - NONBLOCKING = (errno.EAGAIN, errno.EWOULDBLOCK) @@ -151,9 +255,13 @@ class ConnectionException(Exception): or the connection was already closed or defunct. """ - def __init__(self, message, host=None): + def __init__(self, message, endpoint=None): Exception.__init__(self, message) - self.host = host + self.endpoint = endpoint + + @property + def host(self): + return self.endpoint.address class ConnectionShutdown(ConnectionException): @@ -167,9 +275,9 @@ class ProtocolVersionUnsupported(ConnectionException): """ Server rejected startup message due to unsupported protocol version """ - def __init__(self, host, startup_version): - msg = "Unsupported protocol version on %s: %d" % (host, startup_version) - super(ProtocolVersionUnsupported, self).__init__(msg, host) + def __init__(self, endpoint, startup_version): + msg = "Unsupported protocol version on %s: %d" % (endpoint, startup_version) + super(ProtocolVersionUnsupported, self).__init__(msg, endpoint) self.startup_version = startup_version @@ -224,6 +332,7 @@ class Connection(object): compressor = None decompressor = None + endpoint = None ssl_options = None ssl_context = None last_error = None @@ -276,8 +385,10 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, cql_version=None, protocol_version=ProtocolVersion.MAX_SUPPORTED, is_control_connection=False, user_type_map=None, connect_timeout=None, allow_beta_protocol_version=False, no_compact=False, ssl_context=None): - self.host = host - self.port = port + + # TODO next major rename host to endpoint and remove port kwarg. + self.endpoint = host if isinstance(host, EndPoint) else DefaultEndPoint(host, port) + self.authenticator = authenticator self.ssl_options = ssl_options.copy() if ssl_options else None self.ssl_context = ssl_context @@ -300,6 +411,10 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, if not getattr(ssl, 'match_hostname', None): raise RuntimeError("ssl_options specify 'check_hostname', but ssl.match_hostname is not provided. " "Patch or upgrade Python to use this option.") + self.ssl.options.update(self.endpoint.ssl_options or {}) + elif self.endpoint.ssl_options: + self.ssl_options = self.endpoint.ssl_options + if protocol_version >= 3: self.max_request_id = min(self.max_in_flight - 1, (2 ** 15) - 1) @@ -316,6 +431,14 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self.lock = RLock() self.connected_event = Event() + @property + def host(self): + return self.endpoint.address + + @property + def port(self): + return self.endpoint.port + @classmethod def initialize_reactor(cls): """ @@ -337,7 +460,7 @@ def create_timer(cls, timeout, callback): raise NotImplementedError() @classmethod - def factory(cls, host, timeout, *args, **kwargs): + def factory(cls, endpoint, timeout, *args, **kwargs): """ A factory function which returns connections which have succeeded in connecting and are ready for service (or @@ -345,12 +468,12 @@ def factory(cls, host, timeout, *args, **kwargs): """ start = time.time() kwargs['connect_timeout'] = timeout - conn = cls(host, *args, **kwargs) + conn = cls(endpoint, *args, **kwargs) elapsed = time.time() - start conn.connected_event.wait(timeout - elapsed) if conn.last_error: if conn.is_unsupported_proto_version: - raise ProtocolVersionUnsupported(host, conn.protocol_version) + raise ProtocolVersionUnsupported(endpoint, conn.protocol_version) raise conn.last_error elif not conn.connected_event.is_set(): conn.close() @@ -360,9 +483,10 @@ def factory(cls, host, timeout, *args, **kwargs): def _connect_socket(self): sockerr = None - addresses = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM) + inet_address, port = self.endpoint.resolve() + addresses = socket.getaddrinfo(inet_address, port, socket.AF_UNSPEC, socket.SOCK_STREAM) if not addresses: - raise ConnectionException("getaddrinfo returned empty list for %s" % (self.host,)) + raise ConnectionException("getaddrinfo returned empty list for %s" % (self.endpoint,)) for (af, socktype, proto, canonname, sockaddr) in addresses: try: self._socket = self._socket_impl.socket(af, socktype, proto) @@ -377,7 +501,7 @@ def _connect_socket(self): self._socket.connect(sockaddr) self._socket.settimeout(None) if self._check_hostname: - ssl.match_hostname(self._socket.getpeercert(), self.host) + ssl.match_hostname(self._socket.getpeercert(), self.endpoint.address) sockerr = None break except socket.error as err: @@ -406,10 +530,10 @@ def defunct(self, exc): # if we are not handling an exception, just use the passed exception, and don't try to format exc_info with the message if any(exc_info): log.debug("Defuncting connection (%s) to %s:", - id(self), self.host, exc_info=exc_info) + id(self), self.endpoint, exc_info=exc_info) else: log.debug("Defuncting connection (%s) to %s: %s", - id(self), self.host, exc) + id(self), self.endpoint, exc) self.last_error = exc self.close() @@ -432,7 +556,7 @@ def try_callback(cb): except Exception: log.warning("Ignoring unhandled exception while erroring requests for a " "failed connection (%s) to host %s:", - id(self), self.host, exc_info=True) + id(self), self.endpoint, exc_info=True) # run first callback from this thread to ensure pool state before leaving cb, _, _ = requests.popitem()[1] @@ -479,9 +603,9 @@ def handle_pushed(self, response): def send_msg(self, msg, request_id, cb, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message, result_metadata=None): if self.is_defunct: - raise ConnectionShutdown("Connection to %s is defunct" % self.host) + raise ConnectionShutdown("Connection to %s is defunct" % self.endpoint) elif self.is_closed: - raise ConnectionShutdown("Connection to %s is closed" % self.host) + raise ConnectionShutdown("Connection to %s is closed" % self.endpoint) # queue the decoder function with the request # this allows us to inject custom functions per request to encode, decode messages @@ -655,12 +779,12 @@ def _send_options_message(self): if self.cql_version is None and (not self.compression or not locally_supported_compressions): log.debug("Not sending options message for new connection(%s) to %s " "because compression is disabled and a cql version was not " - "specified", id(self), self.host) + "specified", id(self), self.endpoint) self._compressor = None self.cql_version = DEFAULT_CQL_VERSION self._send_startup_message(no_compact=self.no_compact) else: - log.debug("Sending initial options message for new connection (%s) to %s", id(self), self.host) + log.debug("Sending initial options message for new connection (%s) to %s", id(self), self.endpoint) self.send_msg(OptionsMessage(), self.get_request_id(), self._handle_options_response) @defunct_on_error @@ -679,7 +803,7 @@ def _handle_options_response(self, options_response): % (options_response,)) log.debug("Received options response on new connection (%s) from %s", - id(self), self.host) + id(self), self.endpoint) supported_cql_versions = options_response.cql_versions remote_supported_compressions = options_response.options['COMPRESSION'] @@ -709,7 +833,7 @@ def _handle_options_response(self, options_response): if self.compression not in remote_supported_compressions: raise ProtocolError( "The requested compression type (%s) is not supported by the Cassandra server at %s" - % (self.compression, self.host)) + % (self.compression, self.endpoint)) compression_type = self.compression else: # our locally supported compressions are ordered to prefer @@ -750,13 +874,13 @@ def _handle_startup_response(self, startup_response, did_authenticate=False): "authentication (configured authenticator = %s)", self.authenticator.__class__.__name__) - log.debug("Got ReadyMessage on new connection (%s) from %s", id(self), self.host) + log.debug("Got ReadyMessage on new connection (%s) from %s", id(self), self.endpoint) if self._compressor: self.compressor = self._compressor self.connected_event.set() elif isinstance(startup_response, AuthenticateMessage): log.debug("Got AuthenticateMessage on new connection (%s) from %s: %s", - id(self), self.host, startup_response.authenticator) + id(self), self.endpoint, startup_response.authenticator) if self.authenticator is None: raise AuthenticationFailed('Remote end requires authentication.') @@ -774,17 +898,17 @@ def _handle_startup_response(self, startup_response, did_authenticate=False): self.send_msg(AuthResponseMessage(initial_response), self.get_request_id(), self._handle_auth_response) elif isinstance(startup_response, ErrorMessage): log.debug("Received ErrorMessage on new connection (%s) from %s: %s", - id(self), self.host, startup_response.summary_msg()) + id(self), self.endpoint, startup_response.summary_msg()) if did_authenticate: raise AuthenticationFailed( "Failed to authenticate to %s: %s" % - (self.host, startup_response.summary_msg())) + (self.endpoint, startup_response.summary_msg())) else: raise ConnectionException( "Failed to initialize new connection to %s: %s" - % (self.host, startup_response.summary_msg())) + % (self.endpoint, startup_response.summary_msg())) elif isinstance(startup_response, ConnectionShutdown): - log.debug("Connection to %s was closed during the startup handshake", (self.host)) + log.debug("Connection to %s was closed during the startup handshake", (self.endpoint)) raise startup_response else: msg = "Unexpected response during Connection setup: %r" @@ -809,17 +933,17 @@ def _handle_auth_response(self, auth_response): self.send_msg(msg, self.get_request_id(), self._handle_auth_response) elif isinstance(auth_response, ErrorMessage): log.debug("Received ErrorMessage on new connection (%s) from %s: %s", - id(self), self.host, auth_response.summary_msg()) + id(self), self.endpoint, auth_response.summary_msg()) raise AuthenticationFailed( "Failed to authenticate to %s: %s" % - (self.host, auth_response.summary_msg())) + (self.endpoint, auth_response.summary_msg())) elif isinstance(auth_response, ConnectionShutdown): - log.debug("Connection to %s was closed during the authentication process", self.host) + log.debug("Connection to %s was closed during the authentication process", self.endpoint) raise auth_response else: msg = "Unexpected response during Connection authentication to %s: %r" - log.error(msg, self.host, auth_response) - raise ProtocolError(msg % (self.host, auth_response)) + log.error(msg, self.endpoint, auth_response) + raise ProtocolError(msg % (self.endpoint, auth_response)) def set_keyspace_blocking(self, keyspace): if not keyspace or keyspace == self.keyspace: @@ -834,7 +958,7 @@ def set_keyspace_blocking(self, keyspace): raise ire.to_exception() except Exception as exc: conn_exc = ConnectionException( - "Problem while setting keyspace: %r" % (exc,), self.host) + "Problem while setting keyspace: %r" % (exc,), self.endpoint) self.defunct(conn_exc) raise conn_exc @@ -842,7 +966,7 @@ def set_keyspace_blocking(self, keyspace): self.keyspace = keyspace else: conn_exc = ConnectionException( - "Problem while setting keyspace: %r" % (result,), self.host) + "Problem while setting keyspace: %r" % (result,), self.endpoint) self.defunct(conn_exc) raise conn_exc @@ -890,7 +1014,7 @@ def process_result(result): callback(self, result.to_exception()) else: callback(self, self.defunct(ConnectionException( - "Problem while setting keyspace: %r" % (result,), self.host))) + "Problem while setting keyspace: %r" % (result,), self.endpoint))) # We've incremented self.in_flight above, so we "have permission" to # acquire a new request id @@ -912,7 +1036,7 @@ def __str__(self): elif self.is_closed: status = " (closed)" - return "<%s(%r) %s:%d%s>" % (self.__class__.__name__, id(self), self.host, self.port, status) + return "<%s(%r) %s%s>" % (self.__class__.__name__, id(self), self.endpoint, status) __repr__ = __str__ @@ -973,7 +1097,7 @@ def __init__(self, connection, owner): self.connection = connection self.owner = owner log.debug("Sending options message heartbeat on idle connection (%s) %s", - id(connection), connection.host) + id(connection), connection.endpoint) with connection.lock: if connection.in_flight <= connection.max_request_id: connection.in_flight += 1 @@ -988,12 +1112,12 @@ def wait(self, timeout): if self._exception: raise self._exception else: - raise OperationTimedOut("Connection heartbeat timeout after %s seconds" % (timeout,), self.connection.host) + raise OperationTimedOut("Connection heartbeat timeout after %s seconds" % (timeout,), self.connection.endpoint) def _options_callback(self, response): if isinstance(response, SupportedMessage): log.debug("Received options response on connection (%s) from %s", - id(self.connection), self.connection.host) + id(self.connection), self.connection.endpoint) else: if isinstance(response, ConnectionException): self._exception = response @@ -1034,13 +1158,13 @@ def run(self): futures.append(HeartbeatFuture(connection, owner)) except Exception as e: log.warning("Failed sending heartbeat message on connection (%s) to %s", - id(connection), connection.host) + id(connection), connection.endpoint) failed_connections.append((connection, owner, e)) else: connection.reset_idle() else: log.debug("Cannot send heartbeat message on connection (%s) to %s", - id(connection), connection.host) + id(connection), connection.endpoint) # make sure the owner sees this defunt/closed connection owner.return_connection(connection) self._raise_if_stopped() @@ -1059,7 +1183,7 @@ def run(self): connection.reset_idle() except Exception as e: log.warning("Heartbeat failed for connection (%s) to %s", - id(connection), connection.host) + id(connection), connection.endpoint) failed_connections.append((f.connection, f.owner, e)) timeout = self._timeout - (time.time() - start_time) diff --git a/cassandra/io/asyncioreactor.py b/cassandra/io/asyncioreactor.py index f8835e897c..b386388da3 100644 --- a/cassandra/io/asyncioreactor.py +++ b/cassandra/io/asyncioreactor.py @@ -136,7 +136,7 @@ def close(self): @asyncio.coroutine def _close(self): - log.debug("Closing connection (%s) to %s" % (id(self), self.host)) + log.debug("Closing connection (%s) to %s" % (id(self), self.endpoint)) if self._write_watcher: self._write_watcher.cancel() if self._read_watcher: @@ -146,11 +146,11 @@ def _close(self): self._loop.remove_reader(self._socket.fileno()) self._socket.close() - log.debug("Closed socket to %s" % (self.host,)) + log.debug("Closed socket to %s" % (self.endpoint,)) if not self.is_defunct: self.error_all_requests( - ConnectionShutdown("Connection to %s was closed" % self.host)) + ConnectionShutdown("Connection to %s was closed" % self.endpoint)) # don't leave in-progress operations hanging self.connected_event.set() diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 2d30cdeedf..fa21cbe108 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -362,22 +362,22 @@ def close(self): return self.is_closed = True - log.debug("Closing connection (%s) to %s", id(self), self.host) + log.debug("Closing connection (%s) to %s", id(self), self.endpoint) self._writable = False self._readable = False # We don't have to wait for this to be closed, we can just schedule it self.create_timer(0, partial(asyncore.dispatcher.close, self)) - log.debug("Closed socket to %s", self.host) + log.debug("Closed socket to %s", self.endpoint) if not self.is_defunct: self.error_all_requests( - ConnectionShutdown("Connection to %s was closed" % self.host)) + ConnectionShutdown("Connection to %s was closed" % self.endpoint)) #This happens when the connection is shutdown while waiting for the ReadyMessage if not self.connected_event.is_set(): - self.last_error = ConnectionShutdown("Connection to %s was closed" % self.host) + self.last_error = ConnectionShutdown("Connection to %s was closed" % self.endpoint) # don't leave in-progress operations hanging self.connected_event.set() diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index bc01f755c9..2b16ef69af 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -95,7 +95,7 @@ def close(self): return self.is_closed = True - log.debug("Closing connection (%s) to %s" % (id(self), self.host)) + log.debug("Closing connection (%s) to %s" % (id(self), self.endpoint)) cur_gthread = eventlet.getcurrent() @@ -105,11 +105,11 @@ def close(self): self._write_watcher.kill() if self._socket: self._socket.close() - log.debug("Closed socket to %s" % (self.host,)) + log.debug("Closed socket to %s" % (self.endpoint,)) if not self.is_defunct: self.error_all_requests( - ConnectionShutdown("Connection to %s was closed" % self.host)) + ConnectionShutdown("Connection to %s was closed" % self.endpoint)) # don't leave in-progress operations hanging self.connected_event.set() diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index bbf9e83eb0..ebc664d485 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -86,18 +86,18 @@ def close(self): return self.is_closed = True - log.debug("Closing connection (%s) to %s" % (id(self), self.host)) + log.debug("Closing connection (%s) to %s" % (id(self), self.endpoint)) if self._read_watcher: self._read_watcher.kill(block=False) if self._write_watcher: self._write_watcher.kill(block=False) if self._socket: self._socket.close() - log.debug("Closed socket to %s" % (self.host,)) + log.debug("Closed socket to %s" % (self.endpoint,)) if not self.is_defunct: self.error_all_requests( - ConnectionShutdown("Connection to %s was closed" % self.host)) + ConnectionShutdown("Connection to %s was closed" % self.endpoint)) # don't leave in-progress operations hanging self.connected_event.set() diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index afdf3b8137..1a38bd6143 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -20,7 +20,6 @@ import ssl from threading import Lock, Thread import time -import weakref from six.moves import range @@ -285,16 +284,16 @@ def close(self): return self.is_closed = True - log.debug("Closing connection (%s) to %s", id(self), self.host) + log.debug("Closing connection (%s) to %s", id(self), self.endpoint) _global_loop.connection_destroyed(self) self._socket.close() - log.debug("Closed socket to %s", self.host) + log.debug("Closed socket to %s", self.endpoint) # don't leave in-progress operations hanging if not self.is_defunct: self.error_all_requests( - ConnectionShutdown("Connection to %s was closed" % self.host)) + ConnectionShutdown("Connection to %s was closed" % self.endpoint)) def handle_write(self, watcher, revents, errno=None): if revents & libev.EV_ERROR: diff --git a/cassandra/io/twistedreactor.py b/cassandra/io/twistedreactor.py index 3611cdf851..1dbe9d8b5d 100644 --- a/cassandra/io/twistedreactor.py +++ b/cassandra/io/twistedreactor.py @@ -260,13 +260,13 @@ def add_connection(self): ) self.connector = reactor.connectSSL( - host=self.host, port=self.port, + host=self.endpoint.address, port=self.port, factory=TwistedConnectionClientFactory(self), - contextFactory=_SSLContextFactory(self.ssl_options, self._check_hostname, self.host), + contextFactory=_SSLContextFactory(self.ssl_options, self._check_hostname, self.endpoint.address), timeout=self.connect_timeout) else: self.connector = reactor.connectTCP( - host=self.host, port=self.port, + host=self.endpoint.address, port=self.port, factory=TwistedConnectionClientFactory(self), timeout=self.connect_timeout) @@ -289,13 +289,13 @@ def close(self): return self.is_closed = True - log.debug("Closing connection (%s) to %s", id(self), self.host) + log.debug("Closing connection (%s) to %s", id(self), self.endpoint) reactor.callFromThread(self.connector.disconnect) - log.debug("Closed socket to %s", self.host) + log.debug("Closed socket to %s", self.endpoint) if not self.is_defunct: self.error_all_requests( - ConnectionShutdown("Connection to %s was closed" % self.host)) + ConnectionShutdown("Connection to %s was closed" % self.endpoint)) # don't leave in-progress operations hanging self.connected_event.set() diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 377ea4dc85..4129784021 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -42,7 +42,7 @@ from cassandra.query import dict_factory, bind_params from cassandra.util import OrderedDict from cassandra.pool import HostDistance - +from cassandra.connection import EndPoint log = logging.getLogger(__name__) @@ -123,7 +123,7 @@ def export_schema_as_string(self): def refresh(self, connection, timeout, target_type=None, change_type=None, **kwargs): - server_version = self.get_host(connection.host).release_version + server_version = self.get_host(connection.endpoint).release_version parser = get_schema_parser(connection, server_version, timeout) if not target_type: @@ -317,17 +317,30 @@ def add_or_return_host(self, host): """ with self._hosts_lock: try: - return self._hosts[host.address], False + return self._hosts[host.endpoint], False except KeyError: - self._hosts[host.address] = host + self._hosts[host.endpoint] = host return host, True def remove_host(self, host): with self._hosts_lock: - return bool(self._hosts.pop(host.address, False)) + return bool(self._hosts.pop(host.endpoint, False)) + + def get_host(self, endpoint_or_address): + """ + Find a host in the metadata for a specific endpoint. If a string inet address is passed, + iterate all hosts to match the :attr:`~.pool.Host.broadcast_rpc_address` attribute. + """ + if not isinstance(endpoint_or_address, EndPoint): + return self._get_host_by_address(endpoint_or_address) + + return self._hosts.get(endpoint_or_address) - def get_host(self, address): - return self._hosts.get(address) + def _get_host_by_address(self, address): + for host in six.itervalues(self._hosts): + if host.broadcast_rpc_address == address: + return host + return None def all_hosts(self): """ diff --git a/cassandra/policies.py b/cassandra/policies.py index 540f5d3e2e..5d7cc757f7 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -230,7 +230,7 @@ def __init__(self, local_dc='', used_hosts_per_remote_dc=0): self.used_hosts_per_remote_dc = used_hosts_per_remote_dc self._dc_live_hosts = {} self._position = 0 - self._contact_points = [] + self._endpoints = [] LoadBalancingPolicy.__init__(self) def _dc(self, host): @@ -241,7 +241,9 @@ def populate(self, cluster, hosts): self._dc_live_hosts[dc] = tuple(set(dc_hosts)) if not self.local_dc: - self._contact_points = cluster.contact_points_resolved + self._endpoints = [ + endpoint + for endpoint in cluster.endpoints_resolved] self._position = randint(0, len(hosts) - 1) if hosts else 0 @@ -284,13 +286,13 @@ def on_up(self, host): # not worrying about threads because this will happen during # control connection startup/refresh if not self.local_dc and host.datacenter: - if host.address in self._contact_points: + if host.endpoint in self._endpoints: self.local_dc = host.datacenter log.info("Using datacenter '%s' for DCAwareRoundRobinPolicy (via host '%s'); " "if incorrect, please specify a local_dc to the constructor, " "or limit contact points to local cluster nodes" % - (self.local_dc, host.address)) - del self._contact_points + (self.local_dc, host.endpoint)) + del self._endpoints dc = self._dc(host) with self._hosts_lock: diff --git a/cassandra/pool.py b/cassandra/pool.py index 1d6bcf4e6e..61ffff7f42 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -28,7 +28,7 @@ from cassandra.util import WeakSet # NOQA from cassandra import AuthenticationFailed -from cassandra.connection import ConnectionException +from cassandra.connection import ConnectionException, EndPoint, DefaultEndPoint from cassandra.policies import HostDistance log = logging.getLogger(__name__) @@ -48,9 +48,9 @@ class Host(object): Represents a single Cassandra node. """ - address = None + endpoint = None """ - The IP address of the node. This is the RPC address the driver uses when connecting to the node + The :class:`~.connection.EndPoint` to connect to the node. """ broadcast_address = None @@ -60,6 +60,11 @@ class Host(object): :attr:`~.Cluster.token_metadata_enabled` is ``False``. """ + broadcast_rpc_address = None + """ + The broadcast rpc address of the node (`native_address` or `rpc_address`). + """ + listen_address = None """ listen address configured for the node, *if available*. This is only available in the ``system.local`` table for newer @@ -85,6 +90,11 @@ class Host(object): release_version as queried from the control connection system tables """ + host_id = None + """ + The unique identifier of the cassandra node + """ + dse_version = None """ dse_version as queried from the control connection system tables. Only populated when connecting to @@ -104,17 +114,23 @@ class Host(object): _currently_handling_node_up = False - def __init__(self, inet_address, conviction_policy_factory, datacenter=None, rack=None): - if inet_address is None: - raise ValueError("inet_address may not be None") + def __init__(self, endpoint, conviction_policy_factory, datacenter=None, rack=None, host_id=None): + if endpoint is None: + raise ValueError("endpoint may not be None") if conviction_policy_factory is None: raise ValueError("conviction_policy_factory may not be None") - self.address = inet_address + self.endpoint = endpoint if isinstance(endpoint, EndPoint) else DefaultEndPoint(endpoint) self.conviction_policy = conviction_policy_factory(self) + self.host_id = host_id self.set_location_info(datacenter, rack) self.lock = RLock() + @property + def address(self): + # backward compatibility + return self.endpoint.address + @property def datacenter(self): """ The datacenter the node is in. """ @@ -136,7 +152,7 @@ def set_location_info(self, datacenter, rack): def set_up(self): if not self.is_up: - log.debug("Host %s is now marked up", self.address) + log.debug("Host %s is now marked up", self.endpoint) self.conviction_policy.reset() self.is_up = True @@ -160,20 +176,20 @@ def get_and_set_reconnection_handler(self, new_handler): return old def __eq__(self, other): - return self.address == other.address + return self.endpoint == other.endpoint def __hash__(self): - return hash(self.address) + return hash(self.endpoint) def __lt__(self, other): - return self.address < other.address + return self.endpoint < other.endpoint def __str__(self): - return str(self.address) + return str(self.endpoint) def __repr__(self): dc = (" %s" % (self._datacenter,)) if self._datacenter else "" - return "<%s: %s%s>" % (self.__class__.__name__, self.address, dc) + return "<%s: %s%s>" % (self.__class__.__name__, self.endpoint, dc) class _ReconnectionHandler(object): @@ -329,7 +345,7 @@ def __init__(self, host, host_distance, session): return log.debug("Initializing connection for host %s", self.host) - self._connection = session.cluster.connection_factory(host.address) + self._connection = session.cluster.connection_factory(host.endpoint) self._keyspace = session.keyspace if self._keyspace: self._connection.set_keyspace_blocking(self._keyspace) @@ -399,12 +415,12 @@ def _replace(self, connection): log.debug("Replacing connection (%s) to %s", id(connection), self.host) try: - conn = self._session.cluster.connection_factory(self.host.address) + conn = self._session.cluster.connection_factory(self.host) if self._keyspace: conn.set_keyspace_blocking(self._keyspace) self._connection = conn except Exception: - log.warning("Failed reconnecting %s. Retrying." % (self.host.address,)) + log.warning("Failed reconnecting %s. Retrying." % (self.host.endpoint,)) self._session.submit(self._replace, connection) else: with self._lock: @@ -478,7 +494,7 @@ def __init__(self, host, host_distance, session): log.debug("Initializing new connection pool for host %s", self.host) core_conns = session.cluster.get_core_connections_per_host(host_distance) - self._connections = [session.cluster.connection_factory(host.address) + self._connections = [session.cluster.connection_factory(host.endpoint) for i in range(core_conns)] self._keyspace = session.keyspace @@ -582,7 +598,7 @@ def _add_conn_if_under_max(self): log.debug("Going to open new connection to host %s", self.host) try: - conn = self._session.cluster.connection_factory(self.host.address) + conn = self._session.cluster.connection_factory(self.host.endpoint) if self._keyspace: conn.set_keyspace_blocking(self._session.keyspace) self._next_trash_allowed_at = time.time() + _MIN_TRASH_INTERVAL diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 105ce31fa0..7fece18569 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -70,6 +70,8 @@ .. autoattribute:: timestamp_generator + .. autoattribute:: endpoint_factory + .. automethod:: connect .. automethod:: shutdown diff --git a/docs/api/cassandra/connection.rst b/docs/api/cassandra/connection.rst index 3e9851b1a3..49ee1ec20d 100644 --- a/docs/api/cassandra/connection.rst +++ b/docs/api/cassandra/connection.rst @@ -7,3 +7,9 @@ .. autoexception:: ConnectionShutdown () .. autoexception:: ConnectionBusy () .. autoexception:: ProtocolError () + +.. autoclass:: EndPoint + :members: + +.. autoclass:: EndPointFactory + :members: diff --git a/docs/api/cassandra/metadata.rst b/docs/api/cassandra/metadata.rst index 7a95141722..ed79d04f42 100644 --- a/docs/api/cassandra/metadata.rst +++ b/docs/api/cassandra/metadata.rst @@ -14,7 +14,7 @@ .. autoclass:: Metadata () :members: - :exclude-members: rebuild_schema, rebuild_token_map, add_host, remove_host, get_host + :exclude-members: rebuild_schema, rebuild_token_map, add_host, remove_host Schemas ------- diff --git a/tests/integration/simulacron/test_endpoint.py b/tests/integration/simulacron/test_endpoint.py new file mode 100644 index 0000000000..1af5995f94 --- /dev/null +++ b/tests/integration/simulacron/test_endpoint.py @@ -0,0 +1,119 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +from functools import total_ordering + +from cassandra.cluster import Cluster +from cassandra.connection import DefaultEndPoint, EndPoint, EndPointFactory +from tests.integration import (PROTOCOL_VERSION, requiressimulacron) +from tests.integration.simulacron import SimulacronCluster + + +@total_ordering +class AddressEndPoint(EndPoint): + + def __init__(self, address): + self._address = address + + @property + def address(self): + return self._address + + @property + def port(self): + return None + + def resolve(self): + return self._address, 9042 # connection purpose + + def __eq__(self, other): + return isinstance(other, AddressEndPoint) and \ + self.address == other.address + + def __hash__(self): + return hash(self.address) + + def __lt__(self, other): + return self.address < other.address + + def __str__(self): + return str("%s" % self.address) + + def __repr__(self): + return "<%s: %s>" % (self.__class__.__name__, self.address) + + +class AddressEndPointFactory(EndPointFactory): + + def create(self, row): + addr = None + if "rpc_address" in row: + addr = row.get("rpc_address") + if "native_transport_address" in row: + addr = row.get("native_transport_address") + if not addr or addr in ["0.0.0.0", "::"]: + addr = row.get("peer") + + return AddressEndPoint(addr) + + +@requiressimulacron +class EndPointTests(SimulacronCluster): + """ + Basic tests to validate the internal use of the EndPoint class. + + @since 3.18 + @jira_ticket PYTHON-1079 + @expected_result all the hosts are using the proper endpoint class + """ + + def test_default_endpoint(self): + hosts = self.cluster.metadata.all_hosts() + for host in hosts: + self.assertIsNotNone(host.endpoint) + self.assertIsInstance(host.endpoint, DefaultEndPoint) + self.assertEqual(host.address, host.endpoint.address) + self.assertEqual(host.broadcast_rpc_address, host.endpoint.address) + + self.assertIsInstance(self.cluster.control_connection._connection.endpoint, DefaultEndPoint) + self.assertIsNotNone(self.cluster.control_connection._connection.endpoint) + endpoints = [host.endpoint for host in hosts] + self.assertIn(self.cluster.control_connection._connection.endpoint, endpoints) + + def test_custom_endpoint(self): + cluster = Cluster( + contact_points=[AddressEndPoint('127.0.0.1')], + protocol_version=PROTOCOL_VERSION, + endpoint_factory=AddressEndPointFactory() + ) + cluster.connect(wait_for_all_pools=True) + + hosts = cluster.metadata.all_hosts() + for host in hosts: + self.assertIsNotNone(host.endpoint) + self.assertIsInstance(host.endpoint, AddressEndPoint) + self.assertEqual(str(host.endpoint), host.endpoint.address) + self.assertEqual(host.address, host.endpoint.address) + self.assertEqual(host.broadcast_rpc_address, host.endpoint.address) + + self.assertIsInstance(cluster.control_connection._connection.endpoint, AddressEndPoint) + self.assertIsNotNone(cluster.control_connection._connection.endpoint) + endpoints = [host.endpoint for host in hosts] + self.assertIn(cluster.control_connection._connection.endpoint, endpoints) + + cluster.shutdown() diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 9df8a3586c..768fa778cd 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -37,6 +37,7 @@ from cassandra.query import SimpleStatement, TraceUnavailable, tuple_factory from cassandra.auth import PlainTextAuthProvider, SaslAuthProvider from cassandra import connection +from cassandra.connection import DefaultEndPoint from tests import notwindows from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, CASSANDRA_VERSION, \ @@ -58,7 +59,7 @@ def __init__(self, ignored_hosts): RoundRobinPolicy.__init__(self) def distance(self, host): - if(str(host) in self.ignored_hosts): + if(host.address in self.ignored_hosts): return HostDistance.IGNORED else: return HostDistance.LOCAL @@ -80,7 +81,7 @@ def test_ignored_host_up(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION, load_balancing_policy=ingored_host_policy) session = cluster.connect() for host in cluster.metadata.all_hosts(): - if str(host) == "127.0.0.1": + if str(host) == "127.0.0.1:9042": self.assertTrue(host.is_up) else: self.assertIsNone(host.is_up) @@ -98,7 +99,7 @@ def test_host_resolution(self): @test_category connection """ cluster = Cluster(contact_points=["localhost"], protocol_version=PROTOCOL_VERSION, connect_timeout=1) - self.assertTrue('127.0.0.1' in cluster.contact_points_resolved) + self.assertTrue(DefaultEndPoint('127.0.0.1') in cluster.endpoints_resolved) @local def test_host_duplication(self): @@ -1264,7 +1265,7 @@ def test_address_translator_with_mixed_nodes(self): c = Cluster(address_translator=lh_ad) c.connect() for host in c.metadata.all_hosts(): - self.assertEqual(adder_map.get(str(host)), host.broadcast_address) + self.assertEqual(adder_map.get(host.address), host.broadcast_address) c.shutdown() @local @@ -1424,7 +1425,7 @@ def test_prepare_on_ignored_hosts(self): # the length of mock_calls will vary, but all should use the unignored # address for c in cluster.connection_factory.mock_calls: - self.assertEqual(call(unignored_address), c) + self.assertEqual(call(DefaultEndPoint(unignored_address)), c) cluster.shutdown() diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 3ce3f5e2b6..595fc12a2c 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -126,7 +126,7 @@ def tearDown(self): def test_heart_beat_timeout(self): # Setup a host listener to ensure the nodes don't go down test_listener = TestHostListener() - host = "127.0.0.1" + host = "127.0.0.1:9042" node = get_node(1) initial_connections = self.fetch_connections(host, self.cluster) self.assertNotEqual(len(initial_connections), 0) @@ -214,7 +214,7 @@ def get_connection(self, timeout=5): for i in range(5): try: contact_point = CASSANDRA_IP - conn = self.klass.factory(host=contact_point, timeout=timeout, protocol_version=PROTOCOL_VERSION) + conn = self.klass.factory(endpoint=contact_point, timeout=timeout, protocol_version=PROTOCOL_VERSION) break except (OperationTimedOut, NoHostAvailable, ConnectionShutdown) as e: continue diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index d12a655fa1..f50fa8c9e2 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -54,7 +54,7 @@ class HostMetatDataTests(BasicExistingKeyspaceUnitTestCase): @local def test_broadcast_listen_address(self): """ - Check to ensure that the broadcast and listen adresss is populated correctly + Check to ensure that the broadcast, rpc_address, listen adresss and host are is populated correctly @since 3.3 @jira_ticket PYTHON-332 @@ -62,15 +62,22 @@ def test_broadcast_listen_address(self): @test_category metadata """ - # All nodes should have the broadcast_address set + # All nodes should have the broadcast_address, rpc_address and host_id set for host in self.cluster.metadata.all_hosts(): self.assertIsNotNone(host.broadcast_address) + self.assertIsNotNone(host.broadcast_rpc_address) + self.assertIsNotNone(host.host_id) con = self.cluster.control_connection.get_connections()[0] local_host = con.host + # The control connection node should have the listen address set. listen_addrs = [host.listen_address for host in self.cluster.metadata.all_hosts()] self.assertTrue(local_host in listen_addrs) + # The control connection node should have the broadcast_rpc_address set. + rpc_addrs = [host.broadcast_rpc_address for host in self.cluster.metadata.all_hosts()] + self.assertTrue(local_host in rpc_addrs) + def test_host_release_version(self): """ Checks the hosts release version and validates that it is equal to the diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index 9540c7bc8e..5a985e5c92 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -21,6 +21,7 @@ from cassandra.policies import HostFilterPolicy, RoundRobinPolicy, SimpleConvictionPolicy, \ WhiteListRoundRobinPolicy from cassandra.pool import Host +from cassandra.connection import DefaultEndPoint from tests.integration import PROTOCOL_VERSION, local, use_singledc @@ -44,12 +45,12 @@ def test_predicate_changes(self): @test_category policy """ external_event = True - contact_point = "127.0.0.1" + contact_point = DefaultEndPoint("127.0.0.1") single_host = {Host(contact_point, SimpleConvictionPolicy)} - all_hosts = {Host("127.0.0.{}".format(i), SimpleConvictionPolicy) for i in (1, 2, 3)} + all_hosts = {Host(DefaultEndPoint("127.0.0.{}".format(i)), SimpleConvictionPolicy) for i in (1, 2, 3)} - predicate = lambda host: host.address == contact_point if external_event else True + predicate = lambda host: host.endpoint == contact_point if external_event else True cluster = Cluster((contact_point,), load_balancing_policy=HostFilterPolicy(RoundRobinPolicy(), predicate=predicate), protocol_version=PROTOCOL_VERSION, topology_event_refresh_window=0, diff --git a/tests/unit/io/test_twistedreactor.py b/tests/unit/io/test_twistedreactor.py index 125ac62537..f0a1d737bc 100644 --- a/tests/unit/io/test_twistedreactor.py +++ b/tests/unit/io/test_twistedreactor.py @@ -18,6 +18,8 @@ import unittest from mock import Mock, patch +from cassandra.connection import DefaultEndPoint + try: from twisted.test import proto_helpers from twisted.python.failure import Failure @@ -126,7 +128,7 @@ def setUp(self): self.reactor_run_patcher = patch('twisted.internet.reactor.run') self.mock_reactor_cft = self.reactor_cft_patcher.start() self.mock_reactor_run = self.reactor_run_patcher.start() - self.obj_ut = twistedreactor.TwistedConnection('1.2.3.4', + self.obj_ut = twistedreactor.TwistedConnection(DefaultEndPoint('1.2.3.4'), cql_version='3.0.1') def tearDown(self): diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index f342ceddbe..a066196695 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -17,6 +17,8 @@ from cassandra.marshal import int32_pack, uint8_pack, uint32_pack from cassandra.protocol import (write_stringmultimap, write_int, write_string, SupportedMessage, ReadyMessage, ServerError) +from cassandra.connection import DefaultEndPoint + from tests import is_monkey_patched from functools import wraps @@ -150,6 +152,7 @@ class TimerTestMixin(object): def setUp(self): self.connection = self.connection_class( + DefaultEndPoint("127.0.0.1"), connect_timeout=5 ) @@ -206,7 +209,7 @@ def make_header_prefix(self, message_class, version=2, stream_id=0): ])) def make_connection(self): - c = self.connection_class('1.2.3.4', cql_version='3.0.1', connect_timeout=5) + c = self.connection_class(DefaultEndPoint('1.2.3.4'), cql_version='3.0.1', connect_timeout=5) mocket = Mock() mocket.send.side_effect = lambda x: len(x) self.set_socket(c, mocket) diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index e35d1e8a8b..fccf854b89 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -26,7 +26,7 @@ from cassandra.cluster import Cluster from cassandra.connection import (Connection, HEADER_DIRECTION_TO_CLIENT, ProtocolError, locally_supported_compressions, ConnectionHeartbeat, _Frame, Timer, TimerManager, - ConnectionException) + ConnectionException, DefaultEndPoint) from cassandra.marshal import uint8_pack, uint32_pack, int32_pack from cassandra.protocol import (write_stringmultimap, write_int, write_string, SupportedMessage, ProtocolHandler) @@ -35,7 +35,7 @@ class ConnectionTest(unittest.TestCase): def make_connection(self): - c = Connection('1.2.3.4') + c = Connection(DefaultEndPoint('1.2.3.4')) c._socket = Mock() c._socket.send.side_effect = lambda x: len(x) return c @@ -74,6 +74,21 @@ def make_error_body(self, code, msg): def make_msg(self, header, body=""): return header + uint32_pack(len(body)) + body + def test_connection_endpoint(self): + endpoint = DefaultEndPoint('1.2.3.4') + c = Connection(endpoint) + self.assertEqual(c.endpoint, endpoint) + self.assertEqual(c.endpoint.address, endpoint.address) + + c = Connection(host=endpoint) # kwarg + self.assertEqual(c.endpoint, endpoint) + self.assertEqual(c.endpoint.address, endpoint.address) + + c = Connection('10.0.0.1') + endpoint = DefaultEndPoint('10.0.0.1') + self.assertEqual(c.endpoint, endpoint) + self.assertEqual(c.endpoint.address, endpoint.address) + def test_bad_protocol_version(self, *args): c = self.make_connection() c._requests = Mock() @@ -388,7 +403,8 @@ def test_timeout(self, *args): def send_msg(msg, req_id, msg_callback): pass - connection = Mock(spec=Connection, host='localhost', + # we used endpoint=X here because it's a mock and we need connection.endpoint to be set + connection = Mock(spec=Connection, endpoint=DefaultEndPoint('localhost'), max_request_id=127, lock=Lock(), in_flight=0, is_idle=True, @@ -406,7 +422,7 @@ def send_msg(msg, req_id, msg_callback): exc = connection.defunct.call_args_list[0][0][0] self.assertIsInstance(exc, OperationTimedOut) self.assertEqual(exc.errors, 'Connection heartbeat timeout after 0.05 seconds') - self.assertEqual(exc.last_host, 'localhost') + self.assertEqual(exc.last_host, DefaultEndPoint('localhost')) holder.return_connection.assert_has_calls( [call(connection)] * get_holders.call_count) @@ -425,3 +441,49 @@ def test_timer_collision(self): tm.add_timer(t2) # Prior to #466: "TypeError: unorderable types: Timer() < Timer()" tm.service_timeouts() + + +class DefaultEndPointTest(unittest.TestCase): + + def test_default_endpoint_properties(self): + endpoint = DefaultEndPoint('10.0.0.1') + self.assertEqual(endpoint.address, '10.0.0.1') + self.assertEqual(endpoint.port, 9042) + self.assertEqual(str(endpoint), '10.0.0.1:9042') + + endpoint = DefaultEndPoint('10.0.0.1', 8888) + self.assertEqual(endpoint.address, '10.0.0.1') + self.assertEqual(endpoint.port, 8888) + self.assertEqual(str(endpoint), '10.0.0.1:8888') + + def test_endpoint_equality(self): + self.assertEqual( + DefaultEndPoint('10.0.0.1'), + DefaultEndPoint('10.0.0.1') + ) + + self.assertEqual( + DefaultEndPoint('10.0.0.1'), + DefaultEndPoint('10.0.0.1', 9042) + ) + + self.assertNotEqual( + DefaultEndPoint('10.0.0.1'), + DefaultEndPoint('10.0.0.2') + ) + + self.assertNotEqual( + DefaultEndPoint('10.0.0.1'), + DefaultEndPoint('10.0.0.1', 0000) + ) + + def test_endpoint_resolve(self): + self.assertEqual( + DefaultEndPoint('10.0.0.1').resolve(), + ('10.0.0.1', 9042) + ) + + self.assertEqual( + DefaultEndPoint('10.0.0.1', 3232).resolve(), + ('10.0.0.1', 3232) + ) diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index 6cabd923c2..e76fbd2559 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -17,6 +17,8 @@ except ImportError: import unittest # noqa +import six + from concurrent.futures import ThreadPoolExecutor from mock import Mock, ANY, call @@ -24,6 +26,7 @@ from cassandra.protocol import ResultMessage, RESULT_KIND_ROWS from cassandra.cluster import ControlConnection, _Scheduler, ProfileManager, EXEC_PROFILE_DEFAULT, ExecutionProfile from cassandra.pool import Host +from cassandra.connection import EndPoint, DefaultEndPoint, DefaultEndPointFactory from cassandra.policies import (SimpleConvictionPolicy, RoundRobinPolicy, ConstantReconnectionPolicy, IdentityTranslator) @@ -34,9 +37,9 @@ class MockMetadata(object): def __init__(self): self.hosts = { - "192.168.1.0": Host("192.168.1.0", SimpleConvictionPolicy), - "192.168.1.1": Host("192.168.1.1", SimpleConvictionPolicy), - "192.168.1.2": Host("192.168.1.2", SimpleConvictionPolicy) + DefaultEndPoint("192.168.1.0"): Host(DefaultEndPoint("192.168.1.0"), SimpleConvictionPolicy), + DefaultEndPoint("192.168.1.1"): Host(DefaultEndPoint("192.168.1.1"), SimpleConvictionPolicy), + DefaultEndPoint("192.168.1.2"): Host(DefaultEndPoint("192.168.1.2"), SimpleConvictionPolicy) } for host in self.hosts.values(): host.set_up() @@ -45,8 +48,13 @@ def __init__(self): self.partitioner = None self.token_map = {} - def get_host(self, rpc_address): - return self.hosts.get(rpc_address) + def get_host(self, endpoint_or_address): + if not isinstance(endpoint_or_address, EndPoint): + for host in six.itervalues(self.hosts): + if host.address == endpoint_or_address: + return host + else: + return self.hosts.get(endpoint_or_address) def all_hosts(self): return self.hosts.values() @@ -73,9 +81,10 @@ def __init__(self): self.scheduler = Mock(spec=_Scheduler) self.executor = Mock(spec=ThreadPoolExecutor) self.profile_manager.profiles[EXEC_PROFILE_DEFAULT] = ExecutionProfile(RoundRobinPolicy()) + self.endpoint_factory = DefaultEndPointFactory().configure(self) - def add_host(self, address, datacenter, rack, signal=False, refresh_nodes=True): - host = Host(address, SimpleConvictionPolicy, datacenter, rack) + def add_host(self, endpoint, datacenter, rack, signal=False, refresh_nodes=True): + host = Host(endpoint, SimpleConvictionPolicy, datacenter, rack) self.added_hosts.append(host) return host @@ -94,7 +103,7 @@ class MockConnection(object): is_defunct = False def __init__(self): - self.host = "192.168.1.0" + self.endpoint = DefaultEndPoint("192.168.1.0") self.local_results = [ ["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens"], [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"]]] @@ -224,7 +233,7 @@ def test_wait_for_schema_agreement_skipping(self): # change the schema version on one of the existing entries self.connection.peer_results[1][1][3] = 'c' - self.cluster.metadata.get_host('192.168.1.1').is_up = False + self.cluster.metadata.get_host(DefaultEndPoint('192.168.1.1')).is_up = False self.assertTrue(self.control_connection.wait_for_schema_agreement()) self.assertEqual(self.time.clock, 0) @@ -236,8 +245,8 @@ def test_wait_for_schema_agreement_rpc_lookup(self): self.connection.peer_results[1].append( ["0.0.0.0", PEER_IP, "b", "dc1", "rack1", ["3", "103", "203"]] ) - host = Host("0.0.0.0", SimpleConvictionPolicy) - self.cluster.metadata.hosts[PEER_IP] = host + host = Host(DefaultEndPoint("0.0.0.0"), SimpleConvictionPolicy) + self.cluster.metadata.hosts[DefaultEndPoint("foobar")] = host host.is_up = False # even though the new host has a different schema version, it's @@ -348,7 +357,7 @@ def test_handle_topology_change(self): } self.cluster.scheduler.reset_mock() self.control_connection._handle_topology_change(event) - self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection._refresh_nodes_if_not_up, '1.2.3.4') + self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection._refresh_nodes_if_not_up, None) event = { 'change_type': 'REMOVED_NODE', @@ -364,7 +373,7 @@ def test_handle_topology_change(self): } self.cluster.scheduler.reset_mock() self.control_connection._handle_topology_change(event) - self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection._refresh_nodes_if_not_up, '1.2.3.4') + self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection._refresh_nodes_if_not_up, None) def test_handle_status_change(self): event = { @@ -382,7 +391,7 @@ def test_handle_status_change(self): } self.cluster.scheduler.reset_mock() self.control_connection._handle_status_change(event) - host = self.cluster.metadata.hosts['192.168.1.0'] + host = self.cluster.metadata.hosts[DefaultEndPoint('192.168.1.0')] self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.cluster.on_up, host) self.cluster.scheduler.schedule.reset_mock() @@ -399,7 +408,7 @@ def test_handle_status_change(self): 'address': ('192.168.1.0', 9000) } self.control_connection._handle_status_change(event) - host = self.cluster.metadata.hosts['192.168.1.0'] + host = self.cluster.metadata.hosts[DefaultEndPoint('192.168.1.0')] self.assertIs(host, self.cluster.down_host) def test_handle_schema_change(self): @@ -454,7 +463,7 @@ def test_refresh_disabled(self): cc_no_schema_refresh._handle_status_change(status_event) cc_no_schema_refresh._handle_topology_change(topo_event) cluster.scheduler.schedule_unique.assert_has_calls([call(ANY, cc_no_schema_refresh.refresh_node_list_and_token_map), - call(ANY, cc_no_schema_refresh._refresh_nodes_if_not_up, '1.2.3.4')]) + call(ANY, cc_no_schema_refresh._refresh_nodes_if_not_up, None)]) cc_no_topo_refresh = ControlConnection(cluster, 1, 0, -1, 0) cluster.scheduler.reset_mock() diff --git a/tests/unit/test_host_connection_pool.py b/tests/unit/test_host_connection_pool.py index 733ccb55a1..78af47651b 100644 --- a/tests/unit/test_host_connection_pool.py +++ b/tests/unit/test_host_connection_pool.py @@ -42,7 +42,7 @@ def test_borrow_and_return(self): session.cluster.connection_factory.return_value = conn pool = HostConnectionPool(host, HostDistance.LOCAL, session) - session.cluster.connection_factory.assert_called_once_with(host.address) + session.cluster.connection_factory.assert_called_once_with(host.endpoint) c, request_id = pool.borrow_connection(timeout=0.01) self.assertIs(c, conn) @@ -60,7 +60,7 @@ def test_failed_wait_for_connection(self): session.cluster.connection_factory.return_value = conn pool = HostConnectionPool(host, HostDistance.LOCAL, session) - session.cluster.connection_factory.assert_called_once_with(host.address) + session.cluster.connection_factory.assert_called_once_with(host.endpoint) pool.borrow_connection(timeout=0.01) self.assertEqual(1, conn.in_flight) @@ -78,7 +78,7 @@ def test_successful_wait_for_connection(self): session.cluster.connection_factory.return_value = conn pool = HostConnectionPool(host, HostDistance.LOCAL, session) - session.cluster.connection_factory.assert_called_once_with(host.address) + session.cluster.connection_factory.assert_called_once_with(host.endpoint) pool.borrow_connection(timeout=0.01) self.assertEqual(1, conn.in_flight) @@ -148,7 +148,7 @@ def test_spawn_when_at_max(self): session.cluster.get_max_connections_per_host.return_value = 2 pool = HostConnectionPool(host, HostDistance.LOCAL, session) - session.cluster.connection_factory.assert_called_once_with(host.address) + session.cluster.connection_factory.assert_called_once_with(host.endpoint) pool.borrow_connection(timeout=0.01) self.assertEqual(1, conn.in_flight) @@ -170,7 +170,7 @@ def test_return_defunct_connection(self): session.cluster.connection_factory.return_value = conn pool = HostConnectionPool(host, HostDistance.LOCAL, session) - session.cluster.connection_factory.assert_called_once_with(host.address) + session.cluster.connection_factory.assert_called_once_with(host.endpoint) pool.borrow_connection(timeout=0.01) conn.is_defunct = True @@ -189,7 +189,7 @@ def test_return_defunct_connection_on_down_host(self): session.cluster.connection_factory.return_value = conn pool = HostConnectionPool(host, HostDistance.LOCAL, session) - session.cluster.connection_factory.assert_called_once_with(host.address) + session.cluster.connection_factory.assert_called_once_with(host.endpoint) pool.borrow_connection(timeout=0.01) conn.is_defunct = True @@ -209,7 +209,7 @@ def test_return_closed_connection(self): session.cluster.connection_factory.return_value = conn pool = HostConnectionPool(host, HostDistance.LOCAL, session) - session.cluster.connection_factory.assert_called_once_with(host.address) + session.cluster.connection_factory.assert_called_once_with(host.endpoint) pool.borrow_connection(timeout=0.01) conn.is_closed = True diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 6f6c7c33c9..affcacebbc 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -37,6 +37,7 @@ LoadBalancingPolicy, ConvictionPolicy, ReconnectionPolicy, FallthroughRetryPolicy, IdentityTranslator, EC2MultiRegionTranslator, HostFilterPolicy) from cassandra.pool import Host +from cassandra.connection import DefaultEndPoint from cassandra.query import Statement from six.moves import xrange @@ -49,7 +50,7 @@ def test_non_implemented(self): """ policy = LoadBalancingPolicy() - host = Host("ip1", SimpleConvictionPolicy) + host = Host(DefaultEndPoint("ip1"), SimpleConvictionPolicy) host.set_location_info("dc1", "rack1") self.assertRaises(NotImplementedError, policy.distance, host) @@ -186,7 +187,7 @@ class DCAwareRoundRobinPolicyTest(unittest.TestCase): def test_no_remote(self): hosts = [] for i in range(4): - h = Host(i, SimpleConvictionPolicy) + h = Host(DefaultEndPoint(i), SimpleConvictionPolicy) h.set_location_info("dc1", "rack1") hosts.append(h) @@ -196,7 +197,7 @@ def test_no_remote(self): self.assertEqual(sorted(qplan), sorted(hosts)) def test_with_remotes(self): - hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)] + hosts = [Host(DefaultEndPoint(i), SimpleConvictionPolicy) for i in range(4)] for h in hosts[:2]: h.set_location_info("dc1", "rack1") for h in hosts[2:]: @@ -231,14 +232,14 @@ def test_with_remotes(self): def test_get_distance(self): policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0) - host = Host("ip1", SimpleConvictionPolicy) + host = Host(DefaultEndPoint("ip1"), SimpleConvictionPolicy) host.set_location_info("dc1", "rack1") policy.populate(Mock(), [host]) self.assertEqual(policy.distance(host), HostDistance.LOCAL) # used_hosts_per_remote_dc is set to 0, so ignore it - remote_host = Host("ip2", SimpleConvictionPolicy) + remote_host = Host(DefaultEndPoint("ip2"), SimpleConvictionPolicy) remote_host.set_location_info("dc2", "rack1") self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED) @@ -252,14 +253,14 @@ def test_get_distance(self): # since used_hosts_per_remote_dc is set to 1, only the first # remote host in dc2 will be REMOTE, the rest are IGNORED - second_remote_host = Host("ip3", SimpleConvictionPolicy) + second_remote_host = Host(DefaultEndPoint("ip3"), SimpleConvictionPolicy) second_remote_host.set_location_info("dc2", "rack1") policy.populate(Mock(), [host, remote_host, second_remote_host]) distances = set([policy.distance(remote_host), policy.distance(second_remote_host)]) self.assertEqual(distances, set([HostDistance.REMOTE, HostDistance.IGNORED])) def test_status_updates(self): - hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)] + hosts = [Host(DefaultEndPoint(i), SimpleConvictionPolicy) for i in range(4)] for h in hosts[:2]: h.set_location_info("dc1", "rack1") for h in hosts[2:]: @@ -270,11 +271,11 @@ def test_status_updates(self): policy.on_down(hosts[0]) policy.on_remove(hosts[2]) - new_local_host = Host(4, SimpleConvictionPolicy) + new_local_host = Host(DefaultEndPoint(4), SimpleConvictionPolicy) new_local_host.set_location_info("dc1", "rack1") policy.on_up(new_local_host) - new_remote_host = Host(5, SimpleConvictionPolicy) + new_remote_host = Host(DefaultEndPoint(5), SimpleConvictionPolicy) new_remote_host.set_location_info("dc9000", "rack1") policy.on_add(new_remote_host) @@ -297,7 +298,7 @@ def test_status_updates(self): self.assertEqual(qplan, []) def test_modification_during_generation(self): - hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)] + hosts = [Host(DefaultEndPoint(i), SimpleConvictionPolicy) for i in range(4)] for h in hosts[:2]: h.set_location_info("dc1", "rack1") for h in hosts[2:]: @@ -311,7 +312,7 @@ def test_modification_during_generation(self): # approach that changes specific things during known phases of the # generator. - new_host = Host(4, SimpleConvictionPolicy) + new_host = Host(DefaultEndPoint(4), SimpleConvictionPolicy) new_host.set_location_info("dc1", "rack1") # new local before iteration @@ -422,7 +423,7 @@ def test_modification_during_generation(self): policy.on_up(hosts[2]) policy.on_up(hosts[3]) - another_host = Host(5, SimpleConvictionPolicy) + another_host = Host(DefaultEndPoint(5), SimpleConvictionPolicy) another_host.set_location_info("dc3", "rack1") new_host.set_location_info("dc3", "rack1") @@ -456,7 +457,7 @@ def test_no_live_nodes(self): hosts = [] for i in range(4): - h = Host(i, SimpleConvictionPolicy) + h = Host(DefaultEndPoint(i), SimpleConvictionPolicy) h.set_location_info("dc1", "rack1") hosts.append(h) @@ -481,12 +482,12 @@ def test_no_nodes(self): self.assertEqual(qplan, []) def test_default_dc(self): - host_local = Host(1, SimpleConvictionPolicy, 'local') - host_remote = Host(2, SimpleConvictionPolicy, 'remote') - host_none = Host(1, SimpleConvictionPolicy) + host_local = Host(DefaultEndPoint(1), SimpleConvictionPolicy, 'local') + host_remote = Host(DefaultEndPoint(2), SimpleConvictionPolicy, 'remote') + host_none = Host(DefaultEndPoint(1), SimpleConvictionPolicy) # contact point is '1' - cluster = Mock(contact_points_resolved=[1]) + cluster = Mock(endpoints_resolved=[DefaultEndPoint(1)]) # contact DC first policy = DCAwareRoundRobinPolicy() @@ -526,7 +527,7 @@ class TokenAwarePolicyTest(unittest.TestCase): def test_wrap_round_robin(self): cluster = Mock(spec=Cluster) cluster.metadata = Mock(spec=Metadata) - hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)] + hosts = [Host(DefaultEndPoint(str(i)), SimpleConvictionPolicy) for i in range(4)] for host in hosts: host.set_up() @@ -557,7 +558,7 @@ def get_replicas(keyspace, packed_key): def test_wrap_dc_aware(self): cluster = Mock(spec=Cluster) cluster.metadata = Mock(spec=Metadata) - hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)] + hosts = [Host(DefaultEndPoint(str(i)), SimpleConvictionPolicy) for i in range(4)] for host in hosts: host.set_up() for h in hosts[:2]: @@ -608,7 +609,7 @@ def test_get_distance(self): """ policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0)) - host = Host("ip1", SimpleConvictionPolicy) + host = Host(DefaultEndPoint("ip1"), SimpleConvictionPolicy) host.set_location_info("dc1", "rack1") policy.populate(self.FakeCluster(), [host]) @@ -616,7 +617,7 @@ def test_get_distance(self): self.assertEqual(policy.distance(host), HostDistance.LOCAL) # used_hosts_per_remote_dc is set to 0, so ignore it - remote_host = Host("ip2", SimpleConvictionPolicy) + remote_host = Host(DefaultEndPoint("ip2"), SimpleConvictionPolicy) remote_host.set_location_info("dc2", "rack1") self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED) @@ -630,7 +631,7 @@ def test_get_distance(self): # since used_hosts_per_remote_dc is set to 1, only the first # remote host in dc2 will be REMOTE, the rest are IGNORED - second_remote_host = Host("ip3", SimpleConvictionPolicy) + second_remote_host = Host(DefaultEndPoint("ip3"), SimpleConvictionPolicy) second_remote_host.set_location_info("dc2", "rack1") policy.populate(self.FakeCluster(), [host, remote_host, second_remote_host]) distances = set([policy.distance(remote_host), policy.distance(second_remote_host)]) @@ -641,7 +642,7 @@ def test_status_updates(self): Same test as DCAwareRoundRobinPolicyTest.test_status_updates() """ - hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)] + hosts = [Host(DefaultEndPoint(i), SimpleConvictionPolicy) for i in range(4)] for h in hosts[:2]: h.set_location_info("dc1", "rack1") for h in hosts[2:]: @@ -652,11 +653,11 @@ def test_status_updates(self): policy.on_down(hosts[0]) policy.on_remove(hosts[2]) - new_local_host = Host(4, SimpleConvictionPolicy) + new_local_host = Host(DefaultEndPoint(4), SimpleConvictionPolicy) new_local_host.set_location_info("dc1", "rack1") policy.on_up(new_local_host) - new_remote_host = Host(5, SimpleConvictionPolicy) + new_remote_host = Host(DefaultEndPoint(5), SimpleConvictionPolicy) new_remote_host.set_location_info("dc9000", "rack1") policy.on_add(new_remote_host) @@ -679,7 +680,7 @@ def test_status_updates(self): self.assertEqual(qplan, []) def test_statement_keyspace(self): - hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)] + hosts = [Host(DefaultEndPoint(str(i)), SimpleConvictionPolicy) for i in range(4)] for host in hosts: host.set_up() @@ -769,7 +770,7 @@ def test_no_shuffle_if_given_no_routing_key(self): @patch('cassandra.policies.shuffle') def _assert_shuffle(self, patched_shuffle, keyspace, routing_key): - hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)] + hosts = [Host(DefaultEndPoint(str(i)), SimpleConvictionPolicy) for i in range(4)] for host in hosts: host.set_up() @@ -1224,7 +1225,7 @@ class WhiteListRoundRobinPolicyTest(unittest.TestCase): def test_hosts_with_hostname(self): hosts = ['localhost'] policy = WhiteListRoundRobinPolicy(hosts) - host = Host("127.0.0.1", SimpleConvictionPolicy) + host = Host(DefaultEndPoint("127.0.0.1"), SimpleConvictionPolicy) policy.populate(None, [host]) qplan = list(policy.make_query_plan()) @@ -1347,8 +1348,8 @@ def setUp(self): child_policy=Mock(name='child_policy', distance=Mock(name='distance')), predicate=lambda host: host.address == 'acceptme' ) - self.ignored_host = Host(inet_address='ignoreme', conviction_policy_factory=Mock()) - self.accepted_host = Host(inet_address='acceptme', conviction_policy_factory=Mock()) + self.ignored_host = Host(DefaultEndPoint('ignoreme'), conviction_policy_factory=Mock()) + self.accepted_host = Host(DefaultEndPoint('acceptme'), conviction_policy_factory=Mock()) def test_ignored_with_filter(self): self.assertEqual(self.hfp.distance(self.ignored_host), @@ -1422,7 +1423,7 @@ def test_query_plan_deferred_to_child(self): def test_wrap_token_aware(self): cluster = Mock(spec=Cluster) - hosts = [Host("127.0.0.{}".format(i), SimpleConvictionPolicy) for i in range(1, 6)] + hosts = [Host(DefaultEndPoint("127.0.0.{}".format(i)), SimpleConvictionPolicy) for i in range(1, 6)] for host in hosts: host.set_up() @@ -1447,13 +1448,13 @@ def get_replicas(keyspace, packed_key): query_plan = hfp.make_query_plan("keyspace", mocked_query) # First the not filtered replica, and then the rest of the allowed hosts ordered query_plan = list(query_plan) - self.assertEqual(query_plan[0], Host("127.0.0.2", SimpleConvictionPolicy)) - self.assertEqual(set(query_plan[1:]),{Host("127.0.0.3", SimpleConvictionPolicy), - Host("127.0.0.5", SimpleConvictionPolicy)}) + self.assertEqual(query_plan[0], Host(DefaultEndPoint("127.0.0.2"), SimpleConvictionPolicy)) + self.assertEqual(set(query_plan[1:]),{Host(DefaultEndPoint("127.0.0.3"), SimpleConvictionPolicy), + Host(DefaultEndPoint("127.0.0.5"), SimpleConvictionPolicy)}) def test_create_whitelist(self): cluster = Mock(spec=Cluster) - hosts = [Host("127.0.0.{}".format(i), SimpleConvictionPolicy) for i in range(1, 6)] + hosts = [Host(DefaultEndPoint("127.0.0.{}".format(i)), SimpleConvictionPolicy) for i in range(1, 6)] for host in hosts: host.set_up() @@ -1471,6 +1472,6 @@ def test_create_whitelist(self): mocked_query = Mock() query_plan = hfp.make_query_plan("keyspace", mocked_query) # Only the filtered replicas should be allowed - self.assertEqual(set(query_plan), {Host("127.0.0.1", SimpleConvictionPolicy), - Host("127.0.0.4", SimpleConvictionPolicy)}) + self.assertEqual(set(query_plan), {Host(DefaultEndPoint("127.0.0.1"), SimpleConvictionPolicy), + Host(DefaultEndPoint("127.0.0.4"), SimpleConvictionPolicy)}) From b96ca9b471d8bb9f246c861803ccefea2113a70e Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 21 Mar 2019 14:51:56 -0400 Subject: [PATCH 1483/2005] Some more fixes about host connection abstraction --- build.yaml | 2 +- cassandra/cluster.py | 31 ++++++++++++++++++++++++++----- cassandra/connection.py | 4 ++-- cassandra/pool.py | 8 +++++++- 4 files changed, 36 insertions(+), 9 deletions(-) diff --git a/build.yaml b/build.yaml index 373ba08fc3..b6657efed6 100644 --- a/build.yaml +++ b/build.yaml @@ -46,7 +46,7 @@ schedules: commit_branches_dev: schedule: per_commit branches: - include: [/dev.*/] + include: [/dev-python.*/] env_vars: | EVENT_LOOP_MANAGER='libev' EXCLUDE_LONG=1 diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 9f71b6f295..cfbe7a0626 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2831,7 +2831,9 @@ class ControlConnection(object): _SELECT_PEERS = "SELECT * FROM system.peers" _SELECT_PEERS_NO_TOKENS = "SELECT host_id, peer, data_center, rack, rpc_address, release_version, schema_version FROM system.peers" _SELECT_LOCAL = "SELECT * FROM system.local WHERE key='local'" - _SELECT_LOCAL_NO_TOKENS = "SELECT host_id, rpc_address, cluster_name, data_center, rack, partitioner, release_version, schema_version FROM system.local WHERE key='local'" + _SELECT_LOCAL_NO_TOKENS = "SELECT host_id, cluster_name, data_center, rack, partitioner, release_version, schema_version FROM system.local WHERE key='local'" + # Used only when token_metadata_enabled is set to False + _SELECT_LOCAL_NO_TOKENS_RPC_ADDRESS = "SELECT rpc_address FROM system.local WHERE key='local'" _SELECT_SCHEMA_PEERS = "SELECT peer, rpc_address, schema_version FROM system.peers" _SELECT_SCHEMA_LOCAL = "SELECT schema_version FROM system.local WHERE key='local'" @@ -2915,11 +2917,11 @@ def _reconnect_internal(self): try: return self._try_connect(host) except ConnectionException as exc: - errors[host.endpoint] = exc + errors[str(host.endpoint)] = exc log.warning("[control connection] Error connecting to %s:", host, exc_info=True) self._cluster.signal_connection_failure(host, exc, is_host_addition=False) except Exception as exc: - errors[host.endpoint] = exc + errors[str(host.endpoint)] = exc log.warning("[control connection] Error connecting to %s:", host, exc_info=True) if self._is_shutdown: raise DriverException("[control connection] Reconnection in progress during shutdown") @@ -3131,7 +3133,26 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, host.host_id = local_row.get("host_id") host.listen_address = local_row.get("listen_address") host.broadcast_address = local_row.get("broadcast_address") + host.broadcast_rpc_address = self._address_from_row(local_row) + if host.broadcast_rpc_address is None: + if self._token_meta_enabled: + # local rpc_address is not available, use the connection endpoint + host.broadcast_rpc_address = connection.endpoint.address + else: + # local rpc_address has not been queried yet, try to fetch it + # separately, which might fail because C* < 2.1.6 doesn't have rpc_address + # in system.local. See CASSANDRA-9436. + local_rpc_address_query = QueryMessage(query=self._SELECT_LOCAL_NO_TOKENS_RPC_ADDRESS, + consistency_level=ConsistencyLevel.ONE) + success, local_rpc_address_result = connection.wait_for_response( + local_rpc_address_query, timeout=self._timeout, fail_on_error=False) + if success: + row = dict_factory(*local_rpc_address_result.results) + host.broadcast_rpc_address = row[0]['rpc_address'] + else: + host.broadcast_rpc_address = connection.endpoint.address + host.release_version = local_row.get("release_version") host.dse_version = local_row.get("dse_version") host.dse_workload = local_row.get("workload") @@ -3663,11 +3684,11 @@ def _on_timeout(self, _attempts=0): errors = self._errors if not errors: if self.is_schema_agreed: - key = self._current_host.endpoint if self._current_host else 'no host queried before timeout' + key = str(self._current_host.endpoint) if self._current_host else 'no host queried before timeout' errors = {key: "Client request timeout. See Session.execute[_async](timeout)"} else: connection = self.session.cluster.control_connection._connection - host = connection.endpoint if connection else 'unknown' + host = str(connection.endpoint) if connection else 'unknown' errors = {host: "Request timed out while waiting for schema agreement. See Session.execute[_async](timeout) and Cluster.max_schema_agreement_wait."} self._set_final_exception(OperationTimedOut(errors, self._current_host)) diff --git a/cassandra/connection.py b/cassandra/connection.py index 2de0797cb9..39d6af6ef5 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -614,8 +614,8 @@ def send_msg(self, msg, request_id, cb, encoder=ProtocolHandler.encode_message, self.push(msg) return len(msg) - def wait_for_response(self, msg, timeout=None): - return self.wait_for_responses(msg, timeout=timeout)[0] + def wait_for_response(self, msg, timeout=None, **kwargs): + return self.wait_for_responses(msg, timeout=timeout, **kwargs)[0] def wait_for_responses(self, *msgs, **kwargs): """ diff --git a/cassandra/pool.py b/cassandra/pool.py index 61ffff7f42..cd814ef067 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -128,6 +128,9 @@ def __init__(self, endpoint, conviction_policy_factory, datacenter=None, rack=No @property def address(self): + """ + The IP address of the endpoint. This is the RPC address the driver uses when connecting to the node. + """ # backward compatibility return self.endpoint.address @@ -176,7 +179,10 @@ def get_and_set_reconnection_handler(self, new_handler): return old def __eq__(self, other): - return self.endpoint == other.endpoint + if isinstance(other, Host): + return self.endpoint == other.endpoint + else: # TODO Backward compatibility, remove next major + return self.endpoint.address == other def __hash__(self): return hash(self.endpoint) From 59889bf2fbe0f35ac1456dd93807090be7a04af8 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 2 Apr 2019 14:37:21 -0400 Subject: [PATCH 1484/2005] Fix error when preparing queries with beta protocol v5 --- CHANGELOG.rst | 5 +++++ cassandra/cluster.py | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index e77d8249f0..cd2956ce99 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -7,6 +7,11 @@ Features * Abstract Host Connection information (PYTHON-1079) +Bug Fixes +--------- + +* Fix error when preparing queries with beta protocol v5 (PYTHON-1081) + 3.17.0 ====== February 19, 2019 diff --git a/cassandra/cluster.py b/cassandra/cluster.py index cfbe7a0626..9c4a01c2cc 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1916,7 +1916,7 @@ def _prepare_all_queries(self, host): connection = None try: connection = self.connection_factory(host.endpoint) - statements = self._prepared_statements.values() + statements = list(self._prepared_statements.values()) if ProtocolVersion.uses_keyspace_flag(self.protocol_version): # V5 protocol and higher, no need to set the keyspace chunks = [] From 01ebd6af8d5928b5bd1db4c9309e34ddf3d61723 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 22 Apr 2019 18:15:21 -0400 Subject: [PATCH 1485/2005] Fix ssl_options use in connection.py --- cassandra/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index 39d6af6ef5..f762d10667 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -411,7 +411,7 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, if not getattr(ssl, 'match_hostname', None): raise RuntimeError("ssl_options specify 'check_hostname', but ssl.match_hostname is not provided. " "Patch or upgrade Python to use this option.") - self.ssl.options.update(self.endpoint.ssl_options or {}) + self.ssl_options.update(self.endpoint.ssl_options or {}) elif self.endpoint.ssl_options: self.ssl_options = self.endpoint.ssl_options From 45353500746f244d60a6842964c1463d8b032a76 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 1 May 2019 16:10:40 -0400 Subject: [PATCH 1486/2005] Fix socket errors EAGAIN/EWOULDBLOCK handling --- CHANGELOG.rst | 8 +++ cassandra/io/asyncorereactor.py | 6 +- cassandra/io/libevreactor.py | 6 +- tests/unit/io/utils.py | 115 ++++++++++++++++++++++++-------- 4 files changed, 104 insertions(+), 31 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 730d96163f..e725bc8bfa 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,11 @@ +3.17.1 +====== +May 2, 2019 + +Bug Fixes +--------- +* Socket errors EAGAIN/EWOULDBLOCK are not handled properly and cause timeouts (PYTHON-1089) + 3.17.0 ====== February 19, 2019 diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 2d30cdeedf..d07fd1d9d9 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -426,12 +426,14 @@ def handle_read(self): except socket.error as err: if ssl and isinstance(err, ssl.SSLError): if err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): - return + if not self._iobuf.tell(): + return else: self.defunct(err) return elif err.args[0] in NONBLOCKING: - return + if not self._iobuf.tell(): + return else: self.defunct(err) return diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index afdf3b8137..e611336bc0 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -346,12 +346,14 @@ def handle_read(self, watcher, revents, errno=None): except socket.error as err: if ssl and isinstance(err, ssl.SSLError): if err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): - return + if not self._iobuf.tell(): + return else: self.defunct(err) return elif err.args[0] in NONBLOCKING: - return + if not self._iobuf.tell(): + return else: self.defunct(err) return diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index f342ceddbe..a5bed6997f 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -19,7 +19,10 @@ SupportedMessage, ReadyMessage, ServerError) from tests import is_monkey_patched +import io +import random from functools import wraps +from itertools import cycle import six from six import binary_type, BytesIO from mock import Mock @@ -258,42 +261,62 @@ def test_ewouldblock_on_buffer_size(self): self._check_error_recovery_on_buffer_size(errno.EWOULDBLOCK) def test_sslwantread_on_buffer_size(self): - self._check_error_recovery_on_buffer_size(ssl.SSL_ERROR_WANT_READ) + self._check_error_recovery_on_buffer_size( + ssl.SSL_ERROR_WANT_READ, + error_class=ssl.SSLError) def test_sslwantwrite_on_buffer_size(self): - self._check_error_recovery_on_buffer_size(ssl.SSL_ERROR_WANT_WRITE) + self._check_error_recovery_on_buffer_size( + ssl.SSL_ERROR_WANT_WRITE, + error_class=ssl.SSLError) - def _check_error_recovery_on_buffer_size(self, error_code): + def _check_error_recovery_on_buffer_size(self, error_code, error_class=socket_error): c = self.test_successful_connection() - header = six.b('\x00\x00\x00\x00') + int32_pack(20000) - responses = [ - header + (six.b('a') * (4096 - len(header))), - six.b('a') * 4096, - socket_error(error_code), - six.b('a') * 100, - socket_error(error_code)] - - def side_effect(*args): - response = responses.pop(0) - log.debug('about to mock return {}'.format(response)) - if isinstance(response, socket_error): + # current data, used by the recv side_effect + message_chunks = None + + def recv_side_effect(*args): + response = message_chunks.pop(0) + if isinstance(response, error_class): raise response else: return response - self.get_socket(c).recv.side_effect = side_effect - c.handle_read(*self.null_handle_function_args) - # the EAGAIN prevents it from reading the last 100 bytes - c._iobuf.seek(0, os.SEEK_END) - pos = c._iobuf.tell() - self.assertEqual(pos, 4096 + 4096) - - # now tell it to read the last 100 bytes - c.handle_read(*self.null_handle_function_args) - c._iobuf.seek(0, os.SEEK_END) - pos = c._iobuf.tell() - self.assertEqual(pos, 4096 + 4096 + 100) + # setup + self.get_socket(c).recv.side_effect = recv_side_effect + c.process_io_buffer = Mock() + + def chunk(size): + return six.b('a') * size + + buf_size = c.in_buffer_size + + # List of messages to test. A message = (chunks, expected_read_size) + messages = [ + ([chunk(200)], 200), + ([chunk(200), chunk(200)], 200), # first chunk < in_buffer_size, process the message + ([chunk(buf_size), error_class(error_code)], buf_size), + ([chunk(buf_size), chunk(buf_size), error_class(error_code)], buf_size*2), + ([chunk(buf_size), chunk(buf_size), chunk(10)], (buf_size*2) + 10), + ([chunk(buf_size), chunk(buf_size), error_class(error_code), chunk(10)], buf_size*2), + ([error_class(error_code), chunk(buf_size)], 0) + ] + + for message, expected_size in messages: + message_chunks = message + c._iobuf = io.BytesIO() + c.process_io_buffer.reset_mock() + c.handle_read(*self.null_handle_function_args) + c._iobuf.seek(0, os.SEEK_END) + + # Ensure the message size is the good one and that the + # message has been processed if it is non-empty + self.assertEqual(c._iobuf.tell(), expected_size) + if expected_size == 0: + c.process_io_buffer.assert_not_called() + else: + c.process_io_buffer.assert_called_once_with() def test_protocol_error(self): c = self.make_connection() @@ -450,3 +473,41 @@ def test_partial_message_read(self): self.assertTrue(c.connected_event.is_set()) self.assertFalse(c.is_defunct) + + def test_mixed_message_and_buffer_sizes(self): + """ + Validate that all messages are processed with different scenarios: + + - various message sizes + - various socket buffer sizes + - random non-fatal errors raised + """ + c = self.make_connection() + c.process_io_buffer = Mock() + + errors = cycle([ + ssl.SSLError(ssl.SSL_ERROR_WANT_READ), + ssl.SSLError(ssl.SSL_ERROR_WANT_WRITE), + socket_error(errno.EWOULDBLOCK), + socket_error(errno.EAGAIN) + ]) + + for buffer_size in [512, 1024, 2048, 4096, 8192]: + c.in_buffer_size = buffer_size + + for i in range(1, 15): + c.process_io_buffer.reset_mock() + c._iobuf = io.BytesIO() + message = io.BytesIO(six.b('a') * (2**i)) + + def recv_side_effect(*args): + if random.randint(1,10) % 3 == 0: + raise next(errors) + return message.read(args[0]) + + self.get_socket(c).recv.side_effect = recv_side_effect + c.handle_read(*self.null_handle_function_args) + if c._iobuf.tell(): + c.process_io_buffer.assert_called_once() + else: + c.process_io_buffer.assert_not_called() From 5d287d5fdda75ff5dbd0411c3648d849e2eef7c3 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 1 May 2019 21:28:47 -0400 Subject: [PATCH 1487/2005] Add a custom build config for releases --- build.yaml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/build.yaml b/build.yaml index 890b614800..e8ee639e11 100644 --- a/build.yaml +++ b/build.yaml @@ -43,6 +43,17 @@ schedules: - python: [3.4, 3.6] - cassandra: ['2.0', '2.1', '3.0'] + commit_branches: + schedule: per_commit + branches: + include: [/.+release/] + env_vars: | + EVENT_LOOP_MANAGER='libev' + EXCLUDE_LONG=1 + matrix: + exclude: + - python: [3.4, 3.6] + weekly_libev: schedule: 0 10 * * 6 branches: From 1d33b2f47808098a1ca3fb446a6b80d63aab07a9 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 2 May 2019 07:27:51 -0400 Subject: [PATCH 1488/2005] Bump version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index ead66b576a..6534a1738b 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 17, 0) +__version_info__ = (3, 17, 1) __version__ = '.'.join(map(str, __version_info__)) From 3601a552bba0ba29f9a43f4eead77dd935e0efd7 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Sat, 4 May 2019 19:17:27 -0400 Subject: [PATCH 1489/2005] ensure the role manager is setup in test_authentication --- .../integration/standard/test_authentication.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index 6c389f9282..7cdc3b14eb 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -77,9 +77,20 @@ def get_authentication_provider(self, username, password): return PlainTextAuthProvider(username=username, password=password) def cluster_as(self, usr, pwd): - return Cluster(protocol_version=PROTOCOL_VERSION, - idle_heartbeat_interval=0, - auth_provider=self.get_authentication_provider(username=usr, password=pwd)) + # test we can connect at least once with creds + # to ensure the role manager is setup + for _ in range(5): + try: + cluster = Cluster( + protocol_version=PROTOCOL_VERSION, + idle_heartbeat_interval=0, + auth_provider=self.get_authentication_provider(username=usr, password=pwd)) + cluster.connect(wait_for_all_pools=True) + return cluster + except Exception as e: + time.sleep(5) + + raise Exception('Unable to connect with creds: {}/{}'.format(usr, pwd)) def test_auth_connect(self): user = 'u' From 649320c55aab59d5ca907739b5d369075ff83426 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Sat, 4 May 2019 20:56:34 -0400 Subject: [PATCH 1490/2005] use the right creds in test_authentication --- tests/integration/standard/test_authentication.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index 7cdc3b14eb..b42d7946d9 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -84,9 +84,13 @@ def cluster_as(self, usr, pwd): cluster = Cluster( protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=0, - auth_provider=self.get_authentication_provider(username=usr, password=pwd)) + auth_provider=self.get_authentication_provider(username='cassandra', password='cassandra')) cluster.connect(wait_for_all_pools=True) - return cluster + + return Cluster( + protocol_version=PROTOCOL_VERSION, + idle_heartbeat_interval=0, + auth_provider=self.get_authentication_provider(username=usr, password=pwd)) except Exception as e: time.sleep(5) From d3dff85f3b0ee453e6972a451f6a938ffa1ebb91 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 6 May 2019 11:13:47 -0400 Subject: [PATCH 1491/2005] Improve version parsing to support a non-integer 4th component --- CHANGELOG.rst | 1 + cassandra/metadata.py | 8 ++-- cassandra/util.py | 79 +++++++++++++++++++++++++++++++++++ tests/unit/test_util_types.py | 76 ++++++++++++++++++++++++++++++++- 4 files changed, 158 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 8435a0f083..840c82f407 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -6,6 +6,7 @@ Features -------- * Abstract Host Connection information (PYTHON-1079) +* Improve version parsing to support a non-integer 4th component (PYTHON-1091) Bug Fixes --------- diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 4129784021..5d6f52a199 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -40,7 +40,7 @@ from cassandra.marshal import varint_unpack from cassandra.protocol import QueryMessage from cassandra.query import dict_factory, bind_params -from cassandra.util import OrderedDict +from cassandra.util import OrderedDict, Version from cassandra.pool import HostDistance from cassandra.connection import EndPoint @@ -2763,10 +2763,10 @@ def export_as_string(self): def get_schema_parser(connection, server_version, timeout): - server_major_version = int(server_version.split('.')[0]) - if server_major_version >= 4: + version = Version(server_version) + if version >= Version('4.0.0'): return SchemaParserV4(connection, timeout) - if server_major_version >= 3: + if version >= Version('3.0.0'): return SchemaParserV3(connection, timeout) else: # we could further specialize by version. Right now just refactoring the diff --git a/cassandra/util.py b/cassandra/util.py index 28d2c729d3..bf6ad7c928 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -1232,3 +1232,82 @@ def __str__(self): abs(self.days), abs(self.nanoseconds) ) + + +@total_ordering +class Version(object): + """ + Internal minimalist class to compare versions. + A valid version is: .... + + TODO: when python2 support is removed, use packaging.version. + """ + + _version = None + major = None + minor = 0 + patch = 0 + build = 0 + + def __init__(self, version): + self._version = version + parts = list(reversed(version.split('.'))) + if len(parts) > 4: + raise ValueError("Invalid version: {}. Only 4 " + "components are supported".format(version)) + + self.major = int(parts.pop()) + self.minor = int(parts.pop()) if parts else 0 + self.patch = int(parts.pop()) if parts else 0 + + if parts: # we have a build version + build = parts.pop() + try: + self.build = int(build) + except ValueError: + self.build = build + + def __hash__(self): + return self._version + + def __repr__(self): + if self.build: + return "Version({0}, {1}, {2}, {3})".format(self.major, self.minor, self.patch, self.build) + + return "Version({0}, {1}, {2})".format(self.major, self.minor, self.patch) + + def __str__(self): + return self._version + + @staticmethod + def _compare_build(build, other_build, cmp): + if not (isinstance(build, six.integer_types) and + isinstance(other_build, six.integer_types)): + build = str(build) + other_build = str(other_build) + + return cmp(build, other_build) + + def __eq__(self, other): + if not isinstance(other, Version): + return NotImplemented + + return (self.major == other.major and + self.minor == other.minor and + self.patch == other.patch and + self._compare_build(self.build, other.build, lambda s, o: s == o)) + + def __gt__(self, other): + if not isinstance(other, Version): + return NotImplemented + + is_major_ge = self.major >= other.major + is_minor_ge = self.minor >= other.minor + is_patch_ge = self.patch >= other.patch + is_build_gt = self._compare_build( + self.build, other.build, lambda s, o: s > o) + + return (self.major > other.major or + (is_major_ge and self.minor > other.minor) or + (is_major_ge and is_minor_ge and self.patch > other.patch) or + (is_major_ge and is_minor_ge and is_patch_ge and is_build_gt)) diff --git a/tests/unit/test_util_types.py b/tests/unit/test_util_types.py index 05cc8dbdae..c2dff20967 100644 --- a/tests/unit/test_util_types.py +++ b/tests/unit/test_util_types.py @@ -18,7 +18,7 @@ import datetime -from cassandra.util import Date, Time, Duration +from cassandra.util import Date, Time, Duration, Version class DateTests(unittest.TestCase): @@ -206,4 +206,76 @@ def test_str(self): self.assertEqual(str(Duration(52, 23, 564564)), "52mo23d564564ns") - +class VersionTests(unittest.TestCase): + + def test_version_parsing(self): + + versions = [ + (2, 0, 0), + (3, 1, 0), + (2, 4, 54), + (3, 1, 1, 12), + (3, 55, 1, 'build12'), + (3, 55, 1, '20190429-TEST') + ] + + for version in versions: + str_version = '.'.join([str(p) for p in version]) + v = Version(str_version) + self.assertEqual(str_version, str(v)) + self.assertEqual(v.major, version[0]) + self.assertEqual(v.minor, version[1]) + self.assertEqual(v.patch, version[2]) + if len(version) > 3: + self.assertEqual(v.build, version[3]) + + # not supported version formats + with self.assertRaises(ValueError): + Version('2.1.hello') + + with self.assertRaises(ValueError): + Version('2.test.1') + + with self.assertRaises(ValueError): + Version('test.1.0') + + with self.assertRaises(ValueError): + Version('1.0.0.0.1') + + def test_version_compare(self): + # just tests a bunch of versions + + # major wins + self.assertTrue(Version('3.3.0') > Version('2.5.0')) + self.assertTrue(Version('3.3.0') > Version('2.5.0.66')) + self.assertTrue(Version('3.3.0') > Version('2.5.21')) + + # minor wins + self.assertTrue(Version('2.3.0') > Version('2.2.0')) + self.assertTrue(Version('2.3.0') > Version('2.2.7')) + self.assertTrue(Version('2.3.0') > Version('2.2.7.9')) + + # patch wins + self.assertTrue(Version('2.3.1') > Version('2.3.0')) + self.assertTrue(Version('2.3.1') > Version('2.3.0.4post0')) + self.assertTrue(Version('2.3.1') > Version('2.3.0.44')) + + # various + self.assertTrue(Version('2.3.0.1') > Version('2.3.0.0')) + self.assertTrue(Version('2.3.0.680') > Version('2.3.0.670')) + self.assertTrue(Version('2.3.0.681') > Version('2.3.0.680')) + self.assertTrue(Version('2.3.0.1build0') > Version('2.3.0.1')) # 4th part fallback to str cmp + self.assertTrue(Version('2.3.0.build0') > Version('2.3.0.1')) # 4th part fallback to str cmp + self.assertTrue(Version('2.3.0') < Version('2.3.0.build')) + + self.assertTrue(Version('4.0.0') >= Version('4.0.0')) + self.assertTrue(Version('4.0.0.421') >= Version('4.0.0')) + self.assertTrue(Version('4.0.1') >= Version('4.0.0')) + self.assertTrue(Version('2.3.0') == Version('2.3.0')) + self.assertTrue(Version('2.3.32') == Version('2.3.32')) + self.assertTrue(Version('2.3.32') == Version('2.3.32.0')) + self.assertTrue(Version('2.3.0.build') == Version('2.3.0.build')) + + self.assertTrue(Version('4') == Version('4.0.0')) + self.assertTrue(Version('4.0') == Version('4.0.0.0')) + self.assertTrue(Version('4.0') > Version('3.9.3')) From 64350b727676eb1efbad54ba6dd10d9685b66321 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 9 May 2019 18:06:32 -0400 Subject: [PATCH 1492/2005] Expose on_request_error method in the RetryPolicy --- cassandra/cluster.py | 60 +++++++++------ cassandra/policies.py | 28 +++++++ tests/integration/simulacron/test_policies.py | 76 +++++++++++++++++-- tests/unit/test_response_future.py | 4 +- 4 files changed, 136 insertions(+), 32 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 9c4a01c2cc..ed667eaffd 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -58,6 +58,7 @@ PrepareMessage, ExecuteMessage, PreparedQueryNotFound, IsBootstrappingErrorMessage, + TruncateError, ServerError, BatchMessage, RESULT_KIND_PREPARED, RESULT_KIND_SET_KEYSPACE, RESULT_KIND_ROWS, RESULT_KIND_SCHEMA_CHANGE, ProtocolHandler) @@ -3925,20 +3926,15 @@ def _set_result(self, host, connection, pool, response): self._metrics.on_unavailable() retry = retry_policy.on_unavailable( self.query, retry_num=self._query_retries, **response.info) - elif isinstance(response, OverloadedErrorMessage): + elif isinstance(response, (OverloadedErrorMessage, + IsBootstrappingErrorMessage, + TruncateError, ServerError)): + log.warning("Host %s error: %s.", host, response.summary) if self._metrics is not None: self._metrics.on_other_error() - # need to retry against a different host here - log.warning("Host %s is overloaded, retrying against a different " - "host", host) - self._retry(reuse_connection=False, consistency_level=None, host=host) - return - elif isinstance(response, IsBootstrappingErrorMessage): - if self._metrics is not None: - self._metrics.on_other_error() - # need to retry against a different host here - self._retry(reuse_connection=False, consistency_level=None, host=host) - return + retry = retry_policy.on_request_error( + self.query, self.message.consistency_level, error=response, + retry_num=self._query_retries) elif isinstance(response, PreparedQueryNotFound): if self.prepared_statement: query_id = self.prepared_statement.query_id @@ -3988,24 +3984,16 @@ def _set_result(self, host, connection, pool, response): self._set_final_exception(response) return - retry_type, consistency = retry - if retry_type in (RetryPolicy.RETRY, RetryPolicy.RETRY_NEXT_HOST): - self._query_retries += 1 - reuse = retry_type == RetryPolicy.RETRY - self._retry(reuse, consistency, host) - elif retry_type is RetryPolicy.RETHROW: - self._set_final_exception(response.to_exception()) - else: # IGNORE - if self._metrics is not None: - self._metrics.on_ignore() - self._set_final_result(None) - self._errors[host] = response.to_exception() + self._handle_retry_decision(retry, response, host) elif isinstance(response, ConnectionException): if self._metrics is not None: self._metrics.on_connection_error() if not isinstance(response, ConnectionShutdown): self._connection.defunct(response) - self._retry(reuse_connection=False, consistency_level=None, host=host) + retry = self._retry_policy.on_request_error( + self.query, self.message.consistency_level, error=response, + retry_num=self._query_retries) + self._handle_retry_decision(retry, response, host) elif isinstance(response, Exception): if hasattr(response, 'to_exception'): self._set_final_exception(response.to_exception()) @@ -4121,6 +4109,28 @@ def _set_final_exception(self, response): for callback_partial in to_call: callback_partial() + def _handle_retry_decision(self, retry_decision, response, host): + + def exception_from_response(response): + if hasattr(response, 'to_exception'): + return response.to_exception() + else: + return response + + retry_type, consistency = retry_decision + if retry_type in (RetryPolicy.RETRY, RetryPolicy.RETRY_NEXT_HOST): + self._query_retries += 1 + reuse = retry_type == RetryPolicy.RETRY + self._retry(reuse, consistency, host) + elif retry_type is RetryPolicy.RETHROW: + self._set_final_exception(exception_from_response(response)) + else: # IGNORE + if self._metrics is not None: + self._metrics.on_ignore() + self._set_final_result(None) + + self._errors[host] = exception_from_response(response) + def _retry(self, reuse_connection, consistency_level, host): if self._final_exception: # the connection probably broke while we were waiting diff --git a/cassandra/policies.py b/cassandra/policies.py index 5d7cc757f7..0b8c84a952 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -839,6 +839,31 @@ def on_unavailable(self, query, consistency, required_replicas, alive_replicas, """ return (self.RETRY_NEXT_HOST, None) if retry_num == 0 else (self.RETHROW, None) + def on_request_error(self, query, consistency, error, retry_num): + """ + This is called when an unexpected error happens. This can be in the + following situations: + + * On a connection error + * On server errors: overloaded, isBootstrapping, serverError, etc. + + `query` is the :class:`.Statement` that timed out. + + `consistency` is the :class:`.ConsistencyLevel` that the operation was + attempted at. + + `error` the instance of the exception. + + `retry_num` counts how many times the operation has been retried, so + the first time this method is called, `retry_num` will be 0. + + The default, it triggers a retry on the next host in the query plan + with the same consistency level. + """ + # TODO revisit this for the next major + # To preserve the same behavior than before, we don't take retry_num into account + return self.RETRY_NEXT_HOST, None + class FallthroughRetryPolicy(RetryPolicy): """ @@ -855,6 +880,9 @@ def on_write_timeout(self, *args, **kwargs): def on_unavailable(self, *args, **kwargs): return self.RETHROW, None + def on_request_error(self, *args, **kwargs): + return self.RETHROW, None + class DowngradingConsistencyRetryPolicy(RetryPolicy): """ diff --git a/tests/integration/simulacron/test_policies.py b/tests/integration/simulacron/test_policies.py index a934ccb117..d7a6775003 100644 --- a/tests/integration/simulacron/test_policies.py +++ b/tests/integration/simulacron/test_policies.py @@ -224,6 +224,7 @@ def __init__(self): self.write_timeout = count() self.read_timeout = count() self.unavailable = count() + self.request_error = count() def on_read_timeout(self, query, consistency, required_responses, received_responses, data_retrieved, retry_num): @@ -239,10 +240,15 @@ def on_unavailable(self, query, consistency, required_replicas, alive_replicas, next(self.unavailable) return self.IGNORE, None + def on_request_error(self, query, consistency, error, retry_num): + next(self.request_error) + return self.IGNORE, None + def reset_counters(self): self.write_timeout = count() self.read_timeout = count() self.unavailable = count() + self.request_error = count() @requiressimulacron @@ -262,6 +268,12 @@ def tearDownClass(cls): def tearDown(self): clear_queries() + def set_cluster(self, retry_policy): + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False, + default_retry_policy=retry_policy) + self.session = self.cluster.connect(wait_for_all_pools=True) + self.addCleanup(self.cluster.shutdown) + def test_retry_policy_ignores_and_rethrows(self): """ Test to verify :class:`~cassandra.protocol.WriteTimeoutErrorMessage` is decoded correctly and that @@ -378,8 +390,62 @@ def test_setting_retry_policy_to_statement(self): self.session.execute(bound_stmt) self.assertEqual(next(counter_policy.write_timeout), 1) - def set_cluster(self, retry_policy): - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False, - default_retry_policy=retry_policy) - self.session = self.cluster.connect(wait_for_all_pools=True) - self.addCleanup(self.cluster.shutdown) \ No newline at end of file + def test_retry_policy_on_request_error(self): + """ + Test to verify that on_request_error is called properly. + + @since 3.18 + @jira_ticket PYTHON-1064 + @expected_result the appropriate retry policy is called + + @test_category connection + """ + overloaded_error = { + "result": "overloaded", + "message": "overloaded" + } + + bootstrapping_error = { + "result": "is_bootstrapping", + "message": "isbootstrapping" + } + + truncate_error = { + "result": "truncate_error", + "message": "truncate_error" + } + + server_error = { + "result": "server_error", + "message": "server_error" + } + + # Test the on_request_error call + retry_policy = CounterRetryPolicy() + self.set_cluster(retry_policy) + + for e in [overloaded_error, bootstrapping_error, truncate_error, server_error]: + query_to_prime = "SELECT * from simulacron_keyspace.simulacron_table;" + prime_query(query_to_prime, then=e, rows=None, column_types=None) + rf = self.session.execute_async(query_to_prime) + try: + rf.result() + except: + pass + self.assertEqual(len(rf.attempted_hosts), 1) # no retry + + self.assertEqual(next(retry_policy.request_error), 4) + + # Test that by default, retry on next host + retry_policy = RetryPolicy() + self.set_cluster(retry_policy) + + for e in [overloaded_error, bootstrapping_error, truncate_error, server_error]: + query_to_prime = "SELECT * from simulacron_keyspace.simulacron_table;" + prime_query(query_to_prime, then=e, rows=None, column_types=None) + rf = self.session.execute_async(query_to_prime) + try: + rf.result() + except: + pass + self.assertEqual(len(rf.attempted_hosts), 3) # all 3 nodes failed diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index b785507801..08a1f8b108 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -241,8 +241,8 @@ def test_retry_with_different_host(self): rf._set_result(host, None, None, result) session.submit.assert_called_once_with(rf._retry_task, False, host) - # query_retries does not get incremented for Overloaded/Bootstrapping errors - self.assertEqual(0, rf._query_retries) + # query_retries does get incremented for Overloaded/Bootstrapping errors (since 3.18) + self.assertEqual(1, rf._query_retries) connection = Mock(spec=Connection) pool.borrow_connection.return_value = (connection, 2) From 5745d26c92e08bec572ae21b5c272e3023bda094 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 13 May 2019 10:53:17 -0400 Subject: [PATCH 1493/2005] Add changelog entry for PYTHON-1064 --- CHANGELOG.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 840c82f407..a5c5b9ea8e 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -7,6 +7,7 @@ Features * Abstract Host Connection information (PYTHON-1079) * Improve version parsing to support a non-integer 4th component (PYTHON-1091) +* Expose on_request_error method in the RetryPolicy (PYTHON-1064) Bug Fixes --------- From 8b1a86819afc8c36c1a339d9b7d9bef88502df44 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 13 May 2019 13:53:49 -0400 Subject: [PATCH 1494/2005] Skip host version check for test-dse --- tests/integration/standard/test_metadata.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index f50fa8c9e2..11ca4cf878 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -23,6 +23,7 @@ import six import sys import time +import os from packaging.version import Version from mock import Mock, patch @@ -78,6 +79,9 @@ def test_broadcast_listen_address(self): rpc_addrs = [host.broadcast_rpc_address for host in self.cluster.metadata.all_hosts()] self.assertTrue(local_host in rpc_addrs) + @unittest.skipUnless( + os.getenv('MAPPED_CASSANDRA_VERSION', None) is None, + "Don't check the host version for test-dse") def test_host_release_version(self): """ Checks the hosts release version and validates that it is equal to the From 8533a63941374e6d26dc37b97bba815fa77c7d97 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 15 May 2019 20:31:23 -0400 Subject: [PATCH 1495/2005] ensure the driver supports legacy empty column names --- CHANGELOG.rst | 1 + cassandra/cqlengine/columns.py | 2 +- cassandra/metadata.py | 2 +- .../simulacron/test_empty_column.py | 257 ++++++++++++++++++ 4 files changed, 260 insertions(+), 2 deletions(-) create mode 100644 tests/integration/simulacron/test_empty_column.py diff --git a/CHANGELOG.rst b/CHANGELOG.rst index a5c5b9ea8e..cfd3285433 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -13,6 +13,7 @@ Bug Fixes --------- * Fix error when preparing queries with beta protocol v5 (PYTHON-1081) +* Accept legacy empty strings as column names (PYTHON-1082) 3.17.1 ====== diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 5808f60613..1656bfb6cf 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -288,7 +288,7 @@ def set_column_name(self, name): @property def db_field_name(self): """ Returns the name of the cql name of this column """ - return self.db_field or self.column_name + return self.db_field if self.db_field is not None else self.column_name @property def db_index_name(self): diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 5d6f52a199..2c252c9571 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2037,7 +2037,7 @@ def _build_table_metadata(self, row, col_rows=None, trigger_rows=None): # other normal columns for col_row in col_rows: column_meta = self._build_column_metadata(table_meta, col_row) - if column_meta.name: + if column_meta.name is not None: table_meta.columns[column_meta.name] = column_meta index_meta = self._build_index_metadata(column_meta, col_row) if index_meta: diff --git a/tests/integration/simulacron/test_empty_column.py b/tests/integration/simulacron/test_empty_column.py new file mode 100644 index 0000000000..e07534f429 --- /dev/null +++ b/tests/integration/simulacron/test_empty_column.py @@ -0,0 +1,257 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +from collections import namedtuple, OrderedDict + +from cassandra.cluster import Cluster +from cassandra.query import (named_tuple_factory, tuple_factory, + dict_factory, ordered_dict_factory) + +from cassandra.cqlengine import columns +from cassandra.cqlengine.connection import set_session +from cassandra.cqlengine.models import Model + +from tests.integration import PROTOCOL_VERSION, requiressimulacron +from tests.integration.simulacron import SimulacronCluster +from tests.integration.simulacron.utils import PrimeQuery, prime_request + + +@requiressimulacron +class EmptyColumnTests(SimulacronCluster): + """ + Test that legacy empty column names can be read by the driver. + + @since 3.18 + @jira_ticket PYTHON-1082 + @expected_result the driver supports those columns + """ + connect = False + + def tearDown(self): + if self.cluster: + self.cluster.shutdown() + + @staticmethod + def _prime_testtable_query(): + queries = [ + 'SELECT "", " " FROM testks.testtable', + 'SELECT "", " " FROM testks.testtable LIMIT 10000' # cqlengine + ] + then = { + 'result': 'success', + 'delay_in_ms': 0, + 'rows': [ + { + "": "testval", + " ": "testval1" + } + ], + 'column_types': { + "": "ascii", + " ": "ascii" + }, + 'ignore_on_prepare': False + } + for query in queries: + prime_request(PrimeQuery(query, then=then)) + + def test_empty_columns_with_all_row_factories(self): + query = 'SELECT "", " " FROM testks.testtable' + self._prime_testtable_query() + + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.session = self.cluster.connect(wait_for_all_pools=True) + + # Test all row factories + self.session.row_factory = named_tuple_factory + self.assertEqual( + list(self.session.execute(query)), + [namedtuple('Row', ['field_0_', 'field_1_'])('testval', 'testval1')] + ) + + self.session.row_factory = tuple_factory + self.assertEqual( + list(self.session.execute(query)), + [('testval', 'testval1')] + ) + + self.session.row_factory = dict_factory + self.assertEqual( + list(self.session.execute(query)), + [{'': 'testval', ' ': 'testval1'}] + ) + + self.session.row_factory = ordered_dict_factory + self.assertEqual( + list(self.session.execute(query)), + [OrderedDict((('', 'testval'), (' ', 'testval1')))] + ) + + def test_empty_columns_in_system_schema(self): + queries = [ + "SELECT * FROM system_schema.tables", + "SELECT * FROM system.schema.tables", + "SELECT * FROM system.schema_columnfamilies" + ] + then = { + 'result': 'success', + 'delay_in_ms': 0, + 'rows': [ + { + "compression": dict(), + "compaction": dict(), + "bloom_filter_fp_chance": 0.1, + "caching": {"keys": "ALL", "rows_per_partition": "NONE"}, + "comment": "comment", + "gc_grace_seconds": 60000, + "dclocal_read_repair_chance": 0.1, + "read_repair_chance": 0.1, + "keyspace_name": "testks", + "table_name": "testtable", + "columnfamily_name": "testtable", # C* 2.2 + "flags": ["compound"], + "comparator": "none" # C* 2.2 + } + ], + 'column_types': { + "compression": "map", + "compaction": "map", + "bloom_filter_fp_chance": "double", + "caching": "map", + "comment": "ascii", + "gc_grace_seconds": "int", + "dclocal_read_repair_chance": "double", + "read_repair_chance": "double", + "keyspace_name": "ascii", + "table_name": "ascii", + "columnfamily_name": "ascii", + "flags": "set", + "comparator": "ascii" + }, + 'ignore_on_prepare': False + } + for query in queries: + query = PrimeQuery(query, then=then) + prime_request(query) + + queries = [ + "SELECT * FROM system_schema.keyspaces", + "SELECT * FROM system.schema_keyspaces" + ] + then = { + 'result': 'success', + 'delay_in_ms': 0, + 'rows': [ + { + "strategy_class": "SimpleStrategy", # C* 2.2 + "strategy_options": '{}', # C* 2.2 + "replication": {'strategy': 'SimpleStrategy', 'replication_factor': 1}, + "durable_writes": True, + "keyspace_name": "testks" + } + ], + 'column_types': { + "strategy_class": "ascii", + "strategy_options": "ascii", + "replication": "map", + "keyspace_name": "ascii", + "durable_writes": "boolean" + }, + 'ignore_on_prepare': False + } + for query in queries: + query = PrimeQuery(query, then=then) + prime_request(query) + + queries = [ + "SELECT * FROM system_schema.columns", + "SELECT * FROM system.schema.columns", + "SELECT * FROM system.schema_columns" + ] + then = { + 'result': 'success', + 'delay_in_ms': 0, + 'rows': [ + { + "table_name": 'testtable', + "columnfamily_name": 'testtable', # C* 2.2 + "column_name": "", + "keyspace_name": "testks", + "kind": "partition_key", + "clustering_order": "none", + "position": 0, + "type": "text", + "column_name_bytes": 0x12, + "validator": "none" # C* 2.2 + }, + { + "table_name": 'testtable', + "columnfamily_name": 'testtable', # C* 2.2 + "column_name": " ", + "keyspace_name": "testks", + "kind": "regular", + "clustering_order": "none", + "position": -1, + "type": "text", + "column_name_bytes": 0x13, + "validator": "none" # C* 2.2 + } + ], + 'column_types': { + "table_name": "ascii", + "columnfamily_name": "ascii", + "column_name": "ascii", + "keyspace_name": "ascii", + "clustering_order": "ascii", + "column_name_bytes": "blob", + "kind": "ascii", + "position": "int", + "type": "ascii", + "validator": "ascii" # C* 2.2 + }, + 'ignore_on_prepare': False + } + for query in queries: + query = PrimeQuery(query, then=then) + prime_request(query) + + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.session = self.cluster.connect(wait_for_all_pools=True) + + table_metadata = self.cluster.metadata.keyspaces['testks'].tables['testtable'] + self.assertEqual(len(table_metadata.columns), 2) + self.assertIn('', table_metadata.columns) + self.assertIn(' ', table_metadata.columns) + + def test_empty_columns_with_cqlengine(self): + self._prime_testtable_query() + + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.session = self.cluster.connect(wait_for_all_pools=True) + set_session(self.session) + + class TestModel(Model): + __keyspace__ = 'testks' + __table_name__ = 'testtable' + empty = columns.Text(db_field='', primary_key=True) + space = columns.Text(db_field=' ') + + self.assertEqual( + [TestModel(empty='testval', space='testval1')], + list(TestModel.objects.only(['empty', 'space']).all()) + ) From eb1571dd55009cffe075b1bbb8f35d0db1f7a0b9 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 16 May 2019 17:25:17 -0500 Subject: [PATCH 1496/2005] PYTHON-1087: make util.SortedSet handle uncomparable elements --- cassandra/util.py | 81 ++++++++++++++++-------------------- tests/unit/test_sortedset.py | 29 +++++++++++++ 2 files changed, 66 insertions(+), 44 deletions(-) diff --git a/cassandra/util.py b/cassandra/util.py index bf6ad7c928..0208afff38 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -487,9 +487,6 @@ def isdisjoint(self, other): return len(self.intersection(other)) == 0 -from bisect import bisect_left - - class SortedSet(object): ''' A sorted set based on sorted list @@ -593,7 +590,7 @@ def __ixor__(self, other): return self def __contains__(self, item): - i = bisect_left(self._items, item) + i = self._find_insertion(item) return i < len(self._items) and self._items[i] == item def __delitem__(self, i): @@ -603,7 +600,7 @@ def __delslice__(self, i, j): del self._items[i:j] def add(self, item): - i = bisect_left(self._items, item) + i = self._find_insertion(item) if i < len(self._items): if self._items[i] != item: self._items.insert(i, item) @@ -637,7 +634,7 @@ def pop(self): return self._items.pop() def remove(self, item): - i = bisect_left(self._items, item) + i = self._find_insertion(item) if i < len(self._items): if self._items[i] == item: self._items.pop(i) @@ -648,18 +645,8 @@ def union(self, *others): union = sortedset() union._items = list(self._items) for other in others: - if isinstance(other, self.__class__): - i = 0 - for item in other._items: - i = bisect_left(union._items, item, i) - if i < len(union._items): - if item != union._items[i]: - union._items.insert(i, item) - else: - union._items.append(item) - else: - for item in other: - union.add(item) + for item in other: + union.add(item) return union def intersection(self, *others): @@ -685,38 +672,44 @@ def symmetric_difference(self, other): def _diff(self, other): diff = sortedset() - if isinstance(other, self.__class__): - i = 0 - for item in self._items: - i = bisect_left(other._items, item, i) - if i < len(other._items): - if item != other._items[i]: - diff._items.append(item) - else: - diff._items.append(item) - else: - for item in self._items: - if item not in other: - diff.add(item) + for item in self._items: + if item not in other: + diff.add(item) return diff def _intersect(self, other): isect = sortedset() - if isinstance(other, self.__class__): - i = 0 - for item in self._items: - i = bisect_left(other._items, item, i) - if i < len(other._items): - if item == other._items[i]: - isect._items.append(item) - else: - break - else: - for item in self._items: - if item in other: - isect.add(item) + for item in self._items: + if item in other: + isect.add(item) return isect + def _find_insertion(self, x): + # this uses bisect_left algorithm unless it has elements it can't compare, + # in which case it defaults to grouping non-comparable items at the beginning or end, + # and scanning sequentially to find an insertion point + a = self._items + lo = 0 + hi = len(a) + try: + while lo < hi: + mid = (lo + hi) // 2 + if a[mid] < x: lo = mid + 1 + else: hi = mid + except TypeError: + # could not compare a[mid] with x + # start scanning to find insertion point while swallowing type errors + lo = 0 + compared_one = False # flag is used to determine whether uncomparables are grouped at the front or back + while lo < hi: + try: + if a[lo] >= x: break + compared_one = True + except TypeError: + if compared_one: break + lo += 1 + return lo + sortedset = SortedSet # backwards-compatibility diff --git a/tests/unit/test_sortedset.py b/tests/unit/test_sortedset.py index 72e557c642..213ca51550 100644 --- a/tests/unit/test_sortedset.py +++ b/tests/unit/test_sortedset.py @@ -20,6 +20,8 @@ from cassandra.util import sortedset from cassandra.cqltypes import EMPTY +from datetime import datetime +from itertools import permutations class SortedSetTest(unittest.TestCase): def test_init(self): @@ -364,3 +366,30 @@ def test_reduce_pickle(self): s = pickle.dumps(ss) self.assertEqual(pickle.loads(s), ss) + def test_uncomparable_types(self): + # PYTHON-1087 - make set handle uncomparable types + dt = datetime(2019, 5, 16) + items = (('samekey', 3, 1), + ('samekey', None, 0), + ('samekey', dt), + ("samekey", None, 2), + ("samekey", None, 1), + ('samekey', dt), + ('samekey', None, 0), + ("samekey", datetime.now())) + + for perm in permutations(items): + ss = sortedset(perm) + s = set(perm) + self.assertEqual(s, ss) + self.assertEqual(ss, ss.union(s)) + for x in range(len(ss)): + subset = set(s) + for _ in range(x): + subset.pop() + self.assertEqual(ss.difference(subset), s.difference(subset)) + self.assertEqual(ss.intersection(subset), s.intersection(subset)) + for x in ss: + self.assertIn(x, ss) + ss.remove(x) + self.assertNotIn(x, ss) From f25560e9b926fb732c758f4d49adf76bbb455fbf Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 17 May 2019 07:15:03 -0500 Subject: [PATCH 1497/2005] changelog update for PYTHON-1087 --- CHANGELOG.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index cfd3285433..cb4cc2d8de 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -14,6 +14,7 @@ Bug Fixes * Fix error when preparing queries with beta protocol v5 (PYTHON-1081) * Accept legacy empty strings as column names (PYTHON-1082) +* Let util.SortedSet handle uncomparable elements (PYTHON-1087) 3.17.1 ====== From 62dcb4907cdbf06b521b18eef2da9715d24a708d Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 17 May 2019 10:39:11 -0400 Subject: [PATCH 1498/2005] py3 can't compare NoneType --- cassandra/util.py | 2 +- tests/unit/test_sortedset.py | 32 ++++++++++++++++++++------------ 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/cassandra/util.py b/cassandra/util.py index 0208afff38..aaff0b9986 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -703,7 +703,7 @@ def _find_insertion(self, x): compared_one = False # flag is used to determine whether uncomparables are grouped at the front or back while lo < hi: try: - if a[lo] >= x: break + if a[lo] == x or a[lo] >= x: break compared_one = True except TypeError: if compared_one: break diff --git a/tests/unit/test_sortedset.py b/tests/unit/test_sortedset.py index 213ca51550..3845c2c31c 100644 --- a/tests/unit/test_sortedset.py +++ b/tests/unit/test_sortedset.py @@ -366,18 +366,7 @@ def test_reduce_pickle(self): s = pickle.dumps(ss) self.assertEqual(pickle.loads(s), ss) - def test_uncomparable_types(self): - # PYTHON-1087 - make set handle uncomparable types - dt = datetime(2019, 5, 16) - items = (('samekey', 3, 1), - ('samekey', None, 0), - ('samekey', dt), - ("samekey", None, 2), - ("samekey", None, 1), - ('samekey', dt), - ('samekey', None, 0), - ("samekey", datetime.now())) - + def _test_uncomparable_types(self, items): for perm in permutations(items): ss = sortedset(perm) s = set(perm) @@ -393,3 +382,22 @@ def test_uncomparable_types(self): self.assertIn(x, ss) ss.remove(x) self.assertNotIn(x, ss) + + def test_uncomparable_types_with_tuples(self): + # PYTHON-1087 - make set handle uncomparable types + dt = datetime(2019, 5, 16) + items = (('samekey', 3, 1), + ('samekey', None, 0), + ('samekey', dt), + ("samekey", None, 2), + ("samekey", None, 1), + ('samekey', dt), + ('samekey', None, 0), + ("samekey", datetime.now())) + + self._test_uncomparable_types(items) + + def test_uncomparable_types_with_integers(self): + # PYTHON-1087 - make set handle uncomparable types + items = (None, 1, 2, 6, None, None, 92) + self._test_uncomparable_types(items) From 3d8349d77698b2987bd63937d22a5c5fbebf6302 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Sat, 18 May 2019 11:03:15 -0400 Subject: [PATCH 1499/2005] Add jitter to ExponentialReconnectionPolicy --- CHANGELOG.rst | 1 + cassandra/policies.py | 12 +++++++++++- tests/unit/test_policies.py | 33 ++++++++++++++++++++++++++++----- 3 files changed, 40 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index cb4cc2d8de..bffc5fb7c1 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -8,6 +8,7 @@ Features * Abstract Host Connection information (PYTHON-1079) * Improve version parsing to support a non-integer 4th component (PYTHON-1091) * Expose on_request_error method in the RetryPolicy (PYTHON-1064) +* Add jitter to ExponentialReconnectionPolicy (PYTHON-1065) Bug Fixes --------- diff --git a/cassandra/policies.py b/cassandra/policies.py index 0b8c84a952..474b2a0500 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -659,6 +659,10 @@ class ExponentialReconnectionPolicy(ReconnectionPolicy): A :class:`.ReconnectionPolicy` subclass which exponentially increases the length of the delay inbetween each reconnection attempt up to a set maximum delay. + + A random amount of jitter (+/- 15%) will be added to the pure exponential + delay value to avoid the situations where many reconnection handlers are + trying to reconnect at exactly the same time. """ # TODO: max_attempts is 64 to preserve legacy default behavior @@ -693,13 +697,19 @@ def new_schedule(self): yield self.max_delay else: try: - yield min(self.base_delay * (2 ** i), self.max_delay) + yield self._add_jitter(min(self.base_delay * (2 ** i), self.max_delay)) except OverflowError: overflowed = True yield self.max_delay i += 1 + # Adds -+ 15% to the delay provided + def _add_jitter(self, value): + jitter = randint(85, 115) + delay = (jitter * value) / 100 + return min(max(self.base_delay, delay), self.max_delay) + class RetryPolicy(object): """ diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index affcacebbc..15fa3160b0 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -881,6 +881,9 @@ def test_schedule_infinite_attempts(self): class ExponentialReconnectionPolicyTest(unittest.TestCase): + def _assert_between(self, value, min, max): + self.assertTrue(min <= value <= max) + def test_bad_vals(self): self.assertRaises(ValueError, ExponentialReconnectionPolicy, -1, 0) self.assertRaises(ValueError, ExponentialReconnectionPolicy, 0, -1) @@ -893,8 +896,8 @@ def test_schedule_no_max(self): test_iter = 10000 policy = ExponentialReconnectionPolicy(base_delay=base_delay, max_delay=max_delay, max_attempts=None) sched_slice = list(islice(policy.new_schedule(), 0, test_iter)) - self.assertEqual(sched_slice[0], base_delay) - self.assertEqual(sched_slice[-1], max_delay) + self._assert_between(sched_slice[0], base_delay*0.85, base_delay*1.15) + self._assert_between(sched_slice[-1], max_delay*0.85, max_delay*1.15) self.assertEqual(len(sched_slice), test_iter) def test_schedule_with_max(self): @@ -906,11 +909,12 @@ def test_schedule_with_max(self): self.assertEqual(len(schedule), max_attempts) for i, delay in enumerate(schedule): if i == 0: - self.assertEqual(delay, base_delay) + self._assert_between(delay, base_delay*0.85, base_delay*1.15) elif i < 6: - self.assertEqual(delay, schedule[i - 1] * 2) + value = base_delay * (2 ** i) + self._assert_between(delay, value*85/100, value*1.15) else: - self.assertEqual(delay, max_delay) + self._assert_between(delay, max_delay*85/100, max_delay*1.15) def test_schedule_exactly_one_attempt(self): base_delay = 2.0 @@ -945,6 +949,25 @@ def test_schedule_overflow(self): for number in schedule: self.assertLessEqual(number, sys.float_info.max) + def test_schedule_with_jitter(self): + """ + Test to verify jitter is added properly and is always between -/+ 15%. + + @since 3.18 + @jira_ticket PYTHON-1065 + """ + for i in range(100): + base_delay = float(randint(2, 5)) + max_delay = (base_delay - 1) * 100.0 + ep = ExponentialReconnectionPolicy(base_delay, max_delay, max_attempts=64) + schedule = ep.new_schedule() + for i in range(64): + exp_delay = min(base_delay * (2 ** i), max_delay) + min_jitter_delay = max(base_delay, exp_delay*85/100) + max_jitter_delay = min(max_delay, exp_delay*115/100) + delay = next(schedule) + self._assert_between(delay, min_jitter_delay, max_jitter_delay) + ONE = ConsistencyLevel.ONE From 7a7d71679b4848d50bc03f8a6ea3ef4c59c5765b Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 21 May 2019 09:41:34 -0400 Subject: [PATCH 1500/2005] Better string mapping of execution profiles object keys --- cassandra/cluster.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index ed667eaffd..fb5e0dd25f 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2375,6 +2375,11 @@ def _create_response_future(self, query, parameters, trace, custom_payload, load_balancer=load_balancing_policy, start_time=start_time, speculative_execution_plan=spec_exec_plan, host=host) + def _execution_profile_to_string(self, name): + if name is EXEC_PROFILE_DEFAULT: + return 'EXEC_PROFILE_DEFAULT' + return '"%s"' % (name,) + def get_execution_profile(self, name): """ Returns the execution profile associated with the provided ``name``. @@ -2385,7 +2390,9 @@ def get_execution_profile(self, name): try: return profiles[name] except KeyError: - raise ValueError("Invalid execution_profile: '%s'; valid profiles are %s" % (name, profiles.keys())) + eps = [self._execution_profile_to_string(ep) for ep in profiles.keys()] + raise ValueError("Invalid execution_profile: %s; valid profiles are: %s." % ( + self._execution_profile_to_string(name), ', '.join(eps))) def _maybe_get_execution_profile(self, ep): return ep if isinstance(ep, ExecutionProfile) else self.get_execution_profile(ep) From 650979e1c67fa365540a622c1de10da396077858 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 21 May 2019 09:51:59 -0400 Subject: [PATCH 1501/2005] Remove changelog check in .travis.yml --- .travis.yml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/.travis.yml b/.travis.yml index 0f3c01b200..eecbdb4ac8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -24,14 +24,6 @@ install: - pip install tox-travis lz4 script: - - | - changed_code_filenames=`git diff --name-only $TRAVIS_COMMIT_RANGE` - changed_driver_filenames=`echo "$changed_code_filenames" | grep '^cassandra'` - changelog_changed=`echo "$changed_code_filenames" | grep '^CHANGELOG.rst'` - if [ "$changed_driver_filenames" ] && [ "$changelog_changed" = "" ] ; then - echo "This PR includes code changes, but no CHANGELOG changes. Please include a CHANGELOG entry." - exit 1 - fi - tox - tox -e gevent_loop - tox -e eventlet_loop From 3bb5caa17b4fdf60da020d880c28a93b288ffdf6 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 21 May 2019 12:14:24 -0400 Subject: [PATCH 1502/2005] Add release task in build.yaml --- build.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/build.yaml b/build.yaml index b6657efed6..5658b213f0 100644 --- a/build.yaml +++ b/build.yaml @@ -55,6 +55,13 @@ schedules: - python: [2.7, 3.4, 3.6] - cassandra: ['2.0', '2.1', '2.2', '3.0', 'test-dse'] + release: + schedule: per_commit + branches: + include: [/release-.+/] + env_vars: | + EVENT_LOOP_MANAGER='libev' + weekly_libev: schedule: 0 10 * * 6 branches: From 1d635037d0993cedd1d44f0eaca53450d751e4c9 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 21 May 2019 12:20:44 -0400 Subject: [PATCH 1503/2005] cannot use 'release' in build.yaml --- build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.yaml b/build.yaml index 5658b213f0..18ca56b5e4 100644 --- a/build.yaml +++ b/build.yaml @@ -55,7 +55,7 @@ schedules: - python: [2.7, 3.4, 3.6] - cassandra: ['2.0', '2.1', '2.2', '3.0', 'test-dse'] - release: + release_test: schedule: per_commit branches: include: [/release-.+/] From 3ada0758f502cb25f012e7bae00e1a06f20c835a Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 21 May 2019 17:11:14 -0400 Subject: [PATCH 1504/2005] Fix metadata host getter in long loadbalancing tests --- tests/integration/long/test_loadbalancingpolicies.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index 4f3396a47d..2cc3d8e7f1 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -673,7 +673,7 @@ def test_white_list(self): # white list policy should not allow reconnecting to ignored hosts force_stop(3) self._wait_for_nodes_down([3]) - self.assertFalse(cluster.metadata._hosts[IP_FORMAT % 3].is_currently_reconnecting()) + self.assertFalse(cluster.metadata.get_host(IP_FORMAT % 3).is_currently_reconnecting()) self.coordinator_stats.reset_counts() force_stop(2) @@ -734,4 +734,4 @@ def test_black_list_with_host_filter_policy(self): # policy should not allow reconnecting to ignored host force_stop(2) self._wait_for_nodes_down([2]) - self.assertFalse(cluster.metadata._hosts[ignored_address].is_currently_reconnecting()) + self.assertFalse(cluster.metadata.get_host(ignored_address).is_currently_reconnecting()) From a3ab23e538f03ec9d15f61ee20e3d6fcf421bffe Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 21 May 2019 22:02:03 -0400 Subject: [PATCH 1505/2005] add cleanup for ccm cluster dir in tests --- tests/integration/__init__.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 370c34116f..8abe33f0ab 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -32,6 +32,7 @@ from subprocess import call from itertools import groupby import six +import shutil from cassandra import OperationTimedOut, ReadTimeout, ReadFailure, WriteTimeout, WriteFailure, AlreadyExists, \ InvalidRequest @@ -401,6 +402,11 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[], se log.debug("Creating new CCM cluster, {0}, with args {1}".format(cluster_name, ccm_options)) + # Make sure we cleanup old cluster dir if it exists + cluster_path = os.path.join(path, cluster_name) + if os.path.exists(cluster_path): + shutil.rmtree(cluster_path) + if dse_cluster: CCM_CLUSTER = DseCluster(path, cluster_name, **ccm_options) CCM_CLUSTER.set_configuration_options({'start_native_transport': True}) From d57ab0150f14c52d98253f54cba953b9b7081b8b Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 23 May 2019 13:46:37 -0400 Subject: [PATCH 1506/2005] Add 3.18 in docs.yaml --- docs.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs.yaml b/docs.yaml index 2cf56dd88b..6b6fe14071 100644 --- a/docs.yaml +++ b/docs.yaml @@ -22,6 +22,8 @@ sections: # build extensions like libev CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: + - name: '3.18' + ref: a3ab23e5 - name: '3.17' ref: 38e359e1 - name: '3.16' From abfae10b167d78a5a700095b34b539b0b9962021 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 23 May 2019 14:56:45 -0400 Subject: [PATCH 1507/2005] Minor docs improvement about a cluster contact point --- cassandra/cluster.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index fb5e0dd25f..004c8afacf 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -393,7 +393,9 @@ class Cluster(object): contact_points = ['127.0.0.1'] """ - The list of contact points to try connecting for cluster discovery. + The list of contact points to try connecting for cluster discovery. A + contact point can be a string (ip, hostname) or a + :class:`.connection.EndPoint` instance. Defaults to loopback interface. From ec36b9577d645b5b585fe9e29813ca95e17b882b Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 24 May 2019 15:03:31 -0400 Subject: [PATCH 1508/2005] release 3.18.0: changelog & version --- CHANGELOG.rst | 2 +- cassandra/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index bffc5fb7c1..e5f721c04b 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,6 @@ 3.18.0 ====== -NOT RELEASED +May 27, 2019 Features -------- diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 8910105c7e..e8c3b2f49f 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 17, 1, 'post0') +__version_info__ = (3, 18, 0) __version__ = '.'.join(map(str, __version_info__)) From ede155688528b0234ec1b4c6f0ce6f8be71a4117 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 24 May 2019 15:06:05 -0400 Subject: [PATCH 1509/2005] release 3.18.0: docs --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index 6b6fe14071..fc87b6e938 100644 --- a/docs.yaml +++ b/docs.yaml @@ -23,7 +23,7 @@ sections: CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - name: '3.18' - ref: a3ab23e5 + ref: ec36b957 - name: '3.17' ref: 38e359e1 - name: '3.16' From f200f72c4e69f4e2b5c2d53a02b568d6ffa7df5c Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 27 May 2019 15:55:32 -0400 Subject: [PATCH 1510/2005] post version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index e8c3b2f49f..a3687670ec 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 18, 0) +__version_info__ = (3, 18, 0, 'post0') __version__ = '.'.join(map(str, __version_info__)) From df8c7a373263615cdf8af54b9485989ed76a9055 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 29 May 2019 08:53:49 -0400 Subject: [PATCH 1511/2005] Remove copyright date in readme --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index f14cc77963..6a39d2f5b8 100644 --- a/README.rst +++ b/README.rst @@ -73,7 +73,7 @@ and the ``#datastax-drivers`` channel in the `DataStax Academy Slack Date: Wed, 19 Jun 2019 11:57:11 +0200 Subject: [PATCH 1512/2005] Remove doc pointing RoundRobinPolicy as default Remove a sentence from the API docs pointing to `RoundRobinPolicy` as the default lbp. --- cassandra/policies.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/cassandra/policies.py b/cassandra/policies.py index 474b2a0500..d61066665e 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -155,8 +155,6 @@ class RoundRobinPolicy(LoadBalancingPolicy): A subclass of :class:`.LoadBalancingPolicy` which evenly distributes queries across all nodes in the cluster, regardless of what datacenter the nodes may be in. - - This load balancing policy is used by default. """ _live_hosts = frozenset(()) _position = 0 From 6b6600a000444e445a210cc3b50d430d82eea175 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 19 Jun 2019 09:57:38 -0400 Subject: [PATCH 1513/2005] Add a note that we don't upload docs on gh-pages. --- README-dev.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README-dev.rst b/README-dev.rst index acf8c3ad58..c10aed2330 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -54,6 +54,11 @@ To build the docs, run:: python setup.py doc +Upload the Docs +================= + +This is deprecated. The docs is now only published on https://docs.datastax.com. + To upload the docs, checkout the ``gh-pages`` branch and copy the entire contents all of ``docs/_build/X.Y.Z/*`` into the root of the ``gh-pages`` branch and then push that branch to github. From b56cef9bfba14583552ad9c276d1af3ca1368047 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 21 Jun 2019 10:53:57 -0400 Subject: [PATCH 1514/2005] Add host to execute api docs --- docs/api/cassandra/cluster.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 7fece18569..856ee694d0 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -152,9 +152,9 @@ .. autoattribute:: client_protocol_handler - .. automethod:: execute(statement[, parameters][, timeout][, trace][, custom_payload]) + .. automethod:: execute(statement[, parameters][, timeout][, trace][, custom_payload][, paging_state][, host]) - .. automethod:: execute_async(statement[, parameters][, trace][, custom_payload]) + .. automethod:: execute_async(statement[, parameters][, trace][, custom_payload][, paging_state][, host]) .. automethod:: prepare(statement) From c2a42512778a6b52e6bba4c1295fdf0fa68a8566 Mon Sep 17 00:00:00 2001 From: Yoan Blanc Date: Tue, 22 May 2018 06:59:44 +0200 Subject: [PATCH 1515/2005] tox: enable python 3.7 remove pyyaml as it seems unused --- .travis.yml | 9 ++++++--- test-requirements.txt | 1 - tox.ini | 4 ++-- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/.travis.yml b/.travis.yml index eecbdb4ac8..f1fff4bb63 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,12 +1,15 @@ +dist: xenial +sudo: false + language: python python: - "2.7" - - "3.4" - "3.5" - "3.6" - - "pypy-5.4.1" + - "3.7" + - "pypy2.7-6.0" + - "pypy3.5" -sudo: false env: - CASS_DRIVER_NO_CYTHON=1 diff --git a/test-requirements.txt b/test-requirements.txt index 126780cd3c..bd6fca51dd 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -4,7 +4,6 @@ nose mock!=1.1.* ccm>=2.0 unittest2 -PyYAML pytz sure pure-sasl diff --git a/tox.ini b/tox.ini index 3645bef4b8..2f77e97d99 100644 --- a/tox.ini +++ b/tox.ini @@ -1,10 +1,10 @@ [tox] -envlist = py{27,34,35,36},pypy +envlist = py{27,34,35,36,37},pypy +skip_missing_interpreters = True [base] deps = nose mock<=1.0.1 - PyYAML six packaging cython From b4c459224850b16fcf4d8e6d8f0f76a4a03dd920 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 3 Jul 2019 08:36:40 -0400 Subject: [PATCH 1516/2005] Future-proof Mapping imports --- CHANGELOG.rst | 11 +++++++++++ cassandra/cluster.py | 3 ++- cassandra/compat.py | 20 ++++++++++++++++++++ cassandra/metadata.py | 3 ++- cassandra/util.py | 2 +- 5 files changed, 36 insertions(+), 3 deletions(-) create mode 100644 cassandra/compat.py diff --git a/CHANGELOG.rst b/CHANGELOG.rst index e5f721c04b..0c435bd42b 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,14 @@ +3.19.0 +====== +Not released + +Features +-------- +* Future-proof Mapping imports (PYTHON-1023) + +Bug Fixes +--------- + 3.18.0 ====== May 27, 2019 diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 004c8afacf..a04f76bfc0 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -19,7 +19,7 @@ from __future__ import absolute_import import atexit -from collections import defaultdict, Mapping +from collections import defaultdict from concurrent.futures import ThreadPoolExecutor, FIRST_COMPLETED, wait as wait_futures from copy import copy from functools import partial, wraps @@ -74,6 +74,7 @@ BatchStatement, bind_params, QueryTrace, TraceUnavailable, named_tuple_factory, dict_factory, tuple_factory, FETCH_SIZE_UNSET) from cassandra.timestamps import MonotonicTimestampGenerator +from cassandra.compat import Mapping def _is_eventlet_monkey_patched(): diff --git a/cassandra/compat.py b/cassandra/compat.py new file mode 100644 index 0000000000..83c1b104e5 --- /dev/null +++ b/cassandra/compat.py @@ -0,0 +1,20 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six + +if six.PY2: + from collections import Mapping +elif six.PY3: + from collections.abc import Mapping diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 2c252c9571..273f3fc9f7 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -14,7 +14,7 @@ from binascii import unhexlify from bisect import bisect_left -from collections import defaultdict, Mapping +from collections import defaultdict from functools import total_ordering from hashlib import md5 from itertools import islice, cycle @@ -43,6 +43,7 @@ from cassandra.util import OrderedDict, Version from cassandra.pool import HostDistance from cassandra.connection import EndPoint +from cassandra.compat import Mapping log = logging.getLogger(__name__) diff --git a/cassandra/util.py b/cassandra/util.py index aaff0b9986..78920374d3 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -713,7 +713,7 @@ def _find_insertion(self, x): sortedset = SortedSet # backwards-compatibility -from collections import Mapping +from cassandra.compat import Mapping from six.moves import cPickle From fe3636c2945d2aeed0924d85e5d4e1f074ec8fe7 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 3 Jul 2019 10:20:39 -0400 Subject: [PATCH 1517/2005] Add Python 3.7 support --- CHANGELOG.rst | 1 + README.rst | 2 +- appveyor.yml | 4 ++-- build.yaml | 15 ++++++++------- cassandra/cluster.py | 4 ++-- cassandra/util.py | 4 ++-- docs/index.rst | 2 +- docs/installation.rst | 2 +- tox.ini | 1 - 9 files changed, 18 insertions(+), 17 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 0c435bd42b..51c490133e 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,6 +4,7 @@ Not released Features -------- +* Add Python 3.7 support (PYTHON-1016) * Future-proof Mapping imports (PYTHON-1023) Bug Fixes diff --git a/README.rst b/README.rst index 6a39d2f5b8..b2039a957f 100644 --- a/README.rst +++ b/README.rst @@ -6,7 +6,7 @@ DataStax Python Driver for Apache Cassandra A modern, `feature-rich `_ and highly-tunable Python client library for Apache Cassandra (2.1+) using exclusively Cassandra's binary protocol and Cassandra Query Language v3. -The driver supports Python 2.7, 3.4, 3.5, and 3.6. +The driver supports Python 2.7, 3.4, 3.5, 3.6 and 3.7. If you require compatibility with DataStax Enterprise, use the `DataStax Enterprise Python Driver `_. diff --git a/appveyor.yml b/appveyor.yml index 113f8d308c..d1daaa6ec6 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -3,9 +3,9 @@ environment: - PYTHON: "C:\\Python27-x64" cassandra_version: 3.11.2 ci_type: standard - - PYTHON: "C:\\Python34" + - PYTHON: "C:\\Python35-x64" cassandra_version: 3.11.2 - ci_type: unit + ci_type: standard os: Visual Studio 2015 platform: - x64 diff --git a/build.yaml b/build.yaml index 18ca56b5e4..38e0604b16 100644 --- a/build.yaml +++ b/build.yaml @@ -8,7 +8,7 @@ schedules: EXCLUDE_LONG=1 matrix: exclude: - - python: [3.4, 3.6] + - python: [3.4, 3.6, 3.7] nightly_master: schedule: nightly @@ -18,7 +18,7 @@ schedules: EVENT_LOOP_MANAGER='libev' matrix: exclude: - - python: [3.4, 3.6] + - python: [3.4, 3.6, 3.7] commit_long_test: schedule: per_commit @@ -28,7 +28,7 @@ schedules: EVENT_LOOP_MANAGER='libev' matrix: exclude: - - python: [3.4, 3.6] + - python: [3.4, 3.6, 3.7] - cassandra: ['2.0', '2.1', '3.0'] commit_branches: @@ -40,7 +40,7 @@ schedules: EXCLUDE_LONG=1 matrix: exclude: - - python: [3.4, 3.6] + - python: [3.4, 3.6, 3.7] - cassandra: ['2.0', '2.1', '3.0'] commit_branches_dev: @@ -52,7 +52,7 @@ schedules: EXCLUDE_LONG=1 matrix: exclude: - - python: [2.7, 3.4, 3.6] + - python: [2.7, 3.4, 3.6, 3.7] - cassandra: ['2.0', '2.1', '2.2', '3.0', 'test-dse'] release_test: @@ -77,7 +77,7 @@ schedules: EVENT_LOOP_MANAGER='gevent' matrix: exclude: - - python: [3.4, 3.5, 3.6] + - python: [3.4, 3.5, 3.6, 3.7] weekly_eventlet: schedule: 0 18 * * 6 @@ -119,7 +119,7 @@ schedules: JUST_UPGRADE=True matrix: exclude: - - python: [3.4, 3.6] + - python: [3.4, 3.6, 3.7] - cassandra: ['2.0', '2.1', '2.2', '3.0', 'test-dse'] python: @@ -127,6 +127,7 @@ python: - 3.4 - 3.5 - 3.6 + - 3.7 os: - ubuntu/bionic64/python-driver diff --git a/cassandra/cluster.py b/cassandra/cluster.py index a04f76bfc0..ead250bd55 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2420,10 +2420,10 @@ def add_request_init_listener(self, fn, *args, **kwargs): Adds a callback with arguments to be called when any request is created. It will be invoked as `fn(response_future, *args, **kwargs)` after each client request is created, - and before the request is sent\*. This can be used to create extensions by adding result callbacks to the + and before the request is sent. This can be used to create extensions by adding result callbacks to the response future. - \* where `response_future` is the :class:`.ResponseFuture` for the request. + `response_future` is the :class:`.ResponseFuture` for the request. Note that the init callback is done on the client thread creating the request, so you may need to consider synchronization if you have multiple threads. Any callbacks added to the response future will be executed diff --git a/cassandra/util.py b/cassandra/util.py index 78920374d3..2035dd8f97 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -732,7 +732,7 @@ class OrderedMap(Mapping): ['value', 'value2'] These constructs are needed to support nested collections in Cassandra 2.1.3+, - where frozen collections can be specified as parameters to others\*:: + where frozen collections can be specified as parameters to others:: CREATE TABLE example ( ... @@ -743,7 +743,7 @@ class OrderedMap(Mapping): This class derives from the (immutable) Mapping API. Objects in these maps are not intended be modified. - \* Note: Because of the way Cassandra encodes nested types, when using the + Note: Because of the way Cassandra encodes nested types, when using the driver with nested collections, :attr:`~.Cluster.protocol_version` must be 3 or higher. diff --git a/docs/index.rst b/docs/index.rst index 0b578a845e..577b443a3d 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -4,7 +4,7 @@ A Python client driver for `Apache Cassandra `_. This driver works exclusively with the Cassandra Query Language v3 (CQL3) and Cassandra's native protocol. Cassandra 2.1+ is supported. -The driver supports Python 2.7, 3.4, 3.5, and 3.6. +The driver supports Python 2.7, 3.4, 3.5, 3.6 and 3.7. This driver is open source under the `Apache v2 License `_. diff --git a/docs/installation.rst b/docs/installation.rst index 1ef08546a0..a6eedf4eb8 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -3,7 +3,7 @@ Installation Supported Platforms ------------------- -Python 2.7, 3.4, 3.5 and 3.6 are supported. Both CPython (the standard Python +Python 2.7, 3.4, 3.5, 3.6 and 3.7 are supported. Both CPython (the standard Python implementation) and `PyPy `_ are supported and tested. Linux, OSX, and Windows are supported. diff --git a/tox.ini b/tox.ini index 2f77e97d99..b3716de3c8 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,5 @@ [tox] envlist = py{27,34,35,36,37},pypy -skip_missing_interpreters = True [base] deps = nose From edb13ce3f6e4958517efa3c40e81fb25efbab3bf Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 4 Jul 2019 13:02:49 -0400 Subject: [PATCH 1518/2005] Fix errno module import in long tests --- tests/integration/long/test_ipv6.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/long/test_ipv6.py b/tests/integration/long/test_ipv6.py index 5b27b94d68..5f2bdbddf3 100644 --- a/tests/integration/long/test_ipv6.py +++ b/tests/integration/long/test_ipv6.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import os, socket +import os, socket, errno from ccmlib import common from cassandra.cluster import Cluster, NoHostAvailable @@ -87,7 +87,7 @@ def test_error(self): cluster = Cluster(connection_class=self.connection_class, contact_points=['::1'], port=9043, connect_timeout=10, protocol_version=PROTOCOL_VERSION) self.assertRaisesRegexp(NoHostAvailable, '\(\'Unable to connect.*%s.*::1\', 9043.*Connection refused.*' - % os.errno.ECONNREFUSED, cluster.connect) + % errno.ECONNREFUSED, cluster.connect) def test_error_multiple(self): if len(socket.getaddrinfo('localhost', 9043, socket.AF_UNSPEC, socket.SOCK_STREAM)) < 2: From a2efa524e70b1f769e7947ceea5f1d6a429a8f90 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 11 Jul 2019 13:28:01 -0500 Subject: [PATCH 1519/2005] [PYTHON-1117] Call ConnectionException with correct kwargs --- CHANGELOG.rst | 2 +- cassandra/cluster.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 51c490133e..ef78041ebb 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -9,7 +9,7 @@ Features Bug Fixes --------- - +* Call ConnectionException with correct kwargs (PYTHON-1117) 3.18.0 ====== May 27, 2019 diff --git a/cassandra/cluster.py b/cassandra/cluster.py index ead250bd55..cf41660a88 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2599,7 +2599,7 @@ def run_add_or_renew_pool(): # TODO remove host pool again ??? new_pool = HostConnectionPool(host, distance, self) except AuthenticationFailed as auth_exc: - conn_exc = ConnectionException(str(auth_exc), host=host) + conn_exc = ConnectionException(str(auth_exc), endpoint=host) self.cluster.signal_connection_failure(host, conn_exc, is_host_addition) return False except Exception as conn_exc: From 7368f6969959fbb8a08381170f13f595dda0158e Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 4 Jul 2019 15:13:55 -0400 Subject: [PATCH 1520/2005] Add 2 concurrent execution examples --- .../execute_async_with_queue.py | 65 +++++++++++++++++ .../execute_with_threads.py | 72 +++++++++++++++++++ 2 files changed, 137 insertions(+) create mode 100644 examples/concurrent_executions/execute_async_with_queue.py create mode 100644 examples/concurrent_executions/execute_with_threads.py diff --git a/examples/concurrent_executions/execute_async_with_queue.py b/examples/concurrent_executions/execute_async_with_queue.py new file mode 100644 index 0000000000..60d2a69c3c --- /dev/null +++ b/examples/concurrent_executions/execute_async_with_queue.py @@ -0,0 +1,65 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Inserts multiple rows in a table asynchronously, limiting the amount +of parallel requests with a Queue. +""" + +import time +import uuid +from six.moves import queue + +from cassandra.cluster import Cluster + + +CONCURRENCY_LEVEL = 32 +TOTAL_QUERIES = 10000 + +cluster = Cluster() +session = cluster.connect() + +session.execute(("CREATE KEYSPACE IF NOT EXISTS examples " + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1' }")) +session.execute("USE examples") +session.execute("CREATE TABLE IF NOT EXISTS tbl_sample_kv (id uuid, value text, PRIMARY KEY (id))") +prepared_insert = session.prepare("INSERT INTO tbl_sample_kv (id, value) VALUES (?, ?)") + + +def clear_queue(): + while True: + try: + futures.get_nowait().result() + except queue.Empty: + break + + +start = time.time() +futures = queue.Queue(maxsize=CONCURRENCY_LEVEL) + +# Chunking way, when the max concurrency level is reached, we +# wait the current chunk of requests to finish +for i in range(TOTAL_QUERIES): + future = session.execute_async(prepared_insert, (uuid.uuid4(), str(i))) + try: + futures.put_nowait(future) + except queue.Full: + clear_queue() + futures.put_nowait(future) + +clear_queue() +end = time.time() + +print("Finished executing {} queries with a concurrency level of {} in {:.2f} seconds.". + format(TOTAL_QUERIES, CONCURRENCY_LEVEL, (end-start))) diff --git a/examples/concurrent_executions/execute_with_threads.py b/examples/concurrent_executions/execute_with_threads.py new file mode 100644 index 0000000000..e3c80f5d6b --- /dev/null +++ b/examples/concurrent_executions/execute_with_threads.py @@ -0,0 +1,72 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Inserts multiple rows in a table limiting the amount of parallel requests. + +Note that the driver also provide convenient utility functions to accomplish this. +See https://docs.datastax.com/en/developer/python-driver/latest/api/cassandra/concurrent/ +""" + +import time +import uuid +import threading +from cassandra.cluster import Cluster + + +CONCURRENCY_LEVEL = 32 +TOTAL_QUERIES = 10000 +COUNTER = 0 +COUNTER_LOCK = threading.Lock() + +cluster = Cluster() +session = cluster.connect() + +session.execute(("CREATE KEYSPACE IF NOT EXISTS examples " + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1' }")) +session.execute("USE examples") +session.execute("CREATE TABLE IF NOT EXISTS tbl_sample_kv (id uuid, value text, PRIMARY KEY (id))") +prepared_insert = session.prepare("INSERT INTO tbl_sample_kv (id, value) VALUES (?, ?)") + + +class SimpleQueryExecutor(threading.Thread): + + def run(self): + global COUNTER + + while True: + with COUNTER_LOCK: + current = COUNTER + COUNTER += 1 + + if current >= TOTAL_QUERIES: + break + + session.execute(prepared_insert, (uuid.uuid4(), str(current))) + + +# Launch in parallel n async operations (n being the concurrency level) +start = time.time() +threads = [] +for i in range(CONCURRENCY_LEVEL): + t = SimpleQueryExecutor() + threads.append(t) + t.start() + +for thread in threads: + thread.join() +end = time.time() + +print("Finished executing {} queries with a concurrency level of {} in {:.2f} seconds.". + format(TOTAL_QUERIES, CONCURRENCY_LEVEL, (end-start))) From 316d69295141d22fd0a5b19cf6e6e46fb04fed09 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 12 Jul 2019 14:41:48 -0500 Subject: [PATCH 1521/2005] [PYTHON-1117] Add test checking differently configured auth --- tests/integration/__init__.py | 8 ++-- .../standard/test_authentication.py | 6 +-- .../test_authentication_misconfiguration.py | 47 +++++++++++++++++++ 3 files changed, 54 insertions(+), 7 deletions(-) create mode 100644 tests/integration/standard/test_authentication_misconfiguration.py diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 8abe33f0ab..02f9851477 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -246,7 +246,7 @@ def get_unsupported_upper_protocol(): def local_decorator_creator(): - if not CASSANDRA_IP.startswith("127.0.0."): + if USE_CASS_EXTERNAL or not CASSANDRA_IP.startswith("127.0.0."): return unittest.skip('Tests only runs against local C*') def _id_and_mark(f): @@ -352,9 +352,11 @@ def is_current_cluster(cluster_name, node_counts): return False -def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[], set_keyspace=True, ccm_options=None, +def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, set_keyspace=True, ccm_options=None, configuration_options={}, dse_cluster=False, dse_options={}, dse_version=None): + if not workloads: + workloads = [] if (dse_version and not dse_cluster): raise ValueError('specified dse_version {} but not dse_cluster'.format(dse_version)) set_default_cass_ip() @@ -440,7 +442,7 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[], se if 'graph' not in workloads: if PROTOCOL_VERSION >= 4: jvm_args = [" -Dcassandra.custom_query_handler_class=org.apache.cassandra.cql3.CustomPayloadMirroringQueryHandler"] - if(len(workloads) > 0): + if len(workloads) > 0: for node in CCM_CLUSTER.nodes.values(): node.set_workloads(workloads) if start: diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index b42d7946d9..f8ce42315c 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -19,7 +19,7 @@ from cassandra.auth import PlainTextAuthProvider, SASLClient, SaslAuthProvider from tests.integration import use_singledc, get_cluster, remove_cluster, PROTOCOL_VERSION, CASSANDRA_IP, \ - set_default_cass_ip + set_default_cass_ip, USE_CASS_EXTERNAL from tests.integration.util import assert_quiescent_pool_state try: @@ -35,7 +35,7 @@ def setup_module(): - if CASSANDRA_IP.startswith("127.0.0."): + if CASSANDRA_IP.startswith("127.0.0.") and not USE_CASS_EXTERNAL: use_singledc(start=False) ccm_cluster = get_cluster() ccm_cluster.stop() @@ -59,7 +59,6 @@ class AuthenticationTests(unittest.TestCase): """ Tests to cover basic authentication functionality """ - def get_authentication_provider(self, username, password): """ Return correct authentication provider based on protocol version. @@ -165,7 +164,6 @@ class SaslAuthenticatorTests(AuthenticationTests): """ Test SaslAuthProvider as PlainText """ - def setUp(self): if PROTOCOL_VERSION < 2: raise unittest.SkipTest('Sasl authentication not available for protocol v1') diff --git a/tests/integration/standard/test_authentication_misconfiguration.py b/tests/integration/standard/test_authentication_misconfiguration.py new file mode 100644 index 0000000000..5998693e53 --- /dev/null +++ b/tests/integration/standard/test_authentication_misconfiguration.py @@ -0,0 +1,47 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +from cassandra.cluster import Cluster +from tests.integration import CASSANDRA_IP, USE_CASS_EXTERNAL, use_cluster, PROTOCOL_VERSION + + +class MisconfiguredAuthenticationTests(unittest.TestCase): + """ One node (not the contact point) has password auth. The rest of the nodes have no auth """ + @classmethod + def setUpClass(cls): + if not USE_CASS_EXTERNAL: + ccm_cluster = use_cluster(cls.__name__, [3], start=False) + node3 = ccm_cluster.nodes['node3'] + node3.set_configuration_options(values={ + 'authenticator': 'PasswordAuthenticator', + 'authorizer': 'CassandraAuthorizer', + }) + ccm_cluster.start() + + cls.ccm_cluster = ccm_cluster + + def test_connect_no_auth_provider(self): + cluster = Cluster(protocol_version=PROTOCOL_VERSION, contact_points=[CASSANDRA_IP]) + cluster.connect() + cluster.refresh_nodes() + down_hosts = [host for host in cluster.metadata.all_hosts() if not host.is_up] + self.assertEqual(len(down_hosts), 1) + cluster.shutdown() + + @classmethod + def tearDownClass(cls): + if not USE_CASS_EXTERNAL: + cls.ccm_cluster.stop() From 2e40d2b8a5968ad11e5593c65ea9a3cda6397268 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 16 Jul 2019 13:44:19 -0500 Subject: [PATCH 1522/2005] copyright symbol --- README.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.rst b/README.rst index b2039a957f..a3cf6c56b8 100644 --- a/README.rst +++ b/README.rst @@ -73,7 +73,8 @@ and the ``#datastax-drivers`` channel in the `DataStax Academy Slack Date: Thu, 11 Jul 2019 12:48:31 -0500 Subject: [PATCH 1523/2005] [PYTHON-1100] Fix incorrect metadata for compact counter tables --- CHANGELOG.rst | 1 + cassandra/metadata.py | 4 +- tests/integration/standard/test_metadata.py | 47 +++++++++++++++++++++ 3 files changed, 50 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index ef78041ebb..117349ac2c 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -9,6 +9,7 @@ Features Bug Fixes --------- +* Fix incorrect metadata for compact counter tables (PYTHON-1100) * Call ConnectionException with correct kwargs (PYTHON-1117) 3.18.0 ====== diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 273f3fc9f7..2830d4e5fa 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2326,9 +2326,9 @@ def _build_table_metadata(self, row, col_rows=None, trigger_rows=None, index_row table_meta.options = self._build_table_options(row) flags = row.get('flags', set()) if flags: - compact_static = False - table_meta.is_compact_storage = 'dense' in flags or 'super' in flags or 'compound' not in flags is_dense = 'dense' in flags + compact_static = not is_dense and 'super' not in flags and 'compound' not in flags + table_meta.is_compact_storage = is_dense or 'super' in flags or 'compound' not in flags elif virtual: compact_static = False table_meta.is_compact_storage = False diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 11ca4cf878..4fb7ebf4aa 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -405,6 +405,53 @@ def test_composite_in_compound_primary_key_ordering(self): tablemeta = self.get_table_metadata() self.check_create_statement(tablemeta, create_statement) + def test_compact_storage(self): + create_statement = self.make_create_statement(["a"], [], ["b"]) + create_statement += " WITH COMPACT STORAGE" + + self.session.execute(create_statement) + tablemeta = self.get_table_metadata() + self.check_create_statement(tablemeta, create_statement) + + def test_dense_compact_storage(self): + create_statement = self.make_create_statement(["a"], ["b"], ["c"]) + create_statement += " WITH COMPACT STORAGE" + + self.session.execute(create_statement) + tablemeta = self.get_table_metadata() + self.check_create_statement(tablemeta, create_statement) + + def test_counter(self): + create_statement = ( + "CREATE TABLE {keyspace}.{table} (" + "key text PRIMARY KEY, a1 counter)" + ).format(keyspace=self.keyspace_name, table=self.function_table_name) + + self.session.execute(create_statement) + tablemeta = self.get_table_metadata() + self.check_create_statement(tablemeta, create_statement) + + def test_counter_with_compact_storage(self): + """ PYTHON-1100 """ + create_statement = ( + "CREATE TABLE {keyspace}.{table} (" + "key text PRIMARY KEY, a1 counter) WITH COMPACT STORAGE" + ).format(keyspace=self.keyspace_name, table=self.function_table_name) + + self.session.execute(create_statement) + tablemeta = self.get_table_metadata() + self.check_create_statement(tablemeta, create_statement) + + def test_counter_with_dense_compact_storage(self): + create_statement = ( + "CREATE TABLE {keyspace}.{table} (" + "key text, c1 text, a1 counter, PRIMARY KEY (key, c1)) WITH COMPACT STORAGE" + ).format(keyspace=self.keyspace_name, table=self.function_table_name) + + self.session.execute(create_statement) + tablemeta = self.get_table_metadata() + self.check_create_statement(tablemeta, create_statement) + def test_indexes(self): create_statement = self.make_create_statement(["a"], ["b", "c"], ["d", "e", "f"]) create_statement += " WITH CLUSTERING ORDER BY (b ASC, c ASC)" From df5916789283ef0a4ea3f3a035db416a8e3d388a Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 19 Jul 2019 13:26:11 -0500 Subject: [PATCH 1524/2005] =?UTF-8?q?[PYTHON-1110]=20Fix=20ForcedHostSwitc?= =?UTF-8?q?hPolicy=20not=20always=20choosing=20correct=20=E2=80=A6=20(#103?= =?UTF-8?q?1)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [PYTHON-1110] Fix ForcedHostSwitchPolicy not always choosing correct host in prepared tests. --- tests/integration/standard/test_query.py | 96 ++++++++++++++---------- 1 file changed, 58 insertions(+), 38 deletions(-) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 22b4130740..ac639c1647 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -24,7 +24,7 @@ from cassandra import ConsistencyLevel, Unavailable, InvalidRequest, cluster from cassandra.query import (PreparedStatement, BoundStatement, SimpleStatement, BatchStatement, BatchType, dict_factory, TraceUnavailable) -from cassandra.cluster import Cluster, NoHostAvailable, ExecutionProfile +from cassandra.cluster import Cluster, NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.policies import HostDistance, RoundRobinPolicy, WhiteListRoundRobinPolicy from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, \ greaterthanprotocolv3, MockLoggingHandler, get_supported_protocol_versions, local, get_cluster, setup_keyspace, \ @@ -37,6 +37,7 @@ import re import mock +import six log = logging.getLogger(__name__) @@ -46,7 +47,7 @@ def setup_module(): if not USE_CASS_EXTERNAL: use_singledc(start=False) ccm_cluster = get_cluster() - ccm_cluster.clear() + ccm_cluster.stop() # This is necessary because test_too_many_statements may # timeout otherwise config_options = {'write_request_timeout_in_ms': '20000'} @@ -445,17 +446,26 @@ def test_bound_keyspace(self): self.assertEqual(bound.keyspace, 'test3rf') -class ForcedHostSwitchPolicy(RoundRobinPolicy): +class ForcedHostIndexPolicy(RoundRobinPolicy): + def __init__(self, host_index_to_use=0): + super(ForcedHostIndexPolicy, self).__init__() + self.host_index_to_use = host_index_to_use + + def set_host(self, host_index): + """ 0-based index of which host to use """ + self.host_index_to_use = host_index def make_query_plan(self, working_keyspace=None, query=None): - if hasattr(self, 'counter'): - self.counter += 1 - else: - self.counter = 0 - index = self.counter % 3 - a = list(self._live_hosts) - value = [a[index]] - return value + live_hosts = sorted(list(self._live_hosts)) + host = [] + try: + host = [live_hosts[self.host_index_to_use]] + except IndexError as e: + six.raise_from(IndexError( + 'You specified an index larger than the number of hosts. Total hosts: {}. Index specified: {}'.format( + len(live_hosts), self.host_index_to_use + )), e) + return host class PreparedStatementMetdataTest(unittest.TestCase): @@ -495,33 +505,30 @@ def test_prepared_metadata_generation(self): class PreparedStatementArgTest(unittest.TestCase): + def setUp(self): + self.mock_handler = MockLoggingHandler() + logger = logging.getLogger(cluster.__name__) + logger.addHandler(self.mock_handler) + def test_prepare_on_all_hosts(self): """ Test to validate prepare_on_all_hosts flag is honored. - Use a special ForcedHostSwitchPolicy to ensure prepared queries are cycled over nodes that should not + Force the host of each query to ensure prepared queries are cycled over nodes that should not have them prepared. Check the logs to insure they are being re-prepared on those nodes @since 3.4.0 @jira_ticket PYTHON-556 @expected_result queries will have to re-prepared on hosts that aren't the control connection """ - white_list = ForcedHostSwitchPolicy() - clus = Cluster( - load_balancing_policy=white_list, - protocol_version=PROTOCOL_VERSION, prepare_on_all_hosts=False, reprepare_on_up=False) + clus = Cluster(protocol_version=PROTOCOL_VERSION, prepare_on_all_hosts=False, reprepare_on_up=False) self.addCleanup(clus.shutdown) session = clus.connect(wait_for_all_pools=True) - mock_handler = MockLoggingHandler() - logger = logging.getLogger(cluster.__name__) - logger.addHandler(mock_handler) - select_statement = session.prepare("SELECT * FROM system.local") - session.execute(select_statement) - session.execute(select_statement) - session.execute(select_statement) - self.assertEqual(2, mock_handler.get_message_count('debug', "Re-preparing")) - + select_statement = session.prepare("SELECT k FROM test3rf.test WHERE k = ?") + for host in clus.metadata.all_hosts(): + session.execute(select_statement, (1, ), host=host) + self.assertEqual(2, self.mock_handler.get_message_count('debug', "Re-preparing")) def test_prepare_batch_statement(self): """ @@ -533,11 +540,15 @@ def test_prepare_batch_statement(self): @expected_result queries will have to re-prepared on hosts that aren't the control connection and the batch statement will be sent. """ - white_list = ForcedHostSwitchPolicy() + policy = ForcedHostIndexPolicy() clus = Cluster( - load_balancing_policy=white_list, - protocol_version=PROTOCOL_VERSION, prepare_on_all_hosts=False, - reprepare_on_up=False) + execution_profiles={ + EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=policy), + }, + protocol_version=PROTOCOL_VERSION, + prepare_on_all_hosts=False, + reprepare_on_up=False, + ) self.addCleanup(clus.shutdown) table = "test3rf.%s" % self._testMethodName.lower() @@ -551,9 +562,14 @@ def test_prepare_batch_statement(self): # This is going to query a host where the query # is not prepared + policy.set_host(1) batch_statement = BatchStatement(consistency_level=ConsistencyLevel.ONE) batch_statement.add(insert_statement, (1, 2)) session.execute(batch_statement) + + # To verify our test assumption that queries are getting re-prepared properly + self.assertEqual(1, self.mock_handler.get_message_count('debug', "Re-preparing")) + select_results = session.execute(SimpleStatement("SELECT * FROM %s WHERE k = 1" % table, consistency_level=ConsistencyLevel.ALL)) first_row = select_results[0][:2] @@ -570,11 +586,7 @@ def test_prepare_batch_statement_after_alter(self): @expected_result queries will have to re-prepared on hosts that aren't the control connection and the batch statement will be sent. """ - white_list = ForcedHostSwitchPolicy() - clus = Cluster( - load_balancing_policy=white_list, - protocol_version=PROTOCOL_VERSION, prepare_on_all_hosts=False, - reprepare_on_up=False) + clus = Cluster(protocol_version=PROTOCOL_VERSION, prepare_on_all_hosts=False, reprepare_on_up=False) self.addCleanup(clus.shutdown) table = "test3rf.%s" % self._testMethodName.lower() @@ -590,21 +602,29 @@ def test_prepare_batch_statement_after_alter(self): values_to_insert = [(1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 6)] - # We query the three hosts in order (due to the ForcedHostSwitchPolicy) + # We query the three hosts in order (due to the ForcedHostIndexPolicy) # the first three queries will have to be repreapred and the rest should # work as normal batch prepared statements + hosts = clus.metadata.all_hosts() for i in range(10): value_to_insert = values_to_insert[i % len(values_to_insert)] batch_statement = BatchStatement(consistency_level=ConsistencyLevel.ONE) batch_statement.add(insert_statement, value_to_insert) - session.execute(batch_statement) + session.execute(batch_statement, host=hosts[i % len(hosts)]) select_results = session.execute("SELECT * FROM %s" % table) - expected_results = [(1, None, 2, None, 3), (2, None, 3, None, 4), - (3, None, 4, None, 5), (4, None, 5, None, 6)] + expected_results = [ + (1, None, 2, None, 3), + (2, None, 3, None, 4), + (3, None, 4, None, 5), + (4, None, 5, None, 6) + ] self.assertEqual(set(expected_results), set(select_results._current_rows)) + # To verify our test assumption that queries are getting re-prepared properly + self.assertEqual(3, self.mock_handler.get_message_count('debug', "Re-preparing")) + class PrintStatementTests(unittest.TestCase): """ From e82ca2620fec3349fa6d776e686240fdef4e8aeb Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 29 Jul 2019 07:51:35 -0500 Subject: [PATCH 1525/2005] Allow connecting to pre-release server versions (PYTHON-1118) (#1032) Allow connecting to pre-release server versions (PYTHON-1118) --- CHANGELOG.rst | 1 + cassandra/util.py | 54 ++++++++++++++++++++++++----------- tests/unit/test_util_types.py | 38 +++++++++++++++--------- 3 files changed, 63 insertions(+), 30 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 117349ac2c..048bd46424 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -11,6 +11,7 @@ Bug Fixes --------- * Fix incorrect metadata for compact counter tables (PYTHON-1100) * Call ConnectionException with correct kwargs (PYTHON-1117) +* Can't connect to clusters built from source because version parsing doesn't handle 'x.y-SNAPSHOT' (PYTHON-1118) 3.18.0 ====== May 27, 2019 diff --git a/cassandra/util.py b/cassandra/util.py index 2035dd8f97..2b9e9161a3 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -1241,13 +1241,18 @@ class Version(object): minor = 0 patch = 0 build = 0 + prerelease = 0 def __init__(self, version): self._version = version - parts = list(reversed(version.split('.'))) + if '-' in version: + version_without_prerelease, self.prerelease = version.split('-') + else: + version_without_prerelease = version + parts = list(reversed(version_without_prerelease.split('.'))) if len(parts) > 4: raise ValueError("Invalid version: {}. Only 4 " - "components are supported".format(version)) + "components plus prerelease are supported".format(version)) self.major = int(parts.pop()) self.minor = int(parts.pop()) if parts else 0 @@ -1264,22 +1269,26 @@ def __hash__(self): return self._version def __repr__(self): + version_string = "Version({0}, {1}, {2}".format(self.major, self.minor, self.patch) if self.build: - return "Version({0}, {1}, {2}, {3})".format(self.major, self.minor, self.patch, self.build) + version_string += ", {}".format(self.build) + if self.prerelease: + version_string += ", {}".format(self.prerelease) + version_string += ")" - return "Version({0}, {1}, {2})".format(self.major, self.minor, self.patch) + return version_string def __str__(self): return self._version @staticmethod - def _compare_build(build, other_build, cmp): - if not (isinstance(build, six.integer_types) and - isinstance(other_build, six.integer_types)): - build = str(build) - other_build = str(other_build) + def _compare_version_part(version, other_version, cmp): + if not (isinstance(version, six.integer_types) and + isinstance(other_version, six.integer_types)): + version = str(version) + other_version = str(other_version) - return cmp(build, other_build) + return cmp(version, other_version) def __eq__(self, other): if not isinstance(other, Version): @@ -1288,7 +1297,9 @@ def __eq__(self, other): return (self.major == other.major and self.minor == other.minor and self.patch == other.patch and - self._compare_build(self.build, other.build, lambda s, o: s == o)) + self._compare_version_part(self.build, other.build, lambda s, o: s == o) and + self._compare_version_part(self.prerelease, other.prerelease, lambda s, o: s == o) + ) def __gt__(self, other): if not isinstance(other, Version): @@ -1297,10 +1308,21 @@ def __gt__(self, other): is_major_ge = self.major >= other.major is_minor_ge = self.minor >= other.minor is_patch_ge = self.patch >= other.patch - is_build_gt = self._compare_build( - self.build, other.build, lambda s, o: s > o) + is_build_gt = self._compare_version_part(self.build, other.build, lambda s, o: s > o) + is_build_ge = self._compare_version_part(self.build, other.build, lambda s, o: s >= o) + + # By definition, a prerelease comes BEFORE the actual release, so if a version + # doesn't have a prerelease, it's automatically greater than anything that does + if self.prerelease and not other.prerelease: + is_prerelease_gt = False + elif other.prerelease and not self.prerelease: + is_prerelease_gt = True + else: + is_prerelease_gt = self._compare_version_part(self.prerelease, other.prerelease, lambda s, o: s > o) \ return (self.major > other.major or - (is_major_ge and self.minor > other.minor) or - (is_major_ge and is_minor_ge and self.patch > other.patch) or - (is_major_ge and is_minor_ge and is_patch_ge and is_build_gt)) + (is_major_ge and self.minor > other.minor) or + (is_major_ge and is_minor_ge and self.patch > other.patch) or + (is_major_ge and is_minor_ge and is_patch_ge and is_build_gt) or + (is_major_ge and is_minor_ge and is_patch_ge and is_build_ge and is_prerelease_gt) + ) diff --git a/tests/unit/test_util_types.py b/tests/unit/test_util_types.py index c2dff20967..e5d50ea036 100644 --- a/tests/unit/test_util_types.py +++ b/tests/unit/test_util_types.py @@ -209,25 +209,24 @@ def test_str(self): class VersionTests(unittest.TestCase): def test_version_parsing(self): - versions = [ - (2, 0, 0), - (3, 1, 0), - (2, 4, 54), - (3, 1, 1, 12), - (3, 55, 1, 'build12'), - (3, 55, 1, '20190429-TEST') + ('2.0.0', (2, 0, 0, 0, 0)), + ('3.1.0', (3, 1, 0, 0, 0)), + ('2.4.54', (2, 4, 54, 0, 0)), + ('3.1.1.12', (3, 1, 1, 12, 0)), + ('3.55.1.build12', (3, 55, 1, 'build12', 0)), + ('3.55.1.20190429-TEST', (3, 55, 1, 20190429, 'TEST')), + ('4.0-SNAPSHOT', (4, 0, 0, 0, 'SNAPSHOT')), ] - for version in versions: - str_version = '.'.join([str(p) for p in version]) + for str_version, expected_result in versions: v = Version(str_version) self.assertEqual(str_version, str(v)) - self.assertEqual(v.major, version[0]) - self.assertEqual(v.minor, version[1]) - self.assertEqual(v.patch, version[2]) - if len(version) > 3: - self.assertEqual(v.build, version[3]) + self.assertEqual(v.major, expected_result[0]) + self.assertEqual(v.minor, expected_result[1]) + self.assertEqual(v.patch, expected_result[2]) + self.assertEqual(v.build, expected_result[3]) + self.assertEqual(v.prerelease, expected_result[4]) # not supported version formats with self.assertRaises(ValueError): @@ -279,3 +278,14 @@ def test_version_compare(self): self.assertTrue(Version('4') == Version('4.0.0')) self.assertTrue(Version('4.0') == Version('4.0.0.0')) self.assertTrue(Version('4.0') > Version('3.9.3')) + + self.assertTrue(Version('4.0') > Version('4.0-SNAPSHOT')) + self.assertTrue(Version('4.0-SNAPSHOT') == Version('4.0-SNAPSHOT')) + self.assertTrue(Version('4.0.0-SNAPSHOT') == Version('4.0-SNAPSHOT')) + self.assertTrue(Version('4.0.0-SNAPSHOT') == Version('4.0.0-SNAPSHOT')) + self.assertTrue(Version('4.0.0.build5-SNAPSHOT') == Version('4.0.0.build5-SNAPSHOT')) + self.assertTrue(Version('4.1-SNAPSHOT') > Version('4.0-SNAPSHOT')) + self.assertTrue(Version('4.0.1-SNAPSHOT') > Version('4.0.0-SNAPSHOT')) + self.assertTrue(Version('4.0.0.build6-SNAPSHOT') > Version('4.0.0.build5-SNAPSHOT')) + self.assertTrue(Version('4.0-SNAPSHOT2') > Version('4.0-SNAPSHOT1')) + self.assertTrue(Version('4.0-SNAPSHOT2') > Version('4.0.0-SNAPSHOT1')) From 19936e15bc8c8e4af08191258e883faefee7f827 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 30 Jul 2019 08:14:44 -0500 Subject: [PATCH 1526/2005] =?UTF-8?q?[PYTHON-1104]=20Remove=20invalid=20wa?= =?UTF-8?q?rning=20in=20set=5Fsession=20when=20we=20initializ=E2=80=A6=20(?= =?UTF-8?q?#1035)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [PYTHON-1104] Remove invalid warning in set_session when we initialize a default connection --- CHANGELOG.rst | 2 +- cassandra/cqlengine/connection.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 048bd46424..fe529bb2c6 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -6,7 +6,7 @@ Features -------- * Add Python 3.7 support (PYTHON-1016) * Future-proof Mapping imports (PYTHON-1023) - +* Remove invalid warning in set_session when we initialize a default connection (PYTHON-1104) Bug Fixes --------- * Fix incorrect metadata for compact counter tables (PYTHON-1100) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 88371e9b7e..619946b4e3 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -277,9 +277,9 @@ def set_session(s): # no default connection set; initalize one register_connection('default', session=s, default=True) conn = get_connection() - - if conn.session: - log.warning("configuring new default connection for cqlengine when one was already set") + else: + if conn.session: + log.warning("configuring new default session for cqlengine when one was already set") if s.row_factory is not dict_factory: raise CQLEngineException("Failed to initialize: 'Session.row_factory' must be 'dict_factory'.") From d98b69dcc4e74eab8f23ad73a21cc713ac3e4a75 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 26 Jul 2019 17:06:13 -0500 Subject: [PATCH 1527/2005] [PYTHON-1119] Set the proper default ExecutionProfile.row_factory value --- CHANGELOG.rst | 1 + cassandra/cluster.py | 2 +- docs/api/cassandra/cluster.rst | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index fe529bb2c6..7fd5036116 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -12,6 +12,7 @@ Bug Fixes * Fix incorrect metadata for compact counter tables (PYTHON-1100) * Call ConnectionException with correct kwargs (PYTHON-1117) * Can't connect to clusters built from source because version parsing doesn't handle 'x.y-SNAPSHOT' (PYTHON-1118) +* Set the proper default ExecutionProfile.row_factory value (PYTHON-1119) 3.18.0 ====== May 27, 2019 diff --git a/cassandra/cluster.py b/cassandra/cluster.py index cf41660a88..2e13b4cce9 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -258,7 +258,7 @@ class ExecutionProfile(object): Request timeout used when not overridden in :meth:`.Session.execute` """ - row_factory = staticmethod(tuple_factory) + row_factory = staticmethod(named_tuple_factory) """ A callable to format results, accepting ``(colnames, rows)`` where ``colnames`` is a list of column names, and ``rows`` is a list of tuples, with each tuple representing a row of parsed values. diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 856ee694d0..fef8f4d188 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -118,7 +118,7 @@ .. automethod:: set_meta_refresh_enabled -.. autoclass:: ExecutionProfile (load_balancing_policy=, retry_policy=None, consistency_level=LOCAL_ONE, serial_consistency_level=None, request_timeout=10.0, row_factory=, speculative_execution_policy=None) +.. autoclass:: ExecutionProfile (load_balancing_policy=, retry_policy=None, consistency_level=LOCAL_ONE, serial_consistency_level=None, request_timeout=10.0, row_factory=, speculative_execution_policy=None) :members: :exclude-members: consistency_level From 9124af2244e8917800f3b9dfea16827a0fc6f1a3 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 5 Aug 2019 13:42:55 -0500 Subject: [PATCH 1528/2005] [PYTHON-1031] Remove "frozen" from metadata of function arguments (#1030) [PYTHON-1031] Remove "frozen" from metadata of function arguments --- CHANGELOG.rst | 1 + cassandra/cqltypes.py | 68 +++++++++++++++++++++++++++++++++++++ cassandra/metadata.py | 7 ++-- tests/unit/test_metadata.py | 47 ++++++++++++++++++++++++- 4 files changed, 119 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 7fd5036116..37981ad0c2 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -9,6 +9,7 @@ Features * Remove invalid warning in set_session when we initialize a default connection (PYTHON-1104) Bug Fixes --------- +* as_cql_query UDF/UDA parameters incorrectly includes "frozen" if arguments are collections (PYTHON-1031) * Fix incorrect metadata for compact counter tables (PYTHON-1100) * Call ConnectionException with correct kwargs (PYTHON-1117) * Can't connect to clusters built from source because version parsing doesn't handle 'x.y-SNAPSHOT' (PYTHON-1118) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 22dec9ada1..55bb022e8c 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -28,6 +28,7 @@ # .from_cql_literal() and .as_cql_literal() classmethods (or whatever). from __future__ import absolute_import # to enable import io from stdlib +import ast from binascii import unhexlify import calendar from collections import namedtuple @@ -121,6 +122,73 @@ def __new__(metacls, name, bases, dct): )) +def cqltype_to_python(cql_string): + """ + Given a cql type string, creates a list that can be manipulated in python + Example: + int -> ['int'] + frozen> -> ['frozen', ['tuple', ['text', 'int']]] + """ + scanner = re.Scanner(( + (r'[a-zA-Z0-9_]+', lambda s, t: "'{}'".format(t)), + (r'<', lambda s, t: ', ['), + (r'>', lambda s, t: ']'), + (r'[, ]', lambda s, t: t), + (r'".*?"', lambda s, t: "'{}'".format(t)), + )) + + scanned_tokens = scanner.scan(cql_string)[0] + hierarchy = ast.literal_eval(''.join(scanned_tokens)) + return [hierarchy] if isinstance(hierarchy, str) else list(hierarchy) + + +def python_to_cqltype(types): + """ + Opposite of the `cql_to_python` function. Given a python list, creates a cql type string from the representation + Example: + ['int'] -> int + ['frozen', ['tuple', ['text', 'int']]] -> frozen> + """ + scanner = re.Scanner(( + (r"'[a-zA-Z0-9_]+'", lambda s, t: t[1:-1]), + (r'^\[', lambda s, t: None), + (r'\]$', lambda s, t: None), + (r',\s*\[', lambda s, t: '<'), + (r'\]', lambda s, t: '>'), + (r'[, ]', lambda s, t: t), + (r'\'".*?"\'', lambda s, t: t[1:-1]), + )) + + scanned_tokens = scanner.scan(repr(types))[0] + cql = ''.join(scanned_tokens).replace('\\\\', '\\') + return cql + + +def _strip_frozen_from_python(types): + """ + Given a python list representing a cql type, removes 'frozen' + Example: + ['frozen', ['tuple', ['text', 'int']]] -> ['tuple', ['text', 'int']] + """ + while 'frozen' in types: + index = types.index('frozen') + types = types[:index] + types[index + 1] + types[index + 2:] + new_types = [_strip_frozen_from_python(item) if isinstance(item, list) else item for item in types] + return new_types + + +def strip_frozen(cql): + """ + Given a cql type string, and removes frozen + Example: + frozen> -> tuple + """ + types = cqltype_to_python(cql) + types_without_frozen = _strip_frozen_from_python(types) + cql = python_to_cqltype(types_without_frozen) + return cql + + def lookup_casstype_simple(casstype): """ Given a Cassandra type name (either fully distinguished or not), hand diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 2830d4e5fa..daa79fd698 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -908,9 +908,9 @@ def as_cql_query(self, formatted=False): sep = '\n ' if formatted else ' ' keyspace = protect_name(self.keyspace) name = protect_name(self.name) - type_list = ', '.join(self.argument_types) + type_list = ', '.join([types.strip_frozen(arg_type) for arg_type in self.argument_types]) state_func = protect_name(self.state_func) - state_type = self.state_type + state_type = types.strip_frozen(self.state_type) ret = "CREATE AGGREGATE %(keyspace)s.%(name)s(%(type_list)s)%(sep)s" \ "SFUNC %(state_func)s%(sep)s" \ @@ -1001,7 +1001,7 @@ def as_cql_query(self, formatted=False): sep = '\n ' if formatted else ' ' keyspace = protect_name(self.keyspace) name = protect_name(self.name) - arg_list = ', '.join(["%s %s" % (protect_name(n), t) + arg_list = ', '.join(["%s %s" % (protect_name(n), types.strip_frozen(t)) for n, t in zip(self.argument_names, self.argument_types)]) typ = self.return_type lang = self.language @@ -2833,3 +2833,4 @@ def group_keys_by_replica(session, keyspace, table, keys): keys_per_host[NO_VALID_REPLICA].append(key) return dict(keys_per_host) + diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index cc0fc61c7e..49b2627c67 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - try: import unittest2 as unittest except ImportError: @@ -25,6 +24,7 @@ import timeit import cassandra +from cassandra.cqltypes import strip_frozen from cassandra.marshal import uint16_unpack, uint16_pack from cassandra.metadata import (Murmur3Token, MD5Token, BytesToken, ReplicationStrategy, @@ -470,6 +470,32 @@ def test_as_cql_query_name_escaping(self): self.assertEqual('CREATE TYPE "MyKeyspace"."MyType" ("AbA" ascii, "keyspace" ascii)', udt.as_cql_query(formatted=False)) +class UserDefinedFunctionTest(unittest.TestCase): + def test_as_cql_query_removes_frozen(self): + func = Function("ks1", "myfunction", ["frozen>"], ["a"], "int", "java", "return 0;", True) + expected_result = ( + "CREATE FUNCTION ks1.myfunction(a tuple) " + "CALLED ON NULL INPUT " + "RETURNS int " + "LANGUAGE java " + "AS $$return 0;$$" + ) + self.assertEqual(expected_result, func.as_cql_query(formatted=False)) + + +class UserDefinedAggregateTest(unittest.TestCase): + def test_as_cql_query_removes_frozen(self): + aggregate = Aggregate("ks1", "myaggregate", ["frozen>"], "statefunc", "frozen>", "finalfunc", "(0)", "tuple") + expected_result = ( + "CREATE AGGREGATE ks1.myaggregate(tuple) " + "SFUNC statefunc " + "STYPE tuple " + "FINALFUNC finalfunc " + "INITCOND (0)" + ) + self.assertEqual(expected_result, aggregate.as_cql_query(formatted=False)) + + class IndexTest(unittest.TestCase): def test_build_index_as_cql(self): @@ -575,3 +601,22 @@ def test_iterate_all_hosts_and_modify(self): metadata.remove_host(host) self.assertEqual(len(metadata.all_hosts()), 0) + + +class MetadataHelpersTest(unittest.TestCase): + """ For any helper functions that need unit tests """ + def test_strip_frozen(self): + self.longMessage = True + + argument_to_expected_results = [ + ('int', 'int'), + ('tuple', 'tuple'), + (r'map<"!@#$%^&*()[]\ frozen >>>", int>', r'map<"!@#$%^&*()[]\ frozen >>>", int>'), # A valid UDT name + ('frozen>', 'tuple'), + (r'frozen>>", int>>', r'map<"!@#$%^&*()[]\ frozen >>>", int>'), + ('frozen>, int>>, frozen>>>>>', + 'map, int>, map>>'), + ] + for argument, expected_result in argument_to_expected_results: + result = strip_frozen(argument) + self.assertEqual(result, expected_result, "strip_frozen() arg: {}".format(argument)) From 7f43caa287050e98cad147d6e59b89d21bc038cf Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 29 Jul 2019 15:10:23 -0500 Subject: [PATCH 1529/2005] [python-1105] Include param values in cqlengine logging --- CHANGELOG.rst | 1 + cassandra/cqlengine/connection.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 37981ad0c2..54ff467fd0 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -7,6 +7,7 @@ Features * Add Python 3.7 support (PYTHON-1016) * Future-proof Mapping imports (PYTHON-1023) * Remove invalid warning in set_session when we initialize a default connection (PYTHON-1104) +* Include param values in cqlengine logging (PYTHON-1105) Bug Fixes --------- * as_cql_query UDF/UDA parameters incorrectly includes "frozen" if arguments are collections (PYTHON-1031) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 619946b4e3..9322e4b96a 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -335,7 +335,7 @@ def execute(query, params=None, consistency_level=None, timeout=NOT_SET, connect query = SimpleStatement(str(query), consistency_level=consistency_level, fetch_size=query.fetch_size) elif isinstance(query, six.string_types): query = SimpleStatement(query, consistency_level=consistency_level) - log.debug(format_log_context(query.query_string, connection=connection)) + log.debug(format_log_context('Query: {}, Params: {}'.format(query.query_string, params), connection=connection)) result = conn.session.execute(query, params, timeout=timeout) From dd9d6b169f745cb55561ddb259fb47d70141a434 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 6 Aug 2019 12:49:10 -0500 Subject: [PATCH 1530/2005] explain wait_for_all_pools in the docstring --- cassandra/cluster.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 2e13b4cce9..471ceda5ee 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1329,9 +1329,14 @@ def protocol_downgrade(self, host_endpoint, previous_version): def connect(self, keyspace=None, wait_for_all_pools=False): """ - Creates and returns a new :class:`~.Session` object. If `keyspace` - is specified, that keyspace will be the default keyspace for + Creates and returns a new :class:`~.Session` object. + + If `keyspace` is specified, that keyspace will be the default keyspace for operations on the ``Session``. + + `wait_for_all_pools` specifies whether this call should wait for all connection pools to be + established or attempted. Default is `False`, which means it will return when the first + successful connection is established. Remaining pools are added asynchronously. """ with self._lock: if self.is_shutdown: From 71aa360ad6984f08adcae185d02d8a5549e27022 Mon Sep 17 00:00:00 2001 From: Alistair Broomhead Date: Thu, 16 May 2019 14:52:47 +0100 Subject: [PATCH 1531/2005] Add test for PYTHON-1093 --- CHANGELOG.rst | 1 + tests/integration/cqlengine/test_timestamp.py | 10 ++++++++++ 2 files changed, 11 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 54ff467fd0..cc7a8368a0 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -11,6 +11,7 @@ Features Bug Fixes --------- * as_cql_query UDF/UDA parameters incorrectly includes "frozen" if arguments are collections (PYTHON-1031) +* cqlengine does not currently support combining TTL and TIMESTAMP on INSERT (PYTHON-1093) * Fix incorrect metadata for compact counter tables (PYTHON-1100) * Call ConnectionException with correct kwargs (PYTHON-1117) * Can't connect to clusters built from source because version parsing doesn't handle 'x.y-SNAPSHOT' (PYTHON-1118) diff --git a/tests/integration/cqlengine/test_timestamp.py b/tests/integration/cqlengine/test_timestamp.py index 88ed5e91df..50e8bf4812 100644 --- a/tests/integration/cqlengine/test_timestamp.py +++ b/tests/integration/cqlengine/test_timestamp.py @@ -83,6 +83,16 @@ def test_non_batch_syntax_unit(self): "USING TIMESTAMP".should.be.within(query) + def test_non_batch_syntax_with_ttl_unit(self): + + with mock.patch.object(self.session, "execute") as m: + TestTimestampModel.timestamp(timedelta(seconds=30)).ttl(30).create( + count=1) + + query = m.call_args[0][0].query_string + + query.should.match(r"USING TTL \d* AND TIMESTAMP") + class UpdateWithTimestampTest(BaseTimestampTest): def setUp(self): From b3b826358eaf092a46da60ee77681658a8f4d10b Mon Sep 17 00:00:00 2001 From: Alistair Broomhead Date: Thu, 16 May 2019 14:54:08 +0100 Subject: [PATCH 1532/2005] Handle case where there is both TTL and TIMESTAMP The existing code causes an error like so: ``` cassandra.protocol.SyntaxException: ``` by checking for the ttl in the timestamp code we can change the generated query from `USING TTL {ttl} USING TIMESTAMP {timestamp}` => `USING TTL {ttl} AND TIMESTAMP {timestamp}`, which is correct. --- cassandra/cqlengine/statements.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 57aac56e59..f8721fa39f 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -755,7 +755,8 @@ def __unicode__(self): qs += ["USING TTL {0}".format(self.ttl)] if self.timestamp: - qs += ["USING TIMESTAMP {0}".format(self.timestamp_normalized)] + statement = "AND TIMESTAMP {0}" if self.ttl else "USING TIMESTAMP {0}" + qs += [statement.format(self.timestamp_normalized)] return ' '.join(qs) From cfa8c79f5539737b4296fa4b49a243ca7b14e6f5 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 8 Aug 2019 16:52:26 -0500 Subject: [PATCH 1533/2005] [python-1093] Add integration test for cqlengine timestamp with ttl and some small code cleanup --- cassandra/cqlengine/statements.py | 8 +++++--- tests/integration/cqlengine/test_timestamp.py | 4 ++++ 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index f8721fa39f..c6ceb16607 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -751,13 +751,15 @@ def __unicode__(self): if self.if_not_exists: qs += ["IF NOT EXISTS"] + using_options = [] if self.ttl: - qs += ["USING TTL {0}".format(self.ttl)] + using_options += ["TTL {}".format(self.ttl)] if self.timestamp: - statement = "AND TIMESTAMP {0}" if self.ttl else "USING TIMESTAMP {0}" - qs += [statement.format(self.timestamp_normalized)] + using_options += ["TIMESTAMP {}".format(self.timestamp_normalized)] + if using_options: + qs += ["USING {}".format(" AND ".join(using_options))] return ' '.join(qs) diff --git a/tests/integration/cqlengine/test_timestamp.py b/tests/integration/cqlengine/test_timestamp.py index 50e8bf4812..abf751ec47 100644 --- a/tests/integration/cqlengine/test_timestamp.py +++ b/tests/integration/cqlengine/test_timestamp.py @@ -74,6 +74,10 @@ def test_non_batch_syntax_integration(self): tmp = TestTimestampModel.timestamp(timedelta(seconds=30)).create(count=1) tmp.should.be.ok + def test_non_batch_syntax_with_tll_integration(self): + tmp = TestTimestampModel.timestamp(timedelta(seconds=30)).ttl(30).create(count=1) + tmp.should.be.ok + def test_non_batch_syntax_unit(self): with mock.patch.object(self.session, "execute") as m: From 3553c48bd17b9a4f80dfdd7bf4aa1f9710996d59 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 26 Jul 2019 15:35:21 -0500 Subject: [PATCH 1534/2005] [PYTHON-1103] Make Session._execution_profile_to_string standalone --- cassandra/cluster.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index cf41660a88..2c630b02e2 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -225,6 +225,12 @@ def _resolve_contact_points(contact_points, port): for endpoint in addrinfo] +def _execution_profile_to_string(name): + if name is EXEC_PROFILE_DEFAULT: + return 'EXEC_PROFILE_DEFAULT' + return '"%s"' % (name,) + + class ExecutionProfile(object): load_balancing_policy = None """ @@ -1161,7 +1167,7 @@ def add_execution_profile(self, name, profile, pool_wait_timeout=5): 'to a cluster with explicitly configured contact_points will ' 'raise an exception; please specify a load-balancing policy ' 'in the ExecutionProfile.' - ''.format(name=repr(name), self=self, ep=profile)) + ''.format(name=_execution_profile_to_string(name), self=self, ep=profile)) self.profile_manager.profiles[name] = profile profile.load_balancing_policy.populate(self, self.metadata.all_hosts()) @@ -2378,11 +2384,6 @@ def _create_response_future(self, query, parameters, trace, custom_payload, load_balancer=load_balancing_policy, start_time=start_time, speculative_execution_plan=spec_exec_plan, host=host) - def _execution_profile_to_string(self, name): - if name is EXEC_PROFILE_DEFAULT: - return 'EXEC_PROFILE_DEFAULT' - return '"%s"' % (name,) - def get_execution_profile(self, name): """ Returns the execution profile associated with the provided ``name``. @@ -2393,9 +2394,9 @@ def get_execution_profile(self, name): try: return profiles[name] except KeyError: - eps = [self._execution_profile_to_string(ep) for ep in profiles.keys()] + eps = [_execution_profile_to_string(ep) for ep in profiles.keys()] raise ValueError("Invalid execution_profile: %s; valid profiles are: %s." % ( - self._execution_profile_to_string(name), ', '.join(eps))) + _execution_profile_to_string(name), ', '.join(eps))) def _maybe_get_execution_profile(self, ep): return ep if isinstance(ep, ExecutionProfile) else self.get_execution_profile(ep) From 2266c3580a8b01fe8efe051894b7cdde5bbaf630 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 11 Aug 2016 17:41:28 -0500 Subject: [PATCH 1535/2005] [PYTHON-622] Performance Improvement for NetworkTopologyStrategy make_token_replica_map This updates the `make_token_replica_map` function multiple times faster. The previous implementation used a combination of islice and cycle. cycle will build an internal list, and save every element it produces. `islice` will iterate over everything in it's sequence up to `start`. I've replaced this with a simple index model which uses the existing list. Timings on my (admittingly slow) machine for these tests show differences in speed of: pre_change: test_nts_token_performance_large_cluster: 4.509002365055494 test_nts_token_performance_large_with_small_backup 5.222048101015389 post_change: test_nts_token_performance_large_cluster: 0.2818319270154461 test_nts_token_performance_large_with_small_backup 0.6304094420047477 I've done some more performance testing on a load-testing cluster with 200 nodes and 256 vnodes, and the performance improvement is roughly 30x. GregBestland added some very useful performance tests for this in [PYTHON-622], however the way that the tokens were generated and distributed in the ring meant the tests were quite slow due to the fact that each nodes vnodes tokens were adjacent in the ring causing many iterations to find the next replica on a different physical node. --- CHANGELOG.rst | 2 ++ cassandra/metadata.py | 7 ++++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index cc7a8368a0..26ad38b805 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -16,6 +16,8 @@ Bug Fixes * Call ConnectionException with correct kwargs (PYTHON-1117) * Can't connect to clusters built from source because version parsing doesn't handle 'x.y-SNAPSHOT' (PYTHON-1118) * Set the proper default ExecutionProfile.row_factory value (PYTHON-1119) +* NTS Token Replica Map Generation is slow (PYTHON-622) + 3.18.0 ====== May 27, 2019 diff --git a/cassandra/metadata.py b/cassandra/metadata.py index daa79fd698..1608a114c2 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -536,7 +536,12 @@ def make_token_replica_map(self, token_to_host_owner, ring): racks_placed = set() racks_this_dc = dc_racks[dc] hosts_this_dc = len(hosts_per_dc[dc]) - for token_offset in islice(cycle(token_offsets), index, index + num_tokens): + + for token_offset_index in six.moves.range(index, index+num_tokens): + if token_offset_index >= len(token_offsets): + token_offset_index = token_offset_index - len(token_offsets) + + token_offset = token_offsets[token_offset_index] host = token_to_host_owner[ring[token_offset]] if replicas_remaining == 0 or replicas_this_dc == hosts_this_dc: break From c9ffdaa7ba5b652cf336dd7e62eded3d0ef14572 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 13 Aug 2019 09:51:38 -0400 Subject: [PATCH 1536/2005] Update changelog --- CHANGELOG.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 26ad38b805..b23566effb 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -8,6 +8,8 @@ Features * Future-proof Mapping imports (PYTHON-1023) * Remove invalid warning in set_session when we initialize a default connection (PYTHON-1104) * Include param values in cqlengine logging (PYTHON-1105) +* NTS Token Replica Map Generation is slow (PYTHON-622) + Bug Fixes --------- * as_cql_query UDF/UDA parameters incorrectly includes "frozen" if arguments are collections (PYTHON-1031) @@ -16,7 +18,6 @@ Bug Fixes * Call ConnectionException with correct kwargs (PYTHON-1117) * Can't connect to clusters built from source because version parsing doesn't handle 'x.y-SNAPSHOT' (PYTHON-1118) * Set the proper default ExecutionProfile.row_factory value (PYTHON-1119) -* NTS Token Replica Map Generation is slow (PYTHON-622) 3.18.0 ====== From 9b145cf94c40457f6ee650062a1b5a1293dee160 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 13 Aug 2019 14:23:47 -0400 Subject: [PATCH 1537/2005] Add SNIEndPoint support --- CHANGELOG.rst | 8 ++++ cassandra/connection.py | 76 +++++++++++++++++++++++++++++++ docs/api/cassandra/connection.rst | 4 ++ tests/unit/test_endpoints.py | 70 ++++++++++++++++++++++++++++ 4 files changed, 158 insertions(+) create mode 100644 tests/unit/test_endpoints.py diff --git a/CHANGELOG.rst b/CHANGELOG.rst index b23566effb..6920bbcb14 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,11 @@ +3.20.0 +====== +Not Released + +Features +-------- +* Add SNIEndPoint support (PYTHON-1084) + 3.19.0 ====== Not released diff --git a/cassandra/connection.py b/cassandra/connection.py index f762d10667..1b8ec55722 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -223,6 +223,82 @@ def create(self, row): return DefaultEndPoint(self.cluster.address_translator.translate(addr), 9042) # will eventually support port +@total_ordering +class SniEndPoint(EndPoint): + """SNI Proxy EndPoint implementation.""" + + def __init__(self, proxy_address, server_name, port=9042): + self._proxy_address = proxy_address + self._index = 0 + self._resolved_address = None # resolved address + self._port = port + self._server_name = server_name + self._ssl_options = {'server_hostname': server_name} + + @property + def address(self): + return self._proxy_address + + @property + def port(self): + return self._port + + @property + def ssl_options(self): + return self._ssl_options + + def resolve(self): + try: + resolved_addresses = socket.getaddrinfo(self._proxy_address, self._port, + socket.AF_UNSPEC, socket.SOCK_STREAM) + except socket.gaierror: + log.debug('Could not resolve sni proxy hostname "%s" ' + 'with port %d' % (self._proxy_address, self._port)) + raise + + # round-robin pick + self._resolved_address = sorted(addr[4][0] for addr in resolved_addresses)[self._index % len(resolved_addresses)] + self._index += 1 + + return self._resolved_address, self._port + + def __eq__(self, other): + return (isinstance(other, SniEndPoint) and + self.address == other.address and self.port == other.port and + self._server_name == other._server_name) + + def __hash__(self): + return hash((self.address, self.port, self._server_name)) + + def __lt__(self, other): + return ((self.address, self.port, self._server_name) < + (other.address, other.port, self._server_name)) + + def __str__(self): + return str("%s:%d:%s" % (self.address, self.port, self._server_name)) + + def __repr__(self): + return "<%s: %s:%d:%s>" % (self.__class__.__name__, + self.address, self.port, self._server_name) + + +class SniEndPointFactory(EndPointFactory): + + def __init__(self, proxy_address, port): + self._proxy_address = proxy_address + self._port = port + + def create(self, row): + host_id = row.get("host_id") + if host_id is None: + raise ValueError("No host_id to create the SniEndPoint") + + return SniEndPoint(self._proxy_address, str(host_id), self._port) + + def create_from_sni(self, sni): + return SniEndPoint(self._proxy_address, sni, self._port) + + class _Frame(object): def __init__(self, version, flags, stream, opcode, body_offset, end_pos): self.version = version diff --git a/docs/api/cassandra/connection.rst b/docs/api/cassandra/connection.rst index 49ee1ec20d..dfbb89740b 100644 --- a/docs/api/cassandra/connection.rst +++ b/docs/api/cassandra/connection.rst @@ -13,3 +13,7 @@ .. autoclass:: EndPointFactory :members: + +.. autoclass:: SniEndPoint + +.. autoclass:: SniEndPointFactory diff --git a/tests/unit/test_endpoints.py b/tests/unit/test_endpoints.py new file mode 100644 index 0000000000..2452e267ba --- /dev/null +++ b/tests/unit/test_endpoints.py @@ -0,0 +1,70 @@ +# Copyright DataStax, Inc. +# +# Licensed under the DataStax DSE Driver License; +# you may not use this file except in compliance with the License. +# +# You may obtain a copy of the License at +# +# http://www.datastax.com/terms/datastax-dse-driver-license-terms +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +import itertools + +from cassandra.connection import DefaultEndPoint, SniEndPoint, SniEndPointFactory + +from mock import patch + + +def socket_getaddrinfo(*args): + return [ + (0, 0, 0, '', ('127.0.0.1', 30002)), + (0, 0, 0, '', ('127.0.0.2', 30002)), + (0, 0, 0, '', ('127.0.0.3', 30002)) + ] + + +@patch('socket.getaddrinfo', socket_getaddrinfo) +class SniEndPointTest(unittest.TestCase): + + endpoint_factory = SniEndPointFactory("proxy.datastax.com", 30002) + + def test_sni_endpoint_properties(self): + + endpoint = self.endpoint_factory.create_from_sni('test') + self.assertEqual(endpoint.address, 'proxy.datastax.com') + self.assertEqual(endpoint.port, 30002) + self.assertEqual(endpoint._server_name, 'test') + self.assertEqual(str(endpoint), 'proxy.datastax.com:30002:test') + + def test_endpoint_equality(self): + self.assertNotEqual( + DefaultEndPoint('10.0.0.1'), + self.endpoint_factory.create_from_sni('10.0.0.1') + ) + + self.assertEqual( + self.endpoint_factory.create_from_sni('10.0.0.1'), + self.endpoint_factory.create_from_sni('10.0.0.1') + ) + + self.assertNotEqual( + self.endpoint_factory.create_from_sni('10.0.0.1'), + self.endpoint_factory.create_from_sni('10.0.0.0') + ) + + self.assertNotEqual( + self.endpoint_factory.create_from_sni('10.0.0.1'), + SniEndPointFactory("proxy.datastax.com", 9999).create_from_sni('10.0.0.1') + ) + + def test_endpoint_resolve(self): + ips = ['127.0.0.1', '127.0.0.2', '127.0.0.3'] + it = itertools.cycle(ips) + + endpoint = self.endpoint_factory.create_from_sni('test') + for i in range(10): + (address, _) = endpoint.resolve() + self.assertEqual(address, next(it)) From 7bf81c8ea9b344bbb58a0006f04e2a8e7bee5401 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 13 Aug 2019 14:41:54 -0500 Subject: [PATCH 1538/2005] Add docs clarification to host argument of Session.execute --- cassandra/cluster.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 07d776f3af..78a4c7a816 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2245,8 +2245,9 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, `paging_state` is an optional paging state, reused from a previous :class:`ResultSet`. - `host` is the :class:`pool.Host` that should handle the query. Using this is discouraged except in a few - cases, e.g., querying node-local tables and applying schema changes. + `host` is the :class:`cassandra.pool.Host` that should handle the query. If the host specified is down or + not yet connected, the query will fail with :class:`NoHostAvailable`. Using this is + discouraged except in a few cases, e.g., querying node-local tables and applying schema changes. """ return self.execute_async(query, parameters, trace, custom_payload, timeout, execution_profile, paging_state, host).result() From 78e9cb390fb71b71d5869566a13e9a29d3f9f9ae Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 13 Aug 2019 15:51:28 -0500 Subject: [PATCH 1539/2005] =?UTF-8?q?[PYTHON-1111]=20Disallow=20multiple?= =?UTF-8?q?=20cluster=20objects=20sharing=20same=20executio=E2=80=A6=20(#1?= =?UTF-8?q?034)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update documentation to state multiple cluster objects shouldn't share execution profile. --- docs/execution_profiles.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/execution_profiles.rst b/docs/execution_profiles.rst index 0243bb19ab..698f3db84b 100644 --- a/docs/execution_profiles.rst +++ b/docs/execution_profiles.rst @@ -9,6 +9,10 @@ The legacy configuration remains intact, but legacy and Execution Profile APIs cannot be used simultaneously on the same client ``Cluster``. Legacy configuration will be removed in the next major release (4.0). +An execution profile and its parameters should be unique across ``Cluster`` instances. +For example, an execution profile and its ``LoadBalancingPolicy`` should +not be applied to more than one ``Cluster`` instance. + This document explains how Execution Profiles relate to existing settings, and shows how to use the new profiles for request execution. From 0f8c9915dff6a598ff3ed95e9ba3411a42f02904 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 14 Aug 2019 08:29:59 -0400 Subject: [PATCH 1540/2005] Add a new Endpoint type to support unix sockets --- CHANGELOG.rst | 1 + build.yaml | 3 + cassandra/connection.py | 70 +++++++++++++++-- docs/api/cassandra/connection.rst | 2 + tests/integration/advanced/__init__.py | 23 ++++++ .../advanced/test_unixsocketendpoint.py | 77 +++++++++++++++++++ 6 files changed, 170 insertions(+), 6 deletions(-) create mode 100644 tests/integration/advanced/__init__.py create mode 100644 tests/integration/advanced/test_unixsocketendpoint.py diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 6920bbcb14..dd744b580f 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,6 +5,7 @@ Not Released Features -------- * Add SNIEndPoint support (PYTHON-1084) +* Add a new Endpoint type to support unix sockets (PYTHON-1098) 3.19.0 ====== diff --git a/build.yaml b/build.yaml index 38e0604b16..7839b7141c 100644 --- a/build.yaml +++ b/build.yaml @@ -150,6 +150,9 @@ build: export PATH=$JAVA_HOME/bin:$PATH export PYTHONPATH="" + # Required for unix socket tests + sudo apt-get install socat + # Install latest setuptools pip install --upgrade pip pip install -U setuptools diff --git a/cassandra/connection.py b/cassandra/connection.py index 1b8ec55722..c6dcc9e541 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -144,6 +144,13 @@ def ssl_options(self): """ return None + @property + def socket_family(self): + """ + The socket family of the endpoint. + """ + return socket.AF_UNSPEC + def resolve(self): """ Resolve the endpoint to an address/port. This is called @@ -299,6 +306,47 @@ def create_from_sni(self, sni): return SniEndPoint(self._proxy_address, sni, self._port) +@total_ordering +class UnixSocketEndPoint(EndPoint): + """ + Unix Socket EndPoint implementation. + """ + + def __init__(self, unix_socket_path): + self._unix_socket_path = unix_socket_path + + @property + def address(self): + return self._unix_socket_path + + @property + def port(self): + return None + + @property + def socket_family(self): + return socket.AF_UNIX + + def resolve(self): + return self.address, None + + def __eq__(self, other): + return (isinstance(other, UnixSocketEndPoint) and + self._unix_socket_path == other._unix_socket_path) + + def __hash__(self): + return hash(self._unix_socket_path) + + def __lt__(self, other): + return self._unix_socket_path < other._unix_socket_path + + def __str__(self): + return str("%s" % (self._unix_socket_path,)) + + def __repr__(self): + return "<%s: %s>" % (self.__class__.__name__, self._unix_socket_path) + + class _Frame(object): def __init__(self, version, flags, stream, opcode, body_offset, end_pos): self.version = version @@ -557,13 +605,22 @@ def factory(cls, endpoint, timeout, *args, **kwargs): else: return conn - def _connect_socket(self): - sockerr = None - inet_address, port = self.endpoint.resolve() - addresses = socket.getaddrinfo(inet_address, port, socket.AF_UNSPEC, socket.SOCK_STREAM) + def _get_socket_addresses(self): + address, port = self.endpoint.resolve() + + if self.endpoint.socket_family == socket.AF_UNIX: + return [(socket.AF_UNIX, socket.SOCK_STREAM, 0, None, address)] + + addresses = socket.getaddrinfo(address, port, self.endpoint.socket_family, socket.SOCK_STREAM) if not addresses: raise ConnectionException("getaddrinfo returned empty list for %s" % (self.endpoint,)) - for (af, socktype, proto, canonname, sockaddr) in addresses: + + return addresses + + def _connect_socket(self): + sockerr = None + addresses = self._get_socket_addresses() + for (af, socktype, proto, _, sockaddr) in addresses: try: self._socket = self._socket_impl.socket(af, socktype, proto) if self.ssl_context: @@ -587,7 +644,8 @@ def _connect_socket(self): sockerr = err if sockerr: - raise socket.error(sockerr.errno, "Tried connecting to %s. Last error: %s" % ([a[4] for a in addresses], sockerr.strerror or sockerr)) + raise socket.error(sockerr.errno, "Tried connecting to %s. Last error: %s" % + ([a[4] for a in addresses], sockerr.strerror or sockerr)) if self.sockopts: for args in self.sockopts: diff --git a/docs/api/cassandra/connection.rst b/docs/api/cassandra/connection.rst index dfbb89740b..32cca590c0 100644 --- a/docs/api/cassandra/connection.rst +++ b/docs/api/cassandra/connection.rst @@ -17,3 +17,5 @@ .. autoclass:: SniEndPoint .. autoclass:: SniEndPointFactory + +.. autoclass:: UnixSocketEndPoint diff --git a/tests/integration/advanced/__init__.py b/tests/integration/advanced/__init__.py new file mode 100644 index 0000000000..662f5b8c76 --- /dev/null +++ b/tests/integration/advanced/__init__.py @@ -0,0 +1,23 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +try: + from ccmlib import common +except ImportError as e: + raise unittest.SkipTest('ccm is a dependency for integration tests:', e) diff --git a/tests/integration/advanced/test_unixsocketendpoint.py b/tests/integration/advanced/test_unixsocketendpoint.py new file mode 100644 index 0000000000..e435314637 --- /dev/null +++ b/tests/integration/advanced/test_unixsocketendpoint.py @@ -0,0 +1,77 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +import time +import subprocess +import logging + +from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT +from cassandra.connection import UnixSocketEndPoint +from cassandra.policies import WhiteListRoundRobinPolicy, RoundRobinPolicy + +from tests import notwindows +from tests.integration import use_single_node + +log = logging.getLogger() +log.setLevel('DEBUG') + +UNIX_SOCKET_PATH = '/tmp/cass.sock' + + +def setup_module(): + use_single_node() + + +class UnixSocketWhiteListRoundRobinPolicy(WhiteListRoundRobinPolicy): + def __init__(self, hosts): + self._allowed_hosts = self._allowed_hosts_resolved = tuple(hosts) + RoundRobinPolicy.__init__(self) + + +@notwindows +class UnixSocketTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + log.debug("Starting socat...") + cls.proc = subprocess.Popen( + ['socat', + 'UNIX-LISTEN:%s,fork' % UNIX_SOCKET_PATH, + 'TCP:localhost:9042'], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + + time.sleep(1) + if cls.proc.poll() is not None: + for line in cls.proc.stdout.readlines(): + log.debug("socat: " + line.decode('utf-8')) + raise Exception("Error while starting socat. Return code: %d" % cls.proc.returncode) + + lbp = UnixSocketWhiteListRoundRobinPolicy([UNIX_SOCKET_PATH]) + ep = ExecutionProfile(load_balancing_policy=lbp) + endpoint = UnixSocketEndPoint(UNIX_SOCKET_PATH) + cls.cluster = Cluster([endpoint], execution_profiles={EXEC_PROFILE_DEFAULT: ep}) + + @classmethod + def tearDownClass(cls): + cls.cluster.shutdown() + cls.proc.terminate() + + def test_unix_socket_connection(self): + s = self.cluster.connect() + s.execute('select * from system.local') From c5dbb05e2f895d753bbd40973376e13d9ed8ce6d Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 14 Aug 2019 13:42:55 -0500 Subject: [PATCH 1541/2005] Fix flakiness in LWT tests --- .../cqlengine/test_lwt_conditional.py | 34 +++++++++---------- tests/integration/standard/test_query.py | 9 ++--- 2 files changed, 22 insertions(+), 21 deletions(-) diff --git a/tests/integration/cqlengine/test_lwt_conditional.py b/tests/integration/cqlengine/test_lwt_conditional.py index 45afb48697..1c418ae6d8 100644 --- a/tests/integration/cqlengine/test_lwt_conditional.py +++ b/tests/integration/cqlengine/test_lwt_conditional.py @@ -57,7 +57,7 @@ def tearDownClass(cls): drop_table(TestConditionalModel) def test_update_using_conditional(self): - t = TestConditionalModel.create(text='blah blah') + t = TestConditionalModel.if_not_exists().create(text='blah blah') t.text = 'new blah' with mock.patch.object(self.session, 'execute') as m: t.iff(text='blah blah').save() @@ -66,7 +66,7 @@ def test_update_using_conditional(self): self.assertIn('IF "text" = %(0)s', args[0][0].query_string) def test_update_conditional_success(self): - t = TestConditionalModel.create(text='blah blah', count=5) + t = TestConditionalModel.if_not_exists().create(text='blah blah', count=5) id = t.id t.text = 'new blah' t.iff(text='blah blah').save() @@ -76,7 +76,7 @@ def test_update_conditional_success(self): self.assertEqual(updated.text, 'new blah') def test_update_failure(self): - t = TestConditionalModel.create(text='blah blah') + t = TestConditionalModel.if_not_exists().create(text='blah blah') t.text = 'new blah' t = t.iff(text='something wrong') @@ -89,7 +89,7 @@ def test_update_failure(self): }) def test_blind_update(self): - t = TestConditionalModel.create(text='blah blah') + t = TestConditionalModel.if_not_exists().create(text='blah blah') t.text = 'something else' uid = t.id @@ -100,7 +100,7 @@ def test_blind_update(self): self.assertIn('IF "text" = %(1)s', args[0][0].query_string) def test_blind_update_fail(self): - t = TestConditionalModel.create(text='blah blah') + t = TestConditionalModel.if_not_exists().create(text='blah blah') t.text = 'something else' uid = t.id qs = TestConditionalModel.objects(id=uid).iff(text='Not dis!') @@ -120,7 +120,7 @@ def test_conditional_clause(self): self.assertEqual('"some_value" = %(3)s', str(tc)) def test_batch_update_conditional(self): - t = TestConditionalModel.create(text='something', count=5) + t = TestConditionalModel.if_not_exists().create(text='something', count=5) id = t.id with BatchQuery() as b: t.batch(b).iff(count=5).update(text='something else') @@ -166,7 +166,7 @@ def test_batch_update_conditional_several_rows(self): def test_delete_conditional(self): # DML path - t = TestConditionalModel.create(text='something', count=5) + t = TestConditionalModel.if_not_exists().create(text='something', count=5) self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1) with self.assertRaises(LWTException): t.iff(count=9999).delete() @@ -175,7 +175,7 @@ def test_delete_conditional(self): self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 0) # QuerySet path - t = TestConditionalModel.create(text='something', count=5) + t = TestConditionalModel.if_not_exists().create(text='something', count=5) self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1) with self.assertRaises(LWTException): TestConditionalModel.objects(id=t.id).iff(count=9999).delete() @@ -195,7 +195,7 @@ def test_delete_lwt_ne(self): """ # DML path - t = TestConditionalModel.create(text='something', count=5) + t = TestConditionalModel.if_not_exists().create(text='something', count=5) self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1) with self.assertRaises(LWTException): t.iff(count__ne=5).delete() @@ -203,7 +203,7 @@ def test_delete_lwt_ne(self): self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 0) # QuerySet path - t = TestConditionalModel.create(text='something', count=5) + t = TestConditionalModel.if_not_exists().create(text='something', count=5) self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1) with self.assertRaises(LWTException): TestConditionalModel.objects(id=t.id).iff(count__ne=5).delete() @@ -222,7 +222,7 @@ def test_update_lwt_ne(self): """ # DML path - t = TestConditionalModel.create(text='something', count=5) + t = TestConditionalModel.if_not_exists().create(text='something', count=5) self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1) with self.assertRaises(LWTException): t.iff(count__ne=5).update(text='nothing') @@ -231,7 +231,7 @@ def test_update_lwt_ne(self): t.delete() # QuerySet path - t = TestConditionalModel.create(text='something', count=5) + t = TestConditionalModel.if_not_exists().create(text='something', count=5) self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1) with self.assertRaises(LWTException): TestConditionalModel.objects(id=t.id).iff(count__ne=5).update(text='nothing') @@ -245,7 +245,7 @@ def test_update_to_none(self): # https://github.com/datastax/python-driver/blob/3.1.1/cassandra/cqlengine/query.py#L1197-L1200 # DML path - t = TestConditionalModel.create(text='something', count=5) + t = TestConditionalModel.if_not_exists().create(text='something', count=5) self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1) with self.assertRaises(LWTException): t.iff(count=9999).update(text=None) @@ -254,7 +254,7 @@ def test_update_to_none(self): self.assertIsNone(TestConditionalModel.objects(id=t.id).first().text) # QuerySet path - t = TestConditionalModel.create(text='something', count=5) + t = TestConditionalModel.if_not_exists().create(text='something', count=5) self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1) with self.assertRaises(LWTException): TestConditionalModel.objects(id=t.id).iff(count=9999).update(text=None) @@ -264,14 +264,14 @@ def test_update_to_none(self): def test_column_delete_after_update(self): # DML path - t = TestConditionalModel.create(text='something', count=5) + t = TestConditionalModel.if_not_exists().create(text='something', count=5) t.iff(count=5).update(text=None, count=6) self.assertIsNone(t.text) self.assertEqual(t.count, 6) # QuerySet path - t = TestConditionalModel.create(text='something', count=5) + t = TestConditionalModel.if_not_exists().create(text='something', count=5) TestConditionalModel.objects(id=t.id).iff(count=5).update(text=None, count=6) self.assertIsNone(TestConditionalModel.objects(id=t.id).first().text) @@ -288,7 +288,7 @@ def test_conditional_without_instance(self): @test_category object_mapper """ uuid = uuid4() - TestConditionalModel.create(id=uuid, text='test_for_cassandra', count=5) + TestConditionalModel.if_not_exists().create(id=uuid, text='test_for_cassandra', count=5) # This uses the iff method directly from the model class without # an instance having been created diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index ac639c1647..97a1e68f6d 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -887,7 +887,8 @@ def setUp(self): "Protocol 2.0+ is required for Lightweight transactions, currently testing against %r" % (PROTOCOL_VERSION,)) - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + serial_profile = ExecutionProfile(consistency_level=ConsistencyLevel.SERIAL) + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, execution_profiles={'serial': serial_profile}) self.session = self.cluster.connect() ddl = ''' @@ -975,7 +976,7 @@ def test_was_applied_batch_stmt(self): """ for batch_type in (BatchType.UNLOGGED, BatchType.LOGGED): batch_statement = BatchStatement(batch_type) - batch_statement.add_all(["INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 0, 10);", + batch_statement.add_all(["INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 0, 10) IF NOT EXISTS;", "INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 1, 10);", "INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 2, 10);"], [None] * 3) result = self.session.execute(batch_statement) @@ -991,7 +992,7 @@ def test_was_applied_batch_stmt(self): result = self.session.execute(batch_statement) self.assertFalse(result.was_applied) - all_rows = self.session.execute("SELECT * from test3rf.lwt_clustering") + all_rows = self.session.execute("SELECT * from test3rf.lwt_clustering", execution_profile='serial') # Verify the non conditional insert hasn't been inserted self.assertEqual(len(all_rows.current_rows), 3) @@ -1017,7 +1018,7 @@ def test_was_applied_batch_stmt(self): result = self.session.execute(batch_statement) self.assertTrue(result.was_applied) - all_rows = self.session.execute("SELECT * from test3rf.lwt_clustering") + all_rows = self.session.execute("SELECT * from test3rf.lwt_clustering", execution_profile='serial') for i, row in enumerate(all_rows): self.assertEqual((0, i, 10), (row[0], row[1], row[2])) From f12ae6dba25b7982d09da9c270f592dcbbdb1305 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 20 Aug 2019 09:10:43 -0400 Subject: [PATCH 1542/2005] Python 3.7 eventlet reactor fix --- cassandra/cluster.py | 38 ++++++++++++++++++- test-requirements.txt | 1 + tests/__init__.py | 9 +++++ .../integration/simulacron/test_connection.py | 9 ++--- tox.ini | 1 + 5 files changed, 52 insertions(+), 6 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 78a4c7a816..4702963b53 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1061,7 +1061,7 @@ def __init__(self, HostDistance.REMOTE: DEFAULT_MAX_CONNECTIONS_PER_REMOTE_HOST } - self.executor = ThreadPoolExecutor(max_workers=executor_threads) + self.executor = self._create_thread_pool_executor(max_workers=executor_threads) self.scheduler = _Scheduler(self.executor) self._lock = RLock() @@ -1076,6 +1076,42 @@ def __init__(self, self.status_event_refresh_window, schema_metadata_enabled, token_metadata_enabled) + def _create_thread_pool_executor(self, **kwargs): + """ + Create a ThreadPoolExecutor for the cluster. In most cases, the built-in + `concurrent.futures.ThreadPoolExecutor` is used. + + Python 3.7 and Eventlet cause the `concurrent.futures.ThreadPoolExecutor` + to hang indefinitely. In that case, the user needs to have the `futurist` + package so we can use the `futurist.GreenThreadPoolExecutor` class instead. + + :param kwargs: All keyword args are passed to the ThreadPoolExecutor constructor. + :return: A ThreadPoolExecutor instance. + """ + tpe_class = ThreadPoolExecutor + if sys.version_info[0] >= 3 and sys.version_info[1] >= 7: + try: + from cassandra.io.eventletreactor import EventletConnection + is_eventlet = issubclass(self.connection_class, EventletConnection) + except: + # Eventlet is not available or can't be detected + return tpe_class(**kwargs) + + if is_eventlet: + try: + from futurist import GreenThreadPoolExecutor + tpe_class = GreenThreadPoolExecutor + except ImportError: + # futurist is not available + raise ImportError( + ("Python 3.7 and Eventlet cause the `concurrent.futures.ThreadPoolExecutor` " + "to hang indefinitely. If you want to use the Eventlet reactor, you " + "need to install the `futurist` package to allow the driver to use " + "the GreenThreadPoolExecutor. See https://github.com/eventlet/eventlet/issues/508 " + "for more details.")) + + return tpe_class(**kwargs) + def register_user_type(self, keyspace, user_type, klass): """ Registers a class to use to represent a particular user-defined type. diff --git a/test-requirements.txt b/test-requirements.txt index bd6fca51dd..9679247325 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -12,4 +12,5 @@ gevent>=1.0 eventlet cython>=0.20,<0.30 packaging +futurist; python_version >= '3.7' asynctest; python_version > '3.4' diff --git a/tests/__init__.py b/tests/__init__.py index 23c956ae03..6260583beb 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -21,6 +21,7 @@ import socket import platform import os +from concurrent.futures import ThreadPoolExecutor log = logging.getLogger() log.setLevel('DEBUG') @@ -58,6 +59,7 @@ def is_eventlet_time_monkey_patched(): def is_monkey_patched(): return is_gevent_monkey_patched() or is_eventlet_monkey_patched() +thread_pool_executor_class = ThreadPoolExecutor EVENT_LOOP_MANAGER = os.getenv('EVENT_LOOP_MANAGER', "libev") if "gevent" in EVENT_LOOP_MANAGER: @@ -71,6 +73,13 @@ def is_monkey_patched(): from cassandra.io.eventletreactor import EventletConnection connection_class = EventletConnection + + try: + from futurist import GreenThreadPoolExecutor + thread_pool_executor_class = GreenThreadPoolExecutor + except: + # futurist is installed only with python >=3.7 + pass elif "asyncore" in EVENT_LOOP_MANAGER: from cassandra.io.asyncorereactor import AsyncoreConnection connection_class = AsyncoreConnection diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index 6fb6e53b04..4bc52d58de 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -19,8 +19,6 @@ import logging import time -from concurrent.futures import ThreadPoolExecutor - from mock import Mock from cassandra import OperationTimedOut @@ -28,6 +26,7 @@ _Scheduler, NoHostAvailable) from cassandra.policies import HostStateListener, RoundRobinPolicy from cassandra.io.asyncorereactor import AsyncoreConnection +from tests import connection_class, thread_pool_executor_class from tests.integration import (PROTOCOL_VERSION, requiressimulacron) from tests.integration.util import assert_quiescent_pool_state from tests.integration.simulacron import SimulacronBase @@ -55,7 +54,7 @@ def on_add(self, host): def on_remove(self, host): pass -class ThreadTracker(ThreadPoolExecutor): +class ThreadTracker(thread_pool_executor_class): called_functions = [] def submit(self, fn, *args, **kwargs): @@ -339,9 +338,9 @@ def test_host_is_not_set_to_down_after_query_oto(self): self.assertEqual(listener.hosts_marked_down, []) assert_quiescent_pool_state(self, cluster) - def test_can_shutdown_asyncoreconnection_subclass(self): + def test_can_shutdown_connection_subclass(self): start_and_prime_singledc() - class ExtendedConnection(AsyncoreConnection): + class ExtendedConnection(connection_class): pass cluster = Cluster(contact_points=["127.0.0.2"], diff --git a/tox.ini b/tox.ini index b3716de3c8..629c3f2261 100644 --- a/tox.ini +++ b/tox.ini @@ -9,6 +9,7 @@ deps = nose cython eventlet twisted <15.5.0 + futurist [testenv] deps = {[base]deps} From 851739487e6dd216f18774a9dc2de073c55d4726 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 20 Aug 2019 21:50:32 -0400 Subject: [PATCH 1543/2005] Fix MetricsTests.test_connection_error integration test --- tests/integration/standard/test_metrics.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index d5052ace57..d40a66f9c8 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -14,6 +14,7 @@ import time +from cassandra.connection import ConnectionShutdown from cassandra.policies import HostFilterPolicy, RoundRobinPolicy, FallthroughRetryPolicy try: @@ -63,7 +64,8 @@ def test_connection_error(self): try: # Ensure the nodes are actually down query = SimpleStatement("SELECT * FROM test", consistency_level=ConsistencyLevel.ALL) - with self.assertRaises(NoHostAvailable): + # both exceptions can happen depending on when the connection has been detected as defunct + with self.assertRaises((NoHostAvailable, ConnectionShutdown)): self.session.execute(query) finally: get_cluster().start(wait_for_binary_proto=True, wait_other_notice=True) From 2120af10800ac9f13412bf8ff3fe133d8f7495a2 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 20 Aug 2019 22:00:39 -0400 Subject: [PATCH 1544/2005] Freeze twisted dependency to 19.2.1 to support 3.4 testing --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index 9679247325..e8552d57ea 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -7,7 +7,7 @@ unittest2 pytz sure pure-sasl -twisted[tls] +twisted[tls]==19.2.1 gevent>=1.0 eventlet cython>=0.20,<0.30 From 2ff3dd24000897e0ce7c06a896b912637a299f77 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 21 Aug 2019 09:09:33 -0400 Subject: [PATCH 1545/2005] Add Python 3.7 package metadata --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index 2c13639834..b79774476b 100644 --- a/setup.py +++ b/setup.py @@ -432,6 +432,7 @@ def run_setup(extensions): 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Software Development :: Libraries :: Python Modules' From c39c048c168fc89a9b1e98f41112003e68c53623 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Sun, 25 Aug 2019 15:12:03 -0400 Subject: [PATCH 1546/2005] =?UTF-8?q?Discovered=20node=20doesn=C2=B4t=20ho?= =?UTF-8?q?nor=20the=20configured=20Cluster=20port=20on=20connection?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- CHANGELOG.rst | 1 + cassandra/cluster.py | 2 +- cassandra/connection.py | 12 +++- .../standard/test_custom_cluster.py | 63 +++++++++++++++++++ tests/integration/util.py | 47 ++++++++++++++ 5 files changed, 123 insertions(+), 2 deletions(-) create mode 100644 tests/integration/standard/test_custom_cluster.py diff --git a/CHANGELOG.rst b/CHANGELOG.rst index b23566effb..ac1af88c98 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -18,6 +18,7 @@ Bug Fixes * Call ConnectionException with correct kwargs (PYTHON-1117) * Can't connect to clusters built from source because version parsing doesn't handle 'x.y-SNAPSHOT' (PYTHON-1118) * Set the proper default ExecutionProfile.row_factory value (PYTHON-1119) +* Discovered node doesn´t honor the configured Cluster port on connection (PYTHON-1127) 3.18.0 ====== diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 4702963b53..9818ce2f12 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -897,7 +897,7 @@ def __init__(self, self.port = port - self.endpoint_factory = endpoint_factory or DefaultEndPointFactory() + self.endpoint_factory = endpoint_factory or DefaultEndPointFactory(port=self.port) self.endpoint_factory.configure(self) raw_contact_points = [cp for cp in self.contact_points if not isinstance(cp, EndPoint)] diff --git a/cassandra/connection.py b/cassandra/connection.py index f762d10667..8552faa6e3 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -210,6 +210,14 @@ def __repr__(self): class DefaultEndPointFactory(EndPointFactory): + port = None + """ + If set, force all endpoints to use this port. + """ + + def __init__(self, port=None): + self.port = port + def create(self, row): addr = None if "rpc_address" in row: @@ -220,7 +228,9 @@ def create(self, row): addr = row.get("peer") # create the endpoint with the translated address - return DefaultEndPoint(self.cluster.address_translator.translate(addr), 9042) # will eventually support port + return DefaultEndPoint( + self.cluster.address_translator.translate(addr), + self.port if self.port is not None else 9042) class _Frame(object): diff --git a/tests/integration/standard/test_custom_cluster.py b/tests/integration/standard/test_custom_cluster.py new file mode 100644 index 0000000000..5ca5c5c373 --- /dev/null +++ b/tests/integration/standard/test_custom_cluster.py @@ -0,0 +1,63 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from cassandra.cluster import Cluster, NoHostAvailable +from tests.integration import use_singledc, get_cluster, remove_cluster, local +from tests.integration.util import wait_until, wait_until_not_raised + +try: + import unittest2 as unittest +except ImportError: + import unittest + + +def setup_module(): + use_singledc(start=False) + ccm_cluster = get_cluster() + ccm_cluster.stop() + config_options = {'native_transport_port': 9046} + ccm_cluster.set_configuration_options(config_options) + # can't use wait_for_binary_proto cause ccm tries on port 9042 + ccm_cluster.start(wait_for_binary_proto=False) + # wait until all nodes are up + wait_until_not_raised(lambda: Cluster(['127.0.0.1'], port=9046).connect().shutdown(), 1, 10) + wait_until_not_raised(lambda: Cluster(['127.0.0.2'], port=9046).connect().shutdown(), 1, 10) + wait_until_not_raised(lambda: Cluster(['127.0.0.3'], port=9046).connect().shutdown(), 1, 10) + + +def teardown_module(): + remove_cluster() + + +class CustomClusterTests(unittest.TestCase): + + @local + def test_connection_honor_cluster_port(self): + """ + Test that the initial contact point and discovered nodes honor + the cluster port on new connection. + + All hosts should be marked as up and we should be able to execute queries on it. + """ + cluster = Cluster() + with self.assertRaises(NoHostAvailable): + cluster.connect() # should fail on port 9042 + + cluster = Cluster(port=9046) + session = cluster.connect(wait_for_all_pools=True) + + wait_until(lambda: len(cluster.metadata.all_hosts()) == 3, 1, 5) + for host in cluster.metadata.all_hosts(): + self.assertTrue(host.is_up) + session.execute("select * from system.local", host=host) diff --git a/tests/integration/util.py b/tests/integration/util.py index 6215449d1f..4e7afc5618 100644 --- a/tests/integration/util.py +++ b/tests/integration/util.py @@ -49,3 +49,50 @@ def assert_quiescent_pool_state(test_case, cluster, wait=None): test_case.assertEqual(connection.highest_request_id, max(req_ids)) if PROTOCOL_VERSION < 3: test_case.assertEqual(connection.highest_request_id, connection.max_request_id) + + +def wait_until(condition, delay, max_attempts): + """ + Executes a function at regular intervals while the condition + is false and the amount of attempts < maxAttempts. + :param condition: a function + :param delay: the delay in second + :param max_attempts: the maximum number of attempts. So the timeout + of this function is delay*max_attempts + """ + attempt = 0 + while not condition() and attempt < max_attempts: + attempt += 1 + time.sleep(delay) + + if attempt >= max_attempts: + raise Exception("Condition is still False after {} attempts.".format(max_attempts)) + + +def wait_until_not_raised(condition, delay, max_attempts): + """ + Executes a function at regular intervals while the condition + doesn't raise an exception and the amount of attempts < maxAttempts. + :param condition: a function + :param delay: the delay in second + :param max_attempts: the maximum number of attemps. So the timeout + of this function will be delay*max_attempts + """ + def wrapped_condition(): + try: + condition() + except: + return False + + return True + + attempt = 0 + while attempt < (max_attempts-1): + attempt += 1 + if wrapped_condition(): + return + + time.sleep(delay) + + # last attempt, let the exception raise + condition() From 68d2dd6e114b07e26186fb223c12bd61c851022b Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Sun, 25 Aug 2019 19:59:01 -0400 Subject: [PATCH 1547/2005] Disable all weekly jobs --- build.yaml | 94 +++++++++++++++++++++++++++--------------------------- 1 file changed, 47 insertions(+), 47 deletions(-) diff --git a/build.yaml b/build.yaml index 38e0604b16..afca3c7c0e 100644 --- a/build.yaml +++ b/build.yaml @@ -62,53 +62,53 @@ schedules: env_vars: | EVENT_LOOP_MANAGER='libev' - weekly_libev: - schedule: 0 10 * * 6 - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='libev' - - weekly_gevent: - schedule: 0 14 * * 6 - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='gevent' - matrix: - exclude: - - python: [3.4, 3.5, 3.6, 3.7] - - weekly_eventlet: - schedule: 0 18 * * 6 - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='eventlet' - - weekly_asyncio: - schedule: 0 22 * * 6 - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='asyncio' - matrix: - exclude: - - python: [2.7] - - weekly_async: - schedule: 0 10 * * 7 - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='asyncore' - - weekly_twister: - schedule: 0 14 * * 7 - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='twisted' + # weekly_libev: + # schedule: 0 10 * * 6 + # branches: + # include: [master] + # env_vars: | + # EVENT_LOOP_MANAGER='libev' + + # weekly_gevent: + # schedule: 0 14 * * 6 + # branches: + # include: [master] + # env_vars: | + # EVENT_LOOP_MANAGER='gevent' + # matrix: + # exclude: + # - python: [3.4, 3.5, 3.6, 3.7] + + # weekly_eventlet: + # schedule: 0 18 * * 6 + # branches: + # include: [master] + # env_vars: | + # EVENT_LOOP_MANAGER='eventlet' + + # weekly_asyncio: + # schedule: 0 22 * * 6 + # branches: + # include: [master] + # env_vars: | + # EVENT_LOOP_MANAGER='asyncio' + # matrix: + # exclude: + # - python: [2.7] + + # weekly_async: + # schedule: 0 10 * * 7 + # branches: + # include: [master] + # env_vars: | + # EVENT_LOOP_MANAGER='asyncore' + + # weekly_twister: + # schedule: 0 14 * * 7 + # branches: + # include: [master] + # env_vars: | + # EVENT_LOOP_MANAGER='twisted' upgrade_tests: schedule: adhoc From ef846cc18dab1643e2d08421e7802b6b5d8c527b Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 26 Aug 2019 10:18:49 -0400 Subject: [PATCH 1548/2005] release 3.19.0: changelog & version --- CHANGELOG.rst | 9 ++++++--- cassandra/__init__.py | 2 +- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index ac1af88c98..8f90f8955a 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,12 +1,11 @@ 3.19.0 ====== -Not released +August 27, 2019 Features -------- * Add Python 3.7 support (PYTHON-1016) * Future-proof Mapping imports (PYTHON-1023) -* Remove invalid warning in set_session when we initialize a default connection (PYTHON-1104) * Include param values in cqlengine logging (PYTHON-1105) * NTS Token Replica Map Generation is slow (PYTHON-622) @@ -17,9 +16,13 @@ Bug Fixes * Fix incorrect metadata for compact counter tables (PYTHON-1100) * Call ConnectionException with correct kwargs (PYTHON-1117) * Can't connect to clusters built from source because version parsing doesn't handle 'x.y-SNAPSHOT' (PYTHON-1118) -* Set the proper default ExecutionProfile.row_factory value (PYTHON-1119) * Discovered node doesn´t honor the configured Cluster port on connection (PYTHON-1127) +Other +----- +* Remove invalid warning in set_session when we initialize a default connection (PYTHON-1104) +* Set the proper default ExecutionProfile.row_factory value (PYTHON-1119) + 3.18.0 ====== May 27, 2019 diff --git a/cassandra/__init__.py b/cassandra/__init__.py index a3687670ec..002f907a1e 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 18, 0, 'post0') +__version_info__ = (3, 19, 0) __version__ = '.'.join(map(str, __version_info__)) From ec75c49a8535fb1a4f1a68b3889841c64e6ad62f Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 26 Aug 2019 10:20:08 -0400 Subject: [PATCH 1549/2005] release 3.19.0: docs --- docs.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs.yaml b/docs.yaml index fc87b6e938..07af2b70b2 100644 --- a/docs.yaml +++ b/docs.yaml @@ -22,6 +22,8 @@ sections: # build extensions like libev CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: + - name: '3.19' + ref: ef846cc1 - name: '3.18' ref: ec36b957 - name: '3.17' From e53a59b86a333d74b9d4e9b65cdb6858848f3dbc Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 26 Aug 2019 10:21:00 -0400 Subject: [PATCH 1550/2005] post version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 002f907a1e..13a64afbdc 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 19, 0) +__version_info__ = (3, 19, 0, 'post0') __version__ = '.'.join(map(str, __version_info__)) From ac2471f919b5556c2fbb7478d3333fff778acaf1 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 26 Aug 2019 10:47:03 -0400 Subject: [PATCH 1551/2005] release 3.19.0: changelog & version --- CHANGELOG.rst | 2 +- cassandra/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 8f90f8955a..040ea29028 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,6 @@ 3.19.0 ====== -August 27, 2019 +August 26, 2019 Features -------- diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 13a64afbdc..002f907a1e 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 19, 0, 'post0') +__version_info__ = (3, 19, 0) __version__ = '.'.join(map(str, __version_info__)) From 4eebed89cf9782a8ffc26492ec25ffb7a08e3b6a Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 26 Aug 2019 10:48:00 -0400 Subject: [PATCH 1552/2005] release 3.19.0: docs --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index 07af2b70b2..a5ecceb6ec 100644 --- a/docs.yaml +++ b/docs.yaml @@ -23,7 +23,7 @@ sections: CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - name: '3.19' - ref: ef846cc1 + ref: ac2471f9 - name: '3.18' ref: ec36b957 - name: '3.17' From ac12bf86455b0b306099e5586ce4f9904379c841 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 26 Aug 2019 16:03:48 -0400 Subject: [PATCH 1553/2005] remove bad restructed symbol in README --- README.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.rst b/README.rst index a3cf6c56b8..b2039a957f 100644 --- a/README.rst +++ b/README.rst @@ -73,8 +73,7 @@ and the ``#datastax-drivers`` channel in the `DataStax Academy Slack Date: Mon, 26 Aug 2019 16:12:06 -0400 Subject: [PATCH 1554/2005] post version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 002f907a1e..13a64afbdc 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 19, 0) +__version_info__ = (3, 19, 0, 'post0') __version__ = '.'.join(map(str, __version_info__)) From e3fd262cafd498096d7f85c07e743cb77a6c37c8 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 29 Aug 2019 10:17:47 -0400 Subject: [PATCH 1555/2005] DataStax Cloud enablement --- CHANGELOG.rst | 2 + build.yaml | 7 + cassandra/cloud/__init__.py | 156 ++++++++++++++++++ cassandra/cluster.py | 43 ++++- docs/api/cassandra/cluster.rst | 2 + docs/getting_started.rst | 10 ++ tests/integration/__init__.py | 2 + tests/integration/advanced/cloud/__init__.py | 119 +++++++++++++ .../integration/advanced/cloud/test_cloud.py | 132 +++++++++++++++ tests/integration/util.py | 47 ++++++ tests/unit/advanced/cloud/__init__.py | 0 tests/unit/advanced/cloud/creds.zip | Bin 0 -> 7209 bytes tests/unit/advanced/cloud/test_cloud.py | 77 +++++++++ 13 files changed, 595 insertions(+), 2 deletions(-) create mode 100644 cassandra/cloud/__init__.py create mode 100644 tests/integration/advanced/cloud/__init__.py create mode 100644 tests/integration/advanced/cloud/test_cloud.py create mode 100644 tests/unit/advanced/cloud/__init__.py create mode 100644 tests/unit/advanced/cloud/creds.zip create mode 100644 tests/unit/advanced/cloud/test_cloud.py diff --git a/CHANGELOG.rst b/CHANGELOG.rst index dd744b580f..f4ca7b9345 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -6,6 +6,8 @@ Features -------- * Add SNIEndPoint support (PYTHON-1084) * Add a new Endpoint type to support unix sockets (PYTHON-1098) +* DataStax Cloud enablement (PYTHON-1075) +* Add creds.zip support (PYTHON-1097) 3.19.0 ====== diff --git a/build.yaml b/build.yaml index 7839b7141c..9e2b84ab62 100644 --- a/build.yaml +++ b/build.yaml @@ -164,6 +164,9 @@ build: pip install nose-exclude pip install service_identity + # Required for DBaaS and https+ssl cert + pip install pyopenssl ndg-httpsclient pyasn1 + FORCE_CYTHON=False if [[ $CYTHON == 'CYTHON' ]]; then FORCE_CYTHON=True @@ -211,9 +214,13 @@ build: echo "==========RUNNING INTEGRATION TESTS==========" EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true + echo "==========RUNNING ADVANCED AND CLOUD TESTS==========" + EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CLOUD_PROXY_PATH="$HOME/proxy/" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=advanced_results.xml tests/integration/advanced/ || true + if [ -z "$EXCLUDE_LONG" ]; then echo "==========RUNNING LONG INTEGRATION TESTS==========" EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true fi + - xunit: - "*_results.xml" diff --git a/cassandra/cloud/__init__.py b/cassandra/cloud/__init__.py new file mode 100644 index 0000000000..4110296f5b --- /dev/null +++ b/cassandra/cloud/__init__.py @@ -0,0 +1,156 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import logging +import json +import tempfile +import shutil +from six.moves.urllib.request import urlopen +from ssl import SSLContext, PROTOCOL_TLSv1, CERT_REQUIRED +from zipfile import ZipFile + +# 2.7 vs 3.x +try: + from zipfile import BadZipFile +except: + from zipfile import BadZipfile as BadZipFile + +from cassandra import DriverException + +log = logging.getLogger(__name__) + +__all__ = ['get_cloud_config'] + + +class CloudConfig(object): + + username = None + password = None + host = None + port = None + keyspace = None + local_dc = None + ssl_context = None + + sni_host = None + sni_port = None + host_ids = None + + @classmethod + def from_dict(cls, d): + c = cls() + + c.port = d.get('port', None) + try: + c.port = int(d['port']) + except: + pass + + c.username = d.get('username', None) + c.password = d.get('password', None) + c.host = d.get('host', None) + c.keyspace = d.get('keyspace', None) + c.local_dc = d.get('localDC', None) + + return c + + +def get_cloud_config(cloud_config): + if 'secure_connect_bundle' not in cloud_config: + raise ValueError("The cloud config doesn't have a secure_connect_bundle specified.") + + try: + config = read_cloud_config_from_zip(cloud_config) + except BadZipFile: + raise ValueError("Unable to open the zip file for the cloud config. Check your secure connect bundle.") + + return read_metadata_info(config, cloud_config) + + +def read_cloud_config_from_zip(cloud_config): + secure_bundle = cloud_config['secure_connect_bundle'] + with ZipFile(secure_bundle) as zipfile: + base_dir = os.path.dirname(secure_bundle) + tmp_dir = tempfile.mkdtemp(dir=base_dir) + try: + zipfile.extractall(path=tmp_dir) + return parse_cloud_config(os.path.join(tmp_dir, 'config.json'), cloud_config) + finally: + shutil.rmtree(tmp_dir) + + +def parse_cloud_config(path, cloud_config): + with open(path, 'r') as stream: + data = json.load(stream) + + config = CloudConfig.from_dict(data) + config_dir = os.path.dirname(path) + + if 'ssl_context' in cloud_config: + config.ssl_context = cloud_config['ssl_context'] + else: + # Load the ssl_context before we delete the temporary directory + ca_cert_location = os.path.join(config_dir, 'ca.crt') + cert_location = os.path.join(config_dir, 'cert') + key_location = os.path.join(config_dir, 'key') + config.ssl_context = _ssl_context_from_cert(ca_cert_location, cert_location, key_location) + + return config + + +def read_metadata_info(config, cloud_config): + url = "https://{}:{}/metadata".format(config.host, config.port) + timeout = cloud_config['connect_timeout'] if 'connect_timeout' in cloud_config else 5 + try: + response = urlopen(url, context=config.ssl_context, timeout=timeout) + except Exception as e: + log.exception(e) + raise DriverException("Unable to connect to the metadata service at %s" % url) + + if response.code != 200: + raise DriverException(("Error while fetching the metadata at: %s. " + "The service returned error code %d." % (url, response.code))) + return parse_metadata_info(config, response.read().decode('utf-8')) + + +def parse_metadata_info(config, http_data): + try: + data = json.loads(http_data) + except: + msg = "Failed to load cluster metadata" + raise DriverException(msg) + + contact_info = data['contact_info'] + config.local_dc = contact_info['local_dc'] + + proxy_info = contact_info['sni_proxy_address'].split(':') + config.sni_host = proxy_info[0] + try: + config.sni_port = int(proxy_info[1]) + except: + config.sni_port = 9042 + + config.host_ids = [host_id for host_id in contact_info['contact_points']] + + return config + + +def _ssl_context_from_cert(ca_cert_location, cert_location, key_location): + ssl_context = SSLContext(PROTOCOL_TLSv1) + ssl_context.load_verify_locations(ca_cert_location) + ssl_context.verify_mode = CERT_REQUIRED + ssl_context.load_cert_chain(certfile=cert_location, keyfile=key_location) + + return ssl_context diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 07d776f3af..f7b449a645 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -18,6 +18,7 @@ """ from __future__ import absolute_import +import os import atexit from collections import defaultdict from concurrent.futures import ThreadPoolExecutor, FIRST_COMPLETED, wait as wait_futures @@ -45,9 +46,11 @@ OperationTimedOut, UnsupportedOperation, SchemaTargetType, DriverException, ProtocolVersion, UnresolvableContactPoints) +from cassandra.auth import PlainTextAuthProvider from cassandra.connection import (ConnectionException, ConnectionShutdown, ConnectionHeartbeat, ProtocolVersionUnsupported, - EndPoint, DefaultEndPoint, DefaultEndPointFactory) + EndPoint, DefaultEndPoint, DefaultEndPointFactory, + SniEndPointFactory) from cassandra.cqltypes import UserType from cassandra.encoder import Encoder from cassandra.protocol import (QueryMessage, ResultMessage, @@ -75,6 +78,7 @@ named_tuple_factory, dict_factory, tuple_factory, FETCH_SIZE_UNSET) from cassandra.timestamps import MonotonicTimestampGenerator from cassandra.compat import Mapping +from cassandra import cloud as dscloud def _is_eventlet_monkey_patched(): @@ -779,6 +783,18 @@ def default_retry_policy(self, policy): documentation for :meth:`Session.timestamp_generator`. """ + cloud = None + """ + A dict of the cloud configuration. Example:: + { + # path to the secure connect bundle + 'secure_connect_bundle': '/path/to/secure-connect-dbname.zip' + } + + The zip file will be temporarily extracted in the same directory to + load the configuration and certificates. + """ + @property def schema_metadata_enabled(self): """ @@ -874,13 +890,36 @@ def __init__(self, idle_heartbeat_timeout=30, no_compact=False, ssl_context=None, - endpoint_factory=None): + endpoint_factory=None, + cloud=None): """ ``executor_threads`` defines the number of threads in a pool for handling asynchronous tasks such as extablishing connection pools or refreshing metadata. Any of the mutable Cluster attributes may be set as keyword arguments to the constructor. """ + + if cloud is not None: + if contact_points is not _NOT_SET or endpoint_factory: + raise ValueError(("contact_points and endpoint_factory" + "cannot be specified with a cloud configuration")) + + if ssl_context: + cloud['ssl_context'] = ssl_context + + cloud_config = dscloud.get_cloud_config(cloud) + + ssl_context = cloud_config.ssl_context + if (auth_provider is None and cloud_config.username + and cloud_config.password): + auth_provider = PlainTextAuthProvider(cloud_config.username, cloud_config.password) + + endpoint_factory = SniEndPointFactory(cloud_config.sni_host, cloud_config.sni_port) + contact_points = [ + endpoint_factory.create_from_sni(host_id) + for host_id in cloud_config.host_ids + ] + if contact_points is not None: if contact_points is _NOT_SET: self._contact_points_explicit = False diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index fef8f4d188..81cf1f04e9 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -72,6 +72,8 @@ .. autoattribute:: endpoint_factory + .. autoattribute:: cloud + .. automethod:: connect .. automethod:: shutdown diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 2dc32e6499..98baaaaec2 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -403,3 +403,13 @@ level on that: user3_lookup = user_lookup_stmt.bind([user_id3]) user3_lookup.consistency_level = ConsistencyLevel.ALL user3 = session.execute(user3_lookup) + +Connecting to DataStax Cloud +---------------------------- +1. Download the secure connect bundle from your DataStax Constellation account. +2. Connect to your cloud cluster with:: + cloud_config = { + 'secure_connect_bundle': '/path/to/secure-connect-dbname.zip' + } + cluster = Cluster(cloud=cloud_config) + session = cluster.connect() diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 02f9851477..02070915fb 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -122,6 +122,7 @@ def _get_dse_version_from_cass(cass_version): USE_CASS_EXTERNAL = bool(os.getenv('USE_CASS_EXTERNAL', False)) KEEP_TEST_CLUSTER = bool(os.getenv('KEEP_TEST_CLUSTER', False)) SIMULACRON_JAR = os.getenv('SIMULACRON_JAR', None) +CLOUD_PROXY_PATH = os.getenv('CLOUD_PROXY_PATH', None) CASSANDRA_IP = os.getenv('CASSANDRA_IP', '127.0.0.1') CASSANDRA_DIR = os.getenv('CASSANDRA_DIR', None) @@ -275,6 +276,7 @@ def _id_and_mark(f): requiresmallclockgranularity = unittest.skipIf("Windows" in platform.system() or "asyncore" in EVENT_LOOP_MANAGER, "This test is not suitible for environments with large clock granularity") requiressimulacron = unittest.skipIf(SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"), "Simulacron jar hasn't been specified or C* version is 2.0") +requirescloudproxy = unittest.skipIf(CLOUD_PROXY_PATH is None, "Cloud Proxy path hasn't been specified") def wait_for_node_socket(node, timeout): diff --git a/tests/integration/advanced/cloud/__init__.py b/tests/integration/advanced/cloud/__init__.py new file mode 100644 index 0000000000..ce1e6cda54 --- /dev/null +++ b/tests/integration/advanced/cloud/__init__.py @@ -0,0 +1,119 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +import os +import subprocess + +from cassandra.cluster import Cluster + +from tests.integration import CLOUD_PROXY_PATH + + +def teardown_package(): + stop_cloud_proxy() + + +class CloudProxyCluster(unittest.TestCase): + + creds_dir = os.path.join(os.path.abspath(CLOUD_PROXY_PATH or ''), 'certs/bundles/') + creds = os.path.join(creds_dir, 'creds-v1.zip') + creds_no_auth = os.path.join(creds_dir, 'creds-v1-wo-creds.zip') + creds_unreachable = os.path.join(creds_dir, 'creds-v1-unreachable.zip') + + cluster, connect = None, False + session = None + + @classmethod + def setUpClass(cls): + if CLOUD_PROXY_PATH is not None: + start_cloud_proxy() + + @classmethod + def connect(cls, creds, **kwargs): + cloud_config = { + 'secure_connect_bundle': creds, + } + cls.cluster = Cluster(cloud=cloud_config, protocol_version=4, **kwargs) + cls.session = cls.cluster.connect(wait_for_all_pools=True) + + @classmethod + def tearDownClass(cls): + stop_cloud_proxy() + + def tearDown(self): + if self.cluster: + self.cluster.shutdown() + + +class CloudProxyServer(object): + """ + Class for starting and stopping the proxy (sni_single_endpoint) + """ + + ccm_command = 'docker exec $(docker ps -a -q --filter ancestor=single_endpoint) ccm {}' + + def __init__(self, CLOUD_PROXY_PATH): + self.CLOUD_PROXY_PATH = CLOUD_PROXY_PATH + self.running = False + + def start(self): + return_code = subprocess.call( + ['REQUIRE_CLIENT_CERTIFICATE=true ./run.sh'], + cwd=self.CLOUD_PROXY_PATH, + shell=True) + if return_code != 0: + raise Exception("Error while starting proxy server") + self.running = True + + def stop(self): + if self.is_running(): + subprocess.call( + ["docker kill $(docker ps -a -q --filter ancestor=single_endpoint)"], + shell=True) + self.running = False + + def is_running(self): + return self.running + + def start_node(self, id): + subcommand = 'node{} start --root --wait-for-binary-proto'.format(id) + subprocess.call( + [self.ccm_command.format(subcommand)], + shell=True) + + def stop_node(self, id): + subcommand = 'node{} stop'.format(id) + subprocess.call( + [self.ccm_command.format(subcommand)], + shell=True) + + +CLOUD_PROXY_SERVER = CloudProxyServer(CLOUD_PROXY_PATH) + + +def start_cloud_proxy(): + """ + Starts and waits for the proxy to run + """ + CLOUD_PROXY_SERVER.stop() + CLOUD_PROXY_SERVER.start() + + +def stop_cloud_proxy(): + CLOUD_PROXY_SERVER.stop() diff --git a/tests/integration/advanced/cloud/test_cloud.py b/tests/integration/advanced/cloud/test_cloud.py new file mode 100644 index 0000000000..73628549d5 --- /dev/null +++ b/tests/integration/advanced/cloud/test_cloud.py @@ -0,0 +1,132 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +import six +from ssl import SSLContext, PROTOCOL_TLSv1 + +from cassandra import DriverException +from cassandra.cluster import NoHostAvailable +from cassandra.connection import SniEndPoint +from cassandra.auth import PlainTextAuthProvider +from cassandra.policies import TokenAwarePolicy, DCAwareRoundRobinPolicy, ConstantReconnectionPolicy + +from mock import patch + +from tests.integration import requirescloudproxy +from tests.integration.util import wait_until_not_raised +from tests.integration.advanced.cloud import CloudProxyCluster, CLOUD_PROXY_SERVER + + +@requirescloudproxy +class CloudTests(CloudProxyCluster): + def hosts_up(self): + return [h for h in self.cluster.metadata.all_hosts() if h.is_up] + + def test_resolve_and_connect(self): + self.connect(self.creds) + + self.assertEqual(len(self.hosts_up()), 3) + for host in self.cluster.metadata.all_hosts(): + self.assertTrue(host.is_up) + self.assertIsInstance(host.endpoint, SniEndPoint) + self.assertEqual(str(host.endpoint), "{}:{}:{}".format( + host.endpoint.address, host.endpoint.port, host.host_id)) + self.assertIn(host.endpoint._resolved_address, ("127.0.0.1", '::1')) + + def test_match_system_local(self): + self.connect(self.creds) + + self.assertEqual(len(self.hosts_up()), 3) + for host in self.cluster.metadata.all_hosts(): + row = self.session.execute('SELECT * FROM system.local', host=host).one() + self.assertEqual(row.host_id, host.host_id) + self.assertEqual(row.rpc_address, host.broadcast_rpc_address) + + def test_set_auth_provider(self): + self.connect(self.creds) + self.assertIsInstance(self.cluster.auth_provider, PlainTextAuthProvider) + self.assertEqual(self.cluster.auth_provider.username, 'cassandra') + self.assertEqual(self.cluster.auth_provider.password, 'cassandra') + + def test_support_leaving_the_auth_unset(self): + with self.assertRaises(NoHostAvailable): + self.connect(self.creds_no_auth) + self.assertIsNone(self.cluster.auth_provider) + + def test_support_overriding_auth_provider(self): + try: + self.connect(self.creds, auth_provider=PlainTextAuthProvider('invalid', 'invalid')) + except: + pass # this will fail soon when sni_single_endpoint is updated + self.assertIsInstance(self.cluster.auth_provider, PlainTextAuthProvider) + self.assertEqual(self.cluster.auth_provider.username, 'invalid') + self.assertEqual(self.cluster.auth_provider.password, 'invalid') + + def test_support_overriding_ssl_context(self): + with self.assertRaises(DriverException): + # will fail since the ssl_context is + self.connect(self.creds, ssl_context=SSLContext(PROTOCOL_TLSv1)) + + def test_error_when_bundle_doesnt_exist(self): + try: + self.connect('/invalid/path/file.zip') + except Exception as e: + if six.PY2: + self.assertIsInstance(e, IOError) + else: + self.assertIsInstance(e, FileNotFoundError) + + def test_load_balancing_policy_is_dcawaretokenlbp(self): + self.connect(self.creds) + self.assertIsInstance(self.cluster.profile_manager.default.load_balancing_policy, + TokenAwarePolicy) + self.assertIsInstance(self.cluster.profile_manager.default.load_balancing_policy._child_policy, + DCAwareRoundRobinPolicy) + + def test_resolve_and_reconnect_on_node_down(self): + + self.connect(self.creds, + idle_heartbeat_interval=1, idle_heartbeat_timeout=1, + reconnection_policy=ConstantReconnectionPolicy(50)) + + self.assertEqual(len(self.hosts_up()), 3) + CLOUD_PROXY_SERVER.stop_node(1) + wait_until_not_raised( + lambda: self.assertEqual(len(self.hosts_up()), 2), + 0.02, 250) + + host = [h for h in self.cluster.metadata.all_hosts() if not h.is_up][0] + with patch.object(SniEndPoint, "resolve", wraps=host.endpoint.resolve) as mocked_resolve: + CLOUD_PROXY_SERVER.start_node(1) + wait_until_not_raised( + lambda: self.assertEqual(len(self.hosts_up()), 3), + 0.02, 250) + mocked_resolve.assert_called_once() + + def test_metadata_unreachable(self): + with self.assertRaises(DriverException) as cm: + self.connect(self.creds_unreachable, connect_timeout=1) + + self.assertIn('Unable to connect to the metadata service', str(cm.exception)) + + def test_metadata_ssl_error(self): + with self.assertRaises(DriverException) as cm: + self.connect(self.creds, ssl_context=SSLContext(PROTOCOL_TLSv1)) + + self.assertIn('Unable to connect to the metadata', str(cm.exception)) diff --git a/tests/integration/util.py b/tests/integration/util.py index 6215449d1f..4e7afc5618 100644 --- a/tests/integration/util.py +++ b/tests/integration/util.py @@ -49,3 +49,50 @@ def assert_quiescent_pool_state(test_case, cluster, wait=None): test_case.assertEqual(connection.highest_request_id, max(req_ids)) if PROTOCOL_VERSION < 3: test_case.assertEqual(connection.highest_request_id, connection.max_request_id) + + +def wait_until(condition, delay, max_attempts): + """ + Executes a function at regular intervals while the condition + is false and the amount of attempts < maxAttempts. + :param condition: a function + :param delay: the delay in second + :param max_attempts: the maximum number of attempts. So the timeout + of this function is delay*max_attempts + """ + attempt = 0 + while not condition() and attempt < max_attempts: + attempt += 1 + time.sleep(delay) + + if attempt >= max_attempts: + raise Exception("Condition is still False after {} attempts.".format(max_attempts)) + + +def wait_until_not_raised(condition, delay, max_attempts): + """ + Executes a function at regular intervals while the condition + doesn't raise an exception and the amount of attempts < maxAttempts. + :param condition: a function + :param delay: the delay in second + :param max_attempts: the maximum number of attemps. So the timeout + of this function will be delay*max_attempts + """ + def wrapped_condition(): + try: + condition() + except: + return False + + return True + + attempt = 0 + while attempt < (max_attempts-1): + attempt += 1 + if wrapped_condition(): + return + + time.sleep(delay) + + # last attempt, let the exception raise + condition() diff --git a/tests/unit/advanced/cloud/__init__.py b/tests/unit/advanced/cloud/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit/advanced/cloud/creds.zip b/tests/unit/advanced/cloud/creds.zip new file mode 100644 index 0000000000000000000000000000000000000000..6bd8faf69f3b20b6a1fd2f93796fb80e4a192c85 GIT binary patch literal 7209 zcmaKRWmFtp)^+3V?iLc0d7F>`l1^`e2)Bwx3 zT$b*hI@%Zjq}Y9QlRx3*iv>VH-hW#3k4s$2YILO%e#FVg=#O-$aYJ@oi?S`vUl-!N zcNpG-S;a)lW}a2Qyr0DGx9N=-bYQW2V2e(G$G~rVB|oWj-pWh+(^2Uoy@V4=iiFge z!n07FZBWP~zVwmyY|v*bHdDk#0D9+feJ+Cd6-&__H~<6mMM6kex*dt3^w3_xOAML} zIZKIRVo1+yc>*G09Yz{LNUUhE9T1C1N1GaQ7a{i(>7-Ojc|+gpE#-r_ZdgqQE{IB0 z4E<_+jf03L1(ir{361frGHKjO@lx!5wzY-jp6en0TG(?f|MVduUitDGfS2)Gp!ra! z_lkWp`1%_?%6lCKybr(ZE4v=?@nL?#?TwgcgBszE?8u0E6?K9bkh&$T#Y=^-iFO@b zp_TDI-D=nA=z~*=Of>lKuXr4xGy3K~_DpoIWAJf|oV9)+f}YK^;h(=H=x|vKb@|xd z@&U_wER{PnZ0K%ltB+&=ddFDwk_}IA&DQb4&yH{stG($^zfL1gxd?ci{EIEoMsEc- zL3R4l7D#g*CERQE-N{MO2xu7D?U||ZY3IJ~S|l)F-9S?5r;QkFkh4Jv^urJgwU+L~Ay3p$ONh!_5fLoR5TNjLvQ`c0kl!(=(2W(q+V+-KGPCz&kk#36bm7 zsY{&m7Z)A`&CZrO2ntK4PQzG5nX;1U4mA#_I6TO&+KZ5@m!V@PU0bionWQD_y!HyL z=AxPeOMhGFe$!U2NR&j^?uLHZ;wj<{X1%fGHQyZk^d=O5*sFLy<$XNt10ME4rzTME ztnY&jf#&MzR zVE-*%dnp_>19lXVxV9@AQzTy=fO?Rv&0KDsAm3n$iH)9OjW}Nsf_7^1qUij)bA?ayK;`fl|AwAxJH^xY>DjiBYd4oH}#rfS_ycO?e znMbEg98Lho30kZ$i`A;-xaT>oLWgqi8rJAzo9!vE)>W0WFh%1KAq$zmJw(SnEesFW zlXNG&=9=@r5(52wlb)+(2p(buw2^WGlZ1`(Jrjpe@Zz3vrm4+m+6DuUKtdw{{iS3t z{AdKw!o*Lu4SCs8Hk9@Jo0nbt^`|0_nwn3VJ*t^e|l6EA zKOWqF5k{e{Tq!K4T8@|${>JUkrA=+x9k^%lzZ__})76_YeRjo#dFb?Ac2F>(W`2ar zJv8R$;BY+CXzmoLsp|7joMWn+q@JQQ*qGRc z{3fY}fW;vkznhBE$Kz|jw~noQTf=yJO9{dH_qUrcjK2tH`$V{OCAU@_6aXLr6#zhb zBAlbO-#>ndN&lH|?^0U`E0+mnJwQ8q6|6a~y~t=T;Urv0+Y+yL0A`P#F9LGCb*?Mc zP5d7(rfPi^XJ`X~ba!q*PUPUr;l$6~ae?^CbHyljKtKe*7{j~~ep%#oF!n>D=F%0n zP+$3eJO6drZ=Gzo+|WjSx=~bRrwAgSN9DKTcCWZ?C)b2?m62uy{R6azSKfJak+V_)>54cx-T}JP_A1vn*R5RCq$FmRK@FR%7bcR1rUMKF z``0wFr%~CVYC|6C9E>~D=Mvl5mlf-Uc9TXxFdwk^(+5(u7Mam^aH5lW$gs1mzSpgb(Iqh|$+1kRF5a!?I|S=kBeo*E$u60`=PU6|Dw^`2DL~5EdGf0Ww(|%;G z$|j-{WFKQ^v~UBykuyRY^B24~Ve`sBf0Ih&(Ivy&`%g7q)ba`7?^VjpR+c{Cx+!Lc z5c5SZv^Kd+9_!&%f%esB8>K&E<~Ekx*eXGo0cU(9AJaQ=oUj_-MKQZ}wX+8!B(erw zCBeW0nWZF>yg#~E7`5%_-RnzKwpu|>2*UnbA4_IbD^p0r8U4A%IR!kWqJ3x5TE*ef z=j$p?IH)8ZZSfzS4yBcT2AJ%cJC|Y*msaD+@cY(uA6ZhgcrW)E{bDlln1MBr9mpVh zf@XK98ro?}6kbnc2bs`r8&M`Om-EPe!ihp=o8uadM=8OVu%loUY}mAaeDO?t)PC=c+0AT*@Xyamq->u>+Q!-RYE}-7 z8FMUF)=tCnM#@>UjJ$nRj^?mOl{R_kaoz>s1Q*bBednb$ejyl5kV?(gN7{N+blI=zh*zhTf}|-rgxa~|zcM8? zPEQwgcYpczm-(8D=18i#mAe= z)tQy=;p~Vd&Q}>iAlau=5sD>@m6gaMw#x2(*M;p@p~hkH=)C;;=NGCM zhL`U{krQmh1V%ok0S6%M^{Vp__}80jwrn6*J}+^LG4NT3R3I)sMn0M7_ipJiQn6Vo zH^|E_YG$|FH`1hU1vH9Ix$CFh8EiY;zpD11VwlED()!O~SNCT+erEF`JBt6JIIhA) zH$}TDT^4!RKt(}C08>E)Iggi$eDp$oKru|hS{^zZ%B!_HJII?JHK*m~O z8AVkk%pDo~ ztn}TZ+lAy`O8UtHqDDRzz3+wr0C+tG_LGv@TUk4M+I#wOIXHU!LsI{gFN%LJVRTaL z1&A@JhHd%NT&=sjhe)G%t@NN?#y@|g6V?kgN7N;}xk!_$j~L?OLsk3^a^cmqYqeH1 zcpX7e4>`)asG-Zd$N^tVP7QC;#arSaA@sUZSE8oqAh^VDf&qL;%C1#p^dh>y&GZrD zk+dGEq?BE^w#~}ukdf~UX&x_-$%5Eu1|^ozV-sdtw{y*H2Jxnz|SbG2HkQq6Ib)<4Zhn6 z)$Ok4r~(~JYmP<9UC*C{oS6Y6S6l1v&9XPZ)7H&A)40)?lP=P_1trCWvqIJd&%=x4 z3Fy^j%+*#kTUS1-6OggUnG!PRf0W51B8|7@XD1CKxrv^HvzYklvT4==fO`jXs`=No z!JG4@2stUuYKk4`Wu_mMb3!y~7p!YTXIau@%-|aW&;TE4-YnY%6ee368oG@8p#D7$ zKAys$UNBngi@=R!SoS8KLPbOljf=G|PhV4g7orFx?n*eU5#uFqvA>n>oKQAQ`cY|9 z+ABSVXXrg`C0(aGGCD^GlRU&yaxy$NNpcVeO|ct&lcAGz^fLLu$0r-tS;q6SbrNAk2SNj1-l(Sxf+qEoLrZ)%CQVS; z2Cl~H34vOg>T3{2X{u5|BN0i@l`ieRe7F#1x+=EZRZ5pk=ftQ!1-OH_?OA#z(*_KZ zksDmdr$9bN+Dkcg@P3^_y#*3Fzd1x~BwH|Lk0?ZU(n-8G`@RF@Dn;O(7?=3aj<&r1&izY1&RKQ5O>6)A+l3&5XB^uT(54M*ewgvkdzit}VJbE&a1U&=W=0pfPHmM0 z2&n!l{B||Zc%wF3g1i%2c^Yy#{_U+SZ1(<;%xoM>%fzI>fFk|-x`!*L(#gkeGJ7_o=~SbKpBOd zjba;N)yvS`+N0Pm%k6Q&6ma7$`EDd3nfUnB$+O&5r;b(y;D>5{DtzsbP^VtE#H+^! z>bsU-){Ept9K}(Gn`zRniQE0J;3XWM*LJ^8fBTt$*CXJiuTwv?z&J}|n=qxMns(Ym z$OWZ)pM1 zR~YDGYe@H8rb)AUd|Ov^ouU+rEv97h?8|~g#$1bd|I+d!2Z)v@-tL@9{#{=Z&CPFl z`K%;vX;m?pVxiJk1huFf(hB~U0(@guVjTIV;a@{W8MaS{#HGb4_q2x8+H#D}+pa|D zIz8PW=ElsFWpA8i49P|6zomBFjvAPv1VmljOh*K2pK5@=Vu)k;h&`vDTl`RDF|p%} zZt=GCBOV`BB3b)MkW(3_6OM=123c31fa_xa&zZch=4z+J8%2O-!*7PbLK_K`1yD)T z%GK3w!%|faZ@I(Jz?Q#%L>S>~!qt%AP?>@aX=}38-J8VG?3xXre%4CRx%M=ts()I7 zrT+C$iY-o(Xx=W0Y*}O#Nggje6>Qpv)ja+-+PD{D0;i$ef5#TDQ30=6Pl;rLPyf^~ zSCtRw6g`1|i;&0Z+h=EBO5I`dvF83=o0*PLUxXZMVep3Zdcj3EK790Sb{y%X09&}) zB<`T%6Mh#T5$$VQ-_LlvgL@3E8er_$OLf!5i9>_&GXHaWTiR{rB91!cjpL-}C}|_# zPWL^c%*|}-@*zhqtd&s$U9&G=Zmf}C5`2g(>ik-ig0FF5Q?*0qO>jXjNW;WT>{<>3 zZg?tSvM2Q)9mxDh$#dJiY8)+T`Jxak+(NYrt@GpC%N55ycj3PPUX9R0G%nx`V#0A;7g!c>&cf5#Q8dg2IMbdHL7<5}N9xGS8skuKtJ%u*S zsUd}L0`k%F8>%i5HV02py%a%&Ab_zt1qVfCRlObjV0qG2Pa{0+Wk&Ch8AzYiW%iBk>)Qc|*&>!}t0qGsS?QQ9DVF(i`uVVyPb zmEZk7<1$R{Xe;Y&p<9r4b_!<~4M)+fz)s(d&5eT+!{)xMG;`V<^KR2>E*?8w_T~*S z@8y62wcNxbt#SUB+D}RwgBPk!K_!mOavSzj5tVB)G$PYHQ%r9j@PBCbE9|rL_J0F0 zkZjYS@nG#-tP514J$NYjL=rXrY82LvF$bfYGlpNM{+_R1Pq*NxG(H-cD)Gux zLi<=R9iPB)g|w&-)bVlw)UdbT_dr}=a!m5Q2_i`FqIYKXcIUdm$&iW2j9p=LRCY50 zXkcv*{IcpxaX$k2G<@P%q8vEm7nOww+0(c3otn z-x|Tx_qY%JxuVI-_0^Hq-%%17RpFK7XHxQmBE|W~q61#uNTo_wwbUpSE0>S-lswGE zSAAf+!#5RU1)Ow)$^5UzPvzoGi0%hW8?b8-)598GXL7lRNWE48pu7B1izYOwJa$aJ z=ImkWTZFDW5TNvhTr@^ERlc7)^-+Ex`m1bnHG!x>7T~c2PmYuKQ{X<8ZJzF49-dmB zF7DR5xoD3nud)% zzK5zhK7^{OGOLn-9=~)FIMUSmcibyvimZr>%HE&l+`Gord&PY_{n^IR2Qk1NvmsD&& zcyY;X&2yaL-u+dgdg$_WaU9!ca+&VQA_7PhVF^!c4S{J0sFx{${>M>z(4 zXNAF?9G92LX?WISKFq)XyX6-SvnqBi>xKqA{{F?Oo@TE+HA|M< zkaI+)`%FP5&oYTKd4+7r`|{hDWfLE8hxq4|n*5vniGA+Y-8UKA!PYhRfq{AJe6>u& zAhtIU&1=3lEm+vyC!-CAf$eMeWRJxiy;wN!^v?Nt`JXDk5;Pl)o=bGyW6J>Eeyf$4 zkWccwd24bVOV1QIV~z4AC$z;ym5YWvyC80h5+sCrhNC85fon9C*B`r0ccUA-y>Iiq zgq&}hhxr^%EVt)}Fxu`qaBXt3OlkIKMZ^PZ?M>~c&5@2_iTykm&s`&=K;lr}bQr}L@#x94C z5K_5K_vI(@VcM6IYo!q=Z$9rC$CP@DO$MAS@;zpqXAYYp#OPJAcHMu2c28i)`9+YC zTu)7XN@8yk@rBpJc6Fc?^5@iM>U#H%XGKoX#v(cSK^=~Thq4Fp#d^16+l`!~EN%}g z+T~GJe${l5KEIq>YZS*8!HM-h{RvI7Q#b!5*_4OVhGSsMnI%R`oG3UV} z9Q^0*{|}Q;{y9wgH>LPjtiQY5|B6-eq(cAnyq}Q%c;5d7>F@U4zao*M0sd*`JpuhO z^ZpCa-@Dg;0{uxWDA*hu@Nd2B6XJic;{P?tzYFVMh>gKyq=3K4>=WjHkl264{QH^u kE8WImSBj?(_pc}H|9VN3C!_Pv01y?h__Tczlz*Q72cq6XGXMYp literal 0 HcmV?d00001 diff --git a/tests/unit/advanced/cloud/test_cloud.py b/tests/unit/advanced/cloud/test_cloud.py new file mode 100644 index 0000000000..6855beced2 --- /dev/null +++ b/tests/unit/advanced/cloud/test_cloud.py @@ -0,0 +1,77 @@ +# Copyright DataStax, Inc. +# +# Licensed under the DataStax DSE Driver License; +# you may not use this file except in compliance with the License. +# +# You may obtain a copy of the License at +# +# http://www.datastax.com/terms/datastax-dse-driver-license-terms +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +import os + +from cassandra import cloud + +from mock import patch + + +class CloudTests(unittest.TestCase): + + current_path = os.path.dirname(os.path.abspath(__file__)) + config_zip = { + 'secure_connect_bundle': os.path.join(current_path, './creds.zip') + } + metadata_json = """ + {"region":"local", + "contact_info": { + "type":"sni_proxy", + "local_dc":"dc1", + "contact_points":[ + "b13ae7b4-e711-4660-8dd1-bec57d37aa64", + "d4330144-5fb3-425a-86a1-431b3e4d0671", + "86537b87-91a9-4c59-b715-716486e72c42" + ], + "sni_proxy_address":"localhost:30002" + } + }""" + + @staticmethod + def _read_metadata_info_side_effect(config, _): + return config + + def _check_config(self, config): + self.assertEqual(config.username, 'cassandra') + self.assertEqual(config.password, 'cassandra') + self.assertEqual(config.host, 'localhost') + self.assertEqual(config.port, 30443) + self.assertEqual(config.keyspace, 'system') + self.assertEqual(config.local_dc, None) + self.assertIsNotNone(config.ssl_context) + self.assertIsNone(config.sni_host) + self.assertIsNone(config.sni_port) + self.assertIsNone(config.host_ids) + + def test_read_cloud_config_from_zip(self): + + with patch('cassandra.cloud.read_metadata_info', side_effect=self._read_metadata_info_side_effect): + config = cloud.get_cloud_config(self.config_zip) + + self._check_config(config) + + def test_parse_metadata_info(self): + config = cloud.CloudConfig() + cloud.parse_metadata_info(config, self.metadata_json) + self.assertEqual(config.sni_host, 'localhost') + self.assertEqual(config.sni_port, 30002) + self.assertEqual(config.local_dc, 'dc1') + + host_ids = [ + "b13ae7b4-e711-4660-8dd1-bec57d37aa64", + "d4330144-5fb3-425a-86a1-431b3e4d0671", + "86537b87-91a9-4c59-b715-716486e72c42" + ] + for host_id in host_ids: + self.assertIn(host_id, config.host_ids) From 83928f3763f76c85692f9efcec8026ff8049e739 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 30 Aug 2019 14:43:18 -0400 Subject: [PATCH 1556/2005] Add ProtocolVersion DSE_V2 and DSE_V1 --- cassandra/__init__.py | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 13a64afbdc..624357363c 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -164,7 +164,17 @@ class ProtocolVersion(object): v5, in beta from 3.x+ """ - SUPPORTED_VERSIONS = (V5, V4, V3, V2, V1) + DSE_V1 = 0x41 + """ + DSE private protocol v1, supported in DSE 5.1+ + """ + + DSE_V2 = 0x42 + """ + DSE private protocol v2, supported in DSE 6.0+ + """ + + SUPPORTED_VERSIONS = (DSE_V2, DSE_V1, V5, V4, V3, V2, V1) """ A tuple of all supported protocol versions """ @@ -203,11 +213,11 @@ def uses_int_query_flags(cls, version): @classmethod def uses_prepare_flags(cls, version): - return version >= cls.V5 + return version >= cls.V5 and version != cls.DSE_V1 @classmethod def uses_prepared_metadata(cls, version): - return version >= cls.V5 + return version >= cls.V5 and version != cls.DSE_V1 @classmethod def uses_error_code_map(cls, version): @@ -215,7 +225,16 @@ def uses_error_code_map(cls, version): @classmethod def uses_keyspace_flag(cls, version): - return version >= cls.V5 + # OSS was >= cls.V5... we should verify that this change is correct. + return version >= cls.DSE_V2 + + @classmethod + def has_continuous_paging_support(cls, version): + return version >= cls.DSE_V1 + + @classmethod + def has_continuous_paging_next_pages(cls, version): + return version >= cls.DSE_V2 class WriteType(object): From 4518584a2b45fcc6360ae8c554f099c48acb93ac Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 30 Aug 2019 15:16:13 -0400 Subject: [PATCH 1557/2005] Add cassandra.cloud in setup.py packages --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index b79774476b..2712897112 100644 --- a/setup.py +++ b/setup.py @@ -416,7 +416,7 @@ def run_setup(extensions): url='http://github.com/datastax/python-driver', author='Tyler Hobbs', author_email='tyler@datastax.com', - packages=['cassandra', 'cassandra.io', 'cassandra.cqlengine'], + packages=['cassandra', 'cassandra.io', 'cassandra.cqlengine', 'cassandra.cloud'], keywords='cassandra,cql,orm', include_package_data=True, install_requires=dependencies, From bdea91be43d53b82fabcc0562cd07f6d111e2567 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 3 Sep 2019 07:25:29 -0500 Subject: [PATCH 1558/2005] Fix twisted tests (PYTHON-1037) (#1047) * Fix test_custom_endpoint to work with twisted * Version gate twisted so it doesnt take forever to build on windows --- test-requirements.txt | 3 ++- tests/integration/simulacron/test_endpoint.py | 12 +++++++----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/test-requirements.txt b/test-requirements.txt index e8552d57ea..0eafa67648 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -7,7 +7,8 @@ unittest2 pytz sure pure-sasl -twisted[tls]==19.2.1 +twisted; python_version >= '3.5' +twisted[tls]==19.2.1; python_version < '3.5' gevent>=1.0 eventlet cython>=0.20,<0.30 diff --git a/tests/integration/simulacron/test_endpoint.py b/tests/integration/simulacron/test_endpoint.py index 1af5995f94..ba95972960 100644 --- a/tests/integration/simulacron/test_endpoint.py +++ b/tests/integration/simulacron/test_endpoint.py @@ -27,8 +27,9 @@ @total_ordering class AddressEndPoint(EndPoint): - def __init__(self, address): + def __init__(self, address, port=9042): self._address = address + self._port = port @property def address(self): @@ -36,14 +37,14 @@ def address(self): @property def port(self): - return None + return self._port def resolve(self): - return self._address, 9042 # connection purpose + return self._address, self._port # connection purpose def __eq__(self, other): return isinstance(other, AddressEndPoint) and \ - self.address == other.address + self.address == other.address def __hash__(self): return hash(self.address) @@ -99,7 +100,8 @@ def test_custom_endpoint(self): cluster = Cluster( contact_points=[AddressEndPoint('127.0.0.1')], protocol_version=PROTOCOL_VERSION, - endpoint_factory=AddressEndPointFactory() + endpoint_factory=AddressEndPointFactory(), + compression=False, ) cluster.connect(wait_for_all_pools=True) From b51b89cfea32c0e0a11cf8cb0d0a377774331f3d Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 3 Sep 2019 08:00:07 -0500 Subject: [PATCH 1559/2005] [PYTHON-1037] Update test schedules so we're nicer on jenkins (#1045) --- build.yaml | 144 +++++++++++++++++++++++++++++++---------------------- 1 file changed, 84 insertions(+), 60 deletions(-) diff --git a/build.yaml b/build.yaml index afca3c7c0e..ca3723736b 100644 --- a/build.yaml +++ b/build.yaml @@ -1,15 +1,4 @@ schedules: - commit_master: - schedule: per_commit - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='libev' - EXCLUDE_LONG=1 - matrix: - exclude: - - python: [3.4, 3.6, 3.7] - nightly_master: schedule: nightly branches: @@ -19,9 +8,11 @@ schedules: matrix: exclude: - python: [3.4, 3.6, 3.7] + - cassandra: ['2.1', '3.0', 'test-dse'] commit_long_test: schedule: per_commit + disable_pull_requests: true branches: include: [/long-python.*/] env_vars: | @@ -29,10 +20,11 @@ schedules: matrix: exclude: - python: [3.4, 3.6, 3.7] - - cassandra: ['2.0', '2.1', '3.0'] + - cassandra: ['2.1', '3.0', 'test-dse'] commit_branches: schedule: per_commit + disable_pull_requests: true branches: include: [/python.*/] env_vars: | @@ -41,7 +33,7 @@ schedules: matrix: exclude: - python: [3.4, 3.6, 3.7] - - cassandra: ['2.0', '2.1', '3.0'] + - cassandra: ['2.1', '3.0', 'test-dse'] commit_branches_dev: schedule: per_commit @@ -62,53 +54,71 @@ schedules: env_vars: | EVENT_LOOP_MANAGER='libev' - # weekly_libev: - # schedule: 0 10 * * 6 - # branches: - # include: [master] - # env_vars: | - # EVENT_LOOP_MANAGER='libev' - - # weekly_gevent: - # schedule: 0 14 * * 6 - # branches: - # include: [master] - # env_vars: | - # EVENT_LOOP_MANAGER='gevent' - # matrix: - # exclude: - # - python: [3.4, 3.5, 3.6, 3.7] - - # weekly_eventlet: - # schedule: 0 18 * * 6 - # branches: - # include: [master] - # env_vars: | - # EVENT_LOOP_MANAGER='eventlet' - - # weekly_asyncio: - # schedule: 0 22 * * 6 - # branches: - # include: [master] - # env_vars: | - # EVENT_LOOP_MANAGER='asyncio' - # matrix: - # exclude: - # - python: [2.7] - - # weekly_async: - # schedule: 0 10 * * 7 - # branches: - # include: [master] - # env_vars: | - # EVENT_LOOP_MANAGER='asyncore' - - # weekly_twister: - # schedule: 0 14 * * 7 - # branches: - # include: [master] - # env_vars: | - # EVENT_LOOP_MANAGER='twisted' + weekly_master: + schedule: 0 10 * * 6 + branches: + include: [master] + env_vars: | + EVENT_LOOP_MANAGER='libev' + matrix: + exclude: + - python: [2.7, 3.5] + - cassandra: ['2.2', '3.1'] + + weekly_gevent: + schedule: 0 14 * * 6 + branches: + include: [master] + env_vars: | + EVENT_LOOP_MANAGER='gevent' + JUST_EVENT_LOOP=1 + matrix: + exclude: + - python: [3.4] + + weekly_eventlet: + schedule: 0 18 * * 6 + branches: + include: [master] + env_vars: | + EVENT_LOOP_MANAGER='eventlet' + JUST_EVENT_LOOP=1 + matrix: + exclude: + - python: [3.4] + + weekly_asyncio: + schedule: 0 22 * * 6 + branches: + include: [master] + env_vars: | + EVENT_LOOP_MANAGER='asyncio' + JUST_EVENT_LOOP=1 + matrix: + exclude: + - python: [2.7] + + weekly_async: + schedule: 0 10 * * 7 + branches: + include: [master] + env_vars: | + EVENT_LOOP_MANAGER='asyncore' + JUST_EVENT_LOOP=1 + matrix: + exclude: + - python: [3.4] + + weekly_twister: + schedule: 0 14 * * 7 + branches: + include: [master] + env_vars: | + EVENT_LOOP_MANAGER='twisted' + JUST_EVENT_LOOP=1 + matrix: + exclude: + - python: [3.4] upgrade_tests: schedule: adhoc @@ -193,7 +203,21 @@ build: EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml tests/unit/ || true EVENT_LOOP_MANAGER=eventlet VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || true EVENT_LOOP_MANAGER=gevent VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || true + fi + if [ -n "$JUST_EVENT_LOOP" ]; then + echo "Running integration event loop subset with $EVENT_LOOP_MANAGER" + EVENT_LOOP_TESTS=( + "tests/integration/standard/test_cluster.py" + "tests/integration/standard/test_concurrent.py" + "tests/integration/standard/test_connection.py" + "tests/integration/standard/test_control_connection.py" + "tests/integration/standard/test_metrics.py" + "tests/integration/standard/test_query.py" + "tests/integration/simulacron/test_endpoint.py" + ) + EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml ${EVENT_LOOP_TESTS[@]} || true + exit 0 fi echo "Running with event loop manager: $EVENT_LOOP_MANAGER" From 30a0e27cd1b8999267c146f0a93adf962a50790b Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 3 Sep 2019 08:29:07 -0500 Subject: [PATCH 1560/2005] [ninja] Adding 2.7 back into weekly build as it doesn't make sense to exclude it --- build.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/build.yaml b/build.yaml index ca3723736b..b389c2abc3 100644 --- a/build.yaml +++ b/build.yaml @@ -37,6 +37,7 @@ schedules: commit_branches_dev: schedule: per_commit + disable_pull_requests: true branches: include: [/dev-python.*/] env_vars: | @@ -62,7 +63,7 @@ schedules: EVENT_LOOP_MANAGER='libev' matrix: exclude: - - python: [2.7, 3.5] + - python: [3.5] - cassandra: ['2.2', '3.1'] weekly_gevent: From dc6accb382ea1c1813f57103dab07df167ac0860 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 3 Sep 2019 12:42:15 -0400 Subject: [PATCH 1561/2005] Support running test On C*/DDAC and DSE --- tests/__init__.py | 35 ++- tests/integration/__init__.py | 221 +++++++++++++----- tests/integration/standard/test_connection.py | 25 +- 3 files changed, 194 insertions(+), 87 deletions(-) diff --git a/tests/__init__.py b/tests/__init__.py index 6260583beb..cea5a872c6 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -46,22 +46,22 @@ def is_gevent_monkey_patched(): return socket.socket is gevent.socket.socket -def is_gevent_time_monkey_patched(): - import gevent.monkey - return "time" in gevent.monkey.saved +def is_monkey_patched(): + return is_gevent_monkey_patched() or is_eventlet_monkey_patched() +MONKEY_PATCH_LOOP = bool(os.getenv('MONKEY_PATCH_LOOP', False)) +EVENT_LOOP_MANAGER = os.getenv('EVENT_LOOP_MANAGER', "libev") -def is_eventlet_time_monkey_patched(): - import eventlet - return eventlet.patcher.is_monkey_patched('time') +# If set to to true this will force the Cython tests to run regardless of whether they are installed +cython_env = os.getenv('VERIFY_CYTHON', "False") -def is_monkey_patched(): - return is_gevent_monkey_patched() or is_eventlet_monkey_patched() +VERIFY_CYTHON = False +if(cython_env == 'True'): + VERIFY_CYTHON = True thread_pool_executor_class = ThreadPoolExecutor -EVENT_LOOP_MANAGER = os.getenv('EVENT_LOOP_MANAGER', "libev") if "gevent" in EVENT_LOOP_MANAGER: import gevent.monkey gevent.monkey.patch_all() @@ -94,20 +94,15 @@ def is_monkey_patched(): try: from cassandra.io.libevreactor import LibevConnection connection_class = LibevConnection - except ImportError: + except ImportError as e: + log.debug('Could not import LibevConnection, ' + 'using connection_class=None; ' + 'failed with error:\n {}'.format( + repr(e) + )) connection_class = None -# If set to to true this will force the Cython tests to run regardless of whether they are installed -cython_env = os.getenv('VERIFY_CYTHON', "False") - - -VERIFY_CYTHON = False - -if(cython_env == 'True'): - VERIFY_CYTHON = True - - def is_windows(): return "Windows" in platform.system() diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 02f9851477..c897cf03f6 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + import os from cassandra.cluster import Cluster @@ -34,11 +35,10 @@ import six import shutil -from cassandra import OperationTimedOut, ReadTimeout, ReadFailure, WriteTimeout, WriteFailure, AlreadyExists, \ +from cassandra import OperationTimedOut, ReadTimeout, ReadFailure, WriteTimeout, WriteFailure, AlreadyExists,\ InvalidRequest -from cassandra.cluster import NoHostAvailable - from cassandra.protocol import ConfigurationException +from cassandra import ProtocolVersion try: from ccmlib.dse_cluster import DseCluster @@ -105,6 +105,37 @@ def cmd_line_args_to_dict(env_var): return args +def _get_cass_version_from_dse(dse_version): + if dse_version.startswith('4.6') or dse_version.startswith('4.5'): + raise Exception("Cassandra Version 2.0 not supported anymore") + elif dse_version.startswith('4.7') or dse_version.startswith('4.8'): + cass_ver = "2.1" + elif dse_version.startswith('5.0'): + cass_ver = "3.0" + elif dse_version.startswith('5.1'): + # TODO: refactor this method to use packaging.Version everywhere + if Version(dse_version) >= Version('5.1.2'): + cass_ver = "3.11" + else: + cass_ver = "3.10" + elif dse_version.startswith('6.0'): + if dse_version == '6.0.0': + cass_ver = '4.0.0.2284' + elif dse_version == '6.0.1': + cass_ver = '4.0.0.2349' + else: + cass_ver = '4.0.0.' + ''.join(dse_version.split('.')) + elif dse_version.startswith('6.7'): + if dse_version == '6.7.0': + cass_ver = "4.0.0.67" + else: + cass_ver = '4.0.0.' + ''.join(dse_version.split('.')) + else: + log.error("Unknown dse version found {0}, defaulting to 2.1".format(dse_version)) + cass_ver = "2.1" + return Version(cass_ver) + + def _get_dse_version_from_cass(cass_version): if cass_version.startswith('2.1'): dse_ver = "4.8.15" @@ -123,36 +154,46 @@ def _get_dse_version_from_cass(cass_version): KEEP_TEST_CLUSTER = bool(os.getenv('KEEP_TEST_CLUSTER', False)) SIMULACRON_JAR = os.getenv('SIMULACRON_JAR', None) -CASSANDRA_IP = os.getenv('CASSANDRA_IP', '127.0.0.1') -CASSANDRA_DIR = os.getenv('CASSANDRA_DIR', None) - -default_cassandra_version = '3.11' -cv_string = os.getenv('CASSANDRA_VERSION', default_cassandra_version) -mcv_string = os.getenv('MAPPED_CASSANDRA_VERSION', None) -try: - cassandra_version = Version(cv_string) # env var is set to test-dse -except: - # fallback to MAPPED_CASSANDRA_VERSION - cassandra_version = Version(mcv_string) -CASSANDRA_VERSION = Version(mcv_string) if mcv_string else cassandra_version -CCM_VERSION = cassandra_version if mcv_string else CASSANDRA_VERSION +# Supported Clusters: Cassandra, DDAC, DSE +DSE_VERSION = None +if os.getenv('DSE_VERSION', None): # we are testing against DSE + DSE_VERSION = Version(os.getenv('DSE_VERSION', None)) + DSE_CRED = os.getenv('DSE_CREDS', None) + CASSANDRA_VERSION = _get_cass_version_from_dse(DSE_VERSION.base_version) + CCM_VERSION = DSE_VERSION.base_version +else: # we are testing against Cassandra or DDAC + cv_string = os.getenv('CASSANDRA_VERSION', None) + mcv_string = os.getenv('MAPPED_CASSANDRA_VERSION', None) + try: + cassandra_version = Version(cv_string) # env var is set to test-dse for DDAC + except: + # fallback to MAPPED_CASSANDRA_VERSION + cassandra_version = Version(mcv_string) -default_dse_version = _get_dse_version_from_cass(CASSANDRA_VERSION.base_version) + CASSANDRA_VERSION = Version(mcv_string) if mcv_string else cassandra_version + CCM_VERSION = cassandra_version if mcv_string else CASSANDRA_VERSION -DSE_VERSION = Version(os.getenv('DSE_VERSION', default_dse_version)) +CASSANDRA_IP = os.getenv('CLUSTER_IP', '127.0.0.1') +CASSANDRA_DIR = os.getenv('CASSANDRA_DIR', None) CCM_KWARGS = {} -if CASSANDRA_DIR: +if DSE_VERSION: + log.info('Using DSE version: %s', DSE_VERSION) + if not CASSANDRA_DIR: + CCM_KWARGS['version'] = DSE_VERSION + if DSE_CRED: + log.info("Using DSE credentials file located at {0}".format(DSE_CRED)) + CCM_KWARGS['dse_credentials_file'] = DSE_CRED +elif CASSANDRA_DIR: log.info("Using Cassandra dir: %s", CASSANDRA_DIR) CCM_KWARGS['install_dir'] = CASSANDRA_DIR - else: - log.info('Using Cassandra version: %s', CASSANDRA_VERSION) - log.info('Using CCM version: %s', CCM_VERSION) + log.info('Using Cassandra version: %s', CCM_VERSION) CCM_KWARGS['version'] = CCM_VERSION + #This changes the default contact_point parameter in Cluster -def set_default_cass_ip(): +def set_default_cluster_ip(): if CASSANDRA_IP.startswith("127.0.0."): return defaults = list(Cluster.__init__.__defaults__) @@ -174,16 +215,24 @@ def set_default_beta_flag_true(): def get_default_protocol(): if CASSANDRA_VERSION >= Version('4.0'): - set_default_beta_flag_true() - return 5 - elif CASSANDRA_VERSION >= Version('2.2'): + if DSE_VERSION: + return ProtocolVersion.DSE_V2 + else: + set_default_beta_flag_true() + return ProtocolVersion.V5 + if CASSANDRA_VERSION >= Version('3.10'): + if DSE_VERSION: + return ProtocolVersion.DSE_V1 + else: + return 4 + if CASSANDRA_VERSION >= Version('2.2'): return 4 elif CASSANDRA_VERSION >= Version('2.1'): return 3 elif CASSANDRA_VERSION >= Version('2.0'): return 2 else: - return 1 + raise Exception("Running tests with an unsupported Cassandra version: {0}".format(CASSANDRA_VERSION)) def get_supported_protocol_versions(): @@ -193,22 +242,31 @@ def get_supported_protocol_versions(): 2.1 -> 3, 2, 1 2.2 -> 4, 3, 2, 1 3.X -> 4, 3 - 3.10 -> 5(beta),4,3 + 3.10(C*) -> 5(beta),4,3 + 3.10(DSE) -> DSE_V1,4,3 + 4.0(C*) -> 5(beta),4,3 + 4.0(DSE) -> DSE_v2, DSE_V1,4,3 ` """ if CASSANDRA_VERSION >= Version('4.0'): - return (3, 4, 5) + if DSE_VERSION: + return (3, 4, ProtocolVersion.DSE_V1, ProtocolVersion.DSE_V2) + else: + return (3, 4, 5) elif CASSANDRA_VERSION >= Version('3.10'): - return (3, 4) + if DSE_VERSION: + return (3, 4, ProtocolVersion.DSE_V1) + else: + return (3, 4) elif CASSANDRA_VERSION >= Version('3.0'): return (3, 4) elif CASSANDRA_VERSION >= Version('2.2'): - return (1, 2, 3, 4) + return (1,2, 3, 4) elif CASSANDRA_VERSION >= Version('2.1'): return (1, 2, 3) elif CASSANDRA_VERSION >= Version('2.0'): return (1, 2) else: - return (1, ) + return (1,) def get_unsupported_lower_protocol(): @@ -216,7 +274,6 @@ def get_unsupported_lower_protocol(): This is used to determine the lowest protocol version that is NOT supported by the version of C* running """ - if CASSANDRA_VERSION >= Version('3.0'): return 2 else: @@ -229,14 +286,24 @@ def get_unsupported_upper_protocol(): supported by the version of C* running """ + if CASSANDRA_VERSION >= Version('4.0'): + if DSE_VERSION: + return None + else: + return ProtocolVersion.DSE_V1 + if CASSANDRA_VERSION >= Version('3.10'): + if DSE_VERSION: + return ProtocolVersion.DSE_V2 + else: + return 5 if CASSANDRA_VERSION >= Version('2.2'): - return None - if CASSANDRA_VERSION >= Version('2.1'): + return 5 + elif CASSANDRA_VERSION >= Version('2.1'): return 4 elif CASSANDRA_VERSION >= Version('2.0'): return 3 else: - return None + return 2 default_protocol_version = get_default_protocol() @@ -260,16 +327,23 @@ def _id_and_mark(f): lessthenprotocolv4 = unittest.skipUnless(PROTOCOL_VERSION < 4, 'Protocol versions 4 or greater not supported') greaterthanprotocolv3 = unittest.skipUnless(PROTOCOL_VERSION >= 4, 'Protocol versions less than 4 are not supported') protocolv5 = unittest.skipUnless(5 in get_supported_protocol_versions(), 'Protocol versions less than 5 are not supported') - greaterthancass20 = unittest.skipUnless(CASSANDRA_VERSION >= Version('2.1'), 'Cassandra version 2.1 or greater required') greaterthancass21 = unittest.skipUnless(CASSANDRA_VERSION >= Version('2.2'), 'Cassandra version 2.2 or greater required') greaterthanorequalcass30 = unittest.skipUnless(CASSANDRA_VERSION >= Version('3.0'), 'Cassandra version 3.0 or greater required') +greaterthanorequalcass31 = unittest.skipUnless(CASSANDRA_VERSION >= Version('3.1'), 'Cassandra version 3.1 or greater required') greaterthanorequalcass36 = unittest.skipUnless(CASSANDRA_VERSION >= Version('3.6'), 'Cassandra version 3.6 or greater required') greaterthanorequalcass3_10 = unittest.skipUnless(CASSANDRA_VERSION >= Version('3.10'), 'Cassandra version 3.10 or greater required') greaterthanorequalcass3_11 = unittest.skipUnless(CASSANDRA_VERSION >= Version('3.11'), 'Cassandra version 3.10 or greater required') greaterthanorequalcass40 = unittest.skipUnless(CASSANDRA_VERSION >= Version('4.0'), 'Cassandra version 4.0 or greater required') lessthanorequalcass40 = unittest.skipIf(CASSANDRA_VERSION >= Version('4.0'), 'Cassandra version 4.0 or greater required') lessthancass30 = unittest.skipUnless(CASSANDRA_VERSION < Version('3.0'), 'Cassandra version less then 3.0 required') +greaterthanorequaldse67 = unittest.skipUnless(DSE_VERSION and DSE_VERSION >= Version('6.7'), "DSE 6.7 or greater required for this test") +greaterthanorequaldse60 = unittest.skipUnless(DSE_VERSION and DSE_VERSION >= Version('6.0'), "DSE 6.0 or greater required for this test") +greaterthanorequaldse51 = unittest.skipUnless(DSE_VERSION and DSE_VERSION >= Version('5.1'), "DSE 5.1 or greater required for this test") +greaterthanorequaldse50 = unittest.skipUnless(DSE_VERSION and DSE_VERSION >= Version('5.0'), "DSE 5.0 or greater required for this test") +lessthandse51 = unittest.skipUnless(DSE_VERSION and DSE_VERSION < Version('5.1'), "DSE version less than 5.1 required") +lessthandse60 = unittest.skipUnless(DSE_VERSION and DSE_VERSION < Version('6.0'), "DSE version less than 6.0 required") + pypy = unittest.skipUnless(platform.python_implementation() == "PyPy", "Test is skipped unless it's on PyPy") notpy3 = unittest.skipIf(sys.version_info >= (3, 0), "Test not applicable for Python 3.x runtime") requiresmallclockgranularity = unittest.skipIf("Windows" in platform.system() or "asyncore" in EVENT_LOOP_MANAGER, @@ -316,8 +390,9 @@ def use_singledc(start=True, workloads=[]): use_cluster(CLUSTER_NAME, [3], start=start, workloads=workloads) -def use_single_node(start=True, workloads=[]): - use_cluster(SINGLE_NODE_CLUSTER_NAME, [1], start=start, workloads=workloads) +def use_single_node(start=True, workloads=[], configuration_options={}, dse_options={}): + use_cluster(SINGLE_NODE_CLUSTER_NAME, [1], start=start, workloads=workloads, + configuration_options=configuration_options, dse_options=dse_options) def remove_cluster(): @@ -343,26 +418,37 @@ def remove_cluster(): raise RuntimeError("Failed to remove cluster after 100 attempts") -def is_current_cluster(cluster_name, node_counts): +def is_current_cluster(cluster_name, node_counts, workloads): global CCM_CLUSTER if CCM_CLUSTER and CCM_CLUSTER.name == cluster_name: if [len(list(nodes)) for dc, nodes in groupby(CCM_CLUSTER.nodelist(), lambda n: n.data_center)] == node_counts: + for node in CCM_CLUSTER.nodelist(): + if set(node.workloads) != set(workloads): + print("node workloads don't match creating new cluster") + return False return True return False +def start_cluster_wait_for_up(cluster): + cluster.start(wait_for_binary_proto=True) + # Added to wait for slow nodes to start up + log.debug("Cluster started waiting for binary ports") + for node in CCM_CLUSTER.nodes.values(): + wait_for_node_socket(node, 300) + log.debug("Binary port are open") + + def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, set_keyspace=True, ccm_options=None, - configuration_options={}, dse_cluster=False, dse_options={}, - dse_version=None): + configuration_options={}, dse_options={}): + dse_cluster = True if DSE_VERSION else False if not workloads: workloads = [] - if (dse_version and not dse_cluster): - raise ValueError('specified dse_version {} but not dse_cluster'.format(dse_version)) - set_default_cass_ip() + set_default_cluster_ip() - if ccm_options is None and dse_cluster: - ccm_options = {"version": dse_version or DSE_VERSION} + if ccm_options is None and DSE_VERSION: + ccm_options = {"version": DSE_VERSION} elif ccm_options is None: ccm_options = CCM_KWARGS.copy() @@ -382,7 +468,7 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, setup_keyspace(ipformat=ipformat, wait=False) return - if is_current_cluster(cluster_name, nodes): + if is_current_cluster(cluster_name, nodes, workloads): log.debug("Using existing cluster, matching topology: {0}".format(cluster_name)) else: if CCM_CLUSTER: @@ -395,6 +481,7 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, CCM_CLUSTER.clear() CCM_CLUSTER.set_install_dir(**ccm_options) CCM_CLUSTER.set_configuration_options(configuration_options) + CCM_CLUSTER.set_dse_configuration_options(dse_options) except Exception: ex_type, ex, tb = sys.exc_info() log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) @@ -418,6 +505,9 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True}) if 'spark' in workloads: config_options = {"initial_spark_worker_resources": 0.1} + if dse_version >= Version('6.7'): + log.debug("Disabling AlwaysON SQL for a DSE 6.7 Cluster") + config_options['alwayson_sql_options'] = {'enabled': False} CCM_CLUSTER.set_dse_configuration_options(config_options) common.switch_cluster(path, cluster_name) CCM_CLUSTER.set_configuration_options(configuration_options) @@ -437,6 +527,7 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, try: jvm_args = [] + # This will enable the Mirroring query handler which will echo our custom payload k,v pairs back if 'graph' not in workloads: @@ -447,10 +538,12 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, node.set_workloads(workloads) if start: log.debug("Starting CCM cluster: {0}".format(cluster_name)) - CCM_CLUSTER.start(wait_for_binary_proto=True, wait_other_notice=True, jvm_args=jvm_args) + CCM_CLUSTER.start(jvm_args=jvm_args, wait_for_binary_proto=True) # Added to wait for slow nodes to start up + log.debug("Cluster started waiting for binary ports") for node in CCM_CLUSTER.nodes.values(): - wait_for_node_socket(node, 120) + wait_for_node_socket(node, 300) + log.debug("Binary ports are open") if set_keyspace: setup_keyspace(ipformat=ipformat) except Exception: @@ -551,15 +644,20 @@ def drop_keyspace_shutdown_cluster(keyspace_name, session, cluster): cluster.shutdown() -def setup_keyspace(ipformat=None, wait=True): +def setup_keyspace(ipformat=None, wait=True, protocol_version=None): # wait for nodes to startup if wait: time.sleep(10) + if protocol_version: + _protocol_version = protocol_version + else: + _protocol_version = PROTOCOL_VERSION + if not ipformat: - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = Cluster(protocol_version=_protocol_version) else: - cluster = Cluster(contact_points=["::1"], protocol_version=PROTOCOL_VERSION) + cluster = Cluster(contact_points=["::1"], protocol_version=_protocol_version) session = cluster.connect() try: @@ -652,8 +750,8 @@ def create_keyspace(cls, rf): execute_with_long_wait_retry(cls.session, ddl) @classmethod - def common_setup(cls, rf, keyspace_creation=True, create_class_table=False, metrics=False): - cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION, metrics_enabled=metrics) + def common_setup(cls, rf, keyspace_creation=True, create_class_table=False, **cluster_kwargs): + cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION, **cluster_kwargs) cls.session = cls.cluster.connect(wait_for_all_pools=True) cls.ks_name = cls.__name__.lower() if keyspace_creation: @@ -787,7 +885,11 @@ class BasicSharedKeyspaceUnitTestCaseRF3WM(BasicSharedKeyspaceUnitTestCase): """ @classmethod def setUpClass(self): - self.common_setup(3, True, True, True) + self.common_setup(3, True, True, metrics_enabled=True) + + @classmethod + def tearDownClass(cls): + drop_keyspace_shutdown_cluster(cls.ks_name, cls.session, cls.cluster) class BasicSharedKeyspaceUnitTestCaseWFunctionTable(BasicSharedKeyspaceUnitTestCase): @@ -828,3 +930,10 @@ def setUp(self): def tearDown(self): self.cluster.shutdown() + + +def assert_startswith(s, prefix): + if not s.startswith(prefix): + raise AssertionError( + '{} does not start with {}'.format(repr(s), repr(prefix)) + ) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 595fc12a2c..4af48a562c 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -18,6 +18,7 @@ import unittest # noqa from functools import partial +from mock import patch import logging from six.moves import range import sys @@ -27,7 +28,7 @@ from unittest import SkipTest from cassandra import ConsistencyLevel, OperationTimedOut -from cassandra.cluster import NoHostAvailable, ConnectionShutdown, Cluster +from cassandra.cluster import NoHostAvailable, ConnectionShutdown, Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT import cassandra.io.asyncorereactor from cassandra.io.asyncorereactor import AsyncoreConnection from cassandra.protocol import QueryMessage @@ -55,20 +56,22 @@ def setup_module(): class ConnectionTimeoutTest(unittest.TestCase): def setUp(self): - self.defaultInFlight = Connection.max_in_flight - Connection.max_in_flight = 2 - self.cluster = Cluster( - protocol_version=PROTOCOL_VERSION, - load_balancing_policy=HostFilterPolicy( - RoundRobinPolicy(), predicate=lambda host: host.address == CASSANDRA_IP - ) - ) + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles= + {EXEC_PROFILE_DEFAULT: ExecutionProfile( + load_balancing_policy=HostFilterPolicy( + RoundRobinPolicy(), predicate=lambda host: host.address == CASSANDRA_IP + ) + ) + } + ) + self.session = self.cluster.connect() def tearDown(self): - Connection.max_in_flight = self.defaultInFlight self.cluster.shutdown() + @patch('cassandra.connection.Connection.max_in_flight', 2) def test_in_flight_timeout(self): """ Test to ensure that connection id fetching will block when max_id is reached/ @@ -85,7 +88,7 @@ def test_in_flight_timeout(self): """ futures = [] query = '''SELECT * FROM system.local''' - for i in range(100): + for _ in range(100): futures.append(self.session.execute_async(query)) for future in futures: From 9394f32f6116772159db36cb6b0a0c5959f25866 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 3 Sep 2019 14:37:19 -0500 Subject: [PATCH 1562/2005] Adding auth_provider to cloud connect --- docs/getting_started.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 98baaaaec2..8b72915bf3 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -411,5 +411,6 @@ Connecting to DataStax Cloud cloud_config = { 'secure_connect_bundle': '/path/to/secure-connect-dbname.zip' } - cluster = Cluster(cloud=cloud_config) + auth_provider = PlainTextAuthProvider(username='user', password='pass') + cluster = Cluster(cloud=cloud_config, auth_provider=auth_provider) session = cluster.connect() From 5b568ab1b2abea162657ee687ab101094b6871c1 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 4 Sep 2019 09:42:18 -0400 Subject: [PATCH 1563/2005] Refactor protocol.ResultMessage --- cassandra/cluster.py | 56 ++++++++------- cassandra/metadata.py | 12 ++-- cassandra/protocol.py | 149 +++++++++++++++++++++------------------ cassandra/row_parser.pyx | 16 ++--- 4 files changed, 122 insertions(+), 111 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 9818ce2f12..3811be12e3 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -61,7 +61,8 @@ TruncateError, ServerError, BatchMessage, RESULT_KIND_PREPARED, RESULT_KIND_SET_KEYSPACE, RESULT_KIND_ROWS, - RESULT_KIND_SCHEMA_CHANGE, ProtocolHandler) + RESULT_KIND_SCHEMA_CHANGE, ProtocolHandler, + RESULT_KIND_VOID) from cassandra.metadata import Metadata, protect_name, murmur3 from cassandra.policies import (TokenAwarePolicy, DCAwareRoundRobinPolicy, SimpleConvictionPolicy, ExponentialReconnectionPolicy, HostDistance, @@ -431,7 +432,7 @@ class Cluster(object): server will be automatically used. """ - protocol_version = ProtocolVersion.V4 + protocol_version = ProtocolVersion.DSE_V2 """ The maximum version of the native protocol to use. @@ -2534,18 +2535,18 @@ def prepare(self, query, custom_payload=None, keyspace=None): future = ResponseFuture(self, message, query=None, timeout=self.default_timeout) try: future.send_request() - query_id, bind_metadata, pk_indexes, result_metadata, result_metadata_id = future.result() + response = future.result().one() except Exception: log.exception("Error preparing query:") raise prepared_keyspace = keyspace if keyspace else None prepared_statement = PreparedStatement.from_message( - query_id, bind_metadata, pk_indexes, self.cluster.metadata, query, self.keyspace, - self._protocol_version, result_metadata, result_metadata_id) + response.query_id, response.bind_metadata, response.pk_indexes, self.cluster.metadata, query, self.keyspace, + self._protocol_version, response.column_metadata, response.result_metadata_id) prepared_statement.custom_payload = future.custom_payload - self.cluster.add_prepared(query_id, prepared_statement) + self.cluster.add_prepared(response.query_id, prepared_statement) if self.cluster.prepare_on_all_hosts: host = future._current_host @@ -3163,15 +3164,15 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, peers_result, local_result = connection.wait_for_responses( peers_query, local_query, timeout=self._timeout) - peers_result = dict_factory(*peers_result.results) + peers_result = dict_factory(peers_result.column_names, peers_result.parsed_rows) partitioner = None token_map = {} found_hosts = set() - if local_result.results: + if local_result.parsed_rows: found_hosts.add(connection.endpoint) - local_rows = dict_factory(*(local_result.results)) + local_rows = dict_factory(local_result.column_names, local_result.parsed_rows) local_row = local_rows[0] cluster_name = local_row["cluster_name"] self._cluster.metadata.cluster_name = cluster_name @@ -3202,7 +3203,9 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, success, local_rpc_address_result = connection.wait_for_response( local_rpc_address_query, timeout=self._timeout, fail_on_error=False) if success: - row = dict_factory(*local_rpc_address_result.results) + row = dict_factory( + local_rpc_address_result.column_names, + local_rpc_address_result.parsed_rows) host.broadcast_rpc_address = row[0]['rpc_address'] else: host.broadcast_rpc_address = connection.endpoint.address @@ -3394,11 +3397,11 @@ def wait_for_schema_agreement(self, connection=None, preloaded_results=None, wai return False def _get_schema_mismatches(self, peers_result, local_result, local_address): - peers_result = dict_factory(*peers_result.results) + peers_result = dict_factory(peers_result.column_names, peers_result.parsed_rows) versions = defaultdict(set) - if local_result.results: - local_row = dict_factory(*local_result.results)[0] + if local_result.parsed_rows: + local_row = dict_factory(local_result.column_names, local_result.parsed_rows)[0] if local_row.get("schema_version"): versions[local_row.get("schema_version")].add(local_address) @@ -3944,7 +3947,7 @@ def _set_result(self, host, connection, pool, response): # event loop thread. if session: session._set_keyspace_for_all_pools( - response.results, self._set_keyspace_completed) + response.new_keyspace, self._set_keyspace_completed) elif response.kind == RESULT_KIND_SCHEMA_CHANGE: # refresh the schema before responding, but do it in another # thread instead of the event loop thread @@ -3952,15 +3955,17 @@ def _set_result(self, host, connection, pool, response): self.session.submit( refresh_schema_and_set_result, self.session.cluster.control_connection, - self, connection, **response.results) + self, connection, **response.schema_change_event) + elif response.kind == RESULT_KIND_ROWS: + self._paging_state = response.paging_state + self._col_types = response.column_names + self._col_names = response.column_types + self._set_final_result(self.row_factory( + response.column_names, response.parsed_rows)) + elif response.kind == RESULT_KIND_VOID: + self._set_final_result(None) else: - results = getattr(response, 'results', None) - if results is not None and response.kind == RESULT_KIND_ROWS: - self._paging_state = response.paging_state - self._col_types = response.col_types - self._col_names = results[0] - results = self.row_factory(*results) - self._set_final_result(results) + self._set_final_result(response) elif isinstance(response, ErrorMessage): retry_policy = self._retry_policy @@ -4085,11 +4090,8 @@ def _execute_after_prepare(self, host, connection, pool, response): if isinstance(response, ResultMessage): if response.kind == RESULT_KIND_PREPARED: if self.prepared_statement: - # result metadata is the only thing that could have - # changed from an alter - (_, _, _, - self.prepared_statement.result_metadata, - new_metadata_id) = response.results + self.prepared_statement.result_metadata = response.column_metadata + new_metadata_id = response.result_metadata_id if new_metadata_id is not None: self.prepared_statement.result_metadata_id = new_metadata_id diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 1608a114c2..65e063d6fd 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1710,7 +1710,7 @@ def _handle_results(self, success, result, expected_failures=tuple()): if not success and isinstance(result, expected_failures): return [] elif success: - return dict_factory(*result.results) if result else [] + return dict_factory(result.column_names, result.parsed_rows) if result else [] else: raise result @@ -1723,7 +1723,7 @@ def _query_build_rows(self, query_string, build_func): responses = self.connection.wait_for_responses((query), timeout=self.timeout, fail_on_error=False) (success, response) = responses[0] if success: - result = dict_factory(*response.results) + result = dict_factory(response.column_names, response.parsed_rows) return [build_func(row) for row in result] elif isinstance(response, InvalidRequest): log.debug("user types table not found") @@ -2154,7 +2154,7 @@ def _query_all(self): # if we're connected to Cassandra < 2.0, the triggers table will not exist if triggers_success: - self.triggers_result = dict_factory(*triggers_result.results) + self.triggers_result = dict_factory(triggers_result.column_names, triggers_result.parsed_rows) else: if isinstance(triggers_result, InvalidRequest): log.debug("triggers table not found") @@ -2166,7 +2166,7 @@ def _query_all(self): # if we're connected to Cassandra < 2.1, the usertypes table will not exist if types_success: - self.types_result = dict_factory(*types_result.results) + self.types_result = dict_factory(types_result.column_names, types_result.parsed_rows) else: if isinstance(types_result, InvalidRequest): log.debug("user types table not found") @@ -2176,7 +2176,7 @@ def _query_all(self): # functions were introduced in Cassandra 2.2 if functions_success: - self.functions_result = dict_factory(*functions_result.results) + self.functions_result = dict_factory(functions_result.column_names, functions_result.parsed_rows) else: if isinstance(functions_result, InvalidRequest): log.debug("user functions table not found") @@ -2185,7 +2185,7 @@ def _query_all(self): # aggregates were introduced in Cassandra 2.2 if aggregates_success: - self.aggregates_result = dict_factory(*aggregates_result.results) + self.aggregates_result = dict_factory(aggregates_result.column_names, aggregates_result.parsed_rows) else: if isinstance(aggregates_result, InvalidRequest): log.debug("user aggregates table not found") diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 7e117792af..18118e32d0 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -611,8 +611,6 @@ class ResultMessage(_MessageType): name = 'RESULT' kind = None - results = None - paging_state = None # Names match type name in module scope. Most are imported from cassandra.cqltypes (except CUSTOM_TYPE) type_codes = _cqltypes_by_code = dict((v, globals()[k]) for k, v in type_codes.__dict__.items() if not k.startswith('_')) @@ -620,89 +618,103 @@ class ResultMessage(_MessageType): _FLAGS_GLOBAL_TABLES_SPEC = 0x0001 _HAS_MORE_PAGES_FLAG = 0x0002 _NO_METADATA_FLAG = 0x0004 + _CONTINUOUS_PAGING_FLAG = 0x40000000 + _CONTINUOUS_PAGING_LAST_FLAG = 0x80000000 _METADATA_ID_FLAG = 0x0008 - def __init__(self, kind, results, paging_state=None, col_types=None): + # These are all the things a result message might contain. They are populated according to 'kind' + column_names = None + column_types = None + parsed_rows = None + paging_state = None + continuous_paging_seq = None + continuous_paging_last = None + new_keyspace = None + column_metadata = None + query_id = None + bind_metadata = None + pk_indexes = None + schema_change_event = None + result_metadata_id = None + + def __init__(self, kind): self.kind = kind - self.results = results - self.paging_state = paging_state - self.col_types = col_types + + def recv(self, f, protocol_version, user_type_map, result_metadata): + if self.kind == RESULT_KIND_VOID: + return + elif self.kind == RESULT_KIND_ROWS: + self.recv_results_rows(f, protocol_version, user_type_map, result_metadata) + elif self.kind == RESULT_KIND_SET_KEYSPACE: + self.new_keyspace = read_string(f) + elif self.kind == RESULT_KIND_PREPARED: + self.recv_results_prepared(f, protocol_version, user_type_map) + elif self.kind == RESULT_KIND_SCHEMA_CHANGE: + self.recv_results_schema_change(f, protocol_version) + else: + raise DriverException("Unknown RESULT kind: %d" % self.kind) @classmethod def recv_body(cls, f, protocol_version, user_type_map, result_metadata): kind = read_int(f) - paging_state = None - col_types = None - if kind == RESULT_KIND_VOID: - results = None - elif kind == RESULT_KIND_ROWS: - paging_state, col_types, results, result_metadata_id = cls.recv_results_rows( - f, protocol_version, user_type_map, result_metadata) - elif kind == RESULT_KIND_SET_KEYSPACE: - ksname = read_string(f) - results = ksname - elif kind == RESULT_KIND_PREPARED: - results = cls.recv_results_prepared(f, protocol_version, user_type_map) - elif kind == RESULT_KIND_SCHEMA_CHANGE: - results = cls.recv_results_schema_change(f, protocol_version) - else: - raise DriverException("Unknown RESULT kind: %d" % kind) - return cls(kind, results, paging_state, col_types) + msg = cls(kind) + msg.recv(f, protocol_version, user_type_map, result_metadata) + return msg - @classmethod - def recv_results_rows(cls, f, protocol_version, user_type_map, result_metadata): - paging_state, column_metadata, result_metadata_id = cls.recv_results_metadata(f, user_type_map) - column_metadata = column_metadata or result_metadata + def recv_results_rows(self, f, protocol_version, user_type_map, result_metadata): + self.recv_results_metadata(f, user_type_map) + column_metadata = self.column_metadata or result_metadata rowcount = read_int(f) - rows = [cls.recv_row(f, len(column_metadata)) for _ in range(rowcount)] - colnames = [c[2] for c in column_metadata] - coltypes = [c[3] for c in column_metadata] + rows = [self.recv_row(f, len(column_metadata)) for _ in range(rowcount)] + self.column_names = [c[2] for c in column_metadata] + self.column_types = [c[3] for c in column_metadata] try: - parsed_rows = [ + self.parsed_rows = [ tuple(ctype.from_binary(val, protocol_version) - for ctype, val in zip(coltypes, row)) + for ctype, val in zip(self.column_types, row)) for row in rows] except Exception: for row in rows: for i in range(len(row)): try: - coltypes[i].from_binary(row[i], protocol_version) + self.column_typess[i].from_binary(row[i], protocol_version) except Exception as e: - raise DriverException('Failed decoding result column "%s" of type %s: %s' % (colnames[i], - coltypes[i].cql_parameterized_type(), + raise DriverException('Failed decoding result column "%s" of type %s: %s' % (self.column_names[i], + self.column_names[i].cql_parameterized_type(), str(e))) - return paging_state, coltypes, (colnames, parsed_rows), result_metadata_id - @classmethod - def recv_results_prepared(cls, f, protocol_version, user_type_map): - query_id = read_binary_string(f) + def recv_results_prepared(self, f, protocol_version, user_type_map): + self.query_id = read_binary_string(f) if ProtocolVersion.uses_prepared_metadata(protocol_version): - result_metadata_id = read_binary_string(f) + self.result_metadata_id = read_binary_string(f) else: - result_metadata_id = None - bind_metadata, pk_indexes, result_metadata, _ = cls.recv_prepared_metadata(f, protocol_version, user_type_map) - return query_id, bind_metadata, pk_indexes, result_metadata, result_metadata_id + self.result_metadata_id = None + self.recv_prepared_metadata(f, protocol_version, user_type_map) - @classmethod - def recv_results_metadata(cls, f, user_type_map): + def recv_results_metadata(self, f, user_type_map): flags = read_int(f) colcount = read_int(f) - if flags & cls._HAS_MORE_PAGES_FLAG: - paging_state = read_binary_longstring(f) - else: - paging_state = None + if flags & self._HAS_MORE_PAGES_FLAG: + self.paging_state = read_binary_longstring(f) - if flags & cls._METADATA_ID_FLAG: - result_metadata_id = read_binary_string(f) + if flags & self._METADATA_ID_FLAG: + self.result_metadata_id = read_binary_string(f) else: - result_metadata_id = None + self.result_metadata_id = None - no_meta = bool(flags & cls._NO_METADATA_FLAG) + no_meta = bool(flags & self._NO_METADATA_FLAG) if no_meta: - return paging_state, [], result_metadata_id + return None + + if flags & self._CONTINUOUS_PAGING_FLAG: + self.continuous_paging_seq = read_int(f) + self.continuous_paging_last = flags & self._CONTINUOUS_PAGING_LAST_FLAG - glob_tblspec = bool(flags & cls._FLAGS_GLOBAL_TABLES_SPEC) + if flags & self._METADATA_ID_FLAG: + self.result_metadata_id = read_binary_string(f) + + glob_tblspec = bool(flags & self._FLAGS_GLOBAL_TABLES_SPEC) if glob_tblspec: ksname = read_string(f) cfname = read_string(f) @@ -715,12 +727,12 @@ def recv_results_metadata(cls, f, user_type_map): colksname = read_string(f) colcfname = read_string(f) colname = read_string(f) - coltype = cls.read_type(f, user_type_map) + coltype = self.read_type(f, user_type_map) column_metadata.append((colksname, colcfname, colname, coltype)) - return paging_state, column_metadata, result_metadata_id - @classmethod - def recv_prepared_metadata(cls, f, protocol_version, user_type_map): + self.column_metadata = column_metadata + + def recv_prepared_metadata(self, f, protocol_version, user_type_map): flags = read_int(f) colcount = read_int(f) pk_indexes = None @@ -728,7 +740,7 @@ def recv_prepared_metadata(cls, f, protocol_version, user_type_map): num_pk_indexes = read_int(f) pk_indexes = [read_short(f) for _ in range(num_pk_indexes)] - glob_tblspec = bool(flags & cls._FLAGS_GLOBAL_TABLES_SPEC) + glob_tblspec = bool(flags & self._FLAGS_GLOBAL_TABLES_SPEC) if glob_tblspec: ksname = read_string(f) cfname = read_string(f) @@ -741,18 +753,17 @@ def recv_prepared_metadata(cls, f, protocol_version, user_type_map): colksname = read_string(f) colcfname = read_string(f) colname = read_string(f) - coltype = cls.read_type(f, user_type_map) + coltype = self.read_type(f, user_type_map) bind_metadata.append(ColumnMetadata(colksname, colcfname, colname, coltype)) if protocol_version >= 2: - _, result_metadata, result_metadata_id = cls.recv_results_metadata(f, user_type_map) - return bind_metadata, pk_indexes, result_metadata, result_metadata_id - else: - return bind_metadata, pk_indexes, None, None + self.recv_results_metadata(f, user_type_map) - @classmethod - def recv_results_schema_change(cls, f, protocol_version): - return EventMessage.recv_schema_change(f, protocol_version) + self.bind_metadata = bind_metadata + self.pk_indexes = pk_indexes + + def recv_results_schema_change(self, f, protocol_version): + self.schema_change_event = EventMessage.recv_schema_change(f, protocol_version) @classmethod def read_type(cls, f, user_type_map): @@ -1185,7 +1196,7 @@ class FastResultMessage(ResultMessage): """ # type_codes = ResultMessage.type_codes.copy() code_to_type = dict((v, k) for k, v in ResultMessage.type_codes.items()) - recv_results_rows = classmethod(make_recv_results_rows(colparser)) + recv_results_rows = make_recv_results_rows(colparser) class CythonProtocolHandler(_ProtocolHandler): """ diff --git a/cassandra/row_parser.pyx b/cassandra/row_parser.pyx index 49fafd32e5..3a4b2f4604 100644 --- a/cassandra/row_parser.pyx +++ b/cassandra/row_parser.pyx @@ -19,23 +19,23 @@ from cassandra.deserializers import make_deserializers include "ioutils.pyx" def make_recv_results_rows(ColumnParser colparser): - def recv_results_rows(cls, f, int protocol_version, user_type_map, result_metadata): + def recv_results_rows(self, f, int protocol_version, user_type_map, result_metadata): """ Parse protocol data given as a BytesIO f into a set of columns (e.g. list of tuples) This is used as the recv_results_rows method of (Fast)ResultMessage """ - paging_state, column_metadata, result_metadata_id = cls.recv_results_metadata(f, user_type_map) + self.recv_results_metadata(f, user_type_map) - column_metadata = column_metadata or result_metadata + column_metadata = self.column_metadata or result_metadata - colnames = [c[2] for c in column_metadata] - coltypes = [c[3] for c in column_metadata] + self.column_names = [c[2] for c in column_metadata] + self.column_types = [c[3] for c in column_metadata] - desc = ParseDesc(colnames, coltypes, make_deserializers(coltypes), + desc = ParseDesc(self.column_names, self.column_types, make_deserializers(self.column_types), protocol_version) reader = BytesIOReader(f.read()) try: - parsed_rows = colparser.parse_rows(reader, desc) + self.parsed_rows = colparser.parse_rows(reader, desc) except Exception as e: # Use explicitly the TupleRowParser to display better error messages for column decoding failures rowparser = TupleRowParser() @@ -45,6 +45,4 @@ def make_recv_results_rows(ColumnParser colparser): for i in range(rowcount): rowparser.unpack_row(reader, desc) - return (paging_state, coltypes, (colnames, parsed_rows), result_metadata_id) - return recv_results_rows From 60775d6ed9394f67b5177fefa17b8b7aac2d6481 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 4 Sep 2019 11:15:17 -0400 Subject: [PATCH 1564/2005] ResultMessage.recv_prepared_metadata --- cassandra/protocol.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 18118e32d0..c379bbf603 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -698,14 +698,9 @@ def recv_results_metadata(self, f, user_type_map): if flags & self._HAS_MORE_PAGES_FLAG: self.paging_state = read_binary_longstring(f) - if flags & self._METADATA_ID_FLAG: - self.result_metadata_id = read_binary_string(f) - else: - self.result_metadata_id = None - no_meta = bool(flags & self._NO_METADATA_FLAG) if no_meta: - return None + return if flags & self._CONTINUOUS_PAGING_FLAG: self.continuous_paging_seq = read_int(f) @@ -845,6 +840,7 @@ def send_body(self, f, protocol_version): class ExecuteMessage(_MessageType): opcode = 0x0A name = 'EXECUTE' + def __init__(self, query_id, query_params, consistency_level, serial_consistency_level=None, fetch_size=None, paging_state=None, timestamp=None, skip_meta=False, From 149d351a9d6d8f20f3b033fb4f734e23570d7fc8 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 4 Sep 2019 11:18:30 -0500 Subject: [PATCH 1565/2005] Update cloud getting started code block --- docs/getting_started.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 8b72915bf3..042a33fe61 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -408,6 +408,11 @@ Connecting to DataStax Cloud ---------------------------- 1. Download the secure connect bundle from your DataStax Constellation account. 2. Connect to your cloud cluster with:: +.. code-block:: python + + from cassandra.cluster import Cluster + from cassandra.auth import PlainTextAuthProvider + cloud_config = { 'secure_connect_bundle': '/path/to/secure-connect-dbname.zip' } From e233488cbd9db9fcbfe061b75e9a9b2bc82f6ca7 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 4 Sep 2019 15:10:04 -0400 Subject: [PATCH 1566/2005] Started to merge and fix standard tests --- tests/integration/__init__.py | 4 +- .../standard/test_authentication.py | 9 +- .../test_authentication_misconfiguration.py | 2 +- tests/integration/standard/test_cluster.py | 220 +++++++++++------- tests/integration/standard/test_concurrent.py | 11 +- .../standard/test_custom_cluster.py | 6 +- .../standard/test_custom_protocol_handler.py | 103 +++++--- .../standard/test_cython_protocol_handlers.py | 23 +- 8 files changed, 241 insertions(+), 137 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index c897cf03f6..845ca10711 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -193,7 +193,7 @@ def _get_dse_version_from_cass(cass_version): #This changes the default contact_point parameter in Cluster -def set_default_cluster_ip(): +def set_default_cass_ip(): if CASSANDRA_IP.startswith("127.0.0."): return defaults = list(Cluster.__init__.__defaults__) @@ -445,7 +445,7 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, dse_cluster = True if DSE_VERSION else False if not workloads: workloads = [] - set_default_cluster_ip() + set_default_cass_ip() if ccm_options is None and DSE_VERSION: ccm_options = {"version": DSE_VERSION} diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index f8ce42315c..4c32e9de3d 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -19,7 +19,7 @@ from cassandra.auth import PlainTextAuthProvider, SASLClient, SaslAuthProvider from tests.integration import use_singledc, get_cluster, remove_cluster, PROTOCOL_VERSION, CASSANDRA_IP, \ - set_default_cass_ip, USE_CASS_EXTERNAL + set_default_cass_ip, USE_CASS_EXTERNAL, start_cluster_wait_for_up from tests.integration.util import assert_quiescent_pool_state try: @@ -43,10 +43,7 @@ def setup_module(): 'authorizer': 'CassandraAuthorizer'} ccm_cluster.set_configuration_options(config_options) log.debug("Starting ccm test cluster with %s", config_options) - ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True) - # there seems to be some race, with some versions of C* taking longer to - # get the auth (and default user) setup. Sleep here to give it a chance - time.sleep(10) + start_cluster_wait_for_up(ccm_cluster) else: set_default_cass_ip() @@ -104,7 +101,7 @@ def test_auth_connect(self): try: cluster = self.cluster_as(user, passwd) - session = cluster.connect() + session = cluster.connect(wait_for_all_pools=True) try: self.assertTrue(session.execute('SELECT release_version FROM system.local')) assert_quiescent_pool_state(self, cluster, wait=1) diff --git a/tests/integration/standard/test_authentication_misconfiguration.py b/tests/integration/standard/test_authentication_misconfiguration.py index 5998693e53..caac84dd25 100644 --- a/tests/integration/standard/test_authentication_misconfiguration.py +++ b/tests/integration/standard/test_authentication_misconfiguration.py @@ -29,7 +29,7 @@ def setUpClass(cls): 'authenticator': 'PasswordAuthenticator', 'authorizer': 'CassandraAuthorizer', }) - ccm_cluster.start() + ccm_cluster.start(wait_for_binary_proto=True) cls.ccm_cluster = ccm_cluster diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 768fa778cd..d3636bea88 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -42,10 +42,13 @@ from tests import notwindows from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, CASSANDRA_VERSION, \ execute_until_pass, execute_with_long_wait_retry, get_node, MockLoggingHandler, get_unsupported_lower_protocol, \ - get_unsupported_upper_protocol, protocolv5, local, CASSANDRA_IP, greaterthanorequalcass30, lessthanorequalcass40 + get_unsupported_upper_protocol, protocolv5, local, CASSANDRA_IP, greaterthanorequalcass30, lessthanorequalcass40, \ + DSE_VERSION from tests.integration.util import assert_quiescent_pool_state import sys +log = logging.getLogger(__name__) + def setup_module(): use_singledc() @@ -77,9 +80,10 @@ def test_ignored_host_up(self): @test_category connection """ - ingored_host_policy = IgnoredHostPolicy(["127.0.0.2", "127.0.0.3"]) - cluster = Cluster(protocol_version=PROTOCOL_VERSION, load_balancing_policy=ingored_host_policy) - session = cluster.connect() + ignored_host_policy = IgnoredHostPolicy(["127.0.0.2", "127.0.0.3"]) + cluster = Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=ignored_host_policy)}) + cluster.connect() for host in cluster.metadata.all_hosts(): if str(host) == "127.0.0.1:9042": self.assertTrue(host.is_up) @@ -248,7 +252,13 @@ def test_protocol_negotiation(self): updated_protocol_version = session._protocol_version updated_cluster_version = cluster.protocol_version # Make sure the correct protocol was selected by default - if CASSANDRA_VERSION >= Version('2.2'): + if DSE_VERSION and DSE_VERSION >= Version("6.0"): + self.assertEqual(updated_protocol_version, cassandra.ProtocolVersion.DSE_V2) + self.assertEqual(updated_cluster_version, cassandra.ProtocolVersion.DSE_V2) + elif DSE_VERSION and DSE_VERSION >= Version("5.1"): + self.assertEqual(updated_protocol_version, cassandra.ProtocolVersion.DSE_V1) + self.assertEqual(updated_cluster_version, cassandra.ProtocolVersion.DSE_V1) + elif CASSANDRA_VERSION >= Version('2.2'): self.assertEqual(updated_protocol_version, 4) self.assertEqual(updated_cluster_version, 4) elif CASSANDRA_VERSION >= Version('2.1'): @@ -286,6 +296,7 @@ def test_invalid_protocol_negotation(self): """ upper_bound = get_unsupported_upper_protocol() + log.debug('got upper_bound of {}'.format(upper_bound)) if upper_bound is not None: cluster = Cluster(protocol_version=upper_bound) with self.assertRaises(NoHostAvailable): @@ -293,6 +304,7 @@ def test_invalid_protocol_negotation(self): cluster.shutdown() lower_bound = get_unsupported_lower_protocol() + log.debug('got lower_bound of {}'.format(lower_bound)) if lower_bound is not None: cluster = Cluster(protocol_version=lower_bound) with self.assertRaises(NoHostAvailable): @@ -334,9 +346,7 @@ def test_default_connections(self): """ Cluster( - load_balancing_policy=RoundRobinPolicy(), reconnection_policy=ExponentialReconnectionPolicy(1.0, 600.0), - default_retry_policy=RetryPolicy(), conviction_policy_factory=SimpleConvictionPolicy, protocol_version=PROTOCOL_VERSION ) @@ -493,78 +503,78 @@ def test_refresh_schema_type(self): @notwindows def test_refresh_schema_no_wait(self): contact_points = [CASSANDRA_IP] - cluster = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=10, - contact_points=contact_points, - load_balancing_policy=HostFilterPolicy( - RoundRobinPolicy(), lambda host: host.address == CASSANDRA_IP - )) - session = cluster.connect() - - schema_ver = session.execute("SELECT schema_version FROM system.local WHERE key='local'")[0][0] - new_schema_ver = uuid4() - session.execute("UPDATE system.local SET schema_version=%s WHERE key='local'", (new_schema_ver,)) + with Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=10, + contact_points=contact_points, + execution_profiles= + {EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy= + HostFilterPolicy( + RoundRobinPolicy(), lambda host: host.address == CASSANDRA_IP + ))}) as cluster: + session = cluster.connect() + schema_ver = session.execute("SELECT schema_version FROM system.local WHERE key='local'")[0][0] + new_schema_ver = uuid4() + session.execute("UPDATE system.local SET schema_version=%s WHERE key='local'", (new_schema_ver,)) - try: - agreement_timeout = 1 - - # cluster agreement wait exceeded - c = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=agreement_timeout) - c.connect() - self.assertTrue(c.metadata.keyspaces) - - # cluster agreement wait used for refresh - original_meta = c.metadata.keyspaces - start_time = time.time() - self.assertRaisesRegexp(Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata) - end_time = time.time() - self.assertGreaterEqual(end_time - start_time, agreement_timeout) - self.assertIs(original_meta, c.metadata.keyspaces) - - # refresh wait overrides cluster value - original_meta = c.metadata.keyspaces - start_time = time.time() - c.refresh_schema_metadata(max_schema_agreement_wait=0) - end_time = time.time() - self.assertLess(end_time - start_time, agreement_timeout) - self.assertIsNot(original_meta, c.metadata.keyspaces) - self.assertEqual(original_meta, c.metadata.keyspaces) - - c.shutdown() - - refresh_threshold = 0.5 - # cluster agreement bypass - c = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=0) - start_time = time.time() - s = c.connect() - end_time = time.time() - self.assertLess(end_time - start_time, refresh_threshold) - self.assertTrue(c.metadata.keyspaces) - - # cluster agreement wait used for refresh - original_meta = c.metadata.keyspaces - start_time = time.time() - c.refresh_schema_metadata() - end_time = time.time() - self.assertLess(end_time - start_time, refresh_threshold) - self.assertIsNot(original_meta, c.metadata.keyspaces) - self.assertEqual(original_meta, c.metadata.keyspaces) - - # refresh wait overrides cluster value - original_meta = c.metadata.keyspaces - start_time = time.time() - self.assertRaisesRegexp(Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata, - max_schema_agreement_wait=agreement_timeout) - end_time = time.time() - self.assertGreaterEqual(end_time - start_time, agreement_timeout) - self.assertIs(original_meta, c.metadata.keyspaces) - c.shutdown() - finally: - # TODO once fixed this connect call - session = cluster.connect() - session.execute("UPDATE system.local SET schema_version=%s WHERE key='local'", (schema_ver,)) + try: + agreement_timeout = 1 + + # cluster agreement wait exceeded + c = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=agreement_timeout) + c.connect() + self.assertTrue(c.metadata.keyspaces) + + # cluster agreement wait used for refresh + original_meta = c.metadata.keyspaces + start_time = time.time() + self.assertRaisesRegexp(Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata) + end_time = time.time() + self.assertGreaterEqual(end_time - start_time, agreement_timeout) + self.assertIs(original_meta, c.metadata.keyspaces) + + # refresh wait overrides cluster value + original_meta = c.metadata.keyspaces + start_time = time.time() + c.refresh_schema_metadata(max_schema_agreement_wait=0) + end_time = time.time() + self.assertLess(end_time - start_time, agreement_timeout) + self.assertIsNot(original_meta, c.metadata.keyspaces) + self.assertEqual(original_meta, c.metadata.keyspaces) + + c.shutdown() + + refresh_threshold = 0.5 + # cluster agreement bypass + c = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=0) + start_time = time.time() + s = c.connect() + end_time = time.time() + self.assertLess(end_time - start_time, refresh_threshold) + self.assertTrue(c.metadata.keyspaces) + + # cluster agreement wait used for refresh + original_meta = c.metadata.keyspaces + start_time = time.time() + c.refresh_schema_metadata() + end_time = time.time() + self.assertLess(end_time - start_time, refresh_threshold) + self.assertIsNot(original_meta, c.metadata.keyspaces) + self.assertEqual(original_meta, c.metadata.keyspaces) + + # refresh wait overrides cluster value + original_meta = c.metadata.keyspaces + start_time = time.time() + self.assertRaisesRegexp(Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata, + max_schema_agreement_wait=agreement_timeout) + end_time = time.time() + self.assertGreaterEqual(end_time - start_time, agreement_timeout) + self.assertIs(original_meta, c.metadata.keyspaces) + c.shutdown() + finally: + # TODO once fixed this connect call + session = cluster.connect() + session.execute("UPDATE system.local SET schema_version=%s WHERE key='local'", (schema_ver,)) - cluster.shutdown() def test_trace(self): """ @@ -1088,6 +1098,35 @@ def test_add_profile_timeout(self): else: raise Exception("add_execution_profile didn't timeout after {0} retries".format(max_retry_count)) + def test_execute_query_timeout(self): + with Cluster() as cluster: + session = cluster.connect(wait_for_all_pools=True) + query = "SELECT * FROM system.local" + + # default is passed down + default_profile = cluster.profile_manager.profiles[EXEC_PROFILE_DEFAULT] + rs = session.execute(query) + self.assertEqual(rs.response_future.timeout, default_profile.request_timeout) + + # tiny timeout times out as expected + tmp_profile = copy(default_profile) + tmp_profile.request_timeout = sys.float_info.min + + max_retry_count = 10 + for _ in range(max_retry_count): + start = time.time() + try: + with self.assertRaises(cassandra.OperationTimedOut): + session.execute(query, execution_profile=tmp_profile) + break + except: + import traceback + traceback.print_exc() + end = time.time() + self.assertAlmostEqual(start, end, 1) + else: + raise Exception("session.execute didn't time out in {0} tries".format(max_retry_count)) + def test_replicas_are_queried(self): """ Test that replicas are queried first for TokenAwarePolicy. A table with RF 1 @@ -1103,8 +1142,11 @@ def test_replicas_are_queried(self): @test_category metadata """ queried_hosts = set() + tap_profile = ExecutionProfile( + load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()) + ) with Cluster(protocol_version=PROTOCOL_VERSION, - load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy())) as cluster: + execution_profiles={EXEC_PROFILE_DEFAULT: tap_profile}) as cluster: session = cluster.connect(wait_for_all_pools=True) session.execute(''' CREATE TABLE test1rf.table_with_big_key ( @@ -1121,14 +1163,17 @@ def test_replicas_are_queried(self): queried_hosts = self._assert_replica_queried(trace, only_replicas=True) last_i = i + hfp_profile = ExecutionProfile( + load_balancing_policy=HostFilterPolicy(RoundRobinPolicy(), + predicate=lambda host: host.address != only_replica) + ) only_replica = queried_hosts.pop() log = logging.getLogger(__name__) log.info("The only replica found was: {}".format(only_replica)) available_hosts = [host for host in ["127.0.0.1", "127.0.0.2", "127.0.0.3"] if host != only_replica] with Cluster(contact_points=available_hosts, protocol_version=PROTOCOL_VERSION, - load_balancing_policy=HostFilterPolicy(RoundRobinPolicy(), - predicate=lambda host: host.address != only_replica)) as cluster: + execution_profiles={EXEC_PROFILE_DEFAULT: hfp_profile}) as cluster: session = cluster.connect(wait_for_all_pools=True) prepared = session.prepare("""SELECT * from test1rf.table_with_big_key @@ -1399,7 +1444,7 @@ class DontPrepareOnIgnoredHostsTest(unittest.TestCase): def test_prepare_on_ignored_hosts(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION, - load_balancing_policy=self.ignore_node_3_policy) + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=self.ignore_node_3_policy)}) session = cluster.connect() cluster.reprepare_on_up, cluster.prepare_on_all_hosts = True, False @@ -1436,12 +1481,17 @@ class DuplicateRpcTest(unittest.TestCase): lambda host: host.address == "127.0.0.1") def setUp(self): - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, load_balancing_policy=self.load_balancing_policy) + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=self.load_balancing_policy)}) self.session = self.cluster.connect() - self.session.execute("UPDATE system.peers SET rpc_address = '127.0.0.1' WHERE peer='127.0.0.2'") + + self.address_column = "native_transport_address" if DSE_VERSION and DSE_VERSION >= Version("6.0") else "rpc_address" + self.session.execute("UPDATE system.peers SET {} = '127.0.0.1' WHERE peer='127.0.0.2'". + format(self.address_column)) def tearDown(self): - self.session.execute("UPDATE system.peers SET rpc_address = '127.0.0.2' WHERE peer='127.0.0.2'") + self.session.execute("UPDATE system.peers SET {} = '127.0.0.2' WHERE peer='127.0.0.2'". + format(self.address_column)) self.cluster.shutdown() def test_duplicate(self): @@ -1459,7 +1509,9 @@ def test_duplicate(self): mock_handler = MockLoggingHandler() logger = logging.getLogger(cassandra.cluster.__name__) logger.addHandler(mock_handler) - test_cluster = Cluster(protocol_version=PROTOCOL_VERSION, load_balancing_policy=self.load_balancing_policy) + test_cluster = Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=self.load_balancing_policy)}) + test_cluster.connect() warnings = mock_handler.messages.get("warning") self.assertEqual(len(warnings), 1) diff --git a/tests/integration/standard/test_concurrent.py b/tests/integration/standard/test_concurrent.py index c85bb64b94..954e5f28f4 100644 --- a/tests/integration/standard/test_concurrent.py +++ b/tests/integration/standard/test_concurrent.py @@ -13,12 +13,11 @@ # limitations under the License. from itertools import cycle -from six import next import sys, logging, traceback from cassandra import InvalidRequest, ConsistencyLevel, ReadTimeout, WriteTimeout, OperationTimedOut, \ ReadFailure, WriteFailure -from cassandra.cluster import Cluster +from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.concurrent import execute_concurrent, execute_concurrent_with_args, ExecutionResult from cassandra.policies import HostDistance from cassandra.query import tuple_factory, SimpleStatement @@ -43,11 +42,15 @@ class ClusterTests(unittest.TestCase): @classmethod def setUpClass(cls): - cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.cluster = Cluster( + protocol_version=PROTOCOL_VERSION, + execution_profiles = { + EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=tuple_factory) + } + ) if PROTOCOL_VERSION < 3: cls.cluster.set_core_connections_per_host(HostDistance.LOCAL, 1) cls.session = cls.cluster.connect() - cls.session.row_factory = tuple_factory @classmethod def tearDownClass(cls): diff --git a/tests/integration/standard/test_custom_cluster.py b/tests/integration/standard/test_custom_cluster.py index 5ca5c5c373..9208c35cea 100644 --- a/tests/integration/standard/test_custom_cluster.py +++ b/tests/integration/standard/test_custom_cluster.py @@ -31,9 +31,9 @@ def setup_module(): # can't use wait_for_binary_proto cause ccm tries on port 9042 ccm_cluster.start(wait_for_binary_proto=False) # wait until all nodes are up - wait_until_not_raised(lambda: Cluster(['127.0.0.1'], port=9046).connect().shutdown(), 1, 10) - wait_until_not_raised(lambda: Cluster(['127.0.0.2'], port=9046).connect().shutdown(), 1, 10) - wait_until_not_raised(lambda: Cluster(['127.0.0.3'], port=9046).connect().shutdown(), 1, 10) + wait_until_not_raised(lambda: Cluster(['127.0.0.1'], port=9046).connect().shutdown(), 1, 20) + wait_until_not_raised(lambda: Cluster(['127.0.0.2'], port=9046).connect().shutdown(), 1, 20) + wait_until_not_raised(lambda: Cluster(['127.0.0.3'], port=9046).connect().shutdown(), 1, 20) def teardown_module(): diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index e76972b9b8..77b777c5fe 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -19,11 +19,11 @@ from cassandra.protocol import ProtocolHandler, ResultMessage, QueryMessage, UUIDType, read_int from cassandra.query import tuple_factory, SimpleStatement -from cassandra.cluster import Cluster, ResponseFuture +from cassandra.cluster import Cluster, ResponseFuture, ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra import ProtocolVersion, ConsistencyLevel from tests.integration import use_singledc, PROTOCOL_VERSION, drop_keyspace_shutdown_cluster, \ - greaterthanorequalcass30, execute_with_long_wait_retry + greaterthanorequalcass30, execute_with_long_wait_retry, greaterthanorequaldse51 from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES from tests.integration.standard.utils import create_table_with_all_types, get_all_primitive_params from six import binary_type @@ -66,18 +66,16 @@ def test_custom_raw_uuid_row_results(self): """ # Ensure that we get normal uuid back first - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=tuple_factory)}) session = cluster.connect(keyspace="custserdes") - session.row_factory = tuple_factory result = session.execute("SELECT schema_version FROM system.local") uuid_type = result[0][0] self.assertEqual(type(uuid_type), uuid.UUID) # use our custom protocol handlder - session.client_protocol_handler = CustomTestRawRowType - session.row_factory = tuple_factory result_set = session.execute("SELECT schema_version FROM system.local") raw_value = result_set[0][0] self.assertTrue(isinstance(raw_value, binary_type)) @@ -105,10 +103,10 @@ def test_custom_raw_row_results_all_types(self): @test_category data_types:serialization """ # Connect using a custom protocol handler that tracks the various types the result message is used with. - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=tuple_factory)}) session = cluster.connect(keyspace="custserdes") session.client_protocol_handler = CustomProtocolHandlerResultMessageTracked - session.row_factory = tuple_factory colnames = create_table_with_all_types("alltypes", session, 1) columns_string = ", ".join(colnames) @@ -137,6 +135,61 @@ def test_protocol_divergence_v4_fail_by_flag_uses_int(self): self._protocol_divergence_fail_by_flag_uses_int(ProtocolVersion.V4, uses_int_query_flag=False, int_flag=True) + @greaterthanorequaldse51 + def test_protocol_v5_uses_flag_int(self): + """ + Test to validate that the _PAGE_SIZE_FLAG is treated correctly using write_uint for V5 + + @since DSE 2.0b3 GRAPH 1.0b1 + @jira_ticket PYTHON-694 + @expected_result the fetch_size=1 parameter will be honored + + @test_category connection + """ + self._protocol_divergence_fail_by_flag_uses_int(ProtocolVersion.V5, uses_int_query_flag=True, beta=True, + int_flag=True) + + @greaterthanorequaldse51 + def test_protocol_dsev1_uses_flag_int(self): + """ + Test to validate that the _PAGE_SIZE_FLAG is treated correctly using write_uint for DSE_V1 + + @since DSE 2.0b3 GRAPH 1.0b1 + @jira_ticket PYTHON-694 + @expected_result the fetch_size=1 parameter will be honored + + @test_category connection + """ + self._protocol_divergence_fail_by_flag_uses_int(ProtocolVersion.DSE_V1, uses_int_query_flag=True, + int_flag=True) + + @greaterthanorequaldse51 + def test_protocol_divergence_v5_fail_by_flag_uses_int(self): + """ + Test to validate that the _PAGE_SIZE_FLAG is treated correctly using write_uint for V5 + + @since DSE 2.0b3 GRAPH 1.0b1 + @jira_ticket PYTHON-694 + @expected_result the fetch_size=1 parameter will be honored + + @test_category connection + """ + self._protocol_divergence_fail_by_flag_uses_int(ProtocolVersion.V5, uses_int_query_flag=False, beta=True, + int_flag=False) + + @greaterthanorequaldse51 + def test_protocol_divergence_dsev1_fail_by_flag_uses_int(self): + """ + Test to validate that the _PAGE_SIZE_FLAG is treated correctly using write_uint for DSE_V1 + + @since DSE 2.0b3 GRAPH 1.0b1 + @jira_ticket PYTHON-694 + @expected_result the fetch_size=1 parameter will be honored + + @test_category connection + """ + self._protocol_divergence_fail_by_flag_uses_int(ProtocolVersion.DSE_V1, uses_int_query_flag=False, + int_flag=False) def _send_query_message(self, session, timeout, **kwargs): query = "SELECT * FROM test3rf.test" @@ -177,14 +230,13 @@ class CustomResultMessageRaw(ResultMessage): my_type_codes[0xc] = UUIDType type_codes = my_type_codes - @classmethod - def recv_results_rows(cls, f, protocol_version, user_type_map, result_metadata): - paging_state, column_metadata, result_metadata_id = cls.recv_results_metadata(f, user_type_map) + def recv_results_rows(self, f, protocol_version, user_type_map, result_metadata): + self.recv_results_metadata(f, user_type_map) + column_metadata = self.column_metadata or result_metadata rowcount = read_int(f) - rows = [cls.recv_row(f, len(column_metadata)) for _ in range(rowcount)] - colnames = [c[2] for c in column_metadata] - coltypes = [c[3] for c in column_metadata] - return paging_state, coltypes, (colnames, rows), result_metadata_id + self.parsed_rows = [self.recv_row(f, len(column_metadata)) for _ in range(rowcount)] + self.column_names = [c[2] for c in column_metadata] + self.column_types = [c[3] for c in column_metadata] class CustomTestRawRowType(ProtocolHandler): @@ -207,19 +259,18 @@ class CustomResultMessageTracked(ResultMessage): type_codes = my_type_codes checked_rev_row_set = set() - @classmethod - def recv_results_rows(cls, f, protocol_version, user_type_map, result_metadata): - paging_state, column_metadata, result_metadata_id = cls.recv_results_metadata(f, user_type_map) + def recv_results_rows(self, f, protocol_version, user_type_map, result_metadata): + self.recv_results_metadata(f, user_type_map) + column_metadata = self.column_metadata or result_metadata rowcount = read_int(f) - rows = [cls.recv_row(f, len(column_metadata)) for _ in range(rowcount)] - colnames = [c[2] for c in column_metadata] - coltypes = [c[3] for c in column_metadata] - cls.checked_rev_row_set.update(coltypes) - parsed_rows = [ + rows = [self.recv_row(f, len(column_metadata)) for _ in range(rowcount)] + self.column_names = [c[2] for c in column_metadata] + self.column_types = [c[3] for c in column_metadata] + self.checked_rev_row_set.update(self.column_types) + self.parsed_rows = [ tuple(ctype.from_binary(val, protocol_version) - for ctype, val in zip(coltypes, row)) + for ctype, val in zip(self.column_types, row)) for row in rows] - return paging_state, coltypes, (colnames, parsed_rows), result_metadata_id class CustomProtocolHandlerResultMessageTracked(ProtocolHandler): @@ -230,5 +281,3 @@ class CustomProtocolHandlerResultMessageTracked(ProtocolHandler): my_opcodes = ProtocolHandler.message_types_by_opcode.copy() my_opcodes[CustomResultMessageTracked.opcode] = CustomResultMessageTracked message_types_by_opcode = my_opcodes - - diff --git a/tests/integration/standard/test_cython_protocol_handlers.py b/tests/integration/standard/test_cython_protocol_handlers.py index 593dcba50e..c6be3760fb 100644 --- a/tests/integration/standard/test_cython_protocol_handlers.py +++ b/tests/integration/standard/test_cython_protocol_handlers.py @@ -10,12 +10,13 @@ from itertools import count from cassandra.query import tuple_factory -from cassandra.cluster import Cluster, NoHostAvailable +from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.concurrent import execute_concurrent_with_args from cassandra.protocol import ProtocolHandler, LazyProtocolHandler, NumpyProtocolHandler from cassandra.cython_deps import HAVE_CYTHON, HAVE_NUMPY from tests import VERIFY_CYTHON -from tests.integration import use_singledc, PROTOCOL_VERSION, notprotocolv1, drop_keyspace_shutdown_cluster, BasicSharedKeyspaceUnitTestCase, execute_with_retry_tolerant, greaterthancass21 +from tests.integration import use_singledc, PROTOCOL_VERSION, notprotocolv1, \ + drop_keyspace_shutdown_cluster, BasicSharedKeyspaceUnitTestCase, greaterthancass21 from tests.integration.datatype_utils import update_datatypes from tests.integration.standard.utils import ( create_table_with_all_types, get_all_primitive_params, get_primitive_datatypes) @@ -59,16 +60,15 @@ def test_cython_lazy_parser(self): """ verify_iterator_data(self.assertEqual, get_data(LazyProtocolHandler)) - @notprotocolv1 @numpytest def test_cython_lazy_results_paged(self): """ Test Cython-based parser that returns an iterator, over multiple pages """ # arrays = { 'a': arr1, 'b': arr2, ... } - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=tuple_factory)}) session = cluster.connect(keyspace="testspace") - session.row_factory = tuple_factory session.client_protocol_handler = LazyProtocolHandler session.default_fetch_size = 2 @@ -99,9 +99,9 @@ def test_numpy_results_paged(self): Test Numpy-based parser that returns a NumPy array """ # arrays = { 'a': arr1, 'b': arr2, ... } - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=tuple_factory)}) session = cluster.connect(keyspace="testspace") - session.row_factory = tuple_factory session.client_protocol_handler = NumpyProtocolHandler session.default_fetch_size = 2 @@ -181,12 +181,12 @@ def get_data(protocol_handler): """ Get data from the test table. """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=tuple_factory)}) session = cluster.connect(keyspace="testspace") # use our custom protocol handler session.client_protocol_handler = protocol_handler - session.row_factory = tuple_factory results = session.execute("SELECT * FROM test_table") cluster.shutdown() @@ -210,6 +210,10 @@ def verify_iterator_data(assertEqual, results): class NumpyNullTest(BasicSharedKeyspaceUnitTestCase): + @classmethod + def setUpClass(cls): + cls.common_setup(1, execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=tuple_factory)}) + @numpytest @greaterthancass21 def test_null_types(self): @@ -223,7 +227,6 @@ def test_null_types(self): @test_category data_types:serialization """ s = self.session - s.row_factory = tuple_factory s.client_protocol_handler = NumpyProtocolHandler table = "%s.%s" % (self.keyspace_name, self.function_table_name) From 59225ea1b0d288786e2d14f500a2eee570d82381 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 5 Sep 2019 09:36:23 -0400 Subject: [PATCH 1567/2005] Add Host.dse_workloads --- cassandra/cluster.py | 2 ++ cassandra/pool.py | 7 +++++++ 2 files changed, 9 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 3811be12e3..a929186332 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3213,6 +3213,7 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, host.release_version = local_row.get("release_version") host.dse_version = local_row.get("dse_version") host.dse_workload = local_row.get("workload") + host.dse_workloads = local_row.get("workloads") if partitioner and tokens: token_map[host] = tokens @@ -3250,6 +3251,7 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, host.release_version = row.get("release_version") host.dse_version = row.get("dse_version") host.dse_workload = row.get("workload") + host.dse_workloads = row.get("workloads") if partitioner and tokens: token_map[host] = tokens diff --git a/cassandra/pool.py b/cassandra/pool.py index cd814ef067..790b1d2a6e 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -107,6 +107,13 @@ class Host(object): DSE with this property available. Not queried if :attr:`~.Cluster.token_metadata_enabled` is ``False``. """ + dse_workloads = None + """ + DSE workloads set, queried from the control connection system tables. Only populated when connecting to + DSE with this property available (added in DSE 5.1). + Not queried if :attr:`~.Cluster.token_metadata_enabled` is ``False``. + """ + _datacenter = None _rack = None _reconnection_handler = None From 69a870c3dd251d7b1f36e5519ec2e1a80d637904 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 5 Sep 2019 14:41:52 -0400 Subject: [PATCH 1568/2005] Add lessthancass40 decorator --- tests/integration/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 845ca10711..100831cd17 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -335,7 +335,8 @@ def _id_and_mark(f): greaterthanorequalcass3_10 = unittest.skipUnless(CASSANDRA_VERSION >= Version('3.10'), 'Cassandra version 3.10 or greater required') greaterthanorequalcass3_11 = unittest.skipUnless(CASSANDRA_VERSION >= Version('3.11'), 'Cassandra version 3.10 or greater required') greaterthanorequalcass40 = unittest.skipUnless(CASSANDRA_VERSION >= Version('4.0'), 'Cassandra version 4.0 or greater required') -lessthanorequalcass40 = unittest.skipIf(CASSANDRA_VERSION >= Version('4.0'), 'Cassandra version 4.0 or greater required') +lessthanorequalcass40 = unittest.skipUnless(CASSANDRA_VERSION <= Version('4.0'), 'Cassandra version less or equal to 4.0 required') +lessthancass40 = unittest.skipUnless(CASSANDRA_VERSION < Version('4.0'), 'Cassandra version less than 4.0 required') lessthancass30 = unittest.skipUnless(CASSANDRA_VERSION < Version('3.0'), 'Cassandra version less then 3.0 required') greaterthanorequaldse67 = unittest.skipUnless(DSE_VERSION and DSE_VERSION >= Version('6.7'), "DSE 6.7 or greater required for this test") greaterthanorequaldse60 = unittest.skipUnless(DSE_VERSION and DSE_VERSION >= Version('6.0'), "DSE 6.0 or greater required for this test") From d38a17e385c1f45f1dd85f76fe1277eaf7515f3f Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 5 Sep 2019 14:43:50 -0400 Subject: [PATCH 1569/2005] Merged metadata.py --- cassandra/metadata.py | 128 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 112 insertions(+), 16 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 65e063d6fd..e9c03f583b 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -17,7 +17,6 @@ from collections import defaultdict from functools import total_ordering from hashlib import md5 -from itertools import islice, cycle import json import logging import re @@ -50,10 +49,10 @@ cql_keywords = set(( 'add', 'aggregate', 'all', 'allow', 'alter', 'and', 'apply', 'as', 'asc', 'ascii', 'authorize', 'batch', 'begin', 'bigint', 'blob', 'boolean', 'by', 'called', 'clustering', 'columnfamily', 'compact', 'contains', 'count', - 'counter', 'create', 'custom', 'date', 'decimal', 'delete', 'desc', 'describe', 'distinct', 'double', 'drop', + 'counter', 'create', 'custom', 'date', 'decimal', 'delete', 'desc', 'describe', 'deterministic', 'distinct', 'double', 'drop', 'entries', 'execute', 'exists', 'filtering', 'finalfunc', 'float', 'from', 'frozen', 'full', 'function', 'functions', 'grant', 'if', 'in', 'index', 'inet', 'infinity', 'initcond', 'input', 'insert', 'int', 'into', 'is', 'json', - 'key', 'keys', 'keyspace', 'keyspaces', 'language', 'limit', 'list', 'login', 'map', 'materialized', 'modify', 'nan', 'nologin', + 'key', 'keys', 'keyspace', 'keyspaces', 'language', 'limit', 'list', 'login', 'map', 'materialized', 'modify', 'monotonic', 'nan', 'nologin', 'norecursive', 'nosuperuser', 'not', 'null', 'of', 'on', 'options', 'or', 'order', 'password', 'permission', 'permissions', 'primary', 'rename', 'replace', 'returns', 'revoke', 'role', 'roles', 'schema', 'select', 'set', 'sfunc', 'smallint', 'static', 'storage', 'stype', 'superuser', 'table', 'text', 'time', 'timestamp', 'timeuuid', @@ -68,9 +67,9 @@ cql_keywords_unreserved = set(( 'aggregate', 'all', 'as', 'ascii', 'bigint', 'blob', 'boolean', 'called', 'clustering', 'compact', 'contains', - 'count', 'counter', 'custom', 'date', 'decimal', 'distinct', 'double', 'exists', 'filtering', 'finalfunc', 'float', + 'count', 'counter', 'custom', 'date', 'decimal', 'deterministic', 'distinct', 'double', 'exists', 'filtering', 'finalfunc', 'float', 'frozen', 'function', 'functions', 'inet', 'initcond', 'input', 'int', 'json', 'key', 'keys', 'keyspaces', - 'language', 'list', 'login', 'map', 'nologin', 'nosuperuser', 'options', 'password', 'permission', 'permissions', + 'language', 'list', 'login', 'map', 'monotonic', 'nologin', 'nosuperuser', 'options', 'password', 'permission', 'permissions', 'returns', 'role', 'roles', 'sfunc', 'smallint', 'static', 'storage', 'stype', 'superuser', 'text', 'time', 'timestamp', 'timeuuid', 'tinyint', 'trigger', 'ttl', 'tuple', 'type', 'user', 'users', 'uuid', 'values', 'varchar', 'varint', 'writetime' @@ -125,7 +124,8 @@ def export_schema_as_string(self): def refresh(self, connection, timeout, target_type=None, change_type=None, **kwargs): server_version = self.get_host(connection.endpoint).release_version - parser = get_schema_parser(connection, server_version, timeout) + dse_version = self.get_host(connection.endpoint).dse_version + parser = get_schema_parser(connection, server_version, dse_version, timeout) if not target_type: self._rebuild_all(parser) @@ -661,7 +661,7 @@ class KeyspaceMetadata(object): virtual = False """ A boolean indicating if this is a virtual keyspace or not. Always ``False`` - for clusters running pre-4.0 versions of Cassandra. + for clusters running Cassandra pre-4.0 and DSE pre-6.7 versions. .. versionadded:: 3.15 """ @@ -893,8 +893,15 @@ class Aggregate(object): Type of the aggregate state """ + deterministic = None + """ + Flag indicating if this function is guaranteed to produce the same result + for a particular input and state. This is available only with DSE >=6.0. + """ + def __init__(self, keyspace, name, argument_types, state_func, - state_type, final_func, initial_condition, return_type): + state_type, final_func, initial_condition, return_type, + deterministic): self.keyspace = keyspace self.name = name self.argument_types = argument_types @@ -903,6 +910,7 @@ def __init__(self, keyspace, name, argument_types, state_func, self.final_func = final_func self.initial_condition = initial_condition self.return_type = return_type + self.deterministic = deterministic def as_cql_query(self, formatted=False): """ @@ -923,6 +931,7 @@ def as_cql_query(self, formatted=False): ret += ''.join((sep, 'FINALFUNC ', protect_name(self.final_func))) if self.final_func else '' ret += ''.join((sep, 'INITCOND ', self.initial_condition)) if self.initial_condition is not None else '' + ret += '{}DETERMINISTIC'.format(sep) if self.deterministic else '' return ret @@ -984,8 +993,27 @@ class Function(object): (convenience function to avoid handling nulls explicitly if the result will just be null) """ + deterministic = None + """ + Flag indicating if this function is guaranteed to produce the same result + for a particular input. This is available only for DSE >=6.0. + """ + + monotonic = None + """ + Flag indicating if this function is guaranteed to increase or decrease + monotonically on any of its arguments. This is available only for DSE >=6.0. + """ + + monotonic_on = None + """ + A list containing the argument or arguments over which this function is + monotonic. This is available only for DSE >=6.0. + """ + def __init__(self, keyspace, name, argument_types, argument_names, - return_type, language, body, called_on_null_input): + return_type, language, body, called_on_null_input, + deterministic, monotonic, monotonic_on): self.keyspace = keyspace self.name = name self.argument_types = argument_types @@ -996,6 +1024,9 @@ def __init__(self, keyspace, name, argument_types, argument_names, self.language = language self.body = body self.called_on_null_input = called_on_null_input + self.deterministic = deterministic + self.monotonic = monotonic + self.monotonic_on = monotonic_on def as_cql_query(self, formatted=False): """ @@ -1012,10 +1043,25 @@ def as_cql_query(self, formatted=False): lang = self.language body = self.body on_null = "CALLED" if self.called_on_null_input else "RETURNS NULL" + deterministic_token = ('DETERMINISTIC{}'.format(sep) + if self.deterministic else + '') + monotonic_tokens = '' # default for nonmonotonic function + if self.monotonic: + # monotonic on all arguments; ignore self.monotonic_on + monotonic_tokens = 'MONOTONIC{}'.format(sep) + elif self.monotonic_on: + # if monotonic == False and monotonic_on is nonempty, we know that + # monotonicity was specified with MONOTONIC ON , so there's + # exactly 1 value there + monotonic_tokens = 'MONOTONIC ON {}{}'.format(self.monotonic_on[0], + sep) return "CREATE FUNCTION %(keyspace)s.%(name)s(%(arg_list)s)%(sep)s" \ "%(on_null)s ON NULL INPUT%(sep)s" \ "RETURNS %(typ)s%(sep)s" \ + "%(deterministic_token)s" \ + "%(monotonic_tokens)s" \ "LANGUAGE %(lang)s%(sep)s" \ "AS $$%(body)s$$" % locals() @@ -1102,7 +1148,7 @@ def primary_key(self): virtual = False """ A boolean indicating if this is a virtual table or not. Always ``False`` - for clusters running pre-4.0 versions of Cassandra. + for clusters running Cassandra pre-4.0 and DSE pre-6.7 versions. .. versionadded:: 3.15 """ @@ -1733,6 +1779,9 @@ def _query_build_rows(self, query_string, build_func): class SchemaParserV22(_SchemaParser): + """ + For C* 2.2+ + """ _SELECT_KEYSPACES = "SELECT * FROM system.schema_keyspaces" _SELECT_COLUMN_FAMILIES = "SELECT * FROM system.schema_columnfamilies" _SELECT_COLUMNS = "SELECT * FROM system.schema_columns" @@ -1884,10 +1933,14 @@ def _build_user_type(cls, usertype_row): @classmethod def _build_function(cls, function_row): return_type = cls._schema_type_to_cql(function_row['return_type']) + deterministic = function_row.get('deterministic', False) + monotonic = function_row.get('monotonic', False) + monotonic_on = function_row.get('monotonic_on', ()) return Function(function_row['keyspace_name'], function_row['function_name'], function_row[cls._function_agg_arument_type_col], function_row['argument_names'], return_type, function_row['language'], function_row['body'], - function_row['called_on_null_input']) + function_row['called_on_null_input'], + deterministic, monotonic, monotonic_on) @classmethod def _build_aggregate(cls, aggregate_row): @@ -1899,7 +1952,8 @@ def _build_aggregate(cls, aggregate_row): return_type = cls._schema_type_to_cql(aggregate_row['return_type']) return Aggregate(aggregate_row['keyspace_name'], aggregate_row['aggregate_name'], aggregate_row['signature'], aggregate_row['state_func'], state_type, - aggregate_row['final_func'], initial_condition, return_type) + aggregate_row['final_func'], initial_condition, return_type, + aggregate_row.get('deterministic', False)) def _build_table_metadata(self, row, col_rows=None, trigger_rows=None): keyspace_name = row["keyspace_name"] @@ -2230,6 +2284,9 @@ def _schema_type_to_cql(type_string): class SchemaParserV3(SchemaParserV22): + """ + For C* 3.0+ + """ _SELECT_KEYSPACES = "SELECT * FROM system_schema.keyspaces" _SELECT_TABLES = "SELECT * FROM system_schema.tables" _SELECT_COLUMNS = "SELECT * FROM system_schema.columns" @@ -2316,7 +2373,8 @@ def _build_keyspace_metadata_internal(row): def _build_aggregate(aggregate_row): return Aggregate(aggregate_row['keyspace_name'], aggregate_row['aggregate_name'], aggregate_row['argument_types'], aggregate_row['state_func'], aggregate_row['state_type'], - aggregate_row['final_func'], aggregate_row['initcond'], aggregate_row['return_type']) + aggregate_row['final_func'], aggregate_row['initcond'], aggregate_row['return_type'], + aggregate_row.get('deterministic', False)) def _build_table_metadata(self, row, col_rows=None, trigger_rows=None, index_rows=None, virtual=False): keyspace_name = row["keyspace_name"] @@ -2498,6 +2556,14 @@ def _schema_type_to_cql(type_string): return type_string +class SchemaParserDSE60(SchemaParserV3): + """ + For DSE 6.0+ + """ + recognized_table_options = (SchemaParserV3.recognized_table_options + + ("nodesync",)) + + class SchemaParserV4(SchemaParserV3): recognized_table_options = tuple( @@ -2629,10 +2695,25 @@ def _build_keyspace_metadata_internal(row): return super(SchemaParserV4, SchemaParserV4)._build_keyspace_metadata_internal(row) +class SchemaParserDSE67(SchemaParserV4): + """ + For DSE 6.7+ + """ + recognized_table_options = (SchemaParserV4.recognized_table_options + + ("nodesync",)) + + class TableMetadataV3(TableMetadata): + """ + For C* 3.0+. `option_maps` take a superset of map names, so if nothing + changes structurally, new option maps can just be appended to the list. + """ compaction_options = {} - option_maps = ['compaction', 'compression', 'caching'] + option_maps = [ + 'compaction', 'compression', 'caching', + 'nodesync' # added DSE 6.0 + ] @property def is_cql_compatible(self): @@ -2768,11 +2849,18 @@ def export_as_string(self): return self.as_cql_query(formatted=True) + ";" -def get_schema_parser(connection, server_version, timeout): +def get_schema_parser(connection, server_version, dse_version, timeout): version = Version(server_version) + if dse_version: + v = Version(dse_version) + if v >= Version('6.7.0'): + return SchemaParserDSE67(connection, timeout) + elif v >= Version('6.0.0'): + return SchemaParserDSE60(connection, timeout) + if version >= Version('4.0.0'): return SchemaParserV4(connection, timeout) - if version >= Version('3.0.0'): + elif version >= Version('3.0.0'): return SchemaParserV3(connection, timeout) else: # we could further specialize by version. Right now just refactoring the @@ -2791,6 +2879,14 @@ def _cql_from_cass_type(cass_type): return cass_type.cql_parameterized_type() +class RLACTableExtension(RegisteredTableExtension): + name = "DSE_RLACA" + + @classmethod + def after_table_cql(cls, table_meta, ext_key, ext_blob): + return "RESTRICT ROWS ON %s.%s USING %s;" % (protect_name(table_meta.keyspace_name), + protect_name(table_meta.name), + protect_name(ext_blob.decode('utf-8'))) NO_VALID_REPLICA = object() From 3e2f616703c37aaf6f2e46505642623423cc8095 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 5 Sep 2019 14:55:07 -0400 Subject: [PATCH 1570/2005] merged tests/integration/standard/test_metadata.py --- tests/integration/standard/test_metadata.py | 139 +++++++++++++------- 1 file changed, 88 insertions(+), 51 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 4fb7ebf4aa..ef269b00fe 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -34,14 +34,16 @@ from cassandra.metadata import (IndexMetadata, Token, murmur3, Function, Aggregate, protect_name, protect_names, RegisteredTableExtension, _RegisteredExtensionType, get_schema_parser, group_keys_by_replica, NO_VALID_REPLICA) +from cassandra.util import SortedSet from tests.integration import (get_cluster, use_singledc, PROTOCOL_VERSION, execute_until_pass, BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, BasicExistingKeyspaceUnitTestCase, drop_keyspace_shutdown_cluster, CASSANDRA_VERSION, - get_supported_protocol_versions, greaterthanorequalcass30, lessthancass30, local, - greaterthancass20, greaterthanorequalcass40) - -from tests.integration import greaterthancass21 + greaterthanorequaldse51, greaterthanorequalcass30, lessthancass30, local, + get_supported_protocol_versions, greaterthancass20, + greaterthancass21, assert_startswith, greaterthanorequalcass40, + greaterthanorequaldse67, lessthancass40 +) log = logging.getLogger(__name__) @@ -94,7 +96,8 @@ def test_host_release_version(self): @test_category metadata """ for host in self.cluster.metadata.all_hosts(): - self.assertTrue(host.release_version.startswith(CASSANDRA_VERSION.base_version)) + assert_startswith(host.release_version, CASSANDRA_VERSION.base_version) + @local @@ -225,7 +228,12 @@ def test_basic_table_meta_properties(self): self.assertEqual([u'a', u'b', u'c'], sorted(tablemeta.columns.keys())) cc = self.cluster.control_connection._connection - parser = get_schema_parser(cc, CASSANDRA_VERSION.base_version, 1) + parser = get_schema_parser( + cc, + self.cluster.metadata.get_host(cc.host).release_version, + self.cluster.metadata.get_host(cc.host).dse_version, + 1 + ) for option in tablemeta.options: self.assertIn(option, parser.recognized_table_options) @@ -405,6 +413,7 @@ def test_composite_in_compound_primary_key_ordering(self): tablemeta = self.get_table_metadata() self.check_create_statement(tablemeta, create_statement) + @lessthancass40 def test_compact_storage(self): create_statement = self.make_create_statement(["a"], [], ["b"]) create_statement += " WITH COMPACT STORAGE" @@ -413,6 +422,7 @@ def test_compact_storage(self): tablemeta = self.get_table_metadata() self.check_create_statement(tablemeta, create_statement) + @lessthancass40 def test_dense_compact_storage(self): create_statement = self.make_create_statement(["a"], ["b"], ["c"]) create_statement += " WITH COMPACT STORAGE" @@ -431,6 +441,7 @@ def test_counter(self): tablemeta = self.get_table_metadata() self.check_create_statement(tablemeta, create_statement) + @lessthancass40 def test_counter_with_compact_storage(self): """ PYTHON-1100 """ create_statement = ( @@ -442,6 +453,7 @@ def test_counter_with_compact_storage(self): tablemeta = self.get_table_metadata() self.check_create_statement(tablemeta, create_statement) + @lessthancass40 def test_counter_with_dense_compact_storage(self): create_statement = ( "CREATE TABLE {keyspace}.{table} (" @@ -558,7 +570,6 @@ def test_refresh_schema_metadata(self): @test_category metadata """ - cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) cluster2.connect() @@ -599,7 +610,7 @@ def test_refresh_schema_metadata(self): self.session.execute("""CREATE FUNCTION {0}.sum_int(key int, val int) RETURNS NULL ON NULL INPUT RETURNS int - LANGUAGE javascript AS 'key + val';""".format(self.keyspace_name)) + LANGUAGE java AS 'return key+val;';""".format(self.keyspace_name)) self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].functions, {}) cluster2.refresh_schema_metadata() @@ -837,7 +848,7 @@ def test_refresh_user_function_metadata(self): self.session.execute("""CREATE FUNCTION {0}.sum_int(key int, val int) RETURNS NULL ON NULL INPUT RETURNS int - LANGUAGE javascript AS 'key + val';""".format(self.keyspace_name)) + LANGUAGE java AS ' return key + val;';""".format(self.keyspace_name)) self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].functions, {}) cluster2.refresh_user_function_metadata(self.keyspace_name, UserFunctionDescriptor("sum_int", ["int", "int"])) @@ -873,7 +884,7 @@ def test_refresh_user_aggregate_metadata(self): self.session.execute("""CREATE FUNCTION {0}.sum_int(key int, val int) RETURNS NULL ON NULL INPUT RETURNS int - LANGUAGE javascript AS 'key + val';""".format(self.keyspace_name)) + LANGUAGE java AS 'return key + val;';""".format(self.keyspace_name)) self.session.execute("""CREATE AGGREGATE {0}.sum_agg(int) SFUNC sum_int @@ -956,7 +967,8 @@ class Ext1(Ext0): self.assertFalse(view_meta.extensions) self.assertIn(Ext0.name, _RegisteredExtensionType._extension_registry) self.assertIn(Ext1.name, _RegisteredExtensionType._extension_registry) - self.assertEqual(len(_RegisteredExtensionType._extension_registry), 2) + # There will bee the RLAC extension here. + self.assertEqual(len(_RegisteredExtensionType._extension_registry), 3) self.cluster.refresh_table_metadata(ks, t) table_meta = ks_meta.tables[t] @@ -1481,7 +1493,10 @@ def make_function_kwargs(self, called_on_null=True): 'return_type': 'double', 'language': 'java', 'body': 'return new Double(0.0);', - 'called_on_null_input': called_on_null} + 'called_on_null_input': called_on_null, + 'deterministic': False, + 'monotonic': False, + 'monotonic_on': []} def test_functions_after_udt(self): """ @@ -1632,15 +1647,15 @@ def setup_class(cls): cls.session.execute("""CREATE OR REPLACE FUNCTION sum_int(s int, i int) RETURNS NULL ON NULL INPUT RETURNS int - LANGUAGE javascript AS 's + i';""") + LANGUAGE java AS 'return s + i;';""") cls.session.execute("""CREATE OR REPLACE FUNCTION sum_int_two(s int, i int, j int) RETURNS NULL ON NULL INPUT RETURNS int - LANGUAGE javascript AS 's + i + j';""") + LANGUAGE java AS 'return s + i + j;';""") cls.session.execute("""CREATE OR REPLACE FUNCTION "List_As_String"(l list) RETURNS NULL ON NULL INPUT RETURNS int - LANGUAGE javascript AS ''''' + l';""") + LANGUAGE java AS 'return l.size();';""") cls.session.execute("""CREATE OR REPLACE FUNCTION extend_list(s list, i int) CALLED ON NULL INPUT RETURNS list @@ -1663,7 +1678,8 @@ def make_aggregate_kwargs(self, state_func, state_type, final_func=None, init_co 'state_type': state_type, 'final_func': final_func, 'initial_condition': init_cond, - 'return_type': "does not matter for creation"} + 'return_type': "does not matter for creation", + 'deterministic': False} def test_return_type_meta(self): """ @@ -1887,7 +1903,13 @@ def setup_class(cls): cls.session.execute("CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}" % cls.keyspace_name) cls.session.set_keyspace(cls.keyspace_name) connection = cls.cluster.control_connection._connection - cls.parser_class = get_schema_parser(connection, CASSANDRA_VERSION.base_version, timeout=20).__class__ + + cls.parser_class = get_schema_parser( + connection, + cls.cluster.metadata.get_host(connection.host).release_version, + cls.cluster.metadata.get_host(connection.host).dse_version, + timeout=20 + ).__class__ cls.cluster.control_connection.reconnect = Mock() @classmethod @@ -1932,28 +1954,36 @@ def test_bad_user_function(self): self.session.execute("""CREATE FUNCTION IF NOT EXISTS %s (key int, val int) RETURNS NULL ON NULL INPUT RETURNS int - LANGUAGE javascript AS 'key + val';""" % self.function_name) - with patch.object(self.parser_class, '_build_function', side_effect=self.BadMetaException): - self.cluster.refresh_schema_metadata() # presently do not capture these errors on udt direct refresh -- make sure it's contained during full refresh - m = self.cluster.metadata.keyspaces[self.keyspace_name] - self.assertIs(m._exc_info[0], self.BadMetaException) - self.assertIn("/*\nWarning:", m.export_as_string()) + LANGUAGE java AS 'return key + val;';""" % self.function_name) + + #We need to patch as well the reconnect function because after patching the _build_function + #there will an Error refreshing schema which will trigger a reconnection. If this happened + #in a timely manner in the call self.cluster.refresh_schema_metadata() it would return an exception + #due to that a connection would be closed + with patch.object(self.cluster.control_connection, 'reconnect'): + with patch.object(self.parser_class, '_build_function', side_effect=self.BadMetaException): + self.cluster.refresh_schema_metadata() # presently do not capture these errors on udt direct refresh -- make sure it's contained during full refresh + m = self.cluster.metadata.keyspaces[self.keyspace_name] + self.assertIs(m._exc_info[0], self.BadMetaException) + self.assertIn("/*\nWarning:", m.export_as_string()) @greaterthancass21 def test_bad_user_aggregate(self): self.session.execute("""CREATE FUNCTION IF NOT EXISTS sum_int (key int, val int) RETURNS NULL ON NULL INPUT RETURNS int - LANGUAGE javascript AS 'key + val';""") + LANGUAGE java AS 'return key + val;';""") self.session.execute("""CREATE AGGREGATE %s(int) SFUNC sum_int STYPE int INITCOND 0""" % self.function_name) - with patch.object(self.parser_class, '_build_aggregate', side_effect=self.BadMetaException): - self.cluster.refresh_schema_metadata() # presently do not capture these errors on udt direct refresh -- make sure it's contained during full refresh - m = self.cluster.metadata.keyspaces[self.keyspace_name] - self.assertIs(m._exc_info[0], self.BadMetaException) - self.assertIn("/*\nWarning:", m.export_as_string()) + #We have the same issue here as in test_bad_user_function + with patch.object(self.cluster.control_connection, 'reconnect'): + with patch.object(self.parser_class, '_build_aggregate', side_effect=self.BadMetaException): + self.cluster.refresh_schema_metadata() # presently do not capture these errors on udt direct refresh -- make sure it's contained during full refresh + m = self.cluster.metadata.keyspaces[self.keyspace_name] + self.assertIs(m._exc_info[0], self.BadMetaException) + self.assertIn("/*\nWarning:", m.export_as_string()) class DynamicCompositeTypeTest(BasicSharedKeyspaceUnitTestCase): @@ -2244,6 +2274,8 @@ def test_base_table_type_alter_mv(self): test_create_view_metadata tests that materialized views metadata is properly updated when the type of base table column is changed. + Support for alter type was removed in CASSANDRA-12443 + @since 3.0.0 @jira_ticket CASSANDRA-10424 @expected_result Materialized view metadata should be updated correctly @@ -2363,6 +2395,21 @@ def test_metadata_with_quoted_identifiers(self): self.assertIsNotNone(value_column) self.assertEqual(value_column.name, 'the Value') + @greaterthanorequaldse51 + def test_dse_workloads(self): + """ + Test to ensure dse_workloads is populated appropriately. + Field added in DSE 5.1 + + @jira_ticket PYTHON-667 + @expected_result dse_workloads set is set on host model + + @test_category metadata + """ + for host in self.cluster.metadata.all_hosts(): + self.assertIsInstance(host.dse_workloads, SortedSet) + self.assertIn("Cassandra", host.dse_workloads) + class GroupPerHost(BasicSharedKeyspaceUnitTestCase): @classmethod @@ -2416,30 +2463,18 @@ class VirtualKeypaceTest(BasicSharedKeyspaceUnitTestCase): virtual_ks_names = ('system_virtual_schema', 'system_views') virtual_ks_structure = { - 'system_views': { - # map from table names to sets of column names for unordered - # comparison - 'caches': {'capacity_bytes', 'entry_count', 'hit_count', - 'hit_ratio', 'name', 'recent_hit_rate_per_second', - 'recent_request_rate_per_second', 'request_count', - 'size_bytes'}, - 'clients': {'address', 'connection_stage', 'driver_name', - 'driver_version', 'hostname', 'port', - 'protocol_version', 'request_count', - 'ssl_cipher_suite', 'ssl_enabled', 'ssl_protocol', - 'username'}, - 'sstable_tasks': {'keyspace_name', 'kind', 'progress', - 'table_name', 'task_id', 'total', 'unit'}, - 'thread_pools': {'active_tasks', 'active_tasks_limit', - 'blocked_tasks', 'blocked_tasks_all_time', - 'completed_tasks', 'name', 'pending_tasks'} - }, + # keyspaces 'system_virtual_schema': { - 'columns': {'clustering_order', 'column_name', - 'column_name_bytes', 'keyspace_name', 'kind', - 'position', 'table_name', 'type'}, + # tables: columns. columns are a set because we're comparing unordered 'keyspaces': {'keyspace_name'}, - 'tables': {'comment', 'keyspace_name', 'table_name'} + 'tables': {'comment', 'keyspace_name', 'table_name'}, + 'columns': {'clustering_order', 'column_name', 'column_name_bytes', + 'keyspace_name', 'kind', 'position', 'table_name', + 'type'} + }, + 'system_views': { + 'sstable_tasks': {'keyspace_name', 'kind', 'progress', + 'table_name', 'task_id', 'total', 'unit'} } } @@ -2457,6 +2492,7 @@ def test_existing_keyspaces_have_correct_virtual_tags(self): ) @greaterthanorequalcass40 + @greaterthanorequaldse67 def test_expected_keyspaces_exist_and_are_virtual(self): for name in self.virtual_ks_names: self.assertTrue( @@ -2465,6 +2501,7 @@ def test_expected_keyspaces_exist_and_are_virtual(self): ) @greaterthanorequalcass40 + @greaterthanorequaldse67 def test_virtual_keyspaces_have_expected_schema_structure(self): self.maxDiff = None From d8d69271413573c3d88b55c48f5fcee57c5ed63a Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 5 Sep 2019 15:15:00 -0400 Subject: [PATCH 1571/2005] some dse decorator fixes --- .../standard/test_custom_protocol_handler.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index 77b777c5fe..20e3100b1a 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -23,7 +23,7 @@ from cassandra import ProtocolVersion, ConsistencyLevel from tests.integration import use_singledc, PROTOCOL_VERSION, drop_keyspace_shutdown_cluster, \ - greaterthanorequalcass30, execute_with_long_wait_retry, greaterthanorequaldse51 + greaterthanorequalcass30, execute_with_long_wait_retry, greaterthanorequaldse51, greaterthanorequalcass3_10 from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES from tests.integration.standard.utils import create_table_with_all_types, get_all_primitive_params from six import binary_type @@ -135,12 +135,11 @@ def test_protocol_divergence_v4_fail_by_flag_uses_int(self): self._protocol_divergence_fail_by_flag_uses_int(ProtocolVersion.V4, uses_int_query_flag=False, int_flag=True) - @greaterthanorequaldse51 + @greaterthanorequalcass3_10 def test_protocol_v5_uses_flag_int(self): """ Test to validate that the _PAGE_SIZE_FLAG is treated correctly using write_uint for V5 - @since DSE 2.0b3 GRAPH 1.0b1 @jira_ticket PYTHON-694 @expected_result the fetch_size=1 parameter will be honored @@ -154,7 +153,6 @@ def test_protocol_dsev1_uses_flag_int(self): """ Test to validate that the _PAGE_SIZE_FLAG is treated correctly using write_uint for DSE_V1 - @since DSE 2.0b3 GRAPH 1.0b1 @jira_ticket PYTHON-694 @expected_result the fetch_size=1 parameter will be honored @@ -163,12 +161,11 @@ def test_protocol_dsev1_uses_flag_int(self): self._protocol_divergence_fail_by_flag_uses_int(ProtocolVersion.DSE_V1, uses_int_query_flag=True, int_flag=True) - @greaterthanorequaldse51 + @greaterthanorequalcass3_10 def test_protocol_divergence_v5_fail_by_flag_uses_int(self): """ Test to validate that the _PAGE_SIZE_FLAG is treated correctly using write_uint for V5 - @since DSE 2.0b3 GRAPH 1.0b1 @jira_ticket PYTHON-694 @expected_result the fetch_size=1 parameter will be honored @@ -182,7 +179,6 @@ def test_protocol_divergence_dsev1_fail_by_flag_uses_int(self): """ Test to validate that the _PAGE_SIZE_FLAG is treated correctly using write_uint for DSE_V1 - @since DSE 2.0b3 GRAPH 1.0b1 @jira_ticket PYTHON-694 @expected_result the fetch_size=1 parameter will be honored From a492bf8d7b471ce89b4cd525b458b52603b8d503 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 5 Sep 2019 11:27:49 -0500 Subject: [PATCH 1572/2005] Updating cqlengine tests --- .../cqlengine/management/test_management.py | 25 +++++++++++++------ 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 6b91760fdc..f255a8bb8e 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -19,7 +19,6 @@ import mock import logging from packaging.version import Version - from cassandra.cqlengine.connection import get_session, get_cluster from cassandra.cqlengine import CQLEngineException from cassandra.cqlengine import management @@ -27,13 +26,16 @@ from cassandra.cqlengine.models import Model from cassandra.cqlengine import columns -from tests.integration import PROTOCOL_VERSION, greaterthancass20, MockLoggingHandler, CASSANDRA_VERSION +from tests.integration import DSE_VERSION, PROTOCOL_VERSION, greaterthancass20, MockLoggingHandler, CASSANDRA_VERSION from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration.cqlengine.query.test_queryset import TestModel from cassandra.cqlengine.usertype import UserType from tests.integration.cqlengine import DEFAULT_KEYSPACE +INCLUDE_REPAIR = not (DSE_VERSION and DSE_VERSION >= Version('6.0.7')) + + class KeyspaceManagementTest(BaseCassEngTestCase): def test_create_drop_succeeeds(self): cluster = get_cluster() @@ -207,9 +209,13 @@ class ModelWithTableProperties(Model): __options__ = {'bloom_filter_fp_chance': '0.76328', 'comment': 'TxfguvBdzwROQALmQBOziRMbkqVGFjqcJfVhwGR', - 'gc_grace_seconds': '2063', - 'read_repair_chance': '0.17985', - 'dclocal_read_repair_chance': '0.50811'} + 'gc_grace_seconds': '2063'} + + if INCLUDE_REPAIR: + __options__.update( + {'read_repair_chance': '0.17985', + 'dclocal_read_repair_chance': '0.50811'} + ) key = columns.UUID(primary_key=True) @@ -225,13 +231,15 @@ def test_set_table_properties(self): expected = {'bloom_filter_fp_chance': 0.76328, 'comment': 'TxfguvBdzwROQALmQBOziRMbkqVGFjqcJfVhwGR', 'gc_grace_seconds': 2063, - 'read_repair_chance': 0.17985, # For some reason 'dclocal_read_repair_chance' in CQL is called # just 'local_read_repair_chance' in the schema table. # Source: https://issues.apache.org/jira/browse/CASSANDRA-6717 # TODO: due to a bug in the native driver i'm not seeing the local read repair chance show up # 'local_read_repair_chance': 0.50811, } + if INCLUDE_REPAIR: + expected.update({'read_repair_chance': 0.17985}) + options = management._get_table_metadata(ModelWithTableProperties).options self.assertEqual(dict([(k, options.get(k)) for k in expected.keys()]), expected) @@ -241,8 +249,9 @@ def test_table_property_update(self): ModelWithTableProperties.__options__['comment'] = 'xirAkRWZVVvsmzRvXamiEcQkshkUIDINVJZgLYSdnGHweiBrAiJdLJkVohdRy' ModelWithTableProperties.__options__['gc_grace_seconds'] = 96362 - ModelWithTableProperties.__options__['read_repair_chance'] = 0.2989 - ModelWithTableProperties.__options__['dclocal_read_repair_chance'] = 0.12732 + if INCLUDE_REPAIR: + ModelWithTableProperties.__options__['read_repair_chance'] = 0.2989 + ModelWithTableProperties.__options__['dclocal_read_repair_chance'] = 0.12732 sync_table(ModelWithTableProperties) From 1395dd156909df041f06844377c72c60831b1ec5 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 5 Sep 2019 15:21:55 -0500 Subject: [PATCH 1573/2005] Updating upgrade tests --- tests/integration/upgrade/test_upgrade.py | 31 +++++++++++++++-------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/tests/integration/upgrade/test_upgrade.py b/tests/integration/upgrade/test_upgrade.py index 1e4fbb2e92..ace2a52515 100644 --- a/tests/integration/upgrade/test_upgrade.py +++ b/tests/integration/upgrade/test_upgrade.py @@ -26,9 +26,17 @@ import unittest # noqa +# Previous Cassandra upgrade two_to_three_path = upgrade_paths([ UpgradePath("2.2.9-3.11", {"version": "2.2.9"}, {"version": "3.11"}, {}), ]) + +# Previous DSE upgrade +five_upgrade_path = upgrade_paths([ + UpgradePath("5.0.11-5.1.4", {"version": "5.0.11"}, {"version": "5.1.4"}, {}), +]) + + class UpgradeTests(UpgradeBase): @two_to_three_path def test_can_write(self): @@ -43,16 +51,17 @@ def test_can_write(self): """ self.start_upgrade(0) + self.cluster_driver.add_execution_profile("all", ExecutionProfile(consistency_level=ConsistencyLevel.ALL)) + self.cluster_driver.add_execution_profile("one", ExecutionProfile(consistency_level=ConsistencyLevel.LOCAL_ONE)) + c = count() while not self.is_upgraded(): - self.session.execute("INSERT INTO test3rf.test(k, v) VALUES (%s, 0)", (next(c), )) + self.session.execute("INSERT INTO test3rf.test(k, v) VALUES (%s, 0)", (next(c), ), execution_profile="one") time.sleep(0.0001) - self.session.default_consistency_level = ConsistencyLevel.ALL - total_number_of_inserted = self.session.execute("SELECT COUNT(*) from test3rf.test")[0][0] + total_number_of_inserted = self.session.execute("SELECT COUNT(*) from test3rf.test", execution_profile="all")[0][0] self.assertEqual(total_number_of_inserted, next(c)) - self.session.default_consistency_level = ConsistencyLevel.LOCAL_ONE self.assertEqual(self.logger_handler.get_message_count("error", ""), 0) @two_to_three_path @@ -101,16 +110,17 @@ def test_can_write(self): """ self.start_upgrade(0) + self.cluster_driver.add_execution_profile("all", ExecutionProfile(consistency_level=ConsistencyLevel.ALL)) + self.cluster_driver.add_execution_profile("one", ExecutionProfile(consistency_level=ConsistencyLevel.LOCAL_ONE)) + c = count() while not self.is_upgraded(): - self.session.execute("INSERT INTO test3rf.test(k, v) VALUES (%s, 0)", (next(c),)) + self.session.execute("INSERT INTO test3rf.test(k, v) VALUES (%s, 0)", (next(c),), execution_profile="one") time.sleep(0.0001) - self.session.default_consistency_level = ConsistencyLevel.ALL - total_number_of_inserted = self.session.execute("SELECT COUNT(*) from test3rf.test")[0][0] + total_number_of_inserted = self.session.execute("SELECT COUNT(*) from test3rf.test", execution_profile="all")[0][0] self.assertEqual(total_number_of_inserted, next(c)) - self.session.default_consistency_level = ConsistencyLevel.LOCAL_ONE self.assertEqual(self.logger_handler.get_message_count("error", ""), 0) @two_to_three_path @@ -261,7 +271,7 @@ def test_can_write_speculative(self): cluster = Cluster() self.addCleanup(cluster.shutdown) cluster.add_execution_profile("spec_ep_rr", spec_ep_rr) - + cluster.add_execution_profile("all", ExecutionProfile(consistency_level=ConsistencyLevel.ALL)) session = cluster.connect() self.start_upgrade(0) @@ -272,8 +282,7 @@ def test_can_write_speculative(self): execution_profile='spec_ep_rr') time.sleep(0.0001) - session.default_consistency_level = ConsistencyLevel.ALL - total_number_of_inserted = session.execute("SELECT COUNT(*) from test3rf.test")[0][0] + total_number_of_inserted = session.execute("SELECT COUNT(*) from test3rf.test", execution_profile="all")[0][0] self.assertEqual(total_number_of_inserted, next(c)) self.assertEqual(self.logger_handler.get_message_count("error", ""), 0) From b16c479ab41bdd7b86fba751ce51f122f69b9f06 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 5 Sep 2019 19:39:44 -0400 Subject: [PATCH 1574/2005] Fix minor metadata errors --- cassandra/__init__.py | 3 +-- cassandra/cluster.py | 7 ++++--- cassandra/protocol.py | 5 ++--- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 624357363c..b8f4b9af39 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -225,8 +225,7 @@ def uses_error_code_map(cls, version): @classmethod def uses_keyspace_flag(cls, version): - # OSS was >= cls.V5... we should verify that this change is correct. - return version >= cls.DSE_V2 + return version >= cls.V5 and version != cls.DSE_V1 @classmethod def has_continuous_paging_support(cls, version): diff --git a/cassandra/cluster.py b/cassandra/cluster.py index a929186332..731a948c4a 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -875,7 +875,8 @@ def __init__(self, idle_heartbeat_timeout=30, no_compact=False, ssl_context=None, - endpoint_factory=None): + endpoint_factory=None, + monitor_reporting_enabled=False): # TODO just added for tests purpose before insights integration """ ``executor_threads`` defines the number of threads in a pool for handling asynchronous tasks such as extablishing connection pools or refreshing metadata. @@ -3960,8 +3961,8 @@ def _set_result(self, host, connection, pool, response): self, connection, **response.schema_change_event) elif response.kind == RESULT_KIND_ROWS: self._paging_state = response.paging_state - self._col_types = response.column_names - self._col_names = response.column_types + self._col_types = response.column_types + self._col_names = response.column_names self._set_final_result(self.row_factory( response.column_names, response.parsed_rows)) elif response.kind == RESULT_KIND_VOID: diff --git a/cassandra/protocol.py b/cassandra/protocol.py index c379bbf603..425c353749 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -677,10 +677,10 @@ def recv_results_rows(self, f, protocol_version, user_type_map, result_metadata) for row in rows: for i in range(len(row)): try: - self.column_typess[i].from_binary(row[i], protocol_version) + self.column_types[i].from_binary(row[i], protocol_version) except Exception as e: raise DriverException('Failed decoding result column "%s" of type %s: %s' % (self.column_names[i], - self.column_names[i].cql_parameterized_type(), + self.column_types[i].cql_parameterized_type(), str(e))) def recv_results_prepared(self, f, protocol_version, user_type_map): @@ -910,7 +910,6 @@ def send_body(self, f, protocol_version): write_long(f, self.timestamp) - class BatchMessage(_MessageType): opcode = 0x0D name = 'BATCH' From 04e6b7af073f29b5e5d55306bd7610b446eec58c Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 5 Sep 2019 19:41:13 -0400 Subject: [PATCH 1575/2005] More integration tests --- tests/integration/__init__.py | 4 +- tests/integration/standard/test_metrics.py | 30 +++++++++---- tests/integration/standard/test_policies.py | 9 ++-- .../standard/test_prepared_statements.py | 42 ++++++++++++++++--- tests/integration/standard/test_query.py | 41 +++++++++++------- 5 files changed, 91 insertions(+), 35 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 100831cd17..729373c3eb 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -333,7 +333,7 @@ def _id_and_mark(f): greaterthanorequalcass31 = unittest.skipUnless(CASSANDRA_VERSION >= Version('3.1'), 'Cassandra version 3.1 or greater required') greaterthanorequalcass36 = unittest.skipUnless(CASSANDRA_VERSION >= Version('3.6'), 'Cassandra version 3.6 or greater required') greaterthanorequalcass3_10 = unittest.skipUnless(CASSANDRA_VERSION >= Version('3.10'), 'Cassandra version 3.10 or greater required') -greaterthanorequalcass3_11 = unittest.skipUnless(CASSANDRA_VERSION >= Version('3.11'), 'Cassandra version 3.10 or greater required') +greaterthanorequalcass3_11 = unittest.skipUnless(CASSANDRA_VERSION >= Version('3.11'), 'Cassandra version 3.11 or greater required') greaterthanorequalcass40 = unittest.skipUnless(CASSANDRA_VERSION >= Version('4.0'), 'Cassandra version 4.0 or greater required') lessthanorequalcass40 = unittest.skipUnless(CASSANDRA_VERSION <= Version('4.0'), 'Cassandra version less or equal to 4.0 required') lessthancass40 = unittest.skipUnless(CASSANDRA_VERSION < Version('4.0'), 'Cassandra version less than 4.0 required') @@ -350,7 +350,7 @@ def _id_and_mark(f): requiresmallclockgranularity = unittest.skipIf("Windows" in platform.system() or "asyncore" in EVENT_LOOP_MANAGER, "This test is not suitible for environments with large clock granularity") requiressimulacron = unittest.skipIf(SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"), "Simulacron jar hasn't been specified or C* version is 2.0") - +requirecassandra = unittest.skipIf(DSE_VERSION, "Cassandra required") def wait_for_node_socket(node, timeout): binary_itf = node.network_interfaces['binary'] diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index d40a66f9c8..7d3b7976e2 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -26,11 +26,14 @@ from cassandra import ConsistencyLevel, WriteTimeout, Unavailable, ReadTimeout from cassandra.protocol import SyntaxException -from cassandra.cluster import Cluster, NoHostAvailable +from cassandra.cluster import Cluster, NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT from tests.integration import get_cluster, get_node, use_singledc, PROTOCOL_VERSION, execute_until_pass from greplin import scales from tests.integration import BasicSharedKeyspaceUnitTestCaseRF3WM, BasicExistingKeyspaceUnitTestCase, local +import pprint as pp + + def setup_module(): use_singledc() @@ -40,10 +43,15 @@ class MetricsTests(unittest.TestCase): def setUp(self): contact_point = ['127.0.0.2'] self.cluster = Cluster(contact_points=contact_point, metrics_enabled=True, protocol_version=PROTOCOL_VERSION, - load_balancing_policy=HostFilterPolicy( - RoundRobinPolicy(), lambda host: host.address in contact_point - ), - default_retry_policy=FallthroughRetryPolicy()) + execution_profiles= + {EXEC_PROFILE_DEFAULT: + ExecutionProfile( + load_balancing_policy=HostFilterPolicy( + RoundRobinPolicy(), lambda host: host.address in contact_point), + retry_policy=FallthroughRetryPolicy() + ) + } + ) self.session = self.cluster.connect("test3rf", wait_for_all_pools=True) def tearDown(self): @@ -196,7 +204,7 @@ def test_metrics_per_cluster(self): """ cluster2 = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION, - default_retry_policy=FallthroughRetryPolicy()) + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(retry_policy=FallthroughRetryPolicy())}) cluster2.connect(self.ks_name, wait_for_all_pools=True) self.assertEqual(len(cluster2.metadata.all_hosts()), 3) @@ -248,10 +256,12 @@ def test_duplicate_metrics_per_cluster(self): @test_category metrics """ cluster2 = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION, - default_retry_policy=FallthroughRetryPolicy()) + monitor_reporting_enabled=False, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(retry_policy=FallthroughRetryPolicy())}) cluster3 = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION, - default_retry_policy=FallthroughRetryPolicy()) + monitor_reporting_enabled=False, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(retry_policy=FallthroughRetryPolicy())}) # Ensure duplicate metric names are not allowed cluster2.metrics.set_stats_name("appcluster") @@ -334,6 +344,10 @@ def __str__(self): class MetricsRequestSize(BasicExistingKeyspaceUnitTestCase): + @classmethod + def setUpClass(cls): + cls.common_setup(1, keyspace_creation=False, monitor_reporting_enabled=False) + def wait_for_count(self, ra, expected_count, error=False): for _ in range(10): if not error: diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index 5a985e5c92..53b6494437 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -17,7 +17,7 @@ except ImportError: import unittest # noqa -from cassandra.cluster import Cluster, ExecutionProfile, ResponseFuture +from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.policies import HostFilterPolicy, RoundRobinPolicy, SimpleConvictionPolicy, \ WhiteListRoundRobinPolicy from cassandra.pool import Host @@ -27,6 +27,7 @@ from concurrent.futures import wait as wait_futures + def setup_module(): use_singledc() @@ -51,8 +52,10 @@ def test_predicate_changes(self): all_hosts = {Host(DefaultEndPoint("127.0.0.{}".format(i)), SimpleConvictionPolicy) for i in (1, 2, 3)} predicate = lambda host: host.endpoint == contact_point if external_event else True - cluster = Cluster((contact_point,), load_balancing_policy=HostFilterPolicy(RoundRobinPolicy(), - predicate=predicate), + hfp = ExecutionProfile( + load_balancing_policy=HostFilterPolicy(RoundRobinPolicy(), predicate=predicate) + ) + cluster = Cluster((contact_point,), execution_profiles={EXEC_PROFILE_DEFAULT: hfp}, protocol_version=PROTOCOL_VERSION, topology_event_refresh_window=0, status_event_refresh_window=0) session = cluster.connect(wait_for_all_pools=True) diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index 76073d7cba..dffa906967 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -23,16 +23,16 @@ from cassandra import ConsistencyLevel, ProtocolVersion from cassandra.cluster import Cluster -from cassandra.query import PreparedStatement, UNSET_VALUE, tuple_factory -from tests.integration import (get_server_versions, greaterthanorequalcass40, - set_default_beta_flag_true, - BasicSharedKeyspaceUnitTestCase) +from cassandra.query import PreparedStatement, UNSET_VALUE +from tests.integration import (get_server_versions, greaterthanorequalcass40, greaterthanorequaldse50, + requirecassandra, BasicSharedKeyspaceUnitTestCase) import logging LOG = logging.getLogger(__name__) + def setup_module(): use_singledc() @@ -546,12 +546,12 @@ def test_id_is_not_updated_conditional_v4(self): self.addCleanup(cluster.shutdown) self._test_updated_conditional(session, 9) + @requirecassandra def test_id_is_not_updated_conditional_v5(self): """ Test that verifies that the result_metadata and the result_metadata_id are udpated correctly in conditional statements in protocol V5 - @since 3.13 @jira_ticket PYTHON-847 """ @@ -560,6 +560,36 @@ def test_id_is_not_updated_conditional_v5(self): self.addCleanup(cluster.shutdown) self._test_updated_conditional(session, 10) + @greaterthanorequaldse50 + def test_id_is_not_updated_conditional_dsev1(self): + """ + Test that verifies that the result_metadata and the + result_metadata_id are udpated correctly in conditional statements + in protocol DSE V1 + + @since 3.13 + @jira_ticket PYTHON-847 + """ + cluster = Cluster(protocol_version=ProtocolVersion.DSE_V1) + session = cluster.connect() + self.addCleanup(cluster.shutdown) + self._test_updated_conditional(session, 10) + + @greaterthanorequaldse50 + def test_id_is_not_updated_conditional_dsev2(self): + """ + Test that verifies that the result_metadata and the + result_metadata_id are udpated correctly in conditional statements + in protocol DSE V2 + + @since 3.13 + @jira_ticket PYTHON-847 + """ + cluster = Cluster(protocol_version=ProtocolVersion.DSE_V2) + session = cluster.connect() + self.addCleanup(cluster.shutdown) + self._test_updated_conditional(session, 10) + def _test_updated_conditional(self, session, value): prepared_statement = session.prepare( "INSERT INTO {}(a, b, d) VALUES " @@ -573,7 +603,7 @@ def check_result_and_metadata(expected): expected ) self.assertEqual(prepared_statement.result_metadata_id, first_id) - self.assertEqual(prepared_statement.result_metadata, []) + self.assertIsNone(prepared_statement.result_metadata) # Successful conditional update check_result_and_metadata((True,)) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 97a1e68f6d..62478974eb 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -28,7 +28,7 @@ from cassandra.policies import HostDistance, RoundRobinPolicy, WhiteListRoundRobinPolicy from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, \ greaterthanprotocolv3, MockLoggingHandler, get_supported_protocol_versions, local, get_cluster, setup_keyspace, \ - USE_CASS_EXTERNAL, greaterthanorequalcass40 + USE_CASS_EXTERNAL, greaterthanorequalcass40, DSE_VERSION from tests import notwindows from tests.integration import greaterthanorequalcass30, get_node @@ -122,18 +122,20 @@ def test_trace_id_to_resultset(self): self.assertListEqual([rs_trace], rs.get_all_query_traces()) def test_trace_ignores_row_factory(self): - self.session.row_factory = dict_factory + with Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=dict_factory)}) as cluster: - query = "SELECT * FROM system.local" - statement = SimpleStatement(query) - rs = self.session.execute(statement, trace=True) + s = cluster.connect() + query = "SELECT * FROM system.local" + statement = SimpleStatement(query) + rs = s.execute(statement, trace=True) - # Ensure this does not throw an exception - trace = rs.get_query_trace() - self.assertTrue(trace.events) - str(trace) - for event in trace.events: - str(event) + # Ensure this does not throw an exception + trace = rs.get_query_trace() + self.assertTrue(trace.events) + str(trace) + for event in trace.events: + str(event) @local @greaterthanprotocolv3 @@ -773,15 +775,19 @@ def test_unicode(self): self.session.execute("DROP TABLE test3rf.testtext") def test_too_many_statements(self): + # The actual max # of statements is 0xFFFF, but this can occasionally cause a server write timeout. + large_batch = 0xFFF max_statements = 0xFFFF ss = SimpleStatement("INSERT INTO test3rf.test (k, v) VALUES (0, 0)") b = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=ConsistencyLevel.ONE) - # max works - b.add_all([ss] * max_statements, [None] * max_statements) - self.session.execute(b) + # large number works works + b.add_all([ss] * large_batch, [None] * large_batch) + self.session.execute(b, timeout=30.0) + b = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=ConsistencyLevel.ONE) # max + 1 raises + b.add_all([ss] * max_statements, [None] * max_statements) self.assertRaises(ValueError, b.add, ss) # also would have bombed trying to encode @@ -1392,6 +1398,7 @@ def tearDownClass(cls): cls.session.execute(ddl) cls.cluster.shutdown() + class QueryKeyspaceTests(BaseKeyspaceTests): def test_setting_keyspace(self): @@ -1435,7 +1442,8 @@ def test_setting_keyspace_and_session_after_created(self): @test_category query """ - cluster = Cluster(protocol_version=ProtocolVersion.V5, allow_beta_protocol_version=True) + pv = ProtocolVersion.DSE_V2 if DSE_VERSION else ProtocolVersion.V5 + cluster = Cluster(protocol_version=pv, allow_beta_protocol_version=True) session = cluster.connect() self.addCleanup(cluster.shutdown) @@ -1453,7 +1461,8 @@ def test_setting_keyspace_and_same_session(self): @test_category query """ - cluster = Cluster(protocol_version=ProtocolVersion.V5, allow_beta_protocol_version=True) + pv = ProtocolVersion.DSE_V2 if DSE_VERSION else ProtocolVersion.V5 + cluster = Cluster(protocol_version=pv, allow_beta_protocol_version=True) session = cluster.connect(self.ks_name) self.addCleanup(cluster.shutdown) From e0c65ee5e0a45ff518b47ebeb527d867d1ae1635 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 5 Sep 2019 21:11:09 -0400 Subject: [PATCH 1576/2005] Add tests/integration/standard/test_row_factories.py --- .../standard/test_row_factories.py | 114 ++++++------------ 1 file changed, 37 insertions(+), 77 deletions(-) diff --git a/tests/integration/standard/test_row_factories.py b/tests/integration/standard/test_row_factories.py index df709c34bd..48ceb1d949 100644 --- a/tests/integration/standard/test_row_factories.py +++ b/tests/integration/standard/test_row_factories.py @@ -19,7 +19,7 @@ except ImportError: import unittest # noqa -from cassandra.cluster import Cluster, ResultSet +from cassandra.cluster import Cluster, ResultSet, ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.query import tuple_factory, named_tuple_factory, dict_factory, ordered_dict_factory from cassandra.util import OrderedDict @@ -31,8 +31,7 @@ def setup_module(): class NameTupleFactory(BasicSharedKeyspaceUnitTestCase): def setUp(self): - super(NameTupleFactory, self).setUp() - self.session.row_factory = named_tuple_factory + self.common_setup(1, execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=named_tuple_factory)}) ddl = ''' CREATE TABLE {0}.{1} ( k int PRIMARY KEY, @@ -78,38 +77,22 @@ class RowFactoryTests(BasicSharedKeyspaceUnitTestCaseWFunctionTable): """ Test different row_factories and access code """ - def setUp(self): - super(RowFactoryTests, self).setUp() - self.insert1 = ''' - INSERT INTO {0}.{1} - ( k , v ) - VALUES - ( 1 , 1 ) - '''.format(self.keyspace_name, self.function_table_name) - - self.insert2 = ''' - INSERT INTO {0}.{1} - ( k , v ) - VALUES - ( 2 , 2 ) - '''.format(self.keyspace_name, self.function_table_name) - - self.select = ''' - SELECT * FROM {0}.{1} - '''.format(self.keyspace_name, self.function_table_name) - - def tearDown(self): - self.drop_function_table() + @classmethod + def setUpClass(cls): + cls.common_setup(rf=1, create_class_table=True) + q = "INSERT INTO {0}.{1} (k, v) VALUES (%s, %s)".format(cls.ks_name, cls.ks_name) + cls.session.execute(q, (1, 1)) + cls.session.execute(q, (2, 2)) + cls.select = "SELECT * FROM {0}.{1}".format(cls.ks_name, cls.ks_name) + + def _results_from_row_factory(self, row_factory): + cluster = Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=row_factory)}) + with cluster: + return cluster.connect().execute(self.select) def test_tuple_factory(self): - session = self.session - session.row_factory = tuple_factory - - session.execute(self.insert1) - session.execute(self.insert2) - - result = session.execute(self.select) - + result = self._results_from_row_factory(tuple_factory) self.assertIsInstance(result, ResultSet) self.assertIsInstance(result[0], tuple) @@ -122,14 +105,7 @@ def test_tuple_factory(self): self.assertEqual(result[1][0], 2) def test_named_tuple_factory(self): - session = self.session - session.row_factory = named_tuple_factory - - session.execute(self.insert1) - session.execute(self.insert2) - - result = session.execute(self.select) - + result = self._results_from_row_factory(named_tuple_factory) self.assertIsInstance(result, ResultSet) result = list(result) @@ -141,17 +117,10 @@ def test_named_tuple_factory(self): self.assertEqual(result[1].k, result[1].v) self.assertEqual(result[1].k, 2) - def test_dict_factory(self): - session = self.session - session.row_factory = dict_factory - - session.execute(self.insert1) - session.execute(self.insert2) - - result = session.execute(self.select) - + def _test_dict_factory(self, row_factory, row_type): + result = self._results_from_row_factory(row_factory) self.assertIsInstance(result, ResultSet) - self.assertIsInstance(result[0], dict) + self.assertIsInstance(result[0], row_type) for row in result: self.assertEqual(row['k'], row['v']) @@ -161,25 +130,11 @@ def test_dict_factory(self): self.assertEqual(result[1]['k'], result[1]['v']) self.assertEqual(result[1]['k'], 2) - def test_ordered_dict_factory(self): - session = self.session - session.row_factory = ordered_dict_factory - - session.execute(self.insert1) - session.execute(self.insert2) - - result = session.execute(self.select) - - self.assertIsInstance(result, ResultSet) - self.assertIsInstance(result[0], OrderedDict) - - for row in result: - self.assertEqual(row['k'], row['v']) + def test_dict_factory(self): + self._test_dict_factory(dict_factory, dict) - self.assertEqual(result[0]['k'], result[0]['v']) - self.assertEqual(result[0]['k'], 1) - self.assertEqual(result[1]['k'], result[1]['v']) - self.assertEqual(result[1]['k'], 2) + def test_ordered_dict_factory(self): + self._test_dict_factory(ordered_dict_factory, OrderedDict) def test_generator_row_factory(self): """ @@ -201,7 +156,12 @@ def _gen_row_factory(rows): session = self.session session.row_factory = generator_row_factory - session.execute(self.insert1) + session.execute(''' + INSERT INTO {0}.{1} + ( k , v ) + VALUES + ( 1 , 1 ) + '''.format(self.keyspace_name, self.function_table_name)) result = session.execute(self.select) self.assertIsInstance(result, ResultSet) first_row = result.one() @@ -218,8 +178,7 @@ def setup_class(cls): cls.session = cls.cluster.connect() cls._cass_version, cls._cql_version = get_server_versions() ddl = ''' - CREATE TABLE test1rf.table_num_col ( key blob PRIMARY KEY, "626972746864617465" blob ) - WITH COMPACT STORAGE''' + CREATE TABLE test1rf.table_num_col ( key blob PRIMARY KEY, "626972746864617465" blob )''' cls.session.execute(ddl) @classmethod @@ -252,8 +211,9 @@ def test_can_select_with_dict_factory(self): """ can SELECT numeric column using dict_factory """ - self.session.row_factory = dict_factory - try: - self.session.execute('SELECT * FROM test1rf.table_num_col') - except ValueError as e: - self.fail("Unexpected ValueError exception: %s" % e.message) + with Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=dict_factory)}) as cluster: + try: + cluster.connect().execute('SELECT * FROM test1rf.table_num_col') + except ValueError as e: + self.fail("Unexpected ValueError exception: %s" % e.message) From 4f3b9ab2eb9f679f1b3877ea239b27a48913c994 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 6 Sep 2019 08:27:44 -0400 Subject: [PATCH 1577/2005] Merged protocol.py --- cassandra/protocol.py | 183 ++++++++++++++++++++---------------------- 1 file changed, 86 insertions(+), 97 deletions(-) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 425c353749..57ed781e2e 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -30,7 +30,7 @@ UnsupportedOperation, UserFunctionDescriptor, UserAggregateDescriptor, SchemaTargetType) from cassandra.marshal import (int32_pack, int32_unpack, uint16_pack, uint16_unpack, - int8_pack, int8_unpack, uint64_pack, header_pack, + uint8_pack, int8_unpack, uint64_pack, header_pack, v3_header_pack, uint32_pack) from cassandra.cqltypes import (AsciiType, BytesType, BooleanType, CounterColumnType, DateType, DecimalType, @@ -387,6 +387,11 @@ def to_exception(self): return AlreadyExists(**self.info) +class ClientWriteError(RequestExecutionException): + summary = 'Client write failure.' + error_code = 0x8000 + + class StartupMessage(_MessageType): opcode = 0x01 name = 'STARTUP' @@ -512,31 +517,34 @@ def recv_body(cls, f, *args): _PAGE_SIZE_FLAG = 0x04 _WITH_PAGING_STATE_FLAG = 0x08 _WITH_SERIAL_CONSISTENCY_FLAG = 0x10 -_PROTOCOL_TIMESTAMP = 0x20 +_PROTOCOL_TIMESTAMP_FLAG = 0x20 +_NAMES_FOR_VALUES_FLAG = 0x40 # not used here _WITH_KEYSPACE_FLAG = 0x80 _PREPARED_WITH_KEYSPACE_FLAG = 0x01 +_PAGE_SIZE_BYTES_FLAG = 0x40000000 +_PAGING_OPTIONS_FLAG = 0x80000000 -class QueryMessage(_MessageType): - opcode = 0x07 - name = 'QUERY' +class _QueryMessage(_MessageType): - def __init__(self, query, consistency_level, serial_consistency_level=None, - fetch_size=None, paging_state=None, timestamp=None, keyspace=None): - self.query = query + def __init__(self, query_params, consistency_level, + serial_consistency_level=None, fetch_size=None, + paging_state=None, timestamp=None, skip_meta=False, + continuous_paging_options=None, keyspace=None): + self.query_params = query_params self.consistency_level = consistency_level self.serial_consistency_level = serial_consistency_level self.fetch_size = fetch_size self.paging_state = paging_state self.timestamp = timestamp + self.skip_meta = skip_meta + self.continuous_paging_options = continuous_paging_options self.keyspace = keyspace - self._query_params = None # only used internally. May be set to a list of native-encoded values to have them sent with the request. - def send_body(self, f, protocol_version): - write_longstring(f, self.query) + def _write_query_params(self, f, protocol_version): write_consistency_level(f, self.consistency_level) flags = 0x00 - if self._query_params is not None: + if self.query_params is not None: flags |= _VALUES_FLAG # also v2+, but we're only setting params internally right now if self.serial_consistency_level: @@ -565,7 +573,15 @@ def send_body(self, f, protocol_version): "2 or higher. Consider setting Cluster.protocol_version to 2.") if self.timestamp is not None: - flags |= _PROTOCOL_TIMESTAMP + flags |= _PROTOCOL_TIMESTAMP_FLAG + + if self.continuous_paging_options: + if ProtocolVersion.has_continuous_paging_support(protocol_version): + flags |= _PAGING_OPTIONS_FLAG + else: + raise UnsupportedOperation( + "Continuous paging may only be used with protocol version " + "ProtocolVersion.DSE_V1 or higher. Consider setting Cluster.protocol_version to ProtocolVersion.DSE_V1.") if self.keyspace is not None: if ProtocolVersion.uses_keyspace_flag(protocol_version): @@ -573,18 +589,17 @@ def send_body(self, f, protocol_version): else: raise UnsupportedOperation( "Keyspaces may only be set on queries with protocol version " - "5 or higher. Consider setting Cluster.protocol_version to 5.") + "DSE_V2 or higher. Consider setting Cluster.protocol_version to ProtocolVersion.DSE_V2.") if ProtocolVersion.uses_int_query_flags(protocol_version): write_uint(f, flags) else: write_byte(f, flags) - if self._query_params is not None: - write_short(f, len(self._query_params)) - for param in self._query_params: + if self.query_params is not None: + write_short(f, len(self.query_params)) + for param in self.query_params: write_value(f, param) - if self.fetch_size: write_int(f, self.fetch_size) if self.paging_state: @@ -595,6 +610,49 @@ def send_body(self, f, protocol_version): write_long(f, self.timestamp) if self.keyspace is not None: write_string(f, self.keyspace) + if self.continuous_paging_options: + self._write_paging_options(f, self.continuous_paging_options, protocol_version) + + def _write_paging_options(self, f, paging_options, protocol_version): + write_int(f, paging_options.max_pages) + write_int(f, paging_options.max_pages_per_second) + if ProtocolVersion.has_continuous_paging_next_pages(protocol_version): + write_int(f, paging_options.max_queue_size) + + +class QueryMessage(_QueryMessage): + opcode = 0x07 + name = 'QUERY' + + def __init__(self, query, consistency_level, serial_consistency_level=None, + fetch_size=None, paging_state=None, timestamp=None, continuous_paging_options=None, keyspace=None): + self.query = query + super(QueryMessage, self).__init__(None, consistency_level, serial_consistency_level, fetch_size, + paging_state, timestamp, False, continuous_paging_options, keyspace) + + def send_body(self, f, protocol_version): + write_longstring(f, self.query) + self._write_query_params(f, protocol_version) + + +class ExecuteMessage(_QueryMessage): + opcode = 0x0A + name = 'EXECUTE' + + def __init__(self, query_id, query_params, consistency_level, + serial_consistency_level=None, fetch_size=None, + paging_state=None, timestamp=None, skip_meta=False, + continuous_paging_options=None, result_metadata_id=None): + self.query_id = query_id + self.result_metadata_id = result_metadata_id + super(ExecuteMessage, self).__init__(query_params, consistency_level, serial_consistency_level, fetch_size, + paging_state, timestamp, skip_meta, continuous_paging_options) + + def send_body(self, f, protocol_version): + write_string(f, self.query_id) + if ProtocolVersion.uses_prepared_metadata(protocol_version): + write_string(f, self.result_metadata_id) + self._write_query_params(f, protocol_version) CUSTOM_TYPE = object() @@ -611,6 +669,8 @@ class ResultMessage(_MessageType): name = 'RESULT' kind = None + results = None + paging_state = None # Names match type name in module scope. Most are imported from cassandra.cqltypes (except CUSTOM_TYPE) type_codes = _cqltypes_by_code = dict((v, globals()[k]) for k, v in type_codes.__dict__.items() if not k.startswith('_')) @@ -622,6 +682,8 @@ class ResultMessage(_MessageType): _CONTINUOUS_PAGING_LAST_FLAG = 0x80000000 _METADATA_ID_FLAG = 0x0008 + kind = None + # These are all the things a result message might contain. They are populated according to 'kind' column_names = None column_types = None @@ -635,7 +697,6 @@ class ResultMessage(_MessageType): bind_metadata = None pk_indexes = None schema_change_event = None - result_metadata_id = None def __init__(self, kind): self.kind = kind @@ -818,7 +879,7 @@ def send_body(self, f, protocol_version): else: raise UnsupportedOperation( "Keyspaces may only be set on queries with protocol version " - "5 or higher. Consider setting Cluster.protocol_version to 5.") + "5 or DSE_V2 or higher. Consider setting Cluster.protocol_version.") if ProtocolVersion.uses_prepare_flags(protocol_version): write_uint(f, flags) @@ -829,7 +890,7 @@ def send_body(self, f, protocol_version): "Attempted to set flags with value {flags:0=#8x} on" "protocol version {pv}, which doesn't support flags" "in prepared statements." - "Consider setting Cluster.protocol_version to 5." + "Consider setting Cluster.protocol_version to 5 or DSE_V2." "".format(flags=flags, pv=protocol_version)) if ProtocolVersion.uses_keyspace_flag(protocol_version): @@ -837,79 +898,6 @@ def send_body(self, f, protocol_version): write_string(f, self.keyspace) -class ExecuteMessage(_MessageType): - opcode = 0x0A - name = 'EXECUTE' - - def __init__(self, query_id, query_params, consistency_level, - serial_consistency_level=None, fetch_size=None, - paging_state=None, timestamp=None, skip_meta=False, - result_metadata_id=None): - self.query_id = query_id - self.query_params = query_params - self.consistency_level = consistency_level - self.serial_consistency_level = serial_consistency_level - self.fetch_size = fetch_size - self.paging_state = paging_state - self.timestamp = timestamp - self.skip_meta = skip_meta - self.result_metadata_id = result_metadata_id - - def send_body(self, f, protocol_version): - write_string(f, self.query_id) - if ProtocolVersion.uses_prepared_metadata(protocol_version): - write_string(f, self.result_metadata_id) - if protocol_version == 1: - if self.serial_consistency_level: - raise UnsupportedOperation( - "Serial consistency levels require the use of protocol version " - "2 or higher. Consider setting Cluster.protocol_version to 2 " - "to support serial consistency levels.") - if self.fetch_size or self.paging_state: - raise UnsupportedOperation( - "Automatic query paging may only be used with protocol version " - "2 or higher. Consider setting Cluster.protocol_version to 2.") - write_short(f, len(self.query_params)) - for param in self.query_params: - write_value(f, param) - write_consistency_level(f, self.consistency_level) - else: - write_consistency_level(f, self.consistency_level) - flags = _VALUES_FLAG - if self.serial_consistency_level: - flags |= _WITH_SERIAL_CONSISTENCY_FLAG - if self.fetch_size: - flags |= _PAGE_SIZE_FLAG - if self.paging_state: - flags |= _WITH_PAGING_STATE_FLAG - if self.timestamp is not None: - if protocol_version >= 3: - flags |= _PROTOCOL_TIMESTAMP - else: - raise UnsupportedOperation( - "Protocol-level timestamps may only be used with protocol version " - "3 or higher. Consider setting Cluster.protocol_version to 3.") - if self.skip_meta: - flags |= _SKIP_METADATA_FLAG - - if ProtocolVersion.uses_int_query_flags(protocol_version): - write_uint(f, flags) - else: - write_byte(f, flags) - - write_short(f, len(self.query_params)) - for param in self.query_params: - write_value(f, param) - if self.fetch_size: - write_int(f, self.fetch_size) - if self.paging_state: - write_longstring(f, self.paging_state) - if self.serial_consistency_level: - write_consistency_level(f, self.serial_consistency_level) - if self.timestamp is not None: - write_long(f, self.timestamp) - - class BatchMessage(_MessageType): opcode = 0x0D name = 'BATCH' @@ -945,7 +933,7 @@ def send_body(self, f, protocol_version): if self.serial_consistency_level: flags |= _WITH_SERIAL_CONSISTENCY_FLAG if self.timestamp is not None: - flags |= _PROTOCOL_TIMESTAMP + flags |= _PROTOCOL_TIMESTAMP_FLAG if self.keyspace: if ProtocolVersion.uses_keyspace_flag(protocol_version): flags |= _WITH_KEYSPACE_FLAG @@ -1146,7 +1134,7 @@ def decode_message(cls, protocol_version, user_type_map, stream_id, flags, opcod else: custom_payload = None - flags &= USE_BETA_MASK # will only be set if we asserted it in connection estabishment + flags &= USE_BETA_MASK # will only be set if we asserted it in connection estabishment if flags: log.warning("Unknown protocol flags set: %02x. May cause problems.", flags) @@ -1164,6 +1152,7 @@ def decode_message(cls, protocol_version, user_type_map, stream_id, flags, opcod return msg + def cython_protocol_handler(colparser): """ Given a column parser to deserialize ResultMessages, return a suitable @@ -1229,7 +1218,7 @@ def read_byte(f): def write_byte(f, b): - f.write(int8_pack(b)) + f.write(uint8_pack(b)) def read_int(f): From 3fe62d6d7005fd68a281307f734b7ca567bc2c41 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 5 Sep 2019 16:31:38 -0500 Subject: [PATCH 1578/2005] misc integration tests --- tests/integration/standard/test_udts.py | 152 ++++++++++++------------ 1 file changed, 74 insertions(+), 78 deletions(-) diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index 514c562390..4f23f9d5a9 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -22,12 +22,12 @@ import six from cassandra import InvalidRequest -from cassandra.cluster import Cluster, UserTypeDoesNotExist +from cassandra.cluster import Cluster, UserTypeDoesNotExist, ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.query import dict_factory from cassandra.util import OrderedMap -from tests.integration import use_singledc, PROTOCOL_VERSION, execute_until_pass, BasicSegregatedKeyspaceUnitTestCase, \ - greaterthancass20, greaterthanorequalcass36, lessthancass30 +from tests.integration import use_singledc, PROTOCOL_VERSION, execute_until_pass, \ + BasicSegregatedKeyspaceUnitTestCase, greaterthancass20, lessthancass30, greaterthanorequalcass36 from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, PRIMITIVE_DATATYPES_KEYS, \ COLLECTION_TYPES, get_sample, get_collection_sample @@ -308,12 +308,12 @@ def test_can_insert_udts_with_varying_lengths(self): c = Cluster(protocol_version=PROTOCOL_VERSION) s = c.connect(self.keyspace_name, wait_for_all_pools=True) - MAX_TEST_LENGTH = 254 + max_test_length = 254 # create the seed udt, increase timeout to avoid the query failure on slow systems s.execute("CREATE TYPE lengthy_udt ({0})" .format(', '.join(['v_{0} int'.format(i) - for i in range(MAX_TEST_LENGTH)]))) + for i in range(max_test_length)]))) # create a table with multiple sizes of nested udts # no need for all nested types, only a spot checked few and the largest one @@ -322,13 +322,13 @@ def test_can_insert_udts_with_varying_lengths(self): "v frozen)") # create and register the seed udt type - udt = namedtuple('lengthy_udt', tuple(['v_{0}'.format(i) for i in range(MAX_TEST_LENGTH)])) + udt = namedtuple('lengthy_udt', tuple(['v_{0}'.format(i) for i in range(max_test_length)])) c.register_user_type(self.keyspace_name, "lengthy_udt", udt) # verify inserts and reads - for i in (0, 1, 2, 3, MAX_TEST_LENGTH): + for i in (0, 1, 2, 3, max_test_length): # create udt - params = [j for j in range(i)] + [None for j in range(MAX_TEST_LENGTH - i)] + params = [j for j in range(i)] + [None for j in range(max_test_length - i)] created_udt = udt(*params) # write udt @@ -340,12 +340,12 @@ def test_can_insert_udts_with_varying_lengths(self): c.shutdown() - def nested_udt_schema_helper(self, session, MAX_NESTING_DEPTH): + def nested_udt_schema_helper(self, session, max_nesting_depth): # create the seed udt execute_until_pass(session, "CREATE TYPE depth_0 (age int, name text)") # create the nested udts - for i in range(MAX_NESTING_DEPTH): + for i in range(max_nesting_depth): execute_until_pass(session, "CREATE TYPE depth_{0} (value frozen)".format(i + 1, i)) # create a table with multiple sizes of nested udts @@ -356,7 +356,7 @@ def nested_udt_schema_helper(self, session, MAX_NESTING_DEPTH): "v_1 frozen, " "v_2 frozen, " "v_3 frozen, " - "v_{0} frozen)".format(MAX_NESTING_DEPTH)) + "v_{0} frozen)".format(max_nesting_depth)) def nested_udt_creation_helper(self, udts, i): if i == 0: @@ -364,8 +364,8 @@ def nested_udt_creation_helper(self, udts, i): else: return udts[i](self.nested_udt_creation_helper(udts, i - 1)) - def nested_udt_verification_helper(self, session, MAX_NESTING_DEPTH, udts): - for i in (0, 1, 2, 3, MAX_NESTING_DEPTH): + def nested_udt_verification_helper(self, session, max_nesting_depth, udts): + for i in (0, 1, 2, 3, max_nesting_depth): # create udt udt = self.nested_udt_creation_helper(udts, i) @@ -384,75 +384,72 @@ def nested_udt_verification_helper(self, session, MAX_NESTING_DEPTH, udts): result = session.execute("SELECT v_{0} FROM mytable WHERE k=1".format(i))[0] self.assertEqual(udt, result["v_{0}".format(i)]) + def _cluster_default_dict_factory(self): + return Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=dict_factory)}) + def test_can_insert_nested_registered_udts(self): """ Test for ensuring nested registered udts are properly inserted """ + with self._cluster_default_dict_factory() as c: + s = c.connect(self.keyspace_name, wait_for_all_pools=True) - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect(self.keyspace_name, wait_for_all_pools=True) - s.row_factory = dict_factory - - MAX_NESTING_DEPTH = 16 + max_nesting_depth = 16 - # create the schema - self.nested_udt_schema_helper(s, MAX_NESTING_DEPTH) + # create the schema + self.nested_udt_schema_helper(s, max_nesting_depth) - # create and register the seed udt type - udts = [] - udt = namedtuple('depth_0', ('age', 'name')) - udts.append(udt) - c.register_user_type(self.keyspace_name, "depth_0", udts[0]) - - # create and register the nested udt types - for i in range(MAX_NESTING_DEPTH): - udt = namedtuple('depth_{0}'.format(i + 1), ('value')) + # create and register the seed udt type + udts = [] + udt = namedtuple('depth_0', ('age', 'name')) udts.append(udt) - c.register_user_type(self.keyspace_name, "depth_{0}".format(i + 1), udts[i + 1]) + c.register_user_type(self.keyspace_name, "depth_0", udts[0]) - # insert udts and verify inserts with reads - self.nested_udt_verification_helper(s, MAX_NESTING_DEPTH, udts) + # create and register the nested udt types + for i in range(max_nesting_depth): + udt = namedtuple('depth_{0}'.format(i + 1), ('value')) + udts.append(udt) + c.register_user_type(self.keyspace_name, "depth_{0}".format(i + 1), udts[i + 1]) - c.shutdown() + # insert udts and verify inserts with reads + self.nested_udt_verification_helper(s, max_nesting_depth, udts) def test_can_insert_nested_unregistered_udts(self): """ Test for ensuring nested unregistered udts are properly inserted """ - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect(self.keyspace_name, wait_for_all_pools=True) - s.row_factory = dict_factory - - MAX_NESTING_DEPTH = 16 + with self._cluster_default_dict_factory() as c: + s = c.connect(self.keyspace_name, wait_for_all_pools=True) - # create the schema - self.nested_udt_schema_helper(s, MAX_NESTING_DEPTH) + max_nesting_depth = 16 - # create the seed udt type - udts = [] - udt = namedtuple('depth_0', ('age', 'name')) - udts.append(udt) + # create the schema + self.nested_udt_schema_helper(s, max_nesting_depth) - # create the nested udt types - for i in range(MAX_NESTING_DEPTH): - udt = namedtuple('depth_{0}'.format(i + 1), ('value')) + # create the seed udt type + udts = [] + udt = namedtuple('depth_0', ('age', 'name')) udts.append(udt) - # insert udts via prepared statements and verify inserts with reads - for i in (0, 1, 2, 3, MAX_NESTING_DEPTH): - # create udt - udt = self.nested_udt_creation_helper(udts, i) + # create the nested udt types + for i in range(max_nesting_depth): + udt = namedtuple('depth_{0}'.format(i + 1), ('value')) + udts.append(udt) - # write udt - insert = s.prepare("INSERT INTO mytable (k, v_{0}) VALUES (0, ?)".format(i)) - s.execute(insert, [udt]) + # insert udts via prepared statements and verify inserts with reads + for i in (0, 1, 2, 3, max_nesting_depth): + # create udt + udt = self.nested_udt_creation_helper(udts, i) - # verify udt was written and read correctly - result = s.execute("SELECT v_{0} FROM mytable WHERE k=0".format(i))[0] - self.assertEqual(udt, result["v_{0}".format(i)]) + # write udt + insert = s.prepare("INSERT INTO mytable (k, v_{0}) VALUES (0, ?)".format(i)) + s.execute(insert, [udt]) - c.shutdown() + # verify udt was written and read correctly + result = s.execute("SELECT v_{0} FROM mytable WHERE k=0".format(i))[0] + self.assertEqual(udt, result["v_{0}".format(i)]) def test_can_insert_nested_registered_udts_with_different_namedtuples(self): """ @@ -460,31 +457,28 @@ def test_can_insert_nested_registered_udts_with_different_namedtuples(self): created namedtuples are use names that are different the cql type. """ - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect(self.keyspace_name, wait_for_all_pools=True) - s.row_factory = dict_factory + with self._cluster_default_dict_factory() as c: + s = c.connect(self.keyspace_name, wait_for_all_pools=True) - MAX_NESTING_DEPTH = 16 + max_nesting_depth = 16 - # create the schema - self.nested_udt_schema_helper(s, MAX_NESTING_DEPTH) + # create the schema + self.nested_udt_schema_helper(s, max_nesting_depth) - # create and register the seed udt type - udts = [] - udt = namedtuple('level_0', ('age', 'name')) - udts.append(udt) - c.register_user_type(self.keyspace_name, "depth_0", udts[0]) - - # create and register the nested udt types - for i in range(MAX_NESTING_DEPTH): - udt = namedtuple('level_{0}'.format(i + 1), ('value')) + # create and register the seed udt type + udts = [] + udt = namedtuple('level_0', ('age', 'name')) udts.append(udt) - c.register_user_type(self.keyspace_name, "depth_{0}".format(i + 1), udts[i + 1]) + c.register_user_type(self.keyspace_name, "depth_0", udts[0]) - # insert udts and verify inserts with reads - self.nested_udt_verification_helper(s, MAX_NESTING_DEPTH, udts) + # create and register the nested udt types + for i in range(max_nesting_depth): + udt = namedtuple('level_{0}'.format(i + 1), ('value')) + udts.append(udt) + c.register_user_type(self.keyspace_name, "depth_{0}".format(i + 1), udts[i + 1]) - c.shutdown() + # insert udts and verify inserts with reads + self.nested_udt_verification_helper(s, max_nesting_depth, udts) def test_raise_error_on_nonexisting_udts(self): """ @@ -694,6 +688,9 @@ def test_non_alphanum_identifiers(self): @lessthancass30 def test_type_alteration(self): + """ + Support for ALTER TYPE was removed in CASSANDRA-12443 + """ s = self.session type_name = "type_name" self.assertNotIn(type_name, s.cluster.metadata.keyspaces['udttests'].user_types) @@ -757,4 +754,3 @@ def test_alter_udt(self): for result in results: self.assertTrue(hasattr(result.typetoalter, 'a')) self.assertTrue(hasattr(result.typetoalter, 'b')) - From 5d5d3578cd5fa0cfbced56933e86b6b342e60538 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 6 Sep 2019 08:48:18 -0500 Subject: [PATCH 1579/2005] adding util.py --- cassandra/util.py | 688 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 683 insertions(+), 5 deletions(-) diff --git a/cassandra/util.py b/cassandra/util.py index 2b9e9161a3..76cfc2397b 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -16,12 +16,21 @@ import calendar import datetime from functools import total_ordering +import logging +from geomet import wkt +from itertools import chain import random +import re import six import uuid import sys DATETIME_EPOC = datetime.datetime(1970, 1, 1) +UTC_DATETIME_EPOC = datetime.datetime.utcfromtimestamp(0) + +_nan = float('nan') + +log = logging.getLogger(__name__) assert sys.byteorder in ('little', 'big') is_little_endian = sys.byteorder == 'little' @@ -38,6 +47,28 @@ def datetime_from_timestamp(timestamp): return dt +def utc_datetime_from_ms_timestamp(timestamp): + """ + Creates a UTC datetime from a timestamp in milliseconds. See + :meth:`datetime_from_timestamp`. + + Raises an `OverflowError` if the timestamp is out of range for + :class:`~datetime.datetime`. + + :param timestamp: timestamp, in milliseconds + """ + return UTC_DATETIME_EPOC + datetime.timedelta(milliseconds=timestamp) + + +def ms_timestamp_from_datetime(dt): + """ + Converts a datetime to a timestamp expressed in milliseconds. + + :param dt: a :class:`datetime.datetime` + """ + return int(round((dt - UTC_DATETIME_EPOC).total_seconds() * 1000)) + + def unix_time_from_uuid1(uuid_arg): """ Converts a version 1 :class:`uuid.UUID` to a timestamp with the same precision @@ -136,6 +167,42 @@ def uuid_from_time(time_arg, node=None, clock_seq=None): """ The highest possible TimeUUID, as sorted by Cassandra. """ +def _addrinfo_or_none(contact_point, port): + """ + A helper function that wraps socket.getaddrinfo and returns None + when it fails to, e.g. resolve one of the hostnames. Used to address + PYTHON-895. + """ + try: + value = socket.getaddrinfo(contact_point, port, + socket.AF_UNSPEC, socket.SOCK_STREAM) + return value + except socket.gaierror: + log.debug('Could not resolve hostname "{}" ' + 'with port {}'.format(contact_point, port)) + return None + + +def _addrinfo_to_ip_strings(addrinfo): + """ + Helper function that consumes the data output by socket.getaddrinfo and + extracts the IP address from the sockaddr portion of the result. + + Since this is meant to be used in conjunction with _addrinfo_or_none, + this will pass None and EndPont instances through unaffected. + """ + if addrinfo is None: + return None + return [entry[4][0] for entry in addrinfo] + + +def _resolve_contact_points_to_string_map(contact_points, port): + return OrderedDict( + (cp, _addrinfo_to_ip_strings(_addrinfo_or_none(cp, port))) + for cp in contact_points + ) + + try: from collections import OrderedDict except ImportError: @@ -742,11 +809,6 @@ class OrderedMap(Mapping): This class derives from the (immutable) Mapping API. Objects in these maps are not intended be modified. - - Note: Because of the way Cassandra encodes nested types, when using the - driver with nested collections, :attr:`~.Cluster.protocol_version` must be 3 - or higher. - ''' def __init__(self, *args, **kwargs): @@ -1197,6 +1259,264 @@ def _sanitize_identifiers(field_names): return names_out +def list_contents_to_tuple(to_convert): + if isinstance(to_convert, list): + for n, i in enumerate(to_convert): + if isinstance(to_convert[n], list): + to_convert[n] = tuple(to_convert[n]) + return tuple(to_convert) + else: + return to_convert + + +class Point(object): + """ + Represents a point geometry for DSE + """ + + x = None + """ + x coordinate of the point + """ + + y = None + """ + y coordinate of the point + """ + + def __init__(self, x=_nan, y=_nan): + self.x = x + self.y = y + + def __eq__(self, other): + return isinstance(other, Point) and self.x == other.x and self.y == other.y + + def __hash__(self): + return hash((self.x, self.y)) + + def __str__(self): + """ + Well-known text representation of the point + """ + return "POINT (%r %r)" % (self.x, self.y) + + def __repr__(self): + return "%s(%r, %r)" % (self.__class__.__name__, self.x, self.y) + + @staticmethod + def from_wkt(s): + """ + Parse a Point geometry from a wkt string and return a new Point object. + """ + try: + geom = wkt.loads(s) + except ValueError: + raise ValueError("Invalid WKT geometry: '{0}'".format(s)) + + if geom['type'] != 'Point': + raise ValueError("Invalid WKT geometry type. Expected 'Point', got '{0}': '{1}'".format(geom['type'], s)) + + coords = geom['coordinates'] + if len(coords) < 2: + x = y = _nan + else: + x = coords[0] + y = coords[1] + + return Point(x=x, y=y) + + +class LineString(object): + """ + Represents a linestring geometry for DSE + """ + + coords = None + """ + Tuple of (x, y) coordinates in the linestring + """ + def __init__(self, coords=tuple()): + """ + 'coords`: a sequence of (x, y) coordinates of points in the linestring + """ + self.coords = tuple(coords) + + def __eq__(self, other): + return isinstance(other, LineString) and self.coords == other.coords + + def __hash__(self): + return hash(self.coords) + + def __str__(self): + """ + Well-known text representation of the LineString + """ + if not self.coords: + return "LINESTRING EMPTY" + return "LINESTRING (%s)" % ', '.join("%r %r" % (x, y) for x, y in self.coords) + + def __repr__(self): + return "%s(%r)" % (self.__class__.__name__, self.coords) + + @staticmethod + def from_wkt(s): + """ + Parse a LineString geometry from a wkt string and return a new LineString object. + """ + try: + geom = wkt.loads(s) + except ValueError: + raise ValueError("Invalid WKT geometry: '{0}'".format(s)) + + if geom['type'] != 'LineString': + raise ValueError("Invalid WKT geometry type. Expected 'LineString', got '{0}': '{1}'".format(geom['type'], s)) + + geom['coordinates'] = list_contents_to_tuple(geom['coordinates']) + + return LineString(coords=geom['coordinates']) + + +class _LinearRing(object): + # no validation, no implicit closing; just used for poly composition, to + # mimic that of shapely.geometry.Polygon + def __init__(self, coords=tuple()): + self.coords = list_contents_to_tuple(coords) + + def __eq__(self, other): + return isinstance(other, _LinearRing) and self.coords == other.coords + + def __hash__(self): + return hash(self.coords) + + def __str__(self): + if not self.coords: + return "LINEARRING EMPTY" + return "LINEARRING (%s)" % ', '.join("%r %r" % (x, y) for x, y in self.coords) + + def __repr__(self): + return "%s(%r)" % (self.__class__.__name__, self.coords) + + +class Polygon(object): + """ + Represents a polygon geometry for DSE + """ + + exterior = None + """ + _LinearRing representing the exterior of the polygon + """ + + interiors = None + """ + Tuple of _LinearRings representing interior holes in the polygon + """ + + def __init__(self, exterior=tuple(), interiors=None): + """ + 'exterior`: a sequence of (x, y) coordinates of points in the linestring + `interiors`: None, or a sequence of sequences or (x, y) coordinates of points describing interior linear rings + """ + self.exterior = _LinearRing(exterior) + self.interiors = tuple(_LinearRing(e) for e in interiors) if interiors else tuple() + + def __eq__(self, other): + return isinstance(other, Polygon) and self.exterior == other.exterior and self.interiors == other.interiors + + def __hash__(self): + return hash((self.exterior, self.interiors)) + + def __str__(self): + """ + Well-known text representation of the polygon + """ + if not self.exterior.coords: + return "POLYGON EMPTY" + rings = [ring.coords for ring in chain((self.exterior,), self.interiors)] + rings = ["(%s)" % ', '.join("%r %r" % (x, y) for x, y in ring) for ring in rings] + return "POLYGON (%s)" % ', '.join(rings) + + def __repr__(self): + return "%s(%r, %r)" % (self.__class__.__name__, self.exterior.coords, [ring.coords for ring in self.interiors]) + + @staticmethod + def from_wkt(s): + """ + Parse a Polygon geometry from a wkt string and return a new Polygon object. + """ + try: + geom = wkt.loads(s) + except ValueError: + raise ValueError("Invalid WKT geometry: '{0}'".format(s)) + + if geom['type'] != 'Polygon': + raise ValueError("Invalid WKT geometry type. Expected 'Polygon', got '{0}': '{1}'".format(geom['type'], s)) + + coords = geom['coordinates'] + exterior = coords[0] if len(coords) > 0 else tuple() + interiors = coords[1:] if len(coords) > 1 else None + + return Polygon(exterior=exterior, interiors=interiors) + + +_distance_wkt_pattern = re.compile("distance *\\( *\\( *([\\d\\.-]+) *([\\d+\\.-]+) *\\) *([\\d+\\.-]+) *\\) *$", re.IGNORECASE) + + +class Distance(object): + """ + Represents a Distance geometry for DSE + """ + + x = None + """ + x coordinate of the center point + """ + + y = None + """ + y coordinate of the center point + """ + + radius = None + """ + radius to represent the distance from the center point + """ + + def __init__(self, x=_nan, y=_nan, radius=_nan): + self.x = x + self.y = y + self.radius = radius + + def __eq__(self, other): + return isinstance(other, Distance) and self.x == other.x and self.y == other.y and self.radius == other.radius + + def __hash__(self): + return hash((self.x, self.y, self.radius)) + + def __str__(self): + """ + Well-known text representation of the point + """ + return "DISTANCE ((%r %r) %r)" % (self.x, self.y, self.radius) + + def __repr__(self): + return "%s(%r, %r, %r)" % (self.__class__.__name__, self.x, self.y, self.radius) + + @staticmethod + def from_wkt(s): + """ + Parse a Distance geometry from a wkt string and return a new Distance object. + """ + + distance_match = _distance_wkt_pattern.match(s) + + if distance_match is None: + raise ValueError("Invalid WKT geometry: '{0}'".format(s)) + + x, y, radius = distance_match.groups() + return Distance(x, y, radius) + + class Duration(object): """ Cassandra Duration Type @@ -1227,6 +1547,364 @@ def __str__(self): ) +class DateRangePrecision(object): + """ + An "enum" representing the valid values for :attr:`DateRange.precision`. + """ + YEAR = 'YEAR' + """ + """ + + MONTH = 'MONTH' + """ + """ + + DAY = 'DAY' + """ + """ + + HOUR = 'HOUR' + """ + """ + + MINUTE = 'MINUTE' + """ + """ + + SECOND = 'SECOND' + """ + """ + + MILLISECOND = 'MILLISECOND' + """ + """ + + PRECISIONS = (YEAR, MONTH, DAY, HOUR, + MINUTE, SECOND, MILLISECOND) + """ + """ + + @classmethod + def _to_int(cls, precision): + return cls.PRECISIONS.index(precision.upper()) + + @classmethod + def _round_to_precision(cls, ms, precision, default_dt): + try: + dt = utc_datetime_from_ms_timestamp(ms) + except OverflowError: + return ms + precision_idx = cls._to_int(precision) + replace_kwargs = {} + if precision_idx <= cls._to_int(DateRangePrecision.YEAR): + replace_kwargs['month'] = default_dt.month + if precision_idx <= cls._to_int(DateRangePrecision.MONTH): + replace_kwargs['day'] = default_dt.day + if precision_idx <= cls._to_int(DateRangePrecision.DAY): + replace_kwargs['hour'] = default_dt.hour + if precision_idx <= cls._to_int(DateRangePrecision.HOUR): + replace_kwargs['minute'] = default_dt.minute + if precision_idx <= cls._to_int(DateRangePrecision.MINUTE): + replace_kwargs['second'] = default_dt.second + if precision_idx <= cls._to_int(DateRangePrecision.SECOND): + # truncate to nearest 1000 so we deal in ms, not us + replace_kwargs['microsecond'] = (default_dt.microsecond // 1000) * 1000 + if precision_idx == cls._to_int(DateRangePrecision.MILLISECOND): + replace_kwargs['microsecond'] = int(round(dt.microsecond, -3)) + return ms_timestamp_from_datetime(dt.replace(**replace_kwargs)) + + @classmethod + def round_up_to_precision(cls, ms, precision): + # PYTHON-912: this is the only case in which we can't take as upper bound + # datetime.datetime.max because the month from ms may be February and we'd + # be setting 31 as the month day + if precision == cls.MONTH: + date_ms = utc_datetime_from_ms_timestamp(ms) + upper_date = datetime.datetime.max.replace(year=date_ms.year, month=date_ms.month, + day=calendar.monthrange(date_ms.year, date_ms.month)[1]) + else: + upper_date = datetime.datetime.max + return cls._round_to_precision(ms, precision, upper_date) + + @classmethod + def round_down_to_precision(cls, ms, precision): + return cls._round_to_precision(ms, precision, datetime.datetime.min) + + +@total_ordering +class DateRangeBound(object): + """DateRangeBound(value, precision) + Represents a single date value and its precision for :class:`DateRange`. + + .. attribute:: milliseconds + + Integer representing milliseconds since the UNIX epoch. May be negative. + + .. attribute:: precision + + String representing the precision of a bound. Must be a valid + :class:`DateRangePrecision` member. + + :class:`DateRangeBound` uses a millisecond offset from the UNIX epoch to + allow :class:`DateRange` to represent values `datetime.datetime` cannot. + For such values, string representions will show this offset rather than the + CQL representation. + """ + milliseconds = None + precision = None + + def __init__(self, value, precision): + """ + :param value: a value representing ms since the epoch. Accepts an + integer or a datetime. + :param precision: a string representing precision + """ + if precision is not None: + try: + self.precision = precision.upper() + except AttributeError: + raise TypeError('precision must be a string; got %r' % precision) + + if value is None: + milliseconds = None + elif isinstance(value, six.integer_types): + milliseconds = value + elif isinstance(value, datetime.datetime): + value = value.replace( + microsecond=int(round(value.microsecond, -3)) + ) + milliseconds = ms_timestamp_from_datetime(value) + else: + raise ValueError('%r is not a valid value for DateRangeBound' % value) + + self.milliseconds = milliseconds + self.validate() + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return (self.milliseconds == other.milliseconds and + self.precision == other.precision) + + def __lt__(self, other): + return ((str(self.milliseconds), str(self.precision)) < + (str(other.milliseconds), str(other.precision))) + + def datetime(self): + """ + Return :attr:`milliseconds` as a :class:`datetime.datetime` if possible. + Raises an `OverflowError` if the value is out of range. + """ + return utc_datetime_from_ms_timestamp(self.milliseconds) + + def validate(self): + attrs = self.milliseconds, self.precision + if attrs == (None, None): + return + if None in attrs: + raise TypeError( + ("%s.datetime and %s.precision must not be None unless both " + "are None; Got: %r") % (self.__class__.__name__, + self.__class__.__name__, + self) + ) + if self.precision not in DateRangePrecision.PRECISIONS: + raise ValueError( + "%s.precision: expected value in %r; got %r" % ( + self.__class__.__name__, + DateRangePrecision.PRECISIONS, + self.precision + ) + ) + + @classmethod + def from_value(cls, value): + """ + Construct a new :class:`DateRangeBound` from a given value. If + possible, use the `value['milliseconds']` and `value['precision']` keys + of the argument. Otherwise, use the argument as a `(milliseconds, + precision)` iterable. + + :param value: a dictlike or iterable object + """ + if isinstance(value, cls): + return value + + # if possible, use as a mapping + try: + milliseconds, precision = value.get('milliseconds'), value.get('precision') + except AttributeError: + milliseconds = precision = None + if milliseconds is not None and precision is not None: + return DateRangeBound(value=milliseconds, precision=precision) + + # otherwise, use as an iterable + return DateRangeBound(*value) + + def round_up(self): + if self.milliseconds is None or self.precision is None: + return self + self.milliseconds = DateRangePrecision.round_up_to_precision( + self.milliseconds, self.precision + ) + return self + + def round_down(self): + if self.milliseconds is None or self.precision is None: + return self + self.milliseconds = DateRangePrecision.round_down_to_precision( + self.milliseconds, self.precision + ) + return self + + _formatter_map = { + DateRangePrecision.YEAR: '%Y', + DateRangePrecision.MONTH: '%Y-%m', + DateRangePrecision.DAY: '%Y-%m-%d', + DateRangePrecision.HOUR: '%Y-%m-%dT%HZ', + DateRangePrecision.MINUTE: '%Y-%m-%dT%H:%MZ', + DateRangePrecision.SECOND: '%Y-%m-%dT%H:%M:%SZ', + DateRangePrecision.MILLISECOND: '%Y-%m-%dT%H:%M:%S', + } + + def __str__(self): + if self == OPEN_BOUND: + return '*' + + try: + dt = self.datetime() + except OverflowError: + return '%sms' % (self.milliseconds,) + + formatted = dt.strftime(self._formatter_map[self.precision]) + + if self.precision == DateRangePrecision.MILLISECOND: + # we'd like to just format with '%Y-%m-%dT%H:%M:%S.%fZ', but %f + # gives us more precision than we want, so we strftime up to %S and + # do the rest ourselves + return '%s.%03dZ' % (formatted, dt.microsecond / 1000) + + return formatted + + def __repr__(self): + return '%s(milliseconds=%r, precision=%r)' % ( + self.__class__.__name__, self.milliseconds, self.precision + ) + + +OPEN_BOUND = DateRangeBound(value=None, precision=None) +""" +Represents `*`, an open value or bound for :class:`DateRange`. +""" + + +@total_ordering +class DateRange(object): + """DateRange(lower_bound=None, upper_bound=None, value=None) + DSE DateRange Type + + .. attribute:: lower_bound + + :class:`~DateRangeBound` representing the lower bound of a bounded range. + + .. attribute:: upper_bound + + :class:`~DateRangeBound` representing the upper bound of a bounded range. + + .. attribute:: value + + :class:`~DateRangeBound` representing the value of a single-value range. + + As noted in its documentation, :class:`DateRangeBound` uses a millisecond + offset from the UNIX epoch to allow :class:`DateRange` to represent values + `datetime.datetime` cannot. For such values, string representions will show + this offset rather than the CQL representation. + """ + lower_bound = None + upper_bound = None + value = None + + def __init__(self, lower_bound=None, upper_bound=None, value=None): + """ + :param lower_bound: a :class:`DateRangeBound` or object accepted by + :meth:`DateRangeBound.from_value` to be used as a + :attr:`lower_bound`. Mutually exclusive with `value`. If + `upper_bound` is specified and this is not, the :attr:`lower_bound` + will be open. + :param upper_bound: a :class:`DateRangeBound` or object accepted by + :meth:`DateRangeBound.from_value` to be used as a + :attr:`upper_bound`. Mutually exclusive with `value`. If + `lower_bound` is specified and this is not, the :attr:`upper_bound` + will be open. + :param value: a :class:`DateRangeBound` or object accepted by + :meth:`DateRangeBound.from_value` to be used as :attr:`value`. Mutually + exclusive with `lower_bound` and `lower_bound`. + """ + + # if necessary, transform non-None args to DateRangeBounds + lower_bound = (DateRangeBound.from_value(lower_bound).round_down() + if lower_bound else lower_bound) + upper_bound = (DateRangeBound.from_value(upper_bound).round_up() + if upper_bound else upper_bound) + value = (DateRangeBound.from_value(value).round_down() + if value else value) + + # if we're using a 2-ended range but one bound isn't specified, specify + # an open bound + if lower_bound is None and upper_bound is not None: + lower_bound = OPEN_BOUND + if upper_bound is None and lower_bound is not None: + upper_bound = OPEN_BOUND + + self.lower_bound, self.upper_bound, self.value = ( + lower_bound, upper_bound, value + ) + self.validate() + + def validate(self): + if self.value is None: + if self.lower_bound is None or self.upper_bound is None: + raise ValueError( + '%s instances where value attribute is None must set ' + 'lower_bound or upper_bound; got %r' % ( + self.__class__.__name__, + self + ) + ) + else: # self.value is not None + if self.lower_bound is not None or self.upper_bound is not None: + raise ValueError( + '%s instances where value attribute is not None must not ' + 'set lower_bound or upper_bound; got %r' % ( + self.__class__.__name__, + self + ) + ) + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return (self.lower_bound == other.lower_bound and + self.upper_bound == other.upper_bound and + self.value == other.value) + + def __lt__(self, other): + return ((str(self.lower_bound), str(self.upper_bound), str(self.value)) < + (str(other.lower_bound), str(other.upper_bound), str(other.value))) + + def __str__(self): + if self.value: + return str(self.value) + else: + return '[%s TO %s]' % (self.lower_bound, self.upper_bound) + + def __repr__(self): + return '%s(lower_bound=%r, upper_bound=%r, value=%r)' % ( + self.__class__.__name__, + self.lower_bound, self.upper_bound, self.value + ) + + @total_ordering class Version(object): """ From f7be55dba6b5e40a0b10a0f65c09f6d9e8c2c819 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 6 Sep 2019 10:20:40 -0500 Subject: [PATCH 1580/2005] cqltypes.py and marshal.py --- cassandra/cqltypes.py | 274 +++++++++++++++++++++++++++++++++++++++++- cassandra/marshal.py | 7 ++ 2 files changed, 276 insertions(+), 5 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 55bb022e8c..a6c78c55e1 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -34,26 +34,31 @@ from collections import namedtuple from decimal import Decimal import io +from itertools import chain import logging import re import socket import time import six from six.moves import range +import struct import sys from uuid import UUID -import warnings - -if six.PY3: - import ipaddress from cassandra.marshal import (int8_pack, int8_unpack, int16_pack, int16_unpack, uint16_pack, uint16_unpack, uint32_pack, uint32_unpack, int32_pack, int32_unpack, int64_pack, int64_unpack, float_pack, float_unpack, double_pack, double_unpack, - varint_pack, varint_unpack, vints_pack, vints_unpack) + varint_pack, varint_unpack, point_be, point_le, + vints_pack, vints_unpack) from cassandra import util +_little_endian_flag = 1 # we always serialize LE +if six.PY3: + import ipaddress + +_ord = ord if six.PY2 else lambda x: x + apache_cassandra_type_prefix = 'org.apache.cassandra.db.marshal.' cassandra_empty_type = 'org.apache.cassandra.db.marshal.EmptyType' @@ -1145,3 +1150,262 @@ def cql_typename(casstypename): 'list' """ return lookup_casstype(casstypename).cql_parameterized_type() + + +class WKBGeometryType(object): + POINT = 1 + LINESTRING = 2 + POLYGON = 3 + + +class PointType(CassandraType): + typename = 'PointType' + + _type = struct.pack('[[]] + type_ = int8_unpack(byts[0:1]) + + if type_ in (BoundKind.to_int(BoundKind.BOTH_OPEN_RANGE), + BoundKind.to_int(BoundKind.SINGLE_DATE_OPEN)): + time0 = precision0 = None + else: + time0 = int64_unpack(byts[1:9]) + precision0 = int8_unpack(byts[9:10]) + + if type_ == BoundKind.to_int(BoundKind.CLOSED_RANGE): + time1 = int64_unpack(byts[10:18]) + precision1 = int8_unpack(byts[18:19]) + else: + time1 = precision1 = None + + if time0 is not None: + date_range_bound0 = util.DateRangeBound( + time0, + cls._decode_precision(precision0) + ) + if time1 is not None: + date_range_bound1 = util.DateRangeBound( + time1, + cls._decode_precision(precision1) + ) + + if type_ == BoundKind.to_int(BoundKind.SINGLE_DATE): + return util.DateRange(value=date_range_bound0) + if type_ == BoundKind.to_int(BoundKind.CLOSED_RANGE): + return util.DateRange(lower_bound=date_range_bound0, + upper_bound=date_range_bound1) + if type_ == BoundKind.to_int(BoundKind.OPEN_RANGE_HIGH): + return util.DateRange(lower_bound=date_range_bound0, + upper_bound=util.OPEN_BOUND) + if type_ == BoundKind.to_int(BoundKind.OPEN_RANGE_LOW): + return util.DateRange(lower_bound=util.OPEN_BOUND, + upper_bound=date_range_bound0) + if type_ == BoundKind.to_int(BoundKind.BOTH_OPEN_RANGE): + return util.DateRange(lower_bound=util.OPEN_BOUND, + upper_bound=util.OPEN_BOUND) + if type_ == BoundKind.to_int(BoundKind.SINGLE_DATE_OPEN): + return util.DateRange(value=util.OPEN_BOUND) + raise ValueError('Could not deserialize %r' % (byts,)) + + @classmethod + def serialize(cls, v, protocol_version): + buf = io.BytesIO() + bound_kind, bounds = None, () + + try: + value = v.value + except AttributeError: + raise ValueError( + '%s.serialize expects an object with a value attribute; got' + '%r' % (cls.__name__, v) + ) + + if value is None: + try: + lower_bound, upper_bound = v.lower_bound, v.upper_bound + except AttributeError: + raise ValueError( + '%s.serialize expects an object with lower_bound and ' + 'upper_bound attributes; got %r' % (cls.__name__, v) + ) + if lower_bound == util.OPEN_BOUND and upper_bound == util.OPEN_BOUND: + bound_kind = BoundKind.BOTH_OPEN_RANGE + elif lower_bound == util.OPEN_BOUND: + bound_kind = BoundKind.OPEN_RANGE_LOW + bounds = (upper_bound,) + elif upper_bound == util.OPEN_BOUND: + bound_kind = BoundKind.OPEN_RANGE_HIGH + bounds = (lower_bound,) + else: + bound_kind = BoundKind.CLOSED_RANGE + bounds = lower_bound, upper_bound + else: # value is not None + if value == util.OPEN_BOUND: + bound_kind = BoundKind.SINGLE_DATE_OPEN + else: + bound_kind = BoundKind.SINGLE_DATE + bounds = (value,) + + if bound_kind is None: + raise ValueError( + 'Cannot serialize %r; could not find bound kind' % (v,) + ) + + buf.write(int8_pack(BoundKind.to_int(bound_kind))) + for bound in bounds: + buf.write(int64_pack(bound.milliseconds)) + buf.write(int8_pack(cls._encode_precision(bound.precision))) + + return buf.getvalue() diff --git a/cassandra/marshal.py b/cassandra/marshal.py index 3b80f34043..7533ebd307 100644 --- a/cassandra/marshal.py +++ b/cassandra/marshal.py @@ -93,6 +93,13 @@ def varint_pack(big): return six.binary_type(revbytes) +point_be = struct.Struct('>dd') +point_le = struct.Struct('ddd') +circle_le = struct.Struct('> 63) From 0a6f4d02502f74588ae7df6b86e88529e7c4c926 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 6 Sep 2019 10:21:38 -0500 Subject: [PATCH 1581/2005] add requiredse decorator for integration tests --- tests/integration/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 729373c3eb..5870191ea8 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -351,6 +351,8 @@ def _id_and_mark(f): "This test is not suitible for environments with large clock granularity") requiressimulacron = unittest.skipIf(SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"), "Simulacron jar hasn't been specified or C* version is 2.0") requirecassandra = unittest.skipIf(DSE_VERSION, "Cassandra required") +requiredse = unittest.skipUnless(DSE_VERSION, "DSE required") + def wait_for_node_socket(node, timeout): binary_itf = node.network_interfaces['binary'] From 7783e594aba924c12584b97e3dda1cb549dc95cf Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 6 Sep 2019 10:22:01 -0500 Subject: [PATCH 1582/2005] tests/integration/standard/test_types.py --- tests/integration/standard/test_types.py | 406 +++++++++++++++++++++-- 1 file changed, 385 insertions(+), 21 deletions(-) diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index b49ee06ca1..bdb7a257cb 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -19,11 +19,13 @@ from datetime import datetime import math +from packaging.version import Version import six import cassandra from cassandra import InvalidRequest -from cassandra.cluster import Cluster +from cassandra import util +from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.concurrent import execute_concurrent_with_args from cassandra.cqltypes import Int32Type, EMPTY from cassandra.query import dict_factory, ordered_dict_factory @@ -31,7 +33,8 @@ from tests.unit.cython.utils import cythontest from tests.integration import use_singledc, PROTOCOL_VERSION, execute_until_pass, notprotocolv1, \ - BasicSharedKeyspaceUnitTestCase, greaterthancass21, lessthancass30, greaterthanorequalcass3_10 + BasicSharedKeyspaceUnitTestCase, greaterthancass21, lessthancass30, greaterthanorequaldse51, \ + DSE_VERSION, greaterthanorequalcass3_10, requiredse from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, COLLECTION_TYPES, PRIMITIVE_DATATYPES_KEYS, \ get_sample, get_all_samples, get_collection_sample @@ -127,7 +130,7 @@ def test_des_bytes_type_array(self): self.assertEqual(expected, actual) finally: if original is not None: - cassandra.deserializers.DesBytesType=original + dse.deserializers.DesBytesType=original def test_can_insert_primitive_datatypes(self): """ @@ -200,9 +203,9 @@ def test_can_insert_primitive_datatypes(self): self.assertEqual(actual, expected) # verify data with with prepared statement, use dictionary with no explicit columns - s.row_factory = ordered_dict_factory select = s.prepare("SELECT * FROM alltypes") - results = s.execute(select)[0] + results = s.execute(select, + execution_profile=s.execution_profile_clone_update(EXEC_PROFILE_DEFAULT, row_factory=ordered_dict_factory))[0] for expected, actual in zip(params, results.values()): self.assertEqual(actual, expected) @@ -278,9 +281,10 @@ def test_can_insert_collection_datatypes(self): self.assertEqual(actual, expected) # verify data with with prepared statement, use dictionary with no explicit columns - s.row_factory = ordered_dict_factory select = s.prepare("SELECT * FROM allcoltypes") - results = s.execute(select)[0] + results = s.execute(select, + execution_profile=s.execution_profile_clone_update(EXEC_PROFILE_DEFAULT, + row_factory=ordered_dict_factory))[0] for expected, actual in zip(params, results.values()): self.assertEqual(actual, expected) @@ -497,12 +501,11 @@ def test_can_insert_tuples_with_varying_lengths(self): if self.cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - c = Cluster(protocol_version=PROTOCOL_VERSION) + c = Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=dict_factory)}) s = c.connect(self.keyspace_name) - # set the row_factory to dict_factory for programmatic access # set the encoder for tuples for the ability to write tuples - s.row_factory = dict_factory s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple # programmatically create the table with tuples of said sizes @@ -564,12 +567,11 @@ def test_can_insert_tuples_all_collection_datatypes(self): if self.cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - c = Cluster(protocol_version=PROTOCOL_VERSION) + c = Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=dict_factory)}) s = c.connect(self.keyspace_name) - # set the row_factory to dict_factory for programmatic access # set the encoder for tuples for the ability to write tuples - s.row_factory = dict_factory s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple values = [] @@ -663,12 +665,11 @@ def test_can_insert_nested_tuples(self): if self.cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - c = Cluster(protocol_version=PROTOCOL_VERSION) + c = Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=dict_factory)}) s = c.connect(self.keyspace_name) - # set the row_factory to dict_factory for programmatic access # set the encoder for tuples for the ability to write tuples - s.row_factory = dict_factory s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple # create a table with multiple sizes of nested tuples @@ -865,6 +866,372 @@ def test_smoke_duration_values(self): self.assertRaises(ValueError, self.session.execute, prepared, (1, Duration(int("8FFFFFFFFFFFFFF0", 16), 0, 0))) + +@requiredse +class AbstractDateRangeTest(): + + def test_single_value_daterange_round_trip(self): + self._daterange_round_trip( + util.DateRange( + value=util.DateRangeBound( + datetime(2014, 10, 1, 0), + util.DateRangePrecision.YEAR + ) + ), + util.DateRange( + value=util.DateRangeBound( + datetime(2014, 1, 1, 0), + util.DateRangePrecision.YEAR + ) + ) + ) + + def test_open_high_daterange_round_trip(self): + self._daterange_round_trip( + util.DateRange( + lower_bound=util.DateRangeBound( + datetime(2013, 10, 1, 6, 20, 39), + util.DateRangePrecision.SECOND + ) + ) + ) + + def test_open_low_daterange_round_trip(self): + self._daterange_round_trip( + util.DateRange( + upper_bound=util.DateRangeBound( + datetime(2013, 10, 28), + util.DateRangePrecision.DAY + ) + ) + ) + + def test_open_both_daterange_round_trip(self): + self._daterange_round_trip( + util.DateRange( + lower_bound=util.OPEN_BOUND, + upper_bound=util.OPEN_BOUND, + ) + ) + + def test_closed_daterange_round_trip(self): + insert = util.DateRange( + lower_bound=util.DateRangeBound( + datetime(2015, 3, 1, 10, 15, 30, 1000), + util.DateRangePrecision.MILLISECOND + ), + upper_bound=util.DateRangeBound( + datetime(2016, 1, 1, 10, 15, 30, 999000), + util.DateRangePrecision.MILLISECOND + ) + ) + self._daterange_round_trip(insert) + + def test_epoch_value_round_trip(self): + insert = util.DateRange( + value=util.DateRangeBound( + datetime(1970, 1, 1), + util.DateRangePrecision.YEAR + ) + ) + self._daterange_round_trip(insert) + + def test_double_bounded_daterange_round_trip_from_string(self): + self._daterange_round_trip( + '[2015-03-01T10:15:30.010Z TO 2016-01-01T10:15:30.999Z]', + util.DateRange( + lower_bound=util.DateRangeBound( + datetime(2015, 3, 1, 10, 15, 30, 10000), + util.DateRangePrecision.MILLISECOND + ), + upper_bound=util.DateRangeBound( + datetime(2016, 1, 1, 10, 15, 30, 999000), + util.DateRangePrecision.MILLISECOND + ), + ) + ) + + def test_open_high_daterange_round_trip_from_string(self): + self._daterange_round_trip( + '[2015-03 TO *]', + util.DateRange( + lower_bound=util.DateRangeBound( + datetime(2015, 3, 1, 0, 0), + util.DateRangePrecision.MONTH + ), + upper_bound=util.DateRangeBound(None, None) + ) + ) + + def test_open_low_daterange_round_trip_from_string(self): + self._daterange_round_trip( + '[* TO 2015-03]', + util.DateRange( + lower_bound=util.DateRangeBound(None, None), + upper_bound=util.DateRangeBound( + datetime(2015, 3, 1, 0, 0), + 'MONTH' + ) + ) + ) + + def test_no_bounds_daterange_round_trip_from_string(self): + self._daterange_round_trip( + '[* TO *]', + util.DateRange( + lower_bound=(None, None), + upper_bound=(None, None) + ) + ) + + def test_single_no_bounds_daterange_round_trip_from_string(self): + self._daterange_round_trip( + '*', + util.DateRange( + value=(None, None) + ) + ) + + def test_single_value_daterange_round_trip_from_string(self): + self._daterange_round_trip( + '2001-01-01T12:30:30.000Z', + util.DateRange( + value=util.DateRangeBound( + datetime(2001, 1, 1, 12, 30, 30), + 'MILLISECOND' + ) + ) + ) + + def test_daterange_with_negative_bound_round_trip_from_string(self): + self._daterange_round_trip( + '[-1991-01-01T00:00:00.001 TO 1990-02-03]', + util.DateRange( + lower_bound=(-124997039999999, 'MILLISECOND'), + upper_bound=util.DateRangeBound( + datetime(1990, 2, 3, 12, 30, 30), + 'DAY' + ) + ) + ) + + def test_epoch_value_round_trip_from_string(self): + self._daterange_round_trip( + '1970', + util.DateRange( + value=util.DateRangeBound( + datetime(1970, 1, 1), + util.DateRangePrecision.YEAR + ) + ) + ) + + +@greaterthanorequaldse51 +class TestDateRangePrepared(AbstractDateRangeTest, BasicSharedKeyspaceUnitTestCase): + """ + Tests various inserts and queries using Date-ranges and prepared queries + + @since 2.0.0 + @jira_ticket PYTHON-668 + @expected_result Date ranges will be inserted and retrieved succesfully + + @test_category data_types + """ + + @classmethod + def setUpClass(cls): + super(TestDateRangePrepared, cls).setUpClass() + cls.session.set_keyspace(cls.ks_name) + if DSE_VERSION and DSE_VERSION >= Version('5.1'): + cls.session.execute("CREATE TABLE tab (dr 'DateRangeType' PRIMARY KEY)") + + + def _daterange_round_trip(self, to_insert, expected=None): + if isinstance(to_insert, util.DateRange): + prep = self.session.prepare("INSERT INTO tab (dr) VALUES (?);") + self.session.execute(prep, (to_insert,)) + prep_sel = self.session.prepare("SELECT * FROM tab WHERE dr = ? ") + results = self.session.execute(prep_sel, (to_insert,)) + else: + prep = self.session.prepare("INSERT INTO tab (dr) VALUES ('%s');" % (to_insert,)) + self.session.execute(prep) + prep_sel = self.session.prepare("SELECT * FROM tab WHERE dr = '%s' " % (to_insert,)) + results = self.session.execute(prep_sel) + + dr = results[0].dr + # sometimes this is truncated in the assertEquals output on failure; + if isinstance(expected, six.string_types): + self.assertEqual(str(dr), expected) + else: + self.assertEqual(dr, expected or to_insert) + + # This can only be run as a prepared statement + def test_daterange_wide(self): + self._daterange_round_trip( + util.DateRange( + lower_bound=(-9223372036854775808, 'MILLISECOND'), + upper_bound=(9223372036854775807, 'MILLISECOND') + ), + '[-9223372036854775808ms TO 9223372036854775807ms]' + ) + # This can only be run as a prepared statement + def test_daterange_with_negative_bound_round_trip_to_string(self): + self._daterange_round_trip( + util.DateRange( + lower_bound=(-124997039999999, 'MILLISECOND'), + upper_bound=util.DateRangeBound( + datetime(1990, 2, 3, 12, 30, 30), + 'DAY' + ) + ), + '[-124997039999999ms TO 1990-02-03]' + ) + +@greaterthanorequaldse51 +class TestDateRangeSimple(AbstractDateRangeTest, BasicSharedKeyspaceUnitTestCase): + """ + Tests various inserts and queries using Date-ranges and simple queries + + @since 2.0.0 + @jira_ticket PYTHON-668 + @expected_result DateRanges will be inserted and retrieved successfully + @test_category data_types + """ + @classmethod + def setUpClass(cls): + super(TestDateRangeSimple, cls).setUpClass() + cls.session.set_keyspace(cls.ks_name) + if DSE_VERSION and DSE_VERSION >= Version('5.1'): + cls.session.execute("CREATE TABLE tab (dr 'DateRangeType' PRIMARY KEY)") + + + def _daterange_round_trip(self, to_insert, expected=None): + + query = "INSERT INTO tab (dr) VALUES ('{0}');".format(to_insert) + self.session.execute("INSERT INTO tab (dr) VALUES ('{0}');".format(to_insert)) + query = "SELECT * FROM tab WHERE dr = '{0}' ".format(to_insert) + results= self.session.execute("SELECT * FROM tab WHERE dr = '{0}' ".format(to_insert)) + + dr = results[0].dr + # sometimes this is truncated in the assertEquals output on failure; + if isinstance(expected, six.string_types): + self.assertEqual(str(dr), expected) + else: + self.assertEqual(dr, expected or to_insert) + + +@greaterthanorequaldse51 +class TestDateRangeCollection(BasicSharedKeyspaceUnitTestCase): + + + @classmethod + def setUpClass(cls): + super(TestDateRangeCollection, cls).setUpClass() + cls.session.set_keyspace(cls.ks_name) + + def test_date_range_collection(self): + """ + Tests DateRange type in collections + + @since 2.0.0 + @jira_ticket PYTHON-668 + @expected_result DateRanges will be inserted and retrieved successfully when part of a list or map + @test_category data_types + """ + self.session.execute("CREATE TABLE dateRangeIntegrationTest5 (k int PRIMARY KEY, l list<'DateRangeType'>, s set<'DateRangeType'>, dr2i map<'DateRangeType', int>, i2dr map)") + self.session.execute("INSERT INTO dateRangeIntegrationTest5 (k, l, s, i2dr, dr2i) VALUES (" + + "1, " + + "['[2000-01-01T10:15:30.001Z TO 2020]', '[2010-01-01T10:15:30.001Z TO 2020]', '2001-01-02'], " + + "{'[2000-01-01T10:15:30.001Z TO 2020]', '[2000-01-01T10:15:30.001Z TO 2020]', '[2010-01-01T10:15:30.001Z TO 2020]'}, " + + "{1: '[2000-01-01T10:15:30.001Z TO 2020]', 2: '[2010-01-01T10:15:30.001Z TO 2020]'}, " + + "{'[2000-01-01T10:15:30.001Z TO 2020]': 1, '[2010-01-01T10:15:30.001Z TO 2020]': 2})") + results = list(self.session.execute("SELECT * FROM dateRangeIntegrationTest5")) + self.assertEqual(len(results),1) + + lower_bound_1 = util.DateRangeBound(datetime(2000, 1, 1, 10, 15, 30, 1000), 'MILLISECOND') + + lower_bound_2 = util.DateRangeBound(datetime(2010, 1, 1, 10, 15, 30, 1000), 'MILLISECOND') + + upper_bound_1 = util.DateRangeBound(datetime(2020, 1, 1), 'YEAR') + + value_1 = util.DateRangeBound(datetime(2001, 1, 2), 'DAY') + + dt = util.DateRange(lower_bound=lower_bound_1, upper_bound=upper_bound_1) + dt2 = util.DateRange(lower_bound=lower_bound_2, upper_bound=upper_bound_1) + dt3 = util.DateRange(value=value_1) + + + + list_result = results[0].l + self.assertEqual(3, len(list_result)) + self.assertEqual(list_result[0],dt) + self.assertEqual(list_result[1],dt2) + self.assertEqual(list_result[2],dt3) + + set_result = results[0].s + self.assertEqual(len(set_result), 2) + self.assertIn(dt, set_result) + self.assertIn(dt2, set_result) + + d2i = results[0].dr2i + self.assertEqual(len(d2i), 2) + self.assertEqual(d2i[dt],1) + self.assertEqual(d2i[dt2],2) + + i2r = results[0].i2dr + self.assertEqual(len(i2r), 2) + self.assertEqual(i2r[1],dt) + self.assertEqual(i2r[2],dt2) + + def test_allow_date_range_in_udt_tuple(self): + """ + Tests DateRanges in tuples and udts + + @since 2.0.0 + @jira_ticket PYTHON-668 + @expected_result DateRanges will be inserted and retrieved successfully in udt's and tuples + @test_category data_types + """ + self.session.execute("CREATE TYPE IF NOT EXISTS test_udt (i int, range 'DateRangeType')") + self.session.execute("CREATE TABLE dateRangeIntegrationTest4 (k int PRIMARY KEY, u test_udt, uf frozen, t tuple<'DateRangeType', int>, tf frozen>)") + self.session.execute("INSERT INTO dateRangeIntegrationTest4 (k, u, uf, t, tf) VALUES (" + + "1, " + + "{i: 10, range: '[2000-01-01T10:15:30.003Z TO 2020-01-01T10:15:30.001Z]'}, " + + "{i: 20, range: '[2000-01-01T10:15:30.003Z TO 2020-01-01T10:15:30.001Z]'}, " + + "('[2000-01-01T10:15:30.003Z TO 2020-01-01T10:15:30.001Z]', 30), " + + "('[2000-01-01T10:15:30.003Z TO 2020-01-01T10:15:30.001Z]', 40))") + + lower_bound = util.DateRangeBound( + datetime(2000, 1, 1, 10, 15, 30, 3000), + 'MILLISECOND') + + upper_bound = util.DateRangeBound( + datetime(2020, 1, 1, 10, 15, 30, 1000), + 'MILLISECOND') + + expected_dt = util.DateRange(lower_bound=lower_bound ,upper_bound=upper_bound) + + results_list = list(self.session.execute("SELECT * FROM dateRangeIntegrationTest4")) + self.assertEqual(len(results_list), 1) + udt = results_list[0].u + self.assertEqual(udt.range, expected_dt) + self.assertEqual(udt.i, 10) + + + uf = results_list[0].uf + self.assertEqual(uf.range, expected_dt) + self.assertEqual(uf.i, 20) + + t = results_list[0].t + self.assertEqual(t[0], expected_dt) + self.assertEqual(t[1], 30) + + tf = results_list[0].tf + self.assertEqual(tf[0], expected_dt) + self.assertEqual(tf[1], 40) + + class TypeTestsProtocol(BasicSharedKeyspaceUnitTestCase): @greaterthancass21 @@ -903,9 +1270,9 @@ def test_nested_types_with_protocol_version(self): self.session.execute(ddl) - for pvi in range(1, 5): + for pvi in range(3, 5): self.run_inserts_at_version(pvi) - for pvr in range(1, 5): + for pvr in range(3, 5): self.read_inserts_at_level(pvr) def read_inserts_at_level(self, proto_ver): @@ -943,6 +1310,3 @@ def run_inserts_at_version(self, proto_ver): finally: session.cluster.shutdown() - - - From bb8311aefd622d626cdc6e3a550d00dc510f12cf Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 6 Sep 2019 11:19:13 -0500 Subject: [PATCH 1583/2005] temporary change to build.yaml for some initial testing --- build.yaml | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/build.yaml b/build.yaml index afca3c7c0e..81457ec2c8 100644 --- a/build.yaml +++ b/build.yaml @@ -122,6 +122,19 @@ schedules: - python: [3.4, 3.6, 3.7] - cassandra: ['2.0', '2.1', '2.2', '3.0', 'test-dse'] + # Temporary, don't merge me + python_oss_next_tests: + schedule: adhoc + disable_pull_requests: true + branches: + include: ['oss-next'] + env_vars: | + EVENT_LOOP_MANAGER='libev' + EXCLUDE_LONG=1 + matrix: + exclude: + - python: [3.4, 3.6, 3.7] + python: - 2.7 - 3.4 @@ -133,15 +146,20 @@ os: - ubuntu/bionic64/python-driver cassandra: + - '2.0' - '2.1' - '2.2' - '3.0' - '3.11' - - 'test-dse' + - 'dse-4.8' + - 'dse-5.0' + - 'dse-5.1' + - 'dse-6.0' + - 'dse-6.7' env: CYTHON: - - CYTHON +# - CYTHON - NO_CYTHON build: From b02ea9be4ccdce58805b565cf31a56b2b686e1fb Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 6 Sep 2019 15:14:04 -0500 Subject: [PATCH 1584/2005] setup.py and requirements so we can run on jenkins --- requirements.txt | 1 + setup.py | 10 ++++++---- test-requirements.txt | 7 +++++-- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/requirements.txt b/requirements.txt index 25c0d62c73..42bc6d0e9e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,4 @@ +geomet>=0.1,<0.2 six >=1.9 futures <=2.2.0 # Futures is not required for Python 3, but it works up through 2.2.0 (after which it introduced breaking syntax). diff --git a/setup.py b/setup.py index b79774476b..1e1eb0adb3 100644 --- a/setup.py +++ b/setup.py @@ -307,7 +307,7 @@ def _setup_extensions(self): try: from Cython.Build import cythonize cython_candidates = ['cluster', 'concurrent', 'connection', 'cqltypes', 'metadata', - 'pool', 'protocol', 'query', 'util'] + 'pool', 'hosts', 'protocol', 'query', 'util'] compile_args = [] if is_windows else ['-Wno-unused-function'] self.extensions.extend(cythonize( [Extension('cassandra.%s' % m, ['cassandra/%s.py' % m], @@ -403,7 +403,8 @@ def run_setup(extensions): else: sys.stderr.write("Bypassing Cython setup requirement\n") - dependencies = ['six >=1.9'] + dependencies = ['six >=1.9', + 'geomet>=0.1,<0.2'] if not PY3: dependencies.append('futures') @@ -416,8 +417,8 @@ def run_setup(extensions): url='http://github.com/datastax/python-driver', author='Tyler Hobbs', author_email='tyler@datastax.com', - packages=['cassandra', 'cassandra.io', 'cassandra.cqlengine'], - keywords='cassandra,cql,orm', + packages=['cassandra', 'cassandra.io', 'cassandra.cqlengine', 'cassandra.graph', 'cassandra.insights'], + keywords='cassandra,cql,orm,dse,graph', include_package_data=True, install_requires=dependencies, tests_require=['nose', 'mock>=2.0.0', 'PyYAML', 'pytz', 'sure'], @@ -439,6 +440,7 @@ def run_setup(extensions): ], **kw) + run_setup(None) if has_cqlengine: diff --git a/test-requirements.txt b/test-requirements.txt index e8552d57ea..a2e99663c4 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,8 +1,8 @@ -r requirements.txt scales nose -mock!=1.1.* -ccm>=2.0 +mock>1.1 +ccm>=2.1.2 unittest2 pytz sure @@ -12,5 +12,8 @@ gevent>=1.0 eventlet cython>=0.20,<0.30 packaging +kerberos +requests +backports.ssl_match_hostname; python_version < '2.7.9' futurist; python_version >= '3.7' asynctest; python_version > '3.4' From c852d0e18ed8cfbd2d3184acaea5a9720c42c57e Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 6 Sep 2019 15:34:53 -0500 Subject: [PATCH 1585/2005] stop only running test_dse.py for DSE builds --- build.yaml | 8 -------- tests/integration/standard/test_dse.py | 3 +-- 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/build.yaml b/build.yaml index 81457ec2c8..13ec6cdf39 100644 --- a/build.yaml +++ b/build.yaml @@ -197,14 +197,6 @@ build: exit 0 fi - if [[ $CCM_IS_DSE == 'true' ]]; then - # We only use a DSE version for unreleased DSE versions, so we only need to run the smoke tests here - echo "CCM_IS_DSE: $CCM_IS_DSE" - echo "==========RUNNING SMOKE TESTS===========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION DSE_VERSION='6.7.0' MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/test_dse.py || true - exit 0 - fi - # Run the unit tests, this is not done in travis because # it takes too much time for the whole matrix to build with cython if [[ $CYTHON == 'CYTHON' ]]; then diff --git a/tests/integration/standard/test_dse.py b/tests/integration/standard/test_dse.py index a8a3d647bb..40bcb68495 100644 --- a/tests/integration/standard/test_dse.py +++ b/tests/integration/standard/test_dse.py @@ -58,8 +58,7 @@ def _test_basic(self, dse_version): cluster_name = '{}-{}'.format( self.__class__.__name__, dse_version.base_version.replace('.', '_') ) - use_cluster(cluster_name=cluster_name, nodes=[3], - dse_cluster=True, dse_options={}, dse_version=dse_version) + use_cluster(cluster_name=cluster_name, nodes=[3], dse_options={}) cluster = Cluster( allow_beta_protocol_version=(dse_version >= Version('6.7.0'))) From 6f13f3b321ee146c3e42f07e57843b765925051a Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 6 Sep 2019 16:01:07 -0500 Subject: [PATCH 1586/2005] Add smoke test section back into build.yaml --- build.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/build.yaml b/build.yaml index 13ec6cdf39..39f3e39b37 100644 --- a/build.yaml +++ b/build.yaml @@ -197,6 +197,14 @@ build: exit 0 fi + if [[ $JUST_SMOKE == 'true' ]]; then + # When we ONLY want to run the smoke tests + echo "JUST_SMOKE: $JUST_SMOKE" + echo "==========RUNNING SMOKE TESTS===========" + EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION DSE_VERSION='6.7.0' MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/test_dse.py || true + exit 0 + fi + # Run the unit tests, this is not done in travis because # it takes too much time for the whole matrix to build with cython if [[ $CYTHON == 'CYTHON' ]]; then From 4e3aee3fceb00a1036cf2793d1891669eb26d63a Mon Sep 17 00:00:00 2001 From: James Falcon Date: Sat, 7 Sep 2019 00:31:35 -0500 Subject: [PATCH 1587/2005] Add protocol v1 EXECUTE functionality back --- cassandra/protocol.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 57ed781e2e..1c9db58eee 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -648,6 +648,24 @@ def __init__(self, query_id, query_params, consistency_level, super(ExecuteMessage, self).__init__(query_params, consistency_level, serial_consistency_level, fetch_size, paging_state, timestamp, skip_meta, continuous_paging_options) + def _write_query_params(self, f, protocol_version): + if protocol_version == 1: + if self.serial_consistency_level: + raise UnsupportedOperation( + "Serial consistency levels require the use of protocol version " + "2 or higher. Consider setting Cluster.protocol_version to 2 " + "to support serial consistency levels.") + if self.fetch_size or self.paging_state: + raise UnsupportedOperation( + "Automatic query paging may only be used with protocol version " + "2 or higher. Consider setting Cluster.protocol_version to 2.") + write_short(f, len(self.query_params)) + for param in self.query_params: + write_value(f, param) + write_consistency_level(f, self.consistency_level) + else: + super(ExecuteMessage, self)._write_query_params(f, protocol_version) + def send_body(self, f, protocol_version): write_string(f, self.query_id) if ProtocolVersion.uses_prepared_metadata(protocol_version): From 0f68b7d923b3ce91af4aa8f151e67d837e8faa63 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Sat, 7 Sep 2019 00:32:10 -0500 Subject: [PATCH 1588/2005] Missed a dse to cassandra import --- tests/integration/standard/test_types.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index bdb7a257cb..f0e56879c7 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -130,7 +130,7 @@ def test_des_bytes_type_array(self): self.assertEqual(expected, actual) finally: if original is not None: - dse.deserializers.DesBytesType=original + cassandra.deserializers.DesBytesType=original def test_can_insert_primitive_datatypes(self): """ From 7ca13ff31178ed02517395e0e2467f67cad253ec Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 9 Sep 2019 08:44:42 -0500 Subject: [PATCH 1589/2005] simulacron tests, not advanced --- tests/integration/simulacron/__init__.py | 32 ++++++++- .../integration/simulacron/test_connection.py | 19 +++-- .../simulacron/test_empty_column.py | 21 +++--- tests/integration/simulacron/test_endpoint.py | 7 +- tests/integration/simulacron/test_policies.py | 21 +++--- tests/integration/simulacron/utils.py | 72 ++++++++++++++++--- 6 files changed, 132 insertions(+), 40 deletions(-) diff --git a/tests/integration/simulacron/__init__.py b/tests/integration/simulacron/__init__.py index 665d6b3a15..b53e86289d 100644 --- a/tests/integration/simulacron/__init__.py +++ b/tests/integration/simulacron/__init__.py @@ -16,14 +16,21 @@ except ImportError: import unittest # noqa -from tests.integration.simulacron.utils import stop_simulacron, clear_queries -from tests.integration import PROTOCOL_VERSION, SIMULACRON_JAR, CASSANDRA_VERSION -from tests.integration.simulacron.utils import start_and_prime_singledc +from tests.integration import requiredse, CASSANDRA_VERSION, DSE_VERSION, SIMULACRON_JAR +from tests.integration.simulacron.utils import ( + clear_queries, + start_and_prime_singledc, + stop_simulacron, + start_and_prime_cluster_defaults, +) from cassandra.cluster import Cluster from packaging.version import Version + +PROTOCOL_VERSION = 4 if (DSE_VERSION is None or DSE_VERSION >= Version('5.0')) else 3 + def teardown_package(): stop_simulacron() @@ -56,3 +63,22 @@ def tearDownClass(cls): if cls.cluster: cls.cluster.shutdown() stop_simulacron() + + +@requiredse +class DseSimulacronCluster(SimulacronBase): + + simulacron_cluster = None + cluster, connect = None, True + nodes_per_dc = 1 + + @classmethod + def setUpClass(cls): + if SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"): + return + + cls.simulacron_cluster = start_and_prime_cluster_defaults(dse_version=DSE_VERSION, + nodes_per_dc=cls.nodes_per_dc) + if cls.connect: + cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False) + cls.session = cls.cluster.connect(wait_for_all_pools=True) diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index 4bc52d58de..1d71263fb5 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -25,11 +25,11 @@ from cassandra.cluster import (EXEC_PROFILE_DEFAULT, Cluster, ExecutionProfile, _Scheduler, NoHostAvailable) from cassandra.policies import HostStateListener, RoundRobinPolicy -from cassandra.io.asyncorereactor import AsyncoreConnection + from tests import connection_class, thread_pool_executor_class -from tests.integration import (PROTOCOL_VERSION, requiressimulacron) +from tests.integration import requiressimulacron from tests.integration.util import assert_quiescent_pool_state -from tests.integration.simulacron import SimulacronBase +from tests.integration.simulacron import SimulacronBase, PROTOCOL_VERSION from tests.integration.simulacron.utils import (NO_THEN, PrimeOptions, prime_query, prime_request, start_and_prime_cluster_defaults, @@ -104,6 +104,7 @@ def test_heart_beat_timeout(self): cluster = Cluster(compression=False, idle_heartbeat_interval=idle_heartbeat_interval, idle_heartbeat_timeout=idle_heartbeat_timeout, + protocol_version=PROTOCOL_VERSION, executor_threads=8, execution_profiles={ EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=RoundRobinPolicy())}) @@ -203,7 +204,7 @@ def test_close_when_query(self): "scope": "connection" } - prime_query(query_to_prime, then=then, rows=None, column_types=None) + prime_query(query_to_prime, rows=None, column_types=None, then=then) self.assertRaises(NoHostAvailable, session.execute, query_to_prime) def test_retry_after_defunct(self): @@ -344,7 +345,8 @@ class ExtendedConnection(connection_class): pass cluster = Cluster(contact_points=["127.0.0.2"], - connection_class=ExtendedConnection) + connection_class=ExtendedConnection, + compression=False) cluster.connect() cluster.shutdown() @@ -357,10 +359,13 @@ def test_driver_recovers_nework_isolation(self): listener = TrackDownListener() cluster = Cluster(['127.0.0.1'], - load_balancing_policy=RoundRobinPolicy(), idle_heartbeat_timeout=idle_heartbeat_timeout, idle_heartbeat_interval=idle_heartbeat_interval, - executor_threads=16) + executor_threads=16, + compression=False, + execution_profiles={ + EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=RoundRobinPolicy()) + }) session = cluster.connect(wait_for_all_pools=True) cluster.register_listener(listener) diff --git a/tests/integration/simulacron/test_empty_column.py b/tests/integration/simulacron/test_empty_column.py index e07534f429..5a6ac38b4d 100644 --- a/tests/integration/simulacron/test_empty_column.py +++ b/tests/integration/simulacron/test_empty_column.py @@ -18,7 +18,8 @@ from collections import namedtuple, OrderedDict -from cassandra.cluster import Cluster +from cassandra import ProtocolVersion +from cassandra.cluster import Cluster, EXEC_PROFILE_DEFAULT from cassandra.query import (named_tuple_factory, tuple_factory, dict_factory, ordered_dict_factory) @@ -31,6 +32,10 @@ from tests.integration.simulacron.utils import PrimeQuery, prime_request +PROTOCOL_VERSION = 4 if PROTOCOL_VERSION in \ + (ProtocolVersion.DSE_V1, ProtocolVersion.DSE_V2) else PROTOCOL_VERSION + + @requiressimulacron class EmptyColumnTests(SimulacronCluster): """ @@ -74,29 +79,29 @@ def test_empty_columns_with_all_row_factories(self): query = 'SELECT "", " " FROM testks.testtable' self._prime_testtable_query() - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False) self.session = self.cluster.connect(wait_for_all_pools=True) # Test all row factories - self.session.row_factory = named_tuple_factory + self.cluster.profile_manager.profiles[EXEC_PROFILE_DEFAULT].row_factory = named_tuple_factory self.assertEqual( list(self.session.execute(query)), [namedtuple('Row', ['field_0_', 'field_1_'])('testval', 'testval1')] ) - self.session.row_factory = tuple_factory + self.cluster.profile_manager.profiles[EXEC_PROFILE_DEFAULT].row_factory = tuple_factory self.assertEqual( list(self.session.execute(query)), [('testval', 'testval1')] ) - self.session.row_factory = dict_factory + self.cluster.profile_manager.profiles[EXEC_PROFILE_DEFAULT].row_factory = dict_factory self.assertEqual( list(self.session.execute(query)), [{'': 'testval', ' ': 'testval1'}] ) - self.session.row_factory = ordered_dict_factory + self.cluster.profile_manager.profiles[EXEC_PROFILE_DEFAULT].row_factory = ordered_dict_factory self.assertEqual( list(self.session.execute(query)), [OrderedDict((('', 'testval'), (' ', 'testval1')))] @@ -230,7 +235,7 @@ def test_empty_columns_in_system_schema(self): query = PrimeQuery(query, then=then) prime_request(query) - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False) self.session = self.cluster.connect(wait_for_all_pools=True) table_metadata = self.cluster.metadata.keyspaces['testks'].tables['testtable'] @@ -241,7 +246,7 @@ def test_empty_columns_in_system_schema(self): def test_empty_columns_with_cqlengine(self): self._prime_testtable_query() - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False) self.session = self.cluster.connect(wait_for_all_pools=True) set_session(self.session) diff --git a/tests/integration/simulacron/test_endpoint.py b/tests/integration/simulacron/test_endpoint.py index 1af5995f94..ff4d530a8f 100644 --- a/tests/integration/simulacron/test_endpoint.py +++ b/tests/integration/simulacron/test_endpoint.py @@ -20,8 +20,8 @@ from cassandra.cluster import Cluster from cassandra.connection import DefaultEndPoint, EndPoint, EndPointFactory -from tests.integration import (PROTOCOL_VERSION, requiressimulacron) -from tests.integration.simulacron import SimulacronCluster +from tests.integration import requiressimulacron +from tests.integration.simulacron import SimulacronCluster, PROTOCOL_VERSION @total_ordering @@ -99,7 +99,8 @@ def test_custom_endpoint(self): cluster = Cluster( contact_points=[AddressEndPoint('127.0.0.1')], protocol_version=PROTOCOL_VERSION, - endpoint_factory=AddressEndPointFactory() + endpoint_factory=AddressEndPointFactory(), + compression=False, ) cluster.connect(wait_for_all_pools=True) diff --git a/tests/integration/simulacron/test_policies.py b/tests/integration/simulacron/test_policies.py index d7a6775003..209929dfa1 100644 --- a/tests/integration/simulacron/test_policies.py +++ b/tests/integration/simulacron/test_policies.py @@ -17,12 +17,13 @@ import unittest # noqa from cassandra import OperationTimedOut, WriteTimeout -from cassandra.cluster import Cluster, ExecutionProfile, ResponseFuture +from cassandra.cluster import Cluster, ExecutionProfile, ResponseFuture, EXEC_PROFILE_DEFAULT from cassandra.query import SimpleStatement from cassandra.policies import ConstantSpeculativeExecutionPolicy, RoundRobinPolicy, RetryPolicy, WriteType -from tests.integration import PROTOCOL_VERSION, greaterthancass21, requiressimulacron, SIMULACRON_JAR, \ +from tests.integration import greaterthancass21, requiressimulacron, SIMULACRON_JAR, \ CASSANDRA_VERSION +from tests.integration.simulacron import PROTOCOL_VERSION from tests.integration.simulacron.utils import start_and_prime_singledc, prime_query, \ stop_simulacron, NO_THEN, clear_queries @@ -182,7 +183,7 @@ def test_delay_can_be_0(self): spec = ExecutionProfile(load_balancing_policy=RoundRobinPolicy(), speculative_execution_policy=ConstantSpeculativeExecutionPolicy(0, number_of_requests)) - cluster = Cluster() + cluster = Cluster(compression=False) cluster.add_execution_profile("spec", spec) session = cluster.connect(wait_for_all_pools=True) self.addCleanup(cluster.shutdown) @@ -192,7 +193,6 @@ def test_delay_can_be_0(self): def patch_and_count(f): def patched(*args, **kwargs): next(counter) - print("patched") f(*args, **kwargs) return patched @@ -269,8 +269,13 @@ def tearDown(self): clear_queries() def set_cluster(self, retry_policy): - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False, - default_retry_policy=retry_policy) + self.cluster = Cluster( + protocol_version=PROTOCOL_VERSION, + compression=False, + execution_profiles={ + EXEC_PROFILE_DEFAULT: ExecutionProfile(retry_policy=retry_policy) + }, + ) self.session = self.cluster.connect(wait_for_all_pools=True) self.addCleanup(self.cluster.shutdown) @@ -299,9 +304,9 @@ def test_retry_policy_ignores_and_rethrows(self): "write_type": "SIMPLE", "ignore_on_prepare": True } - prime_query(query_to_prime_simple, then=then, rows=None, column_types=None) + prime_query(query_to_prime_simple, rows=None, column_types=None, then=then) then["write_type"] = "CDC" - prime_query(query_to_prime_cdc, then=then, rows=None, column_types=None) + prime_query(query_to_prime_cdc, rows=None, column_types=None, then=then) with self.assertRaises(WriteTimeout): self.session.execute(query_to_prime_simple) diff --git a/tests/integration/simulacron/utils.py b/tests/integration/simulacron/utils.py index 5ec53831fe..0d474651c2 100644 --- a/tests/integration/simulacron/utils.py +++ b/tests/integration/simulacron/utils.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License -from six.moves.urllib.request import build_opener, Request, HTTPHandler import json -from tests.integration import CASSANDRA_VERSION, SIMULACRON_JAR import subprocess import time -import nose +from six.moves.urllib.request import build_opener, Request, HTTPHandler + +from cassandra.metadata import SchemaParserV4 + +from tests.integration import CASSANDRA_VERSION, SIMULACRON_JAR, DSE_VERSION DEFAULT_CLUSTER = "python_simulacron_cluster" @@ -86,9 +88,10 @@ def start_simulacron(): SERVER_SIMULACRON.start() - #TODO improve this sleep, maybe check the logs like ccm + # TODO improve this sleep, maybe check the logs like ccm time.sleep(5) + def stop_simulacron(): SERVER_SIMULACRON.stop() @@ -117,6 +120,8 @@ def prime_server_versions(self): system_local_row = {} system_local_row["cql_version"] = CASSANDRA_VERSION.base_version system_local_row["release_version"] = CASSANDRA_VERSION.base_version + "-SNAPSHOT" + if DSE_VERSION: + system_local_row["dse_version"] = DSE_VERSION.base_version column_types = {"cql_version": "ascii", "release_version": "ascii"} system_local = PrimeQuery("SELECT cql_version, release_version FROM system.local", rows=[system_local_row], @@ -274,13 +279,15 @@ def fetch_url_params(self): def method(self): return "POST" + class ClusterQuery(SimulacronRequest): """ Class used for creating a cluster """ - def __init__(self, cluster_name, cassandra_version, data_centers="3", json_dict=None): + def __init__(self, cluster_name, cassandra_version, data_centers="3", json_dict=None, dse_version=None): self.cluster_name = cluster_name self.cassandra_version = cassandra_version + self.dse_version = dse_version self.data_centers = data_centers if json_dict is None: self.json_dict = {} @@ -293,13 +300,42 @@ def fetch_json(self): return self.json_dict def fetch_url_params(self): - return "?cassandra_version={0}&data_centers={1}&name={2}".\ + q = "?cassandra_version={0}&data_centers={1}&name={2}".\ format(self.cassandra_version, self.data_centers, self.cluster_name) + if self.dse_version: + q += "&dse_version={0}".format(self.dse_version) + + return q @property def method(self): return "POST" + +class GetLogsQuery(SimulacronRequest): + """ + Class used to get logs from simulacron + """ + def __init__(self, cluster_name=DEFAULT_CLUSTER, dc_id=0): + self.path = "log/{}/{}".format(cluster_name, dc_id) + + @property + def method(self): + return "GET" + + +class ClearLogsQuery(SimulacronRequest): + """ + Class used to get logs from simulacron + """ + def __init__(self, cluster_name=DEFAULT_CLUSTER, dc_id=0): + self.path = "log/{}/{}".format(cluster_name, dc_id) + + @property + def method(self): + return "DELETE" + + def prime_driver_defaults(): """ Function to prime the necessary queries so the test harness can run @@ -307,8 +343,20 @@ def prime_driver_defaults(): client_simulacron = SimulacronClient() client_simulacron.prime_server_versions() + # prepare InvalidResponses for virtual tables + for query in [SchemaParserV4._SELECT_VIRTUAL_KEYSPACES, + SchemaParserV4._SELECT_VIRTUAL_TABLES, + SchemaParserV4._SELECT_VIRTUAL_COLUMNS]: + client_simulacron.submit_request( + PrimeQuery(query, result='invalid', + then={"result": "invalid", + "delay_in_ms": 0, + "ignore_on_prepare": True, + "message": "Invalid Query!"}) + ) + -def prime_cluster(data_centers="3", version=CASSANDRA_VERSION, cluster_name=DEFAULT_CLUSTER): +def prime_cluster(data_centers="3", version=None, cluster_name=DEFAULT_CLUSTER, dse_version=None): """ Creates a new cluster in the simulacron server :param cluster_name: name of the cluster @@ -316,8 +364,8 @@ def prime_cluster(data_centers="3", version=CASSANDRA_VERSION, cluster_name=DEFA datacenters of 2 nodes and three nodes :param version: C* version """ - version = version or CASSANDRA_VERSION.base_version - cluster_query = ClusterQuery(cluster_name, version, data_centers) + version = version or CASSANDRA_VERSION + cluster_query = ClusterQuery(cluster_name, version, data_centers, dse_version=dse_version) client_simulacron = SimulacronClient() response = client_simulacron.submit_request(cluster_query) return SimulacronCluster(response) @@ -332,7 +380,8 @@ def start_and_prime_singledc(cluster_name=DEFAULT_CLUSTER): return start_and_prime_cluster_defaults(number_of_dc=1, nodes_per_dc=3, cluster_name=cluster_name) -def start_and_prime_cluster_defaults(number_of_dc=1, nodes_per_dc=3, version=CASSANDRA_VERSION, cluster_name=DEFAULT_CLUSTER): +def start_and_prime_cluster_defaults(number_of_dc=1, nodes_per_dc=3, version=CASSANDRA_VERSION, + cluster_name=DEFAULT_CLUSTER, dse_version=None): """ :param number_of_dc: number of datacentes :param nodes_per_dc: number of nodes per datacenter @@ -340,7 +389,8 @@ def start_and_prime_cluster_defaults(number_of_dc=1, nodes_per_dc=3, version=CAS """ start_simulacron() data_centers = ",".join([str(nodes_per_dc)] * number_of_dc) - simulacron_cluster = prime_cluster(data_centers=data_centers, version=version, cluster_name=cluster_name) + simulacron_cluster = prime_cluster(data_centers=data_centers, version=version, + cluster_name=cluster_name, dse_version=dse_version) prime_driver_defaults() return simulacron_cluster From 28b231ea060ccc3ba58aaa356bb0790eabbb588e Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 9 Sep 2019 09:11:37 -0500 Subject: [PATCH 1590/2005] Add protocols v2 and v1 to c* 2.1 tests --- build.yaml | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/build.yaml b/build.yaml index 39f3e39b37..4252a63da5 100644 --- a/build.yaml +++ b/build.yaml @@ -122,7 +122,7 @@ schedules: - python: [3.4, 3.6, 3.7] - cassandra: ['2.0', '2.1', '2.2', '3.0', 'test-dse'] - # Temporary, don't merge me + # TODO: Temporary, don't merge me python_oss_next_tests: schedule: adhoc disable_pull_requests: true @@ -146,7 +146,6 @@ os: - ubuntu/bionic64/python-driver cassandra: - - '2.0' - '2.1' - '2.2' - '3.0' @@ -226,6 +225,14 @@ build: echo "==========RUNNING INTEGRATION TESTS==========" EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true + # TODO: Temporary, don't merge this 2.1 stuff... + if [ "$CCM_CASSANDRA_VERSION" == "2.1.21" ]; then + echo "Testing cassandra 2.1 with protocol V2" + PROTOCOL_VERSION=2 EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results_v2.xml tests/integration/standard/ || true + echo "Testing cassandra 2.1 with protocol V1" + PROTOCOL_VERSION=1 EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results_v1.xml tests/integration/standard/ || true + fi + if [ -z "$EXCLUDE_LONG" ]; then echo "==========RUNNING LONG INTEGRATION TESTS==========" EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true From 4c357cface3e280adf9285095dd84aa7472a9e52 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 9 Sep 2019 10:31:35 -0400 Subject: [PATCH 1591/2005] Merged query.py --- cassandra/query.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/cassandra/query.py b/cassandra/query.py index 74a9896b9a..0e7a41dc2d 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -245,8 +245,7 @@ class Statement(object): keyspace = None """ The string name of the keyspace this query acts on. This is used when - :class:`~.TokenAwarePolicy` is configured for - :attr:`.Cluster.load_balancing_policy` + :class:`~.TokenAwarePolicy` is configured in the profile load balancing policy. It is set implicitly on :class:`.BoundStatement`, and :class:`.BatchStatement`, but must be set explicitly on :class:`.SimpleStatement`. @@ -1087,3 +1086,17 @@ def __init__(self, description, timeuuid, source, source_elapsed, thread_name): def __str__(self): return "%s on %s[%s] at %s" % (self.description, self.source, self.thread_name, self.datetime) + + +# TODO remove next major since we can target using the `host` attribute of session.execute +class HostTargetingStatement(object): + """ + Wraps any query statement and attaches a target host, making + it usable in a targeted LBP without modifying the user's statement. + """ + def __init__(self, inner_statement, target_host): + self.__class__ = type(inner_statement.__class__.__name__, + (self.__class__, inner_statement.__class__), + {}) + self.__dict__ = inner_statement.__dict__ + self.target_host = target_host From c60c0f2104b0c3ee15ca50e5b3e30fe51dd3ed0f Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 9 Sep 2019 10:57:17 -0400 Subject: [PATCH 1592/2005] Merged pool.py --- cassandra/pool.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cassandra/pool.py b/cassandra/pool.py index 790b1d2a6e..a4429aeed6 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -105,6 +105,8 @@ class Host(object): """ DSE workload queried from the control connection system tables. Only populated when connecting to DSE with this property available. Not queried if :attr:`~.Cluster.token_metadata_enabled` is ``False``. + This is a legacy attribute that does not portray multiple workloads in a uniform fashion. + See also :attr:`~.Host.dse_workloads`. """ dse_workloads = None @@ -428,7 +430,7 @@ def _replace(self, connection): log.debug("Replacing connection (%s) to %s", id(connection), self.host) try: - conn = self._session.cluster.connection_factory(self.host) + conn = self._session.cluster.connection_factory(self.host.endpoint) if self._keyspace: conn.set_keyspace_blocking(self._keyspace) self._connection = conn From 9ee9331dc047aa2eff2fb76c14d425e99a46451d Mon Sep 17 00:00:00 2001 From: nick Date: Mon, 9 Sep 2019 17:10:16 +0200 Subject: [PATCH 1593/2005] Initial fix for PYTHON-1044 --- cassandra/cluster.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 9818ce2f12..680510e07b 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3722,11 +3722,14 @@ def _on_timeout(self, _attempts=0): if self._connection is not None: try: self._connection._requests.pop(self._req_id) - # This prevents the race condition of the - # event loop thread just receiving the waited message - # If it arrives after this, it will be ignored + # PYTHON-1044 + # This request might have been removed from the connection after the latter was defunct by heartbeat. + # We should still raise OperationTimedOut to reject the future so that the main event thread will not + # wait for it endlessly except KeyError: - return + key = "Connection defunct by heartbeat" + errors = {key: "Client request timeout. See Session.execute[_async](timeout)"} + self._set_final_exception(OperationTimedOut(errors, self._current_host)) pool = self.session._pools.get(self._current_host) if pool and not pool.is_shutdown: From ebba6113ded6fa52a2ecd6dad1851bc99933576f Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 9 Sep 2019 11:57:03 -0400 Subject: [PATCH 1594/2005] connection.py, asyncorereactor.py, policies.py, small fix in protocol.py --- cassandra/connection.py | 223 ++++++++++++++++++++++++++++++-- cassandra/io/asyncorereactor.py | 2 +- cassandra/policies.py | 68 +++++++++- cassandra/protocol.py | 28 ++++ 4 files changed, 305 insertions(+), 16 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index 8552faa6e3..29c26e6845 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -24,7 +24,7 @@ import socket import struct import sys -from threading import Thread, Event, RLock +from threading import Thread, Event, RLock, Condition import time try: @@ -45,7 +45,7 @@ InvalidRequestException, SupportedMessage, AuthResponseMessage, AuthChallengeMessage, AuthSuccessMessage, ProtocolException, - RegisterMessage) + RegisterMessage, ReviseRequestMessage) from cassandra.util import OrderedDict @@ -306,6 +306,161 @@ class ProtocolError(Exception): pass +class ContinuousPagingState(object): + """ + A class for specifying continuous paging state, only supported starting with DSE_V2. + """ + + num_pages_requested = None + """ + How many pages we have already requested + """ + + num_pages_received = None + """ + How many pages we have already received + """ + + max_queue_size = None + """ + The max queue size chosen by the user via the options + """ + + def __init__(self, max_queue_size): + self.num_pages_requested = max_queue_size # the initial query requests max_queue_size + self.num_pages_received = 0 + self.max_queue_size = max_queue_size + + +class ContinuousPagingSession(object): + def __init__(self, stream_id, decoder, row_factory, connection, state): + self.stream_id = stream_id + self.decoder = decoder + self.row_factory = row_factory + self.connection = connection + self._condition = Condition() + self._stop = False + self._page_queue = deque() + self._state = state + self.released = False + + def on_message(self, result): + if isinstance(result, ResultMessage): + self.on_page(result) + elif isinstance(result, ErrorMessage): + self.on_error(result) + + def on_page(self, result): + with self._condition: + if self._state: + self._state.num_pages_received += 1 + self._page_queue.appendleft((result.column_names, result.parsed_rows, None)) + self._stop |= result.continuous_paging_last + self._condition.notify() + + if result.continuous_paging_last: + self.released = True + + def on_error(self, error): + if isinstance(error, ErrorMessage): + error = error.to_exception() + + log.debug("Got error %s for session %s", error, self.stream_id) + + with self._condition: + self._page_queue.appendleft((None, None, error)) + self._stop = True + self._condition.notify() + + self.released = True + + def results(self): + try: + self._condition.acquire() + while True: + while not self._page_queue and not self._stop: + self._condition.wait(timeout=5) + while self._page_queue: + names, rows, err = self._page_queue.pop() + if err: + raise err + self.maybe_request_more() + self._condition.release() + for row in self.row_factory(names, rows): + yield row + self._condition.acquire() + if self._stop: + break + finally: + try: + self._condition.release() + except RuntimeError: + # This exception happens if the CP results are not entirely consumed + # and the session is terminated by the runtime. See PYTHON-1054 + pass + + def maybe_request_more(self): + if not self._state: + return + + max_queue_size = self._state.max_queue_size + num_in_flight = self._state.num_pages_requested - self._state.num_pages_received + space_in_queue = max_queue_size - len(self._page_queue) - num_in_flight + log.debug("Session %s from %s, space in CP queue: %s, requested: %s, received: %s, num_in_flight: %s", + self.stream_id, self.connection.host, space_in_queue, self._state.num_pages_requested, + self._state.num_pages_received, num_in_flight) + + if space_in_queue >= max_queue_size / 2: + self.update_next_pages(space_in_queue) + + def update_next_pages(self, num_next_pages): + try: + self._state.num_pages_requested += num_next_pages + log.debug("Updating backpressure for session %s from %s", self.stream_id, self.connection.host) + with self.connection.lock: + self.connection.send_msg(ReviseRequestMessage(ReviseRequestMessage.RevisionType.PAGING_BACKPRESSURE, + self.stream_id, + next_pages=num_next_pages), + self.connection.get_request_id(), + self._on_backpressure_response) + except ConnectionShutdown as ex: + log.debug("Failed to update backpressure for session %s from %s, connection is shutdown", + self.stream_id, self.connection.host) + self.on_error(ex) + + def _on_backpressure_response(self, response): + if isinstance(response, ResultMessage): + log.debug("Paging session %s backpressure updated.", self.stream_id) + else: + log.error("Failed updating backpressure for session %s from %s: %s", self.stream_id, self.connection.host, + response.to_exception() if hasattr(response, 'to_exception') else response) + self.on_error(response) + + def cancel(self): + try: + log.debug("Canceling paging session %s from %s", self.stream_id, self.connection.host) + with self.connection.lock: + self.connection.send_msg(ReviseRequestMessage(ReviseRequestMessage.RevisionType.PAGING_CANCEL, + self.stream_id), + self.connection.get_request_id(), + self._on_cancel_response) + except ConnectionShutdown: + log.debug("Failed to cancel session %s from %s, connection is shutdown", + self.stream_id, self.connection.host) + + with self._condition: + self._stop = True + self._condition.notify() + + def _on_cancel_response(self, response): + if isinstance(response, ResultMessage): + log.debug("Paging session %s canceled.", self.stream_id) + else: + log.error("Failed canceling streaming session %s from %s: %s", self.stream_id, self.connection.host, + response.to_exception() if hasattr(response, 'to_exception') else response) + self.released = True + + def defunct_on_error(f): @wraps(f) @@ -339,6 +494,7 @@ class Connection(object): keyspace = None compression = True + _compression_type = None compressor = None decompressor = None @@ -414,6 +570,7 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self._push_watchers = defaultdict(set) self._requests = {} self._iobuf = io.BytesIO() + self._continuous_paging_sessions = {} if ssl_options: self._check_hostname = bool(self.ssl_options.pop('check_hostname', False)) @@ -547,10 +704,16 @@ def defunct(self, exc): self.last_error = exc self.close() + self.error_all_cp_sessions(exc) self.error_all_requests(exc) self.connected_event.set() return exc + def error_all_cp_sessions(self, exc): + stream_ids = list(self._continuous_paging_sessions.keys()) + for stream_id in stream_ids: + self._continuous_paging_sessions[stream_id].on_error(exc) + def error_all_requests(self, exc): with self.lock: requests = self._requests @@ -560,6 +723,7 @@ def error_all_requests(self, exc): return new_exc = ConnectionShutdown(str(exc)) + def try_callback(cb): try: cb(new_exc) @@ -703,7 +867,7 @@ def _read_frame_header(self): pos = len(buf) if pos: version = int_from_buf_item(buf[0]) & PROTOCOL_VERSION_MASK - if version > ProtocolVersion.MAX_SUPPORTED: + if version not in ProtocolVersion.SUPPORTED_VERSIONS: raise ProtocolError("This version of the driver does not support protocol version %d" % version) frame_header = frame_header_v3 if version >= 3 else frame_header_v1_v2 # this frame header struct is everything after the version byte @@ -748,15 +912,18 @@ def process_msg(self, header, body): decoder = ProtocolHandler.decode_message result_metadata = None else: - try: - callback, decoder, result_metadata = self._requests.pop(stream_id) - # This can only happen if the stream_id was - # removed due to an OperationTimedOut - except KeyError: - return - - with self.lock: - self.request_ids.append(stream_id) + if stream_id in self._continuous_paging_sessions: + paging_session = self._continuous_paging_sessions[stream_id] + callback = paging_session.on_message + decoder = paging_session.decoder + result_metadata = None + else: + try: + callback, decoder, result_metadata = self._requests.pop(stream_id) + # This can only happen if the stream_id was + # removed due to an OperationTimedOut + except KeyError: + return try: response = decoder(header.version, self.user_type_map, stream_id, @@ -784,6 +951,29 @@ def process_msg(self, header, body): except Exception: log.exception("Callback handler errored, ignoring:") + # done after callback because the callback might signal this as a paging session + if stream_id >= 0: + if stream_id in self._continuous_paging_sessions: + if self._continuous_paging_sessions[stream_id].released: + self.remove_continuous_paging_session(stream_id) + else: + with self.lock: + self.request_ids.append(stream_id) + + def new_continuous_paging_session(self, stream_id, decoder, row_factory, state): + session = ContinuousPagingSession(stream_id, decoder, row_factory, self, state) + self._continuous_paging_sessions[stream_id] = session + return session + + def remove_continuous_paging_session(self, stream_id): + try: + self._continuous_paging_sessions.pop(stream_id) + with self.lock: + log.debug("Returning cp session stream id %s", stream_id) + self.request_ids.append(stream_id) + except KeyError: + pass + @defunct_on_error def _send_options_message(self): if self.cql_version is None and (not self.compression or not locally_supported_compressions): @@ -855,6 +1045,7 @@ def _handle_options_response(self, options_response): # set the decompressor here, but set the compressor only after # a successful Ready message + self._compression_type = compression_type self._compressor, self.decompressor = \ locally_supported_compressions[compression_type] @@ -893,7 +1084,10 @@ def _handle_startup_response(self, startup_response, did_authenticate=False): id(self), self.endpoint, startup_response.authenticator) if self.authenticator is None: - raise AuthenticationFailed('Remote end requires authentication.') + log.error("Failed to authenticate to %s. If you are trying to connect to a DSE cluster, " + "consider using TransitionalModePlainTextAuthProvider " + "if DSE authentication is configured with transitional mode" % (self.host,)) + raise AuthenticationFailed('Remote end requires authentication') if isinstance(self.authenticator, dict): log.debug("Sending credentials-based auth response on %s", self) @@ -905,7 +1099,8 @@ def _handle_startup_response(self, startup_response, did_authenticate=False): self.authenticator.server_authenticator_class = startup_response.authenticator initial_response = self.authenticator.initial_response() initial_response = "" if initial_response is None else initial_response - self.send_msg(AuthResponseMessage(initial_response), self.get_request_id(), self._handle_auth_response) + self.send_msg(AuthResponseMessage(initial_response), self.get_request_id(), + self._handle_auth_response) elif isinstance(startup_response, ErrorMessage): log.debug("Received ErrorMessage on new connection (%s) from %s: %s", id(self), self.endpoint, startup_response.summary_msg()) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index d3dd0cf0e3..c5737d191d 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -461,4 +461,4 @@ def writable(self): return self._writable def readable(self): - return self._readable or (self.is_control_connection and not (self.is_defunct or self.is_closed)) + return self._readable or ((self.is_control_connection or self._continuous_paging_sessions) and not (self.is_defunct or self.is_closed)) diff --git a/cassandra/policies.py b/cassandra/policies.py index d61066665e..2364f2a6c0 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -421,7 +421,7 @@ def __init__(self, hosts): The `hosts` parameter should be a sequence of hosts to permit connections to. """ - self._allowed_hosts = hosts + self._allowed_hosts = tuple(hosts) self._allowed_hosts_resolved = [endpoint[4][0] for a in self._allowed_hosts for endpoint in socket.getaddrinfo(a, None, socket.AF_UNSPEC, socket.SOCK_STREAM)] @@ -1102,3 +1102,69 @@ def next_execution(self, host): def new_plan(self, keyspace, statement): return self.ConstantSpeculativeExecutionPlan(self.delay, self.max_attempts) + + +class WrapperPolicy(LoadBalancingPolicy): + + def __init__(self, child_policy): + self._child_policy = child_policy + + def distance(self, *args, **kwargs): + return self._child_policy.distance(*args, **kwargs) + + def populate(self, cluster, hosts): + self._child_policy.populate(cluster, hosts) + + def on_up(self, *args, **kwargs): + return self._child_policy.on_up(*args, **kwargs) + + def on_down(self, *args, **kwargs): + return self._child_policy.on_down(*args, **kwargs) + + def on_add(self, *args, **kwargs): + return self._child_policy.on_add(*args, **kwargs) + + def on_remove(self, *args, **kwargs): + return self._child_policy.on_remove(*args, **kwargs) + + +class DSELoadBalancingPolicy(WrapperPolicy): + """ + A :class:`.LoadBalancingPolicy` wrapper that adds the ability to target a specific host first. + + If no host is set on the query, the child policy's query plan will be used as is. + """ + + _cluster_metadata = None + + def populate(self, cluster, hosts): + self._cluster_metadata = cluster.metadata + self._child_policy.populate(cluster, hosts) + + def make_query_plan(self, working_keyspace=None, query=None): + if query and query.keyspace: + keyspace = query.keyspace + else: + keyspace = working_keyspace + + addr = getattr(query, 'target_host', None) if query else None + target_host = self._cluster_metadata.get_host(addr) + + child = self._child_policy + if target_host and target_host.is_up: + yield target_host + for h in child.make_query_plan(keyspace, query): + if h != target_host: + yield h + else: + for h in child.make_query_plan(keyspace, query): + yield h + + +class NeverRetryPolicy(RetryPolicy): + def _rethrow(self, *args, **kwargs): + return self.RETHROW, None + + on_read_timeout = _rethrow + on_write_timeout = _rethrow + on_unavailable = _rethrow diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 1c9db58eee..0fe7439f2d 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -1049,6 +1049,34 @@ def recv_schema_change(cls, f, protocol_version): return event +class ReviseRequestMessage(_MessageType): + + class RevisionType(object): + PAGING_CANCEL = 1 + PAGING_BACKPRESSURE = 2 + + opcode = 0xFF + name = 'REVISE_REQUEST' + + def __init__(self, op_type, op_id, next_pages=0): + self.op_type = op_type + self.op_id = op_id + self.next_pages = next_pages + + def send_body(self, f, protocol_version): + write_int(f, self.op_type) + write_int(f, self.op_id) + if self.op_type == ReviseRequestMessage.RevisionType.PAGING_BACKPRESSURE: + if self.next_pages <= 0: + raise UnsupportedOperation("Continuous paging backpressure requires next_pages > 0") + elif not ProtocolVersion.has_continuous_paging_next_pages(protocol_version): + raise UnsupportedOperation( + "Continuous paging backpressure may only be used with protocol version " + "ProtocolVersion.DSE_V2 or higher. Consider setting Cluster.protocol_version to ProtocolVersion.DSE_V2.") + else: + write_int(f, self.next_pages) + + class _ProtocolHandler(object): """ _ProtocolHander handles encoding and decoding messages. From c356a700d3c79b31287a228bef5eb5318e50e97b Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 9 Sep 2019 15:48:39 -0500 Subject: [PATCH 1595/2005] adding insights --- cassandra/insights/__init__.py | 0 cassandra/insights/registry.py | 109 ++++++++++++++++ cassandra/insights/reporter.py | 198 ++++++++++++++++++++++++++++ cassandra/insights/serializers.py | 207 ++++++++++++++++++++++++++++++ cassandra/insights/util.py | 61 +++++++++ 5 files changed, 575 insertions(+) create mode 100644 cassandra/insights/__init__.py create mode 100644 cassandra/insights/registry.py create mode 100644 cassandra/insights/reporter.py create mode 100644 cassandra/insights/serializers.py create mode 100644 cassandra/insights/util.py diff --git a/cassandra/insights/__init__.py b/cassandra/insights/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cassandra/insights/registry.py b/cassandra/insights/registry.py new file mode 100644 index 0000000000..9b7b3f2b8b --- /dev/null +++ b/cassandra/insights/registry.py @@ -0,0 +1,109 @@ +import six +from collections import OrderedDict +from warnings import warn + +from cassandra.insights.util import namespace + +_NOT_SET = object() + + +def _default_serializer_for_object(obj, policy): + # the insights server expects an 'options' dict for policy + # objects, but not for other objects + if policy: + return {'type': obj.__class__.__name__, + 'namespace': namespace(obj.__class__), + 'options': {}} + else: + return {'type': obj.__class__.__name__, + 'namespace': namespace(obj.__class__)} + + +class InsightsSerializerRegistry(object): + + initialized = False + + def __init__(self, mapping_dict=None): + mapping_dict = mapping_dict or {} + class_order = self._class_topological_sort(mapping_dict) + self._mapping_dict = OrderedDict( + ((cls, mapping_dict[cls]) for cls in class_order) + ) + + def serialize(self, obj, policy=False, default=_NOT_SET, cls=None): + try: + return self._get_serializer(cls if cls is not None else obj.__class__)(obj) + except Exception: + if default is _NOT_SET: + result = _default_serializer_for_object(obj, policy) + else: + result = default + + return result + + def _get_serializer(self, cls): + try: + return self._mapping_dict[cls] + except KeyError: + for registered_cls, serializer in six.iteritems(self._mapping_dict): + if issubclass(cls, registered_cls): + return self._mapping_dict[registered_cls] + raise ValueError + + def register(self, cls, serializer): + self._mapping_dict[cls] = serializer + self._mapping_dict = OrderedDict( + ((cls, self._mapping_dict[cls]) + for cls in self._class_topological_sort(self._mapping_dict)) + ) + + def register_serializer_for(self, cls): + """ + Parameterized registration helper decorator. Given a class `cls`, + produces a function that registers the decorated function as a + serializer for it. + """ + def decorator(serializer): + self.register(cls, serializer) + return serializer + + return decorator + + @staticmethod + def _class_topological_sort(classes): + """ + A simple topological sort for classes. Takes an iterable of class objects + and returns a list A of those classes, ordered such that A[X] is never a + superclass of A[Y] for X < Y. + + This is an inefficient sort, but that's ok because classes are infrequently + registered. It's more important that this be maintainable than fast. + + We can't use `.sort()` or `sorted()` with a custom `key` -- those assume + a total ordering, which we don't have. + """ + unsorted, sorted_ = list(classes), [] + while unsorted: + head, tail = unsorted[0], unsorted[1:] + + # if head has no subclasses remaining, it can safely go in the list + if not any(issubclass(x, head) for x in tail): + sorted_.append(head) + else: + # move to the back -- head has to wait until all its subclasses + # are sorted into the list + tail.append(head) + + unsorted = tail + + # check that sort is valid + for i, head in enumerate(sorted_): + for after_head_value in sorted_[(i + 1):]: + if issubclass(after_head_value, head): + warn('Sorting classes produced an invalid ordering.\n' + 'In: {classes}\n' + 'Out: {sorted_}'.format(classes=classes, sorted_=sorted_)) + return sorted_ + + +insights_registry = InsightsSerializerRegistry() diff --git a/cassandra/insights/reporter.py b/cassandra/insights/reporter.py new file mode 100644 index 0000000000..5f620ec046 --- /dev/null +++ b/cassandra/insights/reporter.py @@ -0,0 +1,198 @@ +from collections import Counter +import datetime +import json +import logging +import multiprocessing +import random +import platform +import socket +import ssl +import sys +from threading import Event, Thread +import time +import six + +from cassandra.policies import HostDistance +from cassandra.util import ms_timestamp_from_datetime +from cassandra.insights.registry import insights_registry +from cassandra.insights.serializers import initialize_registry + +log = logging.getLogger(__name__) + + +class MonitorReporter(Thread): + + def __init__(self, interval_sec, session): + """ + takes an int indicating interval between requests, a function returning + the connection to be used, and the timeout per request + """ + # Thread is an old-style class so we can't super() + Thread.__init__(self, name='monitor_reporter') + + initialize_registry(insights_registry) + + self._interval, self._session = interval_sec, session + + self._shutdown_event = Event() + self.daemon = True + self.start() + + def run(self): + self._send_via_rpc(self._get_startup_data()) + + # introduce some jitter -- send up to 1/10 of _interval early + self._shutdown_event.wait(self._interval * random.uniform(.9, 1)) + + while not self._shutdown_event.is_set(): + start_time = time.time() + + self._send_via_rpc(self._get_status_data()) + + elapsed = time.time() - start_time + self._shutdown_event.wait(max(self._interval - elapsed, 0.01)) + + # TODO: redundant with ConnectionHeartbeat.ShutdownException + class ShutDownException(Exception): + pass + + def _send_via_rpc(self, data): + try: + self._session.execute( + "CALL InsightsRpc.reportInsight(%s)", (json.dumps(data),) + ) + log.debug('Insights RPC data: {}'.format(data)) + except Exception as e: + log.debug('Insights RPC send failed with {}'.format(e)) + log.debug('Insights RPC data: {}'.format(data)) + + def _get_status_data(self): + cc = self._session.cluster.control_connection + + connected_nodes = { + host.address: { + 'connections': state['open_count'], + 'inFlightQueries': state['in_flights'] + } + for (host, state) in self._session.get_pool_state().items() + } + + return { + 'metadata': { + # shared across drivers; never change + 'name': 'driver.status', + # format version + 'insightMappingId': 'v1', + 'insightType': 'EVENT', + # since epoch + 'timestamp': ms_timestamp_from_datetime(datetime.datetime.utcnow()), + 'tags': { + 'language': 'python' + } + }, + # // 'clientId', 'sessionId' and 'controlConnection' are mandatory + # // the rest of the properties are optional + 'data': { + # // 'clientId' must be the same as the one provided in the startup message + 'clientId': str(self._session.cluster.client_id), + # // 'sessionId' must be the same as the one provided in the startup message + 'sessionId': str(self._session.session_id), + 'controlConnection': cc._connection.host if cc._connection else None, + 'connectedNodes': connected_nodes + } + } + + def _get_startup_data(self): + cc = self._session.cluster.control_connection + try: + local_ipaddr = cc._connection._socket.getsockname()[0] + except Exception as e: + log.debug('Unable to get local socket addr from {}: {}'.format(cc._connection, e)) + hostname = socket.getfqdn() + + host_distances_counter = Counter( + self._session.cluster.profile_manager.distance(host) + for host in self._session.hosts + ) + host_distances_dict = { + 'local': host_distances_counter[HostDistance.LOCAL], + 'remote': host_distances_counter[HostDistance.REMOTE], + 'ignored': host_distances_counter[HostDistance.IGNORED] + } + + compression_type = cc._connection._compression_type if cc._connection else 'NONE' + + if self._session.cluster.ssl_context: + cert_validation = self._session.cluster.ssl_context.verify_mode == ssl.CERT_REQUIRED + elif self._session.cluster.ssl_options: + cert_validation = self._session.cluster.ssl_options.get('cert_reqs') == ssl.CERT_REQUIRED + else: + cert_validation = None + + uname_info = platform.uname() + + return { + 'metadata': { + 'name': 'driver.startup', + 'insightMappingId': 'v1', + 'insightType': 'EVENT', + 'timestamp': ms_timestamp_from_datetime(datetime.datetime.utcnow()), + 'tags': { + 'language': 'python' + }, + }, + 'data': { + 'driverName': 'DataStax Enterprise Python Driver', + 'driverVersion': sys.modules['dse'].__version__, + 'clientId': str(self._session.cluster.client_id), + 'sessionId': str(self._session.session_id), + 'applicationName': self._session.cluster.application_name or 'python', + 'applicationNameWasGenerated': not self._session.cluster.application_name, + 'applicationVersion': self._session.cluster.application_version, + 'contactPoints': self._session.cluster._endpoint_map_for_insights, + 'dataCenters': list(set(h.datacenter for h in self._session.cluster.metadata.all_hosts() + if (h.datacenter and + self._session.cluster.profile_manager.distance(h) == HostDistance.LOCAL))), + 'initialControlConnection': cc._connection.host if cc._connection else None, + 'protocolVersion': self._session.cluster.protocol_version, + 'localAddress': local_ipaddr, + 'hostName': hostname, + 'executionProfiles': insights_registry.serialize(self._session.cluster.profile_manager), + 'configuredConnectionLength': host_distances_dict, + 'heartbeatInterval': self._session.cluster.idle_heartbeat_interval, + 'compression': compression_type.upper() if compression_type else 'NONE', + 'reconnectionPolicy': insights_registry.serialize(self._session.cluster.reconnection_policy), + 'sslConfigured': { + 'enabled': bool(self._session.cluster.ssl_options or self._session.cluster.ssl_context), + 'certValidation': cert_validation + }, + 'authProvider': { + 'type': (self._session.cluster.auth_provider.__class__.__name__ + if self._session.cluster.auth_provider else + None) + }, + 'otherOptions': { + }, + 'platformInfo': { + 'os': { + 'name': uname_info.system if six.PY3 else uname_info[0], + 'version': uname_info.release if six.PY3 else uname_info[2], + 'arch': uname_info.machine if six.PY3 else uname_info[4] + }, + 'cpus': { + 'length': multiprocessing.cpu_count(), + 'model': platform.processor() + }, + 'runtime': { + 'python': sys.version, + 'event_loop': self._session.cluster.connection_class.__name__ + } + }, + 'periodicStatusInterval': self._interval + } + } + + def stop(self): + log.debug("Shutting down Monitor Reporter") + self._shutdown_event.set() + self.join() diff --git a/cassandra/insights/serializers.py b/cassandra/insights/serializers.py new file mode 100644 index 0000000000..b72589e658 --- /dev/null +++ b/cassandra/insights/serializers.py @@ -0,0 +1,207 @@ +import six + + +def initialize_registry(insights_registry): + # This will be called from the cluster module, so we put all this behavior + # in a function to avoid circular imports + + if insights_registry.initialized: + return False + + from cassandra import ConsistencyLevel + from cassandra.cluster import ( + ExecutionProfile, GraphExecutionProfile, + ProfileManager, ContinuousPagingOptions, + EXEC_PROFILE_DEFAULT, EXEC_PROFILE_GRAPH_DEFAULT, + EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT, + EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT, + _NOT_SET + ) + from cassandra.graph.query import GraphOptions + from cassandra.insights.registry import insights_registry + from cassandra.insights.util import namespace + from cassandra.policies import ( + RoundRobinPolicy, + DCAwareRoundRobinPolicy, + TokenAwarePolicy, + WhiteListRoundRobinPolicy, + HostFilterPolicy, + ConstantReconnectionPolicy, + ExponentialReconnectionPolicy, + RetryPolicy, + SpeculativeExecutionPolicy, + ConstantSpeculativeExecutionPolicy, + WrapperPolicy + ) + + import logging + + log = logging.getLogger(__name__) + + @insights_registry.register_serializer_for(RoundRobinPolicy) + def round_robin_policy_insights_serializer(policy): + return {'type': policy.__class__.__name__, + 'namespace': namespace(policy.__class__), + 'options': {}} + + @insights_registry.register_serializer_for(DCAwareRoundRobinPolicy) + def dc_aware_round_robin_policy_insights_serializer(policy): + return {'type': policy.__class__.__name__, + 'namespace': namespace(policy.__class__), + 'options': {'local_dc': policy.local_dc, + 'used_hosts_per_remote_dc': policy.used_hosts_per_remote_dc} + } + + @insights_registry.register_serializer_for(TokenAwarePolicy) + def token_aware_policy_insights_serializer(policy): + return {'type': policy.__class__.__name__, + 'namespace': namespace(policy.__class__), + 'options': {'child_policy': insights_registry.serialize(policy._child_policy, + policy=True), + 'shuffle_replicas': policy.shuffle_replicas} + } + + @insights_registry.register_serializer_for(WhiteListRoundRobinPolicy) + def whitelist_round_robin_policy_insights_serializer(policy): + return {'type': policy.__class__.__name__, + 'namespace': namespace(policy.__class__), + 'options': {'allowed_hosts': policy._allowed_hosts} + } + + @insights_registry.register_serializer_for(HostFilterPolicy) + def host_filter_policy_insights_serializer(policy): + return { + 'type': policy.__class__.__name__, + 'namespace': namespace(policy.__class__), + 'options': {'child_policy': insights_registry.serialize(policy._child_policy, + policy=True), + 'predicate': policy.predicate.__name__} + } + + @insights_registry.register_serializer_for(ConstantReconnectionPolicy) + def constant_reconnection_policy_insights_serializer(policy): + return {'type': policy.__class__.__name__, + 'namespace': namespace(policy.__class__), + 'options': {'delay': policy.delay, + 'max_attempts': policy.max_attempts} + } + + @insights_registry.register_serializer_for(ExponentialReconnectionPolicy) + def exponential_reconnection_policy_insights_serializer(policy): + return {'type': policy.__class__.__name__, + 'namespace': namespace(policy.__class__), + 'options': {'base_delay': policy.base_delay, + 'max_delay': policy.max_delay, + 'max_attempts': policy.max_attempts} + } + + @insights_registry.register_serializer_for(RetryPolicy) + def retry_policy_insights_serializer(policy): + return {'type': policy.__class__.__name__, + 'namespace': namespace(policy.__class__), + 'options': {}} + + @insights_registry.register_serializer_for(SpeculativeExecutionPolicy) + def speculative_execution_policy_insights_serializer(policy): + return {'type': policy.__class__.__name__, + 'namespace': namespace(policy.__class__), + 'options': {}} + + @insights_registry.register_serializer_for(ConstantSpeculativeExecutionPolicy) + def constant_speculative_execution_policy_insights_serializer(policy): + return {'type': policy.__class__.__name__, + 'namespace': namespace(policy.__class__), + 'options': {'delay': policy.delay, + 'max_attempts': policy.max_attempts} + } + + @insights_registry.register_serializer_for(WrapperPolicy) + def wrapper_policy_insights_serializer(policy): + return {'type': policy.__class__.__name__, + 'namespace': namespace(policy.__class__), + 'options': { + 'child_policy': insights_registry.serialize(policy._child_policy, + policy=True) + }} + + @insights_registry.register_serializer_for(ExecutionProfile) + def execution_profile_insights_serializer(profile): + return { + 'loadBalancing': insights_registry.serialize(profile.load_balancing_policy, + policy=True), + 'retry': insights_registry.serialize(profile.retry_policy, + policy=True), + 'readTimeout': profile.request_timeout, + 'consistency': ConsistencyLevel.value_to_name.get(profile.consistency_level, None), + 'serialConsistency': ConsistencyLevel.value_to_name.get(profile.serial_consistency_level, None), + 'continuousPagingOptions': (insights_registry.serialize(profile.continuous_paging_options) + if (profile.continuous_paging_options is not None and + profile.continuous_paging_options is not _NOT_SET) else + None), + 'speculativeExecution': insights_registry.serialize(profile.speculative_execution_policy), + 'graphOptions': None + } + + @insights_registry.register_serializer_for(GraphExecutionProfile) + def graph_execution_profile_insights_serializer(profile): + rv = insights_registry.serialize(profile, cls=ExecutionProfile) + rv['graphOptions'] = insights_registry.serialize(profile.graph_options) + return rv + + _EXEC_PROFILE_DEFAULT_KEYS = (EXEC_PROFILE_DEFAULT, + EXEC_PROFILE_GRAPH_DEFAULT, + EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT, + EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT) + + @insights_registry.register_serializer_for(ProfileManager) + def profile_manager_insights_serializer(manager): + defaults = { + # Insights's expected default + 'default': insights_registry.serialize(manager.profiles[EXEC_PROFILE_DEFAULT]), + # remaining named defaults for driver's defaults, including duplicated default + 'EXEC_PROFILE_DEFAULT': insights_registry.serialize(manager.profiles[EXEC_PROFILE_DEFAULT]), + 'EXEC_PROFILE_GRAPH_DEFAULT': insights_registry.serialize(manager.profiles[EXEC_PROFILE_GRAPH_DEFAULT]), + 'EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT': insights_registry.serialize( + manager.profiles[EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT] + ), + 'EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT': insights_registry.serialize( + manager.profiles[EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT] + ) + } + other = { + key: insights_registry.serialize(value) + for key, value in manager.profiles.items() + if key not in _EXEC_PROFILE_DEFAULT_KEYS + } + overlapping_keys = set(defaults) & set(other) + if overlapping_keys: + log.debug('The following key names overlap default key sentinel keys ' + 'and these non-default EPs will not be displayed in Insights ' + ': {}'.format(list(overlapping_keys))) + + other.update(defaults) + return other + + @insights_registry.register_serializer_for(GraphOptions) + def graph_options_insights_serializer(options): + rv = { + 'source': options.graph_source, + 'language': options.graph_language, + 'graphProtocol': options.graph_protocol + } + updates = {k: v.decode('utf-8') for k, v in six.iteritems(rv) + if isinstance(v, six.binary_type)} + rv.update(updates) + return rv + + @insights_registry.register_serializer_for(ContinuousPagingOptions) + def continuous_paging_options_insights_serializer(paging_options): + return { + 'page_unit': paging_options.page_unit, + 'max_pages': paging_options.max_pages, + 'max_pages_per_second': paging_options.max_pages_per_second, + 'max_queue_size': paging_options.max_queue_size + } + + insights_registry.initialized = True + return True diff --git a/cassandra/insights/util.py b/cassandra/insights/util.py new file mode 100644 index 0000000000..c97819251b --- /dev/null +++ b/cassandra/insights/util.py @@ -0,0 +1,61 @@ +import logging +import traceback +from warnings import warn + +from cassandra.util import Version + + +DSE_60 = Version('6.0.0') +DSE_51_MIN_SUPPORTED = Version('5.1.13') +DSE_60_MIN_SUPPORTED = Version('6.0.5') + + +log = logging.getLogger(__name__) + + +def namespace(cls): + """ + Best-effort method for getting the namespace in which a class is defined. + """ + try: + # __module__ can be None + module = cls.__module__ or '' + except Exception: + warn("Unable to obtain namespace for {cls} for Insights, returning ''. " + "Exception: \n{e}".format(e=traceback.format_exc(), cls=cls)) + module = '' + + module_internal_namespace = _module_internal_namespace_or_emtpy_string(cls) + if module_internal_namespace: + return '.'.join((module, module_internal_namespace)) + return module + + +def _module_internal_namespace_or_emtpy_string(cls): + """ + Best-effort method for getting the module-internal namespace in which a + class is defined -- i.e. the namespace _inside_ the module. + """ + try: + qualname = cls.__qualname__ + except AttributeError: + return '' + + return '.'.join( + # the last segment is the name of the class -- use everything else + qualname.split('.')[:-1] + ) + + +def version_supports_insights(dse_version): + if dse_version: + try: + dse_version = Version(dse_version) + return (DSE_51_MIN_SUPPORTED <= dse_version < DSE_60 + or + DSE_60_MIN_SUPPORTED <= dse_version) + except Exception: + warn("Unable to check version {v} for Insights compatibility, returning False. " + "Exception: \n{e}".format(e=traceback.format_exc(), v=dse_version)) + + return False From 3de0ac168ded3cbc9ab8a8738de347464c0fd673 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 9 Sep 2019 16:10:32 -0500 Subject: [PATCH 1596/2005] Who needs copyright? --- cassandra/insights/__init__.py | 13 +++++++++++++ cassandra/insights/registry.py | 14 ++++++++++++++ cassandra/insights/reporter.py | 14 ++++++++++++++ cassandra/insights/serializers.py | 14 ++++++++++++++ cassandra/insights/util.py | 14 ++++++++++++++ 5 files changed, 69 insertions(+) diff --git a/cassandra/insights/__init__.py b/cassandra/insights/__init__.py index e69de29bb2..2c9ca172f8 100644 --- a/cassandra/insights/__init__.py +++ b/cassandra/insights/__init__.py @@ -0,0 +1,13 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/cassandra/insights/registry.py b/cassandra/insights/registry.py index 9b7b3f2b8b..b65b89a268 100644 --- a/cassandra/insights/registry.py +++ b/cassandra/insights/registry.py @@ -1,3 +1,17 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import six from collections import OrderedDict from warnings import warn diff --git a/cassandra/insights/reporter.py b/cassandra/insights/reporter.py index 5f620ec046..77d4f02ac4 100644 --- a/cassandra/insights/reporter.py +++ b/cassandra/insights/reporter.py @@ -1,3 +1,17 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from collections import Counter import datetime import json diff --git a/cassandra/insights/serializers.py b/cassandra/insights/serializers.py index b72589e658..384a75f4c3 100644 --- a/cassandra/insights/serializers.py +++ b/cassandra/insights/serializers.py @@ -1,3 +1,17 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import six diff --git a/cassandra/insights/util.py b/cassandra/insights/util.py index c97819251b..a483b3f64d 100644 --- a/cassandra/insights/util.py +++ b/cassandra/insights/util.py @@ -1,3 +1,17 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import logging import traceback from warnings import warn From 67c09e1494323c46c92827ccc680d9e62482a542 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 9 Sep 2019 16:23:44 -0500 Subject: [PATCH 1597/2005] adding graph --- cassandra/graph/__init__.py | 22 ++ cassandra/graph/graphson.py | 689 ++++++++++++++++++++++++++++++++++++ cassandra/graph/query.py | 298 ++++++++++++++++ cassandra/graph/types.py | 158 +++++++++ 4 files changed, 1167 insertions(+) create mode 100644 cassandra/graph/__init__.py create mode 100644 cassandra/graph/graphson.py create mode 100644 cassandra/graph/query.py create mode 100644 cassandra/graph/types.py diff --git a/cassandra/graph/__init__.py b/cassandra/graph/__init__.py new file mode 100644 index 0000000000..e7880e34d5 --- /dev/null +++ b/cassandra/graph/__init__.py @@ -0,0 +1,22 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from cassandra.graph.types import Element, Vertex, VertexProperty, Edge, Path +from cassandra.graph.query import ( + GraphOptions, GraphProtocol, GraphStatement, SimpleGraphStatement, Result, + graph_object_row_factory, single_object_row_factory, + graph_result_row_factory, graph_graphson2_row_factory +) +from cassandra.graph.graphson import * diff --git a/cassandra/graph/graphson.py b/cassandra/graph/graphson.py new file mode 100644 index 0000000000..91034db4b2 --- /dev/null +++ b/cassandra/graph/graphson.py @@ -0,0 +1,689 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import base64 +import uuid +import re +import json +from decimal import Decimal +from collections import OrderedDict + +import six + +if six.PY3: + import ipaddress + +from cassandra.util import Polygon, Point, LineString +from cassandra.graph.types import Vertex, VertexProperty, Edge, Path + +__all__ = ['GraphSON1Serializer', 'GraphSON1Deserializer', 'GraphSON1TypeDeserializer', + 'GraphSON2Serializer', 'GraphSON2Deserializer', + 'GraphSON2Reader'] + +""" +Supported types: + +DSE Graph GraphSON 2.0 Python Driver +------------ | -------------- | ------------ +text | ------ | str +boolean | g:Boolean | bool +bigint | g:Int64 | long +int | g:Int32 | int +double | g:Double | float +float | g:Float | float +uuid | g:UUID | UUID +bigdecimal | gx:BigDecimal | Decimal +duration | gx:Duration | timedelta +inet | gx:InetAddress | str (unicode), IPV4Address/IPV6Address (PY3) +timestamp | gx:Instant | datetime.datetime +date | gx:LocalDate | datetime.date +time | gx:LocalTime | datetime.time +smallint | gx:Int16 | int +varint | gx:BigInteger | long +date | gx:LocalDate | Date +polygon | dse:Polygon | Polygon +point | dse:Point | Point +linestring | dse:LineString | LineString +blob | dse:Blob | bytearray, buffer (PY2), memoryview (PY3), bytes (PY3) +""" + +MAX_INT32 = 2 ** 32 - 1 +MIN_INT32 = -2 ** 31 + + +class _GraphSONTypeType(type): + """GraphSONType metaclass, required to create a class property.""" + + @property + def graphson_type(cls): + return "{0}:{1}".format(cls.prefix, cls.graphson_base_type) + + +@six.add_metaclass(_GraphSONTypeType) +class GraphSONTypeIO(object): + """Represent a serializable GraphSON type""" + + prefix = 'g' + graphson_base_type = None + + @classmethod + def serialize(cls, value): + return six.text_type(value) + + @classmethod + def deserialize(cls, value, reader=None): + return value + + @classmethod + def get_specialized_serializer(cls, value): + return cls + + +class BooleanTypeIO(GraphSONTypeIO): + graphson_base_type = 'Boolean' + + @classmethod + def serialize(cls, value): + return bool(value) + + +class IntegerTypeIO(GraphSONTypeIO): + + @classmethod + def serialize(cls, value): + return value + + @classmethod + def get_specialized_serializer(cls, value): + if type(value) in six.integer_types and (value > MAX_INT32 or value < MIN_INT32): + return Int64TypeIO + + return Int32TypeIO + + +class Int16TypeIO(IntegerTypeIO): + prefix = 'gx' + graphson_base_type = 'Int16' + + +class Int32TypeIO(IntegerTypeIO): + graphson_base_type = 'Int32' + + +class Int64TypeIO(IntegerTypeIO): + graphson_base_type = 'Int64' + + @classmethod + def deserialize(cls, value, reader=None): + if six.PY3: + return value + return long(value) + + +class FloatTypeIO(GraphSONTypeIO): + graphson_base_type = 'Float' + + @classmethod + def deserialize(cls, value, reader=None): + return float(value) + + +class DoubleTypeIO(FloatTypeIO): + graphson_base_type = 'Double' + + +class BigIntegerTypeIO(IntegerTypeIO): + prefix = 'gx' + graphson_base_type = 'BigInteger' + + +class LocalDateTypeIO(GraphSONTypeIO): + FORMAT = '%Y-%m-%d' + + prefix = 'gx' + graphson_base_type = 'LocalDate' + + @classmethod + def serialize(cls, value): + return value.isoformat() + + @classmethod + def deserialize(cls, value, reader=None): + try: + return datetime.datetime.strptime(value, cls.FORMAT).date() + except ValueError: + # negative date + return value + + @classmethod + def get_specialized_serializer(cls, value): + if isinstance(value, datetime.datetime): + return InstantTypeIO + + return cls + + +class InstantTypeIO(GraphSONTypeIO): + prefix = 'gx' + graphson_base_type = 'Instant' + + @classmethod + def serialize(cls, value): + if isinstance(value, datetime.datetime): + value = datetime.datetime(*value.utctimetuple()[:6]).replace(microsecond=value.microsecond) + else: + value = datetime.datetime.combine(value, datetime.datetime.min.time()) + + return "{0}Z".format(value.isoformat()) + + @classmethod + def deserialize(cls, value, reader=None): + try: + d = datetime.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ') + except ValueError: + d = datetime.datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') + return d + + +class LocalTimeTypeIO(GraphSONTypeIO): + FORMATS = [ + '%H:%M', + '%H:%M:%S', + '%H:%M:%S.%f' + ] + + prefix = 'gx' + graphson_base_type = 'LocalTime' + + @classmethod + def serialize(cls, value): + return value.strftime(cls.FORMATS[2]) + + @classmethod + def deserialize(cls, value, reader=None): + dt = None + for f in cls.FORMATS: + try: + dt = datetime.datetime.strptime(value, f) + break + except ValueError: + continue + + if dt is None: + raise ValueError('Unable to decode LocalTime: {0}'.format(value)) + + return dt.time() + + +class BlobTypeIO(GraphSONTypeIO): + prefix = 'dse' + graphson_base_type = 'Blob' + + @classmethod + def serialize(cls, value): + value = base64.b64encode(value) + if six.PY3: + value = value.decode('utf-8') + return value + + @classmethod + def deserialize(cls, value, reader=None): + return bytearray(base64.b64decode(value)) + + +class UUIDTypeIO(GraphSONTypeIO): + graphson_base_type = 'UUID' + + @classmethod + def deserialize(cls, value, reader=None): + return uuid.UUID(value) + + +class BigDecimalTypeIO(GraphSONTypeIO): + prefix = 'gx' + graphson_base_type = 'BigDecimal' + + @classmethod + def deserialize(cls, value, reader=None): + return Decimal(value) + + +class DurationTypeIO(GraphSONTypeIO): + prefix = 'gx' + graphson_base_type = 'Duration' + + _duration_regex = re.compile(r""" + ^P((?P\d+)D)? + T((?P\d+)H)? + ((?P\d+)M)? + ((?P[0-9.]+)S)?$ + """, re.VERBOSE) + _duration_format = "P{days}DT{hours}H{minutes}M{seconds}S" + + _seconds_in_minute = 60 + _seconds_in_hour = 60 * _seconds_in_minute + _seconds_in_day = 24 * _seconds_in_hour + + @classmethod + def serialize(cls, value): + total_seconds = int(value.total_seconds()) + days, total_seconds = divmod(total_seconds, cls._seconds_in_day) + hours, total_seconds = divmod(total_seconds, cls._seconds_in_hour) + minutes, total_seconds = divmod(total_seconds, cls._seconds_in_minute) + total_seconds += value.microseconds / 1e6 + + return cls._duration_format.format( + days=int(days), hours=int(hours), minutes=int(minutes), seconds=total_seconds + ) + + @classmethod + def deserialize(cls, value, reader=None): + duration = cls._duration_regex.match(value) + if duration is None: + raise ValueError('Invalid duration: {0}'.format(value)) + + duration = {k: float(v) if v is not None else 0 + for k, v in six.iteritems(duration.groupdict())} + return datetime.timedelta(days=duration['days'], hours=duration['hours'], + minutes=duration['minutes'], seconds=duration['seconds']) + + +class PointTypeIO(GraphSONTypeIO): + prefix = 'dse' + graphson_base_type = 'Point' + + @classmethod + def deserialize(cls, value, reader=None): + return Point.from_wkt(value) + + +class LineStringTypeIO(GraphSONTypeIO): + prefix = 'dse' + graphson_base_type = 'LineString' + + @classmethod + def deserialize(cls, value, reader=None): + return LineString.from_wkt(value) + + +class PolygonTypeIO(GraphSONTypeIO): + prefix = 'dse' + graphson_base_type = 'Polygon' + + @classmethod + def deserialize(cls, value, reader=None): + return Polygon.from_wkt(value) + + +class InetTypeIO(GraphSONTypeIO): + prefix = 'gx' + graphson_base_type = 'InetAddress' + + +class VertexTypeIO(GraphSONTypeIO): + graphson_base_type = 'Vertex' + + @classmethod + def deserialize(cls, value, reader=None): + vertex = Vertex(id=reader.deserialize(value["id"]), + label=value["label"] if "label" in value else "vertex", + type='vertex', + properties={}) + # avoid the properties processing in Vertex.__init__ + vertex.properties = reader.deserialize(value.get('properties', {})) + return vertex + + +class VertexPropertyTypeIO(GraphSONTypeIO): + graphson_base_type = 'VertexProperty' + + @classmethod + def deserialize(cls, value, reader=None): + return VertexProperty(label=value['label'], + value=reader.deserialize(value["value"]), + properties=reader.deserialize(value.get('properties', {}))) + + +class EdgeTypeIO(GraphSONTypeIO): + graphson_base_type = 'Edge' + + @classmethod + def deserialize(cls, value, reader=None): + in_vertex = Vertex(id=reader.deserialize(value["inV"]), + label=value['inVLabel'], + type='vertex', + properties={}) + out_vertex = Vertex(id=reader.deserialize(value["outV"]), + label=value['outVLabel'], + type='vertex', + properties={}) + return Edge( + id=reader.deserialize(value["id"]), + label=value["label"] if "label" in value else "vertex", + type='edge', + properties=reader.deserialize(value.get("properties", {})), + inV=in_vertex, + inVLabel=value['inVLabel'], + outV=out_vertex, + outVLabel=value['outVLabel'] + ) + + +class PropertyTypeIO(GraphSONTypeIO): + graphson_base_type = 'Property' + + @classmethod + def deserialize(cls, value, reader=None): + return {value["key"]: reader.deserialize(value["value"])} + + +class PathTypeIO(GraphSONTypeIO): + graphson_base_type = 'Path' + + @classmethod + def deserialize(cls, value, reader=None): + labels = [set(label) for label in value['labels']] + objects = [reader.deserialize(obj) for obj in value['objects']] + p = Path(labels, []) + p.objects = objects # avoid the object processing in Path.__init__ + return p + + +class _BaseGraphSONSerializer(object): + + _serializers = OrderedDict() + + @classmethod + def register(cls, type, serializer): + cls._serializers[type] = serializer + + @classmethod + def get_type_definitions(cls): + return cls._serializers.copy() + + @classmethod + def get_serializer(cls, value): + """ + Get the serializer for a python object. + + :param value: The python object. + """ + + # The serializer matching logic is as follow: + # 1. Try to find the python type by direct access. + # 2. Try to find the first serializer by class inheritance. + # 3. If no serializer found, return the raw value. + + # Note that when trying to find the serializer by class inheritance, + # the order that serializers are registered is important. The use of + # an OrderedDict is to avoid the difference between executions. + serializer = None + try: + serializer = cls._serializers[type(value)] + except KeyError: + for key, serializer_ in cls._serializers.items(): + if isinstance(value, key): + serializer = serializer_ + break + + if serializer: + # A serializer can have specialized serializers (e.g for Int32 and Int64, so value dependant) + serializer = serializer.get_specialized_serializer(value) + + return serializer + + @classmethod + def serialize(cls, value): + """ + Serialize a python object to graphson. + + :param value: The python object to serialize. + """ + serializer = cls.get_serializer(value) + if serializer: + return serializer.serialize(value) + + return value + + +class GraphSON1Serializer(_BaseGraphSONSerializer): + """ + Serialize python objects to graphson types. + """ + + # When we fall back to a superclass's serializer, we iterate over this map. + # We want that iteration order to be consistent, so we use an OrderedDict, + # not a dict. + _serializers = OrderedDict([ + (bool, BooleanTypeIO), + (bytearray, BlobTypeIO), + (Decimal, BigDecimalTypeIO), + (datetime.date, LocalDateTypeIO), + (datetime.time, LocalTimeTypeIO), + (datetime.timedelta, DurationTypeIO), + (uuid.UUID, UUIDTypeIO), + (Polygon, PolygonTypeIO), + (Point, PointTypeIO), + (LineString, LineStringTypeIO) + ]) + + +if six.PY2: + GraphSON1Serializer.register(buffer, BlobTypeIO) +else: + GraphSON1Serializer.register(memoryview, BlobTypeIO) + GraphSON1Serializer.register(bytes, BlobTypeIO) + GraphSON1Serializer.register(ipaddress.IPv4Address, InetTypeIO) + GraphSON1Serializer.register(ipaddress.IPv6Address, InetTypeIO) + + +class _BaseGraphSONDeserializer(object): + + _deserializers = {} + + @classmethod + def get_type_definitions(cls): + return cls._deserializers.copy() + + @classmethod + def register(cls, graphson_type, serializer): + cls._deserializers[graphson_type] = serializer + + @classmethod + def get_deserializer(cls, graphson_type): + try: + return cls._deserializers[graphson_type] + except KeyError: + raise ValueError('Invalid `graphson_type` specified: {}'.format(graphson_type)) + + @classmethod + def deserialize(cls, graphson_type, value): + """ + Deserialize a `graphson_type` value to a python object. + + :param graphson_base_type: The graphson graphson_type. e.g. 'gx:Instant' + :param value: The graphson value to deserialize. + """ + return cls.get_deserializer(graphson_type).deserialize(value) + + +class GraphSON1Deserializer(_BaseGraphSONDeserializer): + """ + Deserialize graphson1 types to python objects. + """ + _TYPES = [UUIDTypeIO, BigDecimalTypeIO, InstantTypeIO, BlobTypeIO, + PointTypeIO, LineStringTypeIO, PolygonTypeIO, LocalDateTypeIO, + LocalTimeTypeIO, DurationTypeIO, InetTypeIO] + + _deserializers = { + t.graphson_type: t + for t in _TYPES + } + + @classmethod + def deserialize_date(cls, value): + return cls._deserializers[LocalDateTypeIO.graphson_type].deserialize(value) + + @classmethod + def deserialize_time(cls, value): + return cls._deserializers[LocalTimeTypeIO.graphson_type].deserialize(value) + + @classmethod + def deserialize_timestamp(cls, value): + return cls._deserializers[InstantTypeIO.graphson_type].deserialize(value) + + @classmethod + def deserialize_duration(cls, value): + return cls._deserializers[DurationTypeIO.graphson_type].deserialize(value) + + @classmethod + def deserialize_int(cls, value): + return int(value) + + deserialize_smallint = deserialize_int + + deserialize_varint = deserialize_int + + @classmethod + def deserialize_bigint(cls, value): + if six.PY3: + return cls.deserialize_int(value) + return long(value) + + @classmethod + def deserialize_double(cls, value): + return float(value) + + deserialize_float = deserialize_double + + @classmethod + def deserialize_uuid(cls, value): + return cls._deserializers[UUIDTypeIO.graphson_type].deserialize(value) + + @classmethod + def deserialize_decimal(cls, value): + return cls._deserializers[BigDecimalTypeIO.graphson_type].deserialize(value) + + @classmethod + def deserialize_blob(cls, value): + return cls._deserializers[BlobTypeIO.graphson_type].deserialize(value) + + @classmethod + def deserialize_point(cls, value): + return cls._deserializers[PointTypeIO.graphson_type].deserialize(value) + + @classmethod + def deserialize_linestring(cls, value): + return cls._deserializers[LineStringTypeIO.graphson_type].deserialize(value) + + @classmethod + def deserialize_polygon(cls, value): + return cls._deserializers[PolygonTypeIO.graphson_type].deserialize(value) + + @classmethod + def deserialize_inet(cls, value): + return value + + @classmethod + def deserialize_boolean(cls, value): + return value + + +# Remove in the next major +GraphSON1TypeDeserializer = GraphSON1Deserializer +GraphSON1TypeSerializer = GraphSON1Serializer + + +class GraphSON2Serializer(_BaseGraphSONSerializer): + TYPE_KEY = "@type" + VALUE_KEY = "@value" + + _serializers = GraphSON1Serializer.get_type_definitions() + + @classmethod + def serialize(cls, value): + """ + Serialize a type to GraphSON2. + + e.g {'@type': 'gx:Duration', '@value': 'P2DT4H'} + + :param value: The python object to serialize. + """ + serializer = cls.get_serializer(value) + if not serializer: + # if no serializer found, we can't type it. `value` will be jsonized as string. + return value + + value = serializer.serialize(value) + out = {cls.TYPE_KEY: serializer.graphson_type} + if value is not None: + out[cls.VALUE_KEY] = value + + return out + + +GraphSON2Serializer.register(int, IntegerTypeIO) +if six.PY2: + GraphSON2Serializer.register(long, IntegerTypeIO) + + +class GraphSON2Deserializer(_BaseGraphSONDeserializer): + + _TYPES = GraphSON1Deserializer._TYPES + [ + Int16TypeIO, Int32TypeIO, Int64TypeIO, DoubleTypeIO, FloatTypeIO, + BigIntegerTypeIO, VertexTypeIO, VertexPropertyTypeIO, EdgeTypeIO, + PathTypeIO, PropertyTypeIO] + + _deserializers = { + t.graphson_type: t + for t in _TYPES + } + + +class GraphSON2Reader(object): + """ + GraphSON2 Reader that parse json and deserialize to python objects. + """ + + def __init__(self, extra_deserializer_map=None): + """ + :param extra_deserializer_map: map from GraphSON type tag to deserializer instance implementing `deserialize` + """ + self.deserializers = GraphSON2Deserializer.get_type_definitions() + if extra_deserializer_map: + self.deserializers.update(extra_deserializer_map) + + def read(self, json_data): + """ + Read and deserialize ``json_data``. + """ + return self.deserialize(json.loads(json_data)) + + def deserialize(self, obj): + """ + Deserialize GraphSON type-tagged dict values into objects mapped in self.deserializers + """ + if isinstance(obj, dict): + try: + des = self.deserializers[obj[GraphSON2Serializer.TYPE_KEY]] + return des.deserialize(obj[GraphSON2Serializer.VALUE_KEY], self) + except KeyError: + pass + # list and map are treated as normal json objs (could be isolated deserializers) + return {self.deserialize(k): self.deserialize(v) for k, v in six.iteritems(obj)} + elif isinstance(obj, list): + return [self.deserialize(o) for o in obj] + else: + return obj diff --git a/cassandra/graph/query.py b/cassandra/graph/query.py new file mode 100644 index 0000000000..7a543e5bb6 --- /dev/null +++ b/cassandra/graph/query.py @@ -0,0 +1,298 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +from warnings import warn + +import six + +from cassandra import ConsistencyLevel +from cassandra.query import Statement, SimpleStatement +from cassandra.graph.types import Vertex, Edge, Path +from cassandra.graph.graphson import GraphSON2Reader + + +# (attr, description, server option) +_graph_options = ( + ('graph_name', 'name of the targeted graph.', 'graph-name'), + ('graph_source', 'choose the graph traversal source, configured on the server side.', 'graph-source'), + ('graph_language', 'the language used in the queries (default "gremlin-groovy")', 'graph-language'), + ('graph_protocol', 'the graph protocol that the server should use for query results (default "graphson-1-0")', 'graph-results'), + ('graph_read_consistency_level', '''read `dse.ConsistencyLevel `_ for graph queries (if distinct from session default). +Setting this overrides the native `Statement.consistency_level `_ for read operations from Cassandra persistence''', 'graph-read-consistency'), + ('graph_write_consistency_level', '''write `dse.ConsistencyLevel `_ for graph queries (if distinct from session default). +Setting this overrides the native `Statement.consistency_level `_ for write operations to Cassandra persistence.''', 'graph-write-consistency') +) +_graph_option_names = tuple(option[0] for option in _graph_options) + +# this is defined by the execution profile attribute, not in graph options +_request_timeout_key = 'request-timeout' + +_graphson2_reader = GraphSON2Reader() + + +class GraphProtocol(object): + + GRAPHSON_1_0 = 'graphson-1.0' + """ + GraphSON1 + """ + + GRAPHSON_2_0 = 'graphson-2.0' + """ + GraphSON2 + """ + + +class GraphOptions(object): + """ + Options for DSE Graph Query handler. + """ + # See _graph_options map above for notes on valid options + + def __init__(self, **kwargs): + self._graph_options = {} + kwargs.setdefault('graph_source', 'g') + kwargs.setdefault('graph_language', 'gremlin-groovy') + kwargs.setdefault('graph_protocol', GraphProtocol.GRAPHSON_1_0) + for attr, value in six.iteritems(kwargs): + if attr not in _graph_option_names: + warn("Unknown keyword argument received for GraphOptions: {0}".format(attr)) + setattr(self, attr, value) + + def copy(self): + new_options = GraphOptions() + new_options._graph_options = self._graph_options.copy() + return new_options + + def update(self, options): + self._graph_options.update(options._graph_options) + + def get_options_map(self, other_options=None): + """ + Returns a map for these options updated with other options, + and mapped to graph payload types. + """ + options = self._graph_options.copy() + if other_options: + options.update(other_options._graph_options) + + # cls are special-cased so they can be enums in the API, and names in the protocol + for cl in ('graph-write-consistency', 'graph-read-consistency'): + cl_enum = options.get(cl) + if cl_enum is not None: + options[cl] = six.b(ConsistencyLevel.value_to_name[cl_enum]) + return options + + def set_source_default(self): + """ + Sets ``graph_source`` to the server-defined default traversal source ('default') + """ + self.graph_source = 'default' + + def set_source_analytics(self): + """ + Sets ``graph_source`` to the server-defined analytic traversal source ('a') + """ + self.graph_source = 'a' + + def set_source_graph(self): + """ + Sets ``graph_source`` to the server-defined graph traversal source ('g') + """ + self.graph_source = 'g' + + def set_graph_protocol(self, protocol): + """ + Sets ``graph_protocol`` as server graph results format (See :class:`dse.graph.GraphProtocol`) + """ + self.graph_protocol = protocol + + @property + def is_default_source(self): + return self.graph_source in (b'default', None) + + @property + def is_analytics_source(self): + """ + True if ``graph_source`` is set to the server-defined analytics traversal source ('a') + """ + return self.graph_source == b'a' + + @property + def is_graph_source(self): + """ + True if ``graph_source`` is set to the server-defined graph traversal source ('g') + """ + return self.graph_source == b'g' + + +for opt in _graph_options: + + def get(self, key=opt[2]): + return self._graph_options.get(key) + + def set(self, value, key=opt[2]): + if value is not None: + # normalize text here so it doesn't have to be done every time we get options map + if isinstance(value, six.text_type) and not isinstance(value, six.binary_type): + value = six.b(value) + self._graph_options[key] = value + else: + self._graph_options.pop(key, None) + + def delete(self, key=opt[2]): + self._graph_options.pop(key, None) + + setattr(GraphOptions, opt[0], property(get, set, delete, opt[1])) + + +class GraphStatement(Statement): + """ An abstract class representing a graph query.""" + + @property + def query(self): + raise NotImplementedError() + + def __str__(self): + return u''.format(self.query) + __repr__ = __str__ + + +class SimpleGraphStatement(GraphStatement, SimpleStatement): + """ + Simple graph statement for :meth:`.Session.execute_graph`. + Takes the same parameters as :class:`.SimpleStatement`. + """ + @property + def query(self): + return self._query_string + + +def single_object_row_factory(column_names, rows): + """ + returns the JSON string value of graph results + """ + return [row[0] for row in rows] + + +def graph_result_row_factory(column_names, rows): + """ + Returns a :class:`cassandra.graph.Result` object that can load graph results and produce specific types. + The Result JSON is deserialized and unpacked from the top-level 'result' dict. + """ + return [Result(json.loads(row[0])['result']) for row in rows] + + +def graph_object_row_factory(column_names, rows): + """ + Like :func:`~.graph_result_row_factory`, except known element types (:class:`~.Vertex`, :class:`~.Edge`) are + converted to their simplified objects. Some low-level metadata is shed in this conversion. Unknown result types are + still returned as :class:`dse.graph.Result`. + """ + return _graph_object_sequence(json.loads(row[0])['result'] for row in rows) + + +def _graph_object_sequence(objects): + for o in objects: + res = Result(o) + if isinstance(o, dict): + typ = res.value.get('type') + if typ == 'vertex': + res = res.as_vertex() + elif typ == 'edge': + res = res.as_edge() + yield res + + +def graph_graphson2_row_factory(column_names, rows): + """ + Row Factory that returns the decoded graphson as DSE types. + """ + return [_graphson2_reader.read(row[0])['result'] for row in rows] + + +class Result(object): + """ + Represents deserialized graph results. + Property and item getters are provided for convenience. + """ + + value = None + """ + Deserialized value from the result + """ + + def __init__(self, value): + self.value = value + + def __getattr__(self, attr): + if not isinstance(self.value, dict): + raise ValueError("Value cannot be accessed as a dict") + + if attr in self.value: + return self.value[attr] + + raise AttributeError("Result has no top-level attribute %r" % (attr,)) + + def __getitem__(self, item): + if isinstance(self.value, dict) and isinstance(item, six.string_types): + return self.value[item] + elif isinstance(self.value, list) and isinstance(item, int): + return self.value[item] + else: + raise ValueError("Result cannot be indexed by %r" % (item,)) + + def __str__(self): + return str(self.value) + + def __repr__(self): + return "%s(%r)" % (Result.__name__, self.value) + + def __eq__(self, other): + return self.value == other.value + + def as_vertex(self): + """ + Return a :class:`Vertex` parsed from this result + + Raises TypeError if parsing fails (i.e. the result structure is not valid). + """ + try: + return Vertex(self.id, self.label, self.type, self.value.get('properties', {})) + except (AttributeError, ValueError, TypeError): + raise TypeError("Could not create Vertex from %r" % (self,)) + + def as_edge(self): + """ + Return a :class:`Edge` parsed from this result + + Raises TypeError if parsing fails (i.e. the result structure is not valid). + """ + try: + return Edge(self.id, self.label, self.type, self.value.get('properties', {}), + self.inV, self.inVLabel, self.outV, self.outVLabel) + except (AttributeError, ValueError, TypeError): + raise TypeError("Could not create Edge from %r" % (self,)) + + def as_path(self): + """ + Return a :class:`Path` parsed from this result + + Raises TypeError if parsing fails (i.e. the result structure is not valid). + """ + try: + return Path(self.labels, self.objects) + except (AttributeError, ValueError, TypeError): + raise TypeError("Could not create Path from %r" % (self,)) diff --git a/cassandra/graph/types.py b/cassandra/graph/types.py new file mode 100644 index 0000000000..63bd6c7f88 --- /dev/null +++ b/cassandra/graph/types.py @@ -0,0 +1,158 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class Element(object): + + element_type = None + + _attrs = ('id', 'label', 'type', 'properties') + + def __init__(self, id, label, type, properties): + if type != self.element_type: + raise TypeError("Attempted to create %s from %s element", (type, self.element_type)) + + self.id = id + self.label = label + self.type = type + self.properties = self._extract_properties(properties) + + @staticmethod + def _extract_properties(properties): + return dict(properties) + + def __eq__(self, other): + return all(getattr(self, attr) == getattr(other, attr) for attr in self._attrs) + + def __str__(self): + return str(dict((k, getattr(self, k)) for k in self._attrs)) + + +class Vertex(Element): + """ + Represents a Vertex element from a graph query. + + Vertex ``properties`` are extracted into a ``dict`` of property names to list of :class:`~VertexProperty` (list + because they are always encoded that way, and sometimes have multiple cardinality; VertexProperty because sometimes + the properties themselves have property maps). + """ + + element_type = 'vertex' + + @staticmethod + def _extract_properties(properties): + # vertex properties are always encoded as a list, regardless of Cardinality + return dict((k, [VertexProperty(k, p['value'], p.get('properties')) for p in v]) for k, v in properties.items()) + + def __repr__(self): + properties = dict((name, [{'label': prop.label, 'value': prop.value, 'properties': prop.properties} for prop in prop_list]) + for name, prop_list in self.properties.items()) + return "%s(%r, %r, %r, %r)" % (self.__class__.__name__, + self.id, self.label, + self.type, properties) + + +class VertexProperty(object): + """ + Vertex properties have a top-level value and an optional ``dict`` of properties. + """ + + label = None + """ + label of the property + """ + + value = None + """ + Value of the property + """ + + properties = None + """ + dict of properties attached to the property + """ + + def __init__(self, label, value, properties=None): + self.label = label + self.value = value + self.properties = properties or {} + + def __eq__(self, other): + return isinstance(other, VertexProperty) and self.label == other.label and self.value == other.value and self.properties == other.properties + + def __repr__(self): + return "%s(%r, %r, %r)" % (self.__class__.__name__, self.label, self.value, self.properties) + + +class Edge(Element): + """ + Represents an Edge element from a graph query. + + Attributes match initializer parameters. + """ + + element_type = 'edge' + + _attrs = Element._attrs + ('inV', 'inVLabel', 'outV', 'outVLabel') + + def __init__(self, id, label, type, properties, + inV, inVLabel, outV, outVLabel): + super(Edge, self).__init__(id, label, type, properties) + self.inV = inV + self.inVLabel = inVLabel + self.outV = outV + self.outVLabel = outVLabel + + def __repr__(self): + return "%s(%r, %r, %r, %r, %r, %r, %r, %r)" %\ + (self.__class__.__name__, + self.id, self.label, + self.type, self.properties, + self.inV, self.inVLabel, + self.outV, self.outVLabel) + + +class Path(object): + """ + Represents a graph path. + + Labels list is taken verbatim from the results. + + Objects are either :class:`~.Result` or :class:`~.Vertex`/:class:`~.Edge` for recognized types + """ + + labels = None + """ + List of labels in the path + """ + + objects = None + """ + List of objects in the path + """ + + def __init__(self, labels, objects): + # The Path class should not do any deserialization by itself. To fix in the next major. + from cassandra.graph.query import _graph_object_sequence + self.labels = labels + self.objects = list(_graph_object_sequence(objects)) + + def __eq__(self, other): + return self.labels == other.labels and self.objects == other.objects + + def __str__(self): + return str({'labels': self.labels, 'objects': self.objects}) + + def __repr__(self): + return "%s(%r, %r)" % (self.__class__.__name__, self.labels, [o.value for o in self.objects]) From fe6d2e410f5362ce48c5429d55ff19df11ed2a59 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 9 Sep 2019 18:42:22 -0400 Subject: [PATCH 1598/2005] auth.py encoder.py cluster.py --- cassandra/auth.py | 151 +++++++++++-- cassandra/cluster.py | 501 +++++++++++++++++++++++++++++++++++++------ cassandra/encoder.py | 14 +- 3 files changed, 586 insertions(+), 80 deletions(-) diff --git a/cassandra/auth.py b/cassandra/auth.py index 1d94817b64..276646a63b 100644 --- a/cassandra/auth.py +++ b/cassandra/auth.py @@ -6,15 +6,35 @@ # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and +# http://www.datastax.com/terms/datastax-dse-driver-license-terms +import socket +import logging + +try: + import kerberos + _have_kerberos = True +except ImportError: + _have_kerberos = False + +try: + from puresasl.client import SASLClient + _have_puresasl = True +except ImportError: + _have_puresasl = False + try: from puresasl.client import SASLClient except ImportError: SASLClient = None +import six + +log = logging.getLogger(__name__) + +# Custom payload keys related to DSE Unified Auth +_proxy_execute_key = 'ProxyExecute' + + class AuthProvider(object): """ An abstract class that defines the interface that will be used for @@ -113,22 +133,31 @@ def new_authenticator(self, host): return PlainTextAuthenticator(self.username, self.password) -class PlainTextAuthenticator(Authenticator): +class TransitionalModePlainTextAuthProvider(object): """ - An :class:`~.Authenticator` that works with Cassandra's PasswordAuthenticator. + An :class:`~.AuthProvider` that works with DSE TransitionalModePlainTextAuthenticator. - .. versionadded:: 2.0.0 - """ + Example usage:: - def __init__(self, username, password): - self.username = username - self.password = password + from dse.cluster import Cluster + from dse.auth import TransitionalModePlainTextAuthProvider - def initial_response(self): - return "\x00%s\x00%s" % (self.username, self.password) + auth_provider = TransitionalModePlainTextAuthProvider() + cluster = Cluster(auth_provider=auth_provider) - def evaluate_challenge(self, challenge): - return None + .. warning:: TransitionalModePlainTextAuthProvider will be removed in dse-driver + 3.0. The transitional mode will be handled internally without the need + of any auth provider. + """ + + def __init__(self): + # TODO remove next major + log.warning("TransitionalModePlainTextAuthProvider will be removed in cassandra-driver " + "4.0. The transitional mode will be handled internally without the need " + "of any auth provider.") + + def new_authenticator(self, host): + return TransitionalModePlainTextAuthenticator() class SaslAuthProvider(AuthProvider): @@ -180,3 +209,95 @@ def initial_response(self): def evaluate_challenge(self, challenge): return self.sasl.process(challenge) + + +DSEPlainTextAuthProvider = PlainTextAuthProvider + + +class DSEGSSAPIAuthProvider(AuthProvider): + """ + Auth provider for GSS API authentication. Works with legacy `KerberosAuthenticator` + or `DseAuthenticator` if `kerberos` scheme is enabled. + """ + def __init__(self, service='dse', qops=('auth',), resolve_host_name=True, **properties): + """ + :param service: name of the service + :param qops: iterable of "Quality of Protection" allowed; see ``puresasl.QOP`` + :param resolve_host_name: boolean flag indicating whether the authenticator should reverse-lookup an FQDN when + creating a new authenticator. Default is ``True``, which will resolve, or return the numeric address if there is no PTR + record. Setting ``False`` creates the authenticator with the numeric address known by Cassandra + :param properties: additional keyword properties to pass for the ``puresasl.mechanisms.GSSAPIMechanism`` class. + Presently, 'principal' (user) is the only one referenced in the ``pure-sasl`` implementation + """ + if not _have_puresasl: + raise ImportError('The puresasl library has not been installed') + if not _have_kerberos: + raise ImportError('The kerberos library has not been installed') + self.service = service + self.qops = qops + self.resolve_host_name = resolve_host_name + self.properties = properties + + def new_authenticator(self, host): + if self.resolve_host_name: + host = socket.getnameinfo((host, 0), 0)[0] + return GSSAPIAuthenticator(host, self.service, self.qops, self.properties) + + +class BaseDSEAuthenticator(Authenticator): + def get_mechanism(self): + raise NotImplementedError("get_mechanism not implemented") + + def get_initial_challenge(self): + raise NotImplementedError("get_initial_challenge not implemented") + + def initial_response(self): + if self.server_authenticator_class == "com.datastax.bdp.cassandra.auth.DseAuthenticator": + return self.get_mechanism() + else: + return self.evaluate_challenge(self.get_initial_challenge()) + + +class PlainTextAuthenticator(BaseDSEAuthenticator): + + def __init__(self, username, password): + self.username = username + self.password = password + + def get_mechanism(self): + return six.b("PLAIN") + + def get_initial_challenge(self): + return six.b("PLAIN-START") + + def evaluate_challenge(self, challenge): + if challenge == six.b('PLAIN-START'): + return six.b("\x00%s\x00%s" % (self.username, self.password)) + raise Exception('Did not receive a valid challenge response from server') + + +class TransitionalModePlainTextAuthenticator(PlainTextAuthenticator): + """ + Authenticator that accounts for DSE authentication is configured with transitional mode. + """ + + def __init__(self): + super(TransitionalModePlainTextAuthenticator, self).__init__('', '') + + +class GSSAPIAuthenticator(BaseDSEAuthenticator): + def __init__(self, host, service, qops, properties): + properties = properties or {} + self.sasl = SASLClient(host, service, 'GSSAPI', qops=qops, **properties) + + def get_mechanism(self): + return six.b("GSSAPI") + + def get_initial_challenge(self): + return six.b("GSSAPI-START") + + def evaluate_challenge(self, challenge): + if challenge == six.b('GSSAPI-START'): + return self.sasl.process() + else: + return self.sasl.process(challenge) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 731a948c4a..a040e12deb 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -23,7 +23,8 @@ from concurrent.futures import ThreadPoolExecutor, FIRST_COMPLETED, wait as wait_futures from copy import copy from functools import partial, wraps -from itertools import groupby, count +from itertools import groupby, count, chain +import json import logging from warnings import warn from random import random @@ -33,6 +34,7 @@ import sys import time from threading import Lock, RLock, Thread, Event +import uuid import weakref from weakref import WeakValueDictionary @@ -45,9 +47,11 @@ OperationTimedOut, UnsupportedOperation, SchemaTargetType, DriverException, ProtocolVersion, UnresolvableContactPoints) +from cassandra.auth import _proxy_execute_key from cassandra.connection import (ConnectionException, ConnectionShutdown, ConnectionHeartbeat, ProtocolVersionUnsupported, - EndPoint, DefaultEndPoint, DefaultEndPointFactory) + EndPoint, DefaultEndPoint, DefaultEndPointFactory, + ContinuousPagingState) from cassandra.cqltypes import UserType from cassandra.encoder import Encoder from cassandra.protocol import (QueryMessage, ResultMessage, @@ -67,15 +71,20 @@ from cassandra.policies import (TokenAwarePolicy, DCAwareRoundRobinPolicy, SimpleConvictionPolicy, ExponentialReconnectionPolicy, HostDistance, RetryPolicy, IdentityTranslator, NoSpeculativeExecutionPlan, - NoSpeculativeExecutionPolicy) + NoSpeculativeExecutionPolicy, DSELoadBalancingPolicy) from cassandra.pool import (Host, _ReconnectionHandler, _HostReconnectionHandler, HostConnectionPool, HostConnection, NoConnectionsAvailable) from cassandra.query import (SimpleStatement, PreparedStatement, BoundStatement, BatchStatement, bind_params, QueryTrace, TraceUnavailable, - named_tuple_factory, dict_factory, tuple_factory, FETCH_SIZE_UNSET) + named_tuple_factory, dict_factory, tuple_factory, FETCH_SIZE_UNSET, + HostTargetingStatement) from cassandra.timestamps import MonotonicTimestampGenerator from cassandra.compat import Mapping +from cassandra.util import _resolve_contact_points_to_string_map + +if six.PY3: + long = int def _is_eventlet_monkey_patched(): @@ -197,6 +206,47 @@ def default_lbp_factory(): return DCAwareRoundRobinPolicy() +class ContinuousPagingOptions(object): + + class PagingUnit(object): + BYTES = 1 + ROWS = 2 + + page_unit = None + """ + Value of PagingUnit. Default is PagingUnit.ROWS. + + Units refer to the :attr:`~.Statement.fetch_size` or :attr:`~.Session.default_fetch_size`. + """ + + max_pages = None + """ + Max number of pages to send + """ + + max_pages_per_second = None + """ + Max rate at which to send pages + """ + + max_queue_size = None + """ + The maximum queue size for caching pages, only honored for protocol version DSE_V2 and higher, + by default it is 4 and it must be at least 2. + """ + + def __init__(self, page_unit=PagingUnit.ROWS, max_pages=0, max_pages_per_second=0, max_queue_size=4): + self.page_unit = page_unit + self.max_pages = max_pages + self.max_pages_per_second = max_pages_per_second + if max_queue_size < 2: + raise ValueError('ContinuousPagingOptions.max_queue_size must be 2 or greater') + self.max_queue_size = max_queue_size + + def page_unit_bytes(self): + return self.page_unit == ContinuousPagingOptions.PagingUnit.BYTES + + def _addrinfo_or_none(contact_point, port): """ A helper function that wraps socket.getaddrinfo and returns None @@ -212,23 +262,17 @@ def _addrinfo_or_none(contact_point, port): return None -def _resolve_contact_points(contact_points, port): - resolved = tuple(_addrinfo_or_none(p, port) - for p in contact_points) - - if resolved and all((x is None for x in resolved)): - raise UnresolvableContactPoints(contact_points, port) - - resolved = tuple(r for r in resolved if r is not None) - - return [endpoint[4][0] - for addrinfo in resolved - for endpoint in addrinfo] +def _execution_profile_to_string(name): + default_profiles = { + EXEC_PROFILE_DEFAULT: 'EXEC_PROFILE_DEFAULT', + EXEC_PROFILE_GRAPH_DEFAULT: 'EXEC_PROFILE_GRAPH_DEFAULT', + EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT: 'EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT', + EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT: 'EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT', + } + if name in default_profiles: + return default_profiles[name] -def _execution_profile_to_string(name): - if name is EXEC_PROFILE_DEFAULT: - return 'EXEC_PROFILE_DEFAULT' return '"%s"' % (name,) @@ -285,13 +329,27 @@ class ExecutionProfile(object): Defaults to :class:`.NoSpeculativeExecutionPolicy` if not specified """ + continuous_paging_options = None + """ + *Note:* This feature is implemented to facilitate server integration testing. It is not intended for general use in the Python driver. + See :attr:`.Statement.fetch_size` or :attr:`Session.default_fetch_size` for configuring normal paging. + + When set, requests will use DSE's continuous paging, which streams multiple pages without + intermediate requests. + + This has the potential to materialize all results in memory at once if the consumer cannot keep up. Use options + to constrain page size and rate. + + This is only available for DSE clusters. + """ + # indicates if lbp was set explicitly or uses default values _load_balancing_policy_explicit = False def __init__(self, load_balancing_policy=_NOT_SET, retry_policy=None, consistency_level=ConsistencyLevel.LOCAL_ONE, serial_consistency_level=None, - request_timeout=10.0, row_factory=named_tuple_factory, speculative_execution_policy=None): - + request_timeout=10.0, row_factory=named_tuple_factory, speculative_execution_policy=None, + continuous_paging_options=None): if load_balancing_policy is _NOT_SET: self._load_balancing_policy_explicit = False self.load_balancing_policy = default_lbp_factory() @@ -311,6 +369,67 @@ def __init__(self, load_balancing_policy=_NOT_SET, retry_policy=None, self.request_timeout = request_timeout self.row_factory = row_factory self.speculative_execution_policy = speculative_execution_policy or NoSpeculativeExecutionPolicy() + self.continuous_paging_options = continuous_paging_options + + +class GraphExecutionProfile(ExecutionProfile): + graph_options = None + """ + :class:`.GraphOptions` to use with this execution + + Default options for graph queries, initialized as follows by default:: + + GraphOptions(graph_language=b'gremlin-groovy') + + See cassandra.graph.GraphOptions + """ + + def __init__(self, load_balancing_policy=_NOT_SET, retry_policy=None, + consistency_level=ConsistencyLevel.LOCAL_ONE, serial_consistency_level=None, + request_timeout=30.0, row_factory=None, # TODO GRAPH graph_object_row_factory + graph_options=None): + """ + Default execution profile for graph execution. + + See :class:`.ExecutionProfile` + for base attributes. + + In addition to default parameters shown in the signature, this profile also defaults ``retry_policy`` to + :class:`cassandra.policies.NeverRetryPolicy`. + """ + retry_policy = retry_policy or NeverRetryPolicy() + super(GraphExecutionProfile, self).__init__(load_balancing_policy, retry_policy, consistency_level, + serial_consistency_level, request_timeout, row_factory) + self.graph_options = graph_options or GraphOptions(graph_source=b'g', + graph_language=b'gremlin-groovy') + + +class GraphAnalyticsExecutionProfile(GraphExecutionProfile): + + def __init__(self, load_balancing_policy=None, retry_policy=None, + consistency_level=ConsistencyLevel.LOCAL_ONE, serial_consistency_level=None, + request_timeout=3600. * 24. * 7., row_factory=None, # TODO GRAPH graph_object_row_factory + graph_options=None): + """ + Execution profile with timeout and load balancing appropriate for graph analytics queries. + + See also :class:`~.GraphExecutionPolicy`. + + In addition to default parameters shown in the signature, this profile also defaults ``retry_policy`` to + :class:`dse.policies.NeverRetryPolicy`, and ``load_balancing_policy`` to one that targets the current Spark + master. + + Note: The graph_options.graph_source is set automatically to b'a' (analytics) + when using GraphAnalyticsExecutionProfile. This is mandatory to target analytics nodes. + """ + load_balancing_policy = load_balancing_policy or DSELoadBalancingPolicy(default_lbp_factory()) + graph_options = graph_options or GraphOptions(graph_language=b'gremlin-groovy') + super(GraphAnalyticsExecutionProfile, self).__init__(load_balancing_policy, retry_policy, consistency_level, + serial_consistency_level, request_timeout, row_factory, + graph_options) + # ensure the graph_source is analytics, since this is the purpose of the GraphAnalyticsExecutionProfile + self.graph_options.set_source_analytics() + class ProfileManager(object): @@ -373,6 +492,31 @@ def default(self): Use this as the key in ``Cluster(execution_profiles)`` to override the default profile. """ +EXEC_PROFILE_GRAPH_DEFAULT = object() +""" +Key for the default graph execution profile, used when no other profile is selected in +``Session.execute_graph(execution_profile)``. + +Use this as the key in :doc:`Cluster(execution_profiles) ` +to override the default graph profile. +""" + +EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT = object() +""" +Key for the default graph system execution profile. This can be used for graph statements using the DSE graph +system API. + +Selected using ``Session.execute_graph(execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)``. +""" + +EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT = object() +""" +Key for the default graph analytics execution profile. This can be used for graph statements intended to +use Spark/analytics as the traversal source. + +Selected using ``Session.execute_graph(execution_profile=EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT)``. +""" + class _ConfigMode(object): UNCOMMITTED = 0 @@ -780,6 +924,34 @@ def default_retry_policy(self, policy): documentation for :meth:`Session.timestamp_generator`. """ + monitor_reporting_enabled = True + """ + A boolean indicating if monitor reporting, which sends gathered data to + Insights when running against DSE 6.8 and higher. + """ + + monitor_reporting_interval = 30 + """ + A boolean indicating if monitor reporting, which sends gathered data to + Insights when running against DSE 6.8 and higher. + """ + + client_id = None + """ + A UUID that uniquely identifies this Cluster object to Insights. This will + be generated automatically unless the user provides one. + """ + + application_name = '' + """ + A string identifying this application to Insights. + """ + + application_version = '' + """ + A string identifiying this application's version to Insights + """ + @property def schema_metadata_enabled(self): """ @@ -876,7 +1048,11 @@ def __init__(self, no_compact=False, ssl_context=None, endpoint_factory=None, - monitor_reporting_enabled=False): # TODO just added for tests purpose before insights integration + application_name=None, + application_version=None, + monitor_reporting_enabled=True, + monitor_reporting_interval=30, + client_id=None): """ ``executor_threads`` defines the number of threads in a pool for handling asynchronous tasks such as extablishing connection pools or refreshing metadata. @@ -904,14 +1080,24 @@ def __init__(self, raw_contact_points = [cp for cp in self.contact_points if not isinstance(cp, EndPoint)] self.endpoints_resolved = [cp for cp in self.contact_points if isinstance(cp, EndPoint)] + self._endpoint_map_for_insights = {repr(ep): '{ip}:{port}'.format(ip=ep.address, port=ep.port) + for ep in self.endpoints_resolved} + + strs_resolved_map = _resolve_contact_points_to_string_map(raw_contact_points, port) + self.endpoints_resolved.extend(list(chain( + *[ + [DefaultEndPoint(x, port) for x in xs if x is not None] + for xs in strs_resolved_map.values() if xs is not None + ] + ))) + self._endpoint_map_for_insights.update( + {key: ['{ip}:{port}'.format(ip=ip, port=port) for ip in value] + for key, value in strs_resolved_map.items() if value is not None} + ) - try: - self.endpoints_resolved += [DefaultEndPoint(address, self.port) - for address in _resolve_contact_points(raw_contact_points, self.port)] - except UnresolvableContactPoints: - # rethrow if no EndPoint was provided - if not self.endpoints_resolved: - raise + if contact_points and (not self.endpoints_resolved): + # only want to raise here if the user specified CPs but resolution failed + raise UnresolvableContactPoints(self._endpoint_map_for_insights) self.compression = compression @@ -1029,6 +1215,8 @@ def __init__(self, self.connect_timeout = connect_timeout self.prepare_on_all_hosts = prepare_on_all_hosts self.reprepare_on_up = reprepare_on_up + self.monitor_reporting_enabled = monitor_reporting_enabled + self.monitor_reporting_interval = monitor_reporting_interval self._listeners = set() self._listener_lock = Lock() @@ -1078,6 +1266,13 @@ def __init__(self, self.status_event_refresh_window, schema_metadata_enabled, token_metadata_enabled) + if client_id is None: + self.client_id = uuid.uuid4() + if application_name is not None: + self.application_name = application_name + if application_version is not None: + self.application_version = application_version + def _create_thread_pool_executor(self, **kwargs): """ Create a ThreadPoolExecutor for the cluster. In most cases, the built-in @@ -1360,7 +1555,6 @@ def _make_connection_kwargs(self, endpoint, kwargs_dict): def protocol_downgrade(self, host_endpoint, previous_version): if self._protocol_version_explicit: raise DriverException("ProtocolError returned from server while using explicitly set client protocol_version %d" % (previous_version,)) - new_version = ProtocolVersion.get_lower_supported(previous_version) if new_version < ProtocolVersion.MIN_SUPPORTED: raise DriverException( @@ -2026,6 +2220,8 @@ class Session(object): hosts = None keyspace = None is_shutdown = False + session_id = None + _monitor_reporter = None _row_factory = staticmethod(named_tuple_factory) @property @@ -2210,6 +2406,12 @@ def default_serial_consistency_level(self, cl): When compiled with Cython, there are also built-in faster alternatives. See :ref:`faster_deser` """ + session_id = None + """ + A UUID that uniquely identifies this Session to Insights. This will be + generated automatically. + """ + _lock = None _pools = None _profile_manager = None @@ -2247,9 +2449,27 @@ def __init__(self, cluster, hosts, keyspace=None): msg += " using keyspace '%s'" % self.keyspace raise NoHostAvailable(msg, [h.address for h in hosts]) + # TODO INSIGHT + # cc_host = self.cluster.get_control_connection_host() + # valid_insights_version = (cc_host and version_supports_insights(cc_host.dse_version)) + # if self.cluster.monitor_reporting_enabled and valid_insights_version: + # self._monitor_reporter = MonitorReporter( + # interval_sec=self.cluster.monitor_reporting_interval, + # session=self, + # ) + # else: + # if cc_host: + # log.debug('Not starting MonitorReporter thread for Insights; ' + # 'not supported by server version {v} on ' + # 'ControlConnection host {c}'.format(v=cc_host.release_version, c=cc_host)) + # + # self.session_id = uuid.uuid4() + # log.debug('Started Session with client_id {} and session_id {}'.format(self.cluster.client_id, + # self.session_id)) + def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_payload=None, execution_profile=EXEC_PROFILE_DEFAULT, - paging_state=None, host=None): + paging_state=None, host=None, execute_as=None): """ Execute the given query and synchronously wait for the response. @@ -2265,9 +2485,8 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, `timeout` should specify a floating-point timeout (in seconds) after which an :exc:`.OperationTimedOut` exception will be raised if the query - has not completed. If not set, the timeout defaults to - :attr:`~.Session.default_timeout`. If set to :const:`None`, there is - no timeout. Please see :meth:`.ResponseFuture.result` for details on + has not completed. If not set, the timeout defaults to the request_timeout of the selected ``execution_profile``. + If set to :const:`None`, there is no timeout. Please see :meth:`.ResponseFuture.result` for details on the scope and effect of this timeout. If `trace` is set to :const:`True`, the query will be sent with tracing enabled. @@ -2286,13 +2505,15 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, `host` is the :class:`cassandra.pool.Host` that should handle the query. If the host specified is down or not yet connected, the query will fail with :class:`NoHostAvailable`. Using this is discouraged except in a few cases, e.g., querying node-local tables and applying schema changes. + + `execute_as` the user that will be used on the server to execute the request. """ - return self.execute_async(query, parameters, trace, custom_payload, - timeout, execution_profile, paging_state, host).result() + + return self.execute_async(query, parameters, trace, custom_payload, timeout, execution_profile, paging_state, host, execute_as).result() def execute_async(self, query, parameters=None, trace=False, custom_payload=None, timeout=_NOT_SET, execution_profile=EXEC_PROFILE_DEFAULT, - paging_state=None, host=None): + paging_state=None, host=None, execute_as=None): """ Execute the given query and return a :class:`~.ResponseFuture` object which callbacks may be attached to for asynchronous response @@ -2327,6 +2548,10 @@ def execute_async(self, query, parameters=None, trace=False, custom_payload=None ... log.exception("Operation failed:") """ + custom_payload = custom_payload if custom_payload else {} + if execute_as: + custom_payload[_proxy_execute_key] = six.b(execute_as) + future = self._create_response_future( query, parameters, trace, custom_payload, timeout, execution_profile, paging_state, host) @@ -2335,6 +2560,100 @@ def execute_async(self, query, parameters=None, trace=False, custom_payload=None future.send_request() return future + def execute_graph(self, query, parameters=None, trace=False, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT, execute_as=None): + """ + Executes a Gremlin query string or GraphStatement synchronously, + and returns a ResultSet from this execution. + + `parameters` is dict of named parameters to bind. The values must be + JSON-serializable. + + `execution_profile`: Selects an execution profile for the request. + + `execute_as` the user that will be used on the server to execute the request. + """ + return self.execute_graph_async(query, parameters, trace, execution_profile, execute_as).result() + + def execute_graph_async(self, query, parameters=None, trace=False, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT, execute_as=None): + """ + Execute the graph query and return a :class:`ResponseFuture` + object which callbacks may be attached to for asynchronous response delivery. You may also call ``ResponseFuture.result()`` to synchronously block for + results at any time. + """ + # TODO GRAPH + # if not isinstance(query, GraphStatement): + # query = SimpleGraphStatement(query) + + execution_profile = self._maybe_get_execution_profile(execution_profile) # look up instance here so we can apply the extended attributes + + try: + options = execution_profile.graph_options.copy() + except AttributeError: + raise ValueError("Execution profile for graph queries must derive from GraphExecutionProfile, and provide graph_options") + + graph_parameters = None + if parameters: + graph_parameters = self._transform_params(parameters, graph_options=options) + + custom_payload = options.get_options_map() + if execute_as: + custom_payload[_proxy_execute_key] = six.b(execute_as) + custom_payload[_request_timeout_key] = int64_pack(long(execution_profile.request_timeout * 1000)) + + future = self._create_response_future(query, parameters=None, trace=trace, custom_payload=custom_payload, + timeout=_NOT_SET, execution_profile=execution_profile) + + future.message.query_params = graph_parameters + future._protocol_handler = self.client_protocol_handler + + if options.is_analytics_source and isinstance(execution_profile.load_balancing_policy, DSELoadBalancingPolicy): + self._target_analytics_master(future) + else: + future.send_request() + return future + + def _transform_params(self, parameters, graph_options): + if not isinstance(parameters, dict): + raise ValueError('The parameters must be a dictionary. Unnamed parameters are not allowed.') + + # Serialize python types to graphson + serializer = GraphSON1Serializer + if graph_options.graph_protocol == GraphProtocol.GRAPHSON_2_0: + serializer = GraphSON2Serializer + + serialized_parameters = { + p: serializer.serialize(v) + for p, v in six.iteritems(parameters) + } + return [json.dumps(serialized_parameters).encode('utf-8')] + + def _target_analytics_master(self, future): + future._start_timer() + master_query_future = self._create_response_future("CALL DseClientTool.getAnalyticsGraphServer()", + parameters=None, trace=False, + custom_payload=None, timeout=future.timeout) + master_query_future.row_factory = tuple_factory + master_query_future.send_request() + + cb = self._on_analytics_master_result + args = (master_query_future, future) + master_query_future.add_callbacks(callback=cb, callback_args=args, errback=cb, errback_args=args) + + def _on_analytics_master_result(self, response, master_future, query_future): + try: + row = master_future.result()[0] + addr = row[0]['location'] + delimiter_index = addr.rfind(':') # assumes : - not robust, but that's what is being provided + if delimiter_index > 0: + addr = addr[:delimiter_index] + targeted_query = HostTargetingStatement(query_future.query, addr) + query_future.query_plan = query_future._load_balancer.make_query_plan(self.keyspace, targeted_query) + except Exception: + log.debug("Failed querying analytics master (request might not be routed optimally). " + "Make sure the session is connecting to a graph analytics datacenter.", exc_info=True) + + self.submit(query_future.send_request) + def _create_response_future(self, query, parameters, trace, custom_payload, timeout, execution_profile=EXEC_PROFILE_DEFAULT, paging_state=None, host=None): @@ -2369,6 +2688,7 @@ def _create_response_future(self, query, parameters, trace, custom_payload, cl = query.consistency_level if query.consistency_level is not None else execution_profile.consistency_level serial_cl = query.serial_consistency_level if query.serial_consistency_level is not None else execution_profile.serial_consistency_level + continuous_paging_options = execution_profile.continuous_paging_options retry_policy = query.retry_policy or execution_profile.retry_policy row_factory = execution_profile.row_factory @@ -2387,6 +2707,14 @@ def _create_response_future(self, query, parameters, trace, custom_payload, else: timestamp = None + supports_continuous_paging_state = ( + ProtocolVersion.has_continuous_paging_next_pages(self._protocol_version) + ) + if continuous_paging_options and supports_continuous_paging_state: + continuous_paging_state = ContinuousPagingState(continuous_paging_options.max_queue_size) + else: + continuous_paging_state = None + if isinstance(query, SimpleStatement): query_string = query.query_string statement_keyspace = query.keyspace if ProtocolVersion.uses_keyspace_flag(self._protocol_version) else None @@ -2394,14 +2722,15 @@ def _create_response_future(self, query, parameters, trace, custom_payload, query_string = bind_params(query_string, parameters, self.encoder) message = QueryMessage( query_string, cl, serial_cl, - fetch_size, timestamp=timestamp, - keyspace=statement_keyspace) + fetch_size, paging_state, timestamp, + continuous_paging_options, statement_keyspace) elif isinstance(query, BoundStatement): prepared_statement = query.prepared_statement message = ExecuteMessage( prepared_statement.query_id, query.values, cl, - serial_cl, fetch_size, - timestamp=timestamp, skip_meta=bool(prepared_statement.result_metadata), + serial_cl, fetch_size, paging_state, timestamp, + skip_meta=bool(prepared_statement.result_metadata), + continuous_paging_options=continuous_paging_options, result_metadata_id=prepared_statement.result_metadata_id) elif isinstance(query, BatchStatement): if self._protocol_version < 2: @@ -2413,20 +2742,23 @@ def _create_response_future(self, query, parameters, trace, custom_payload, message = BatchMessage( query.batch_type, query._statements_and_parameters, cl, serial_cl, timestamp, statement_keyspace) + elif isinstance(query, GraphStatement): + # the statement_keyspace is not aplicable to GraphStatement + message = QueryMessage(query.query, cl, serial_cl, fetch_size, + paging_state, timestamp, + continuous_paging_options) message.tracing = trace - message.update_custom_payload(query.custom_payload) message.update_custom_payload(custom_payload) message.allow_beta_protocol_version = self.cluster.allow_beta_protocol_version - message.paging_state = paging_state spec_exec_plan = spec_exec_policy.new_plan(query.keyspace or self.keyspace, query) if query.is_idempotent and spec_exec_policy else None return ResponseFuture( self, message, query, timeout, metrics=self._metrics, prepared_statement=prepared_statement, retry_policy=retry_policy, row_factory=row_factory, load_balancer=load_balancing_policy, start_time=start_time, speculative_execution_plan=spec_exec_plan, - host=host) + continuous_paging_state=continuous_paging_state, host=host) def get_execution_profile(self, name): """ @@ -2536,14 +2868,14 @@ def prepare(self, query, custom_payload=None, keyspace=None): future = ResponseFuture(self, message, query=None, timeout=self.default_timeout) try: future.send_request() - response = future.result().one() + response = future.result()[0] except Exception: log.exception("Error preparing query:") raise prepared_keyspace = keyspace if keyspace else None prepared_statement = PreparedStatement.from_message( - response.query_id, response.bind_metadata, response.pk_indexes, self.cluster.metadata, query, self.keyspace, + response.query_id, response.bind_metadata, response.pk_indexes, self.cluster.metadata, query, prepared_keyspace, self._protocol_version, response.column_metadata, response.result_metadata_id) prepared_statement.custom_payload = future.custom_payload @@ -2610,6 +2942,9 @@ def shutdown(self): future.cancel() wait_futures(self._initial_connect_futures) + if self._monitor_reporter: + self._monitor_reporter.stop() + for pool in tuple(self._pools.values()): pool.shutdown() @@ -2885,15 +3220,17 @@ class ControlConnection(object): """ _SELECT_PEERS = "SELECT * FROM system.peers" - _SELECT_PEERS_NO_TOKENS = "SELECT host_id, peer, data_center, rack, rpc_address, release_version, schema_version FROM system.peers" + _SELECT_PEERS_NO_TOKENS_TEMPLATE = "SELECT host_id, peer, data_center, rack, rpc_address, {nt_col_name}, release_version, schema_version FROM system.peers" _SELECT_LOCAL = "SELECT * FROM system.local WHERE key='local'" _SELECT_LOCAL_NO_TOKENS = "SELECT host_id, cluster_name, data_center, rack, partitioner, release_version, schema_version FROM system.local WHERE key='local'" # Used only when token_metadata_enabled is set to False _SELECT_LOCAL_NO_TOKENS_RPC_ADDRESS = "SELECT rpc_address FROM system.local WHERE key='local'" - _SELECT_SCHEMA_PEERS = "SELECT peer, rpc_address, schema_version FROM system.peers" + _SELECT_SCHEMA_PEERS_TEMPLATE = "SELECT peer, {nt_col_name}, schema_version FROM system.peers" _SELECT_SCHEMA_LOCAL = "SELECT schema_version FROM system.local WHERE key='local'" + _MINIMUM_NATIVE_ADDRESS_VERSION = "4.0" + _is_shutdown = False _timeout = None _protocol_version = None @@ -3017,7 +3354,7 @@ def _try_connect(self, host): "SCHEMA_CHANGE": partial(_watch_callback, self_weakref, '_handle_schema_change') }, register_timeout=self._timeout) - sel_peers = self._SELECT_PEERS if self._token_meta_enabled else self._SELECT_PEERS_NO_TOKENS + sel_peers = self._peers_query_for_version(connection, self._SELECT_PEERS_NO_TOKENS_TEMPLATE) sel_local = self._SELECT_LOCAL if self._token_meta_enabled else self._SELECT_LOCAL_NO_TOKENS peers_query = QueryMessage(query=sel_peers, consistency_level=ConsistencyLevel.ONE) local_query = QueryMessage(query=sel_local, consistency_level=ConsistencyLevel.ONE) @@ -3154,7 +3491,7 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, cl = ConsistencyLevel.ONE if not self._token_meta_enabled: log.debug("[control connection] Refreshing node list without token map") - sel_peers = self._SELECT_PEERS_NO_TOKENS + sel_peers = self._peers_query_for_version(connection, self._SELECT_PEERS_NO_TOKENS_TEMPLATE) sel_local = self._SELECT_LOCAL_NO_TOKENS else: log.debug("[control connection] Refreshing node list and token map") @@ -3254,7 +3591,7 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, host.dse_workload = row.get("workload") host.dse_workloads = row.get("workloads") - if partitioner and tokens: + if partitioner and tokens and self._token_meta_enabled: token_map[host] = tokens for old_host in self._cluster.metadata.all_hosts(): @@ -3368,8 +3705,10 @@ def wait_for_schema_agreement(self, connection=None, preloaded_results=None, wai elapsed = 0 cl = ConsistencyLevel.ONE schema_mismatches = None + select_peers_query = self._peers_query_for_version(connection, self._SELECT_SCHEMA_PEERS_TEMPLATE) + while elapsed < total_timeout: - peers_query = QueryMessage(query=self._SELECT_SCHEMA_PEERS, consistency_level=cl) + peers_query = QueryMessage(query=select_peers_query, consistency_level=cl) local_query = QueryMessage(query=self._SELECT_SCHEMA_LOCAL, consistency_level=cl) try: timeout = min(self._timeout, total_timeout - elapsed) @@ -3434,9 +3773,30 @@ def _address_from_row(self, row): addr = row.get("native_transport_address") if not addr or addr in ["0.0.0.0", "::"]: addr = row.get("peer") - return addr - + + def _peers_query_for_version(self, connection, peers_query_template): + """ + Given a connection: + + - find the server product version running on the connection's host, + - use that to choose the column name for the transport address (see APOLLO-1130), and + - use that column name in the provided peers query template. + + The provided template should be a string with a format replacement + field named nt_col_name. + """ + host_release_version = self._cluster.metadata.get_host(connection.endpoint).release_version + if host_release_version: + use_native_address_query = host_release_version >= self._MINIMUM_NATIVE_ADDRESS_VERSION + if use_native_address_query: + select_peers_query = peers_query_template.format(nt_col_name="native_transport_address") + else: + select_peers_query = peers_query_template.format(nt_col_name="rpc_address") + else: + select_peers_query = self._SELECT_PEERS + return select_peers_query + def _signal_error(self): with self._lock: if self._is_shutdown: @@ -3660,13 +4020,15 @@ class ResponseFuture(object): _timer = None _protocol_handler = ProtocolHandler _spec_execution_plan = NoSpeculativeExecutionPlan() + _continuous_paging_options = None + _continuous_paging_session = None _host = None _warned_timeout = False def __init__(self, session, message, query, timeout, metrics=None, prepared_statement=None, retry_policy=RetryPolicy(), row_factory=None, load_balancer=None, start_time=None, - speculative_execution_plan=None, host=None): + speculative_execution_plan=None, continuous_paging_state=None, host=None): self.session = session # TODO: normalize handling of retry policy and row factory self.row_factory = row_factory or session.row_factory @@ -3688,6 +4050,7 @@ def __init__(self, session, message, query, timeout, metrics=None, prepared_stat self._errbacks = [] self.attempted_hosts = [] self._start_timer() + self._continuous_paging_state = continuous_paging_state @property def _time_remaining(self): @@ -3799,7 +4162,6 @@ def send_request(self, error_no_hosts=True): if self.timeout is not None and time.time() - self._start_time > self.timeout: self._on_timeout() return True - if error_no_hosts: self._set_final_exception(NoHostAvailable( "Unable to complete the operation against any hosts", self._errors)) @@ -3961,10 +4323,12 @@ def _set_result(self, host, connection, pool, response): self, connection, **response.schema_change_event) elif response.kind == RESULT_KIND_ROWS: self._paging_state = response.paging_state - self._col_types = response.column_types self._col_names = response.column_names - self._set_final_result(self.row_factory( - response.column_names, response.parsed_rows)) + self._col_types = response.column_types + if getattr(self.message, 'continuous_paging_options', None): + self._handle_continuous_paging_first_response(connection, response) + else: + self._set_final_result(self.row_factory(response.column_names, response.parsed_rows)) elif response.kind == RESULT_KIND_VOID: self._set_final_result(None) else: @@ -4072,6 +4436,14 @@ def _set_result(self, host, connection, pool, response): log.exception("Unexpected exception while handling result in ResponseFuture:") self._set_final_exception(exc) + def _handle_continuous_paging_first_response(self, connection, response): + self._continuous_paging_session = connection.new_continuous_paging_session(response.stream_id, + self._protocol_handler.decode_message, + self.row_factory, + self._continuous_paging_state) + self._continuous_paging_session.on_message(response) + self._set_final_result(self._continuous_paging_session.results()) + def _set_keyspace_completed(self, errors): if not errors: self._set_final_result(None) @@ -4482,8 +4854,9 @@ def next(self): self._current_rows = [] raise - self.fetch_next_page() - self._page_iter = iter(self._current_rows) + if not self.response_future._continuous_paging_session: + self.fetch_next_page() + self._page_iter = iter(self._current_rows) return next(self._page_iter) @@ -4556,6 +4929,12 @@ def get_all_query_traces(self, max_wait_sec_per=None): """ return self.response_future.get_all_query_traces(max_wait_sec_per) + def cancel_continuous_paging(self): + try: + self.response_future._continuous_paging_session.cancel() + except AttributeError: + raise DriverException("Attempted to cancel paging with no active session. This is only for requests with ContinuousdPagingOptions.") + @property def was_applied(self): """ diff --git a/cassandra/encoder.py b/cassandra/encoder.py index 00f7bf1b40..f2c3f8dfed 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -29,12 +29,12 @@ from uuid import UUID import six +from cassandra.util import (OrderedDict, OrderedMap, OrderedMapSerializedKey, + sortedset, Time, Date, Point, LineString, Polygon) + if six.PY3: import ipaddress -from cassandra.util import (OrderedDict, OrderedMap, OrderedMapSerializedKey, - sortedset, Time, Date) - if six.PY3: long = int @@ -91,7 +91,10 @@ def __init__(self): sortedset: self.cql_encode_set_collection, frozenset: self.cql_encode_set_collection, types.GeneratorType: self.cql_encode_list_collection, - ValueSequence: self.cql_encode_sequence + ValueSequence: self.cql_encode_sequence, + Point: self.cql_encode_str_quoted, + LineString: self.cql_encode_str_quoted, + Polygon: self.cql_encode_str_quoted } if six.PY2: @@ -128,6 +131,9 @@ def cql_encode_str(self, val): """ return cql_quote(val) + def cql_encode_str_quoted(self, val): + return "'%s'" % val + if six.PY3: def cql_encode_bytes(self, val): return (b'0x' + hexlify(val)).decode('utf-8') From 03824f2d10583539a8d13b89934215c5d70ff8e1 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 9 Sep 2019 18:52:52 -0400 Subject: [PATCH 1599/2005] uncomment graph and insight stuff in cluster.py --- cassandra/cluster.py | 52 ++++++++++++++++++++++++-------------------- 1 file changed, 29 insertions(+), 23 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index a040e12deb..e955063470 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -79,10 +79,18 @@ BatchStatement, bind_params, QueryTrace, TraceUnavailable, named_tuple_factory, dict_factory, tuple_factory, FETCH_SIZE_UNSET, HostTargetingStatement) +from cassandra.marshal import int64_pack from cassandra.timestamps import MonotonicTimestampGenerator from cassandra.compat import Mapping from cassandra.util import _resolve_contact_points_to_string_map +from cassandra.insights.reporter import MonitorReporter +from cassandra.insights.util import version_supports_insights + +from cassandra.graph import (graph_object_row_factory, GraphOptions, GraphSON1Serializer, + GraphProtocol, GraphSON2Serializer, GraphStatement, SimpleGraphStatement) +from cassandra.graph.query import _request_timeout_key + if six.PY3: long = int @@ -386,7 +394,7 @@ class GraphExecutionProfile(ExecutionProfile): def __init__(self, load_balancing_policy=_NOT_SET, retry_policy=None, consistency_level=ConsistencyLevel.LOCAL_ONE, serial_consistency_level=None, - request_timeout=30.0, row_factory=None, # TODO GRAPH graph_object_row_factory + request_timeout=30.0, row_factory=graph_object_row_factory, graph_options=None): """ Default execution profile for graph execution. @@ -408,7 +416,7 @@ class GraphAnalyticsExecutionProfile(GraphExecutionProfile): def __init__(self, load_balancing_policy=None, retry_policy=None, consistency_level=ConsistencyLevel.LOCAL_ONE, serial_consistency_level=None, - request_timeout=3600. * 24. * 7., row_factory=None, # TODO GRAPH graph_object_row_factory + request_timeout=3600. * 24. * 7., row_factory=graph_object_row_factory, graph_options=None): """ Execution profile with timeout and load balancing appropriate for graph analytics queries. @@ -431,7 +439,6 @@ def __init__(self, load_balancing_policy=None, retry_policy=None, self.graph_options.set_source_analytics() - class ProfileManager(object): def __init__(self): @@ -2449,23 +2456,22 @@ def __init__(self, cluster, hosts, keyspace=None): msg += " using keyspace '%s'" % self.keyspace raise NoHostAvailable(msg, [h.address for h in hosts]) - # TODO INSIGHT - # cc_host = self.cluster.get_control_connection_host() - # valid_insights_version = (cc_host and version_supports_insights(cc_host.dse_version)) - # if self.cluster.monitor_reporting_enabled and valid_insights_version: - # self._monitor_reporter = MonitorReporter( - # interval_sec=self.cluster.monitor_reporting_interval, - # session=self, - # ) - # else: - # if cc_host: - # log.debug('Not starting MonitorReporter thread for Insights; ' - # 'not supported by server version {v} on ' - # 'ControlConnection host {c}'.format(v=cc_host.release_version, c=cc_host)) - # - # self.session_id = uuid.uuid4() - # log.debug('Started Session with client_id {} and session_id {}'.format(self.cluster.client_id, - # self.session_id)) + cc_host = self.cluster.get_control_connection_host() + valid_insights_version = (cc_host and version_supports_insights(cc_host.dse_version)) + if self.cluster.monitor_reporting_enabled and valid_insights_version: + self._monitor_reporter = MonitorReporter( + interval_sec=self.cluster.monitor_reporting_interval, + session=self, + ) + else: + if cc_host: + log.debug('Not starting MonitorReporter thread for Insights; ' + 'not supported by server version {v} on ' + 'ControlConnection host {c}'.format(v=cc_host.release_version, c=cc_host)) + + self.session_id = uuid.uuid4() + log.debug('Started Session with client_id {} and session_id {}'.format(self.cluster.client_id, + self.session_id)) def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_payload=None, execution_profile=EXEC_PROFILE_DEFAULT, @@ -2580,9 +2586,8 @@ def execute_graph_async(self, query, parameters=None, trace=False, execution_pro object which callbacks may be attached to for asynchronous response delivery. You may also call ``ResponseFuture.result()`` to synchronously block for results at any time. """ - # TODO GRAPH - # if not isinstance(query, GraphStatement): - # query = SimpleGraphStatement(query) + if not isinstance(query, GraphStatement): + query = SimpleGraphStatement(query) execution_profile = self._maybe_get_execution_profile(execution_profile) # look up instance here so we can apply the extended attributes @@ -2680,6 +2685,7 @@ def _create_response_future(self, query, parameters, trace, custom_payload, row_factory = self.row_factory load_balancing_policy = self.cluster.load_balancing_policy spec_exec_policy = None + continuous_paging_options = None else: execution_profile = self._maybe_get_execution_profile(execution_profile) From 2299039454f2be98aea49688acfe18552de04bf9 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 10 Sep 2019 11:34:08 -0400 Subject: [PATCH 1600/2005] Move insights and graph to cassandra.datastax and add insights integration tests --- cassandra/cluster.py | 10 +- cassandra/{insights => datastax}/__init__.py | 0 cassandra/{ => datastax}/graph/__init__.py | 6 +- cassandra/{ => datastax}/graph/graphson.py | 2 +- cassandra/{ => datastax}/graph/query.py | 4 +- cassandra/{ => datastax}/graph/types.py | 2 +- cassandra/datastax/insights/__init__.py | 13 ++ cassandra/{ => datastax}/insights/registry.py | 2 +- cassandra/{ => datastax}/insights/reporter.py | 6 +- .../{ => datastax}/insights/serializers.py | 6 +- cassandra/{ => datastax}/insights/util.py | 0 tests/integration/simulacron/__init__.py | 2 +- .../simulacron/advanced/__init__.py | 13 ++ .../simulacron/advanced/test_insights.py | 111 ++++++++++++++++++ 14 files changed, 157 insertions(+), 20 deletions(-) rename cassandra/{insights => datastax}/__init__.py (100%) rename cassandra/{ => datastax}/graph/__init__.py (80%) rename cassandra/{ => datastax}/graph/graphson.py (99%) rename cassandra/{ => datastax}/graph/query.py (98%) rename cassandra/{ => datastax}/graph/types.py (98%) create mode 100644 cassandra/datastax/insights/__init__.py rename cassandra/{ => datastax}/insights/registry.py (98%) rename cassandra/{ => datastax}/insights/reporter.py (97%) rename cassandra/{ => datastax}/insights/serializers.py (98%) rename cassandra/{ => datastax}/insights/util.py (100%) create mode 100644 tests/integration/simulacron/advanced/__init__.py create mode 100644 tests/integration/simulacron/advanced/test_insights.py diff --git a/cassandra/cluster.py b/cassandra/cluster.py index e955063470..e0457c272f 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -84,12 +84,12 @@ from cassandra.compat import Mapping from cassandra.util import _resolve_contact_points_to_string_map -from cassandra.insights.reporter import MonitorReporter -from cassandra.insights.util import version_supports_insights +from cassandra.datastax.insights.reporter import MonitorReporter +from cassandra.datastax.insights.util import version_supports_insights -from cassandra.graph import (graph_object_row_factory, GraphOptions, GraphSON1Serializer, - GraphProtocol, GraphSON2Serializer, GraphStatement, SimpleGraphStatement) -from cassandra.graph.query import _request_timeout_key +from cassandra.datastax.graph import (graph_object_row_factory, GraphOptions, GraphSON1Serializer, + GraphProtocol, GraphSON2Serializer, GraphStatement, SimpleGraphStatement) +from cassandra.datastax.graph.query import _request_timeout_key if six.PY3: long = int diff --git a/cassandra/insights/__init__.py b/cassandra/datastax/__init__.py similarity index 100% rename from cassandra/insights/__init__.py rename to cassandra/datastax/__init__.py diff --git a/cassandra/graph/__init__.py b/cassandra/datastax/graph/__init__.py similarity index 80% rename from cassandra/graph/__init__.py rename to cassandra/datastax/graph/__init__.py index e7880e34d5..0c03c9249d 100644 --- a/cassandra/graph/__init__.py +++ b/cassandra/datastax/graph/__init__.py @@ -13,10 +13,10 @@ # limitations under the License. -from cassandra.graph.types import Element, Vertex, VertexProperty, Edge, Path -from cassandra.graph.query import ( +from cassandra.datastax.graph.types import Element, Vertex, VertexProperty, Edge, Path +from cassandra.datastax.graph.query import ( GraphOptions, GraphProtocol, GraphStatement, SimpleGraphStatement, Result, graph_object_row_factory, single_object_row_factory, graph_result_row_factory, graph_graphson2_row_factory ) -from cassandra.graph.graphson import * +from cassandra.datastax.graph.graphson import * diff --git a/cassandra/graph/graphson.py b/cassandra/datastax/graph/graphson.py similarity index 99% rename from cassandra/graph/graphson.py rename to cassandra/datastax/graph/graphson.py index 91034db4b2..8cc5561ba8 100644 --- a/cassandra/graph/graphson.py +++ b/cassandra/datastax/graph/graphson.py @@ -26,7 +26,7 @@ import ipaddress from cassandra.util import Polygon, Point, LineString -from cassandra.graph.types import Vertex, VertexProperty, Edge, Path +from cassandra.datastax.graph.types import Vertex, VertexProperty, Edge, Path __all__ = ['GraphSON1Serializer', 'GraphSON1Deserializer', 'GraphSON1TypeDeserializer', 'GraphSON2Serializer', 'GraphSON2Deserializer', diff --git a/cassandra/graph/query.py b/cassandra/datastax/graph/query.py similarity index 98% rename from cassandra/graph/query.py rename to cassandra/datastax/graph/query.py index 7a543e5bb6..657068fbff 100644 --- a/cassandra/graph/query.py +++ b/cassandra/datastax/graph/query.py @@ -19,8 +19,8 @@ from cassandra import ConsistencyLevel from cassandra.query import Statement, SimpleStatement -from cassandra.graph.types import Vertex, Edge, Path -from cassandra.graph.graphson import GraphSON2Reader +from cassandra.datastax.graph.types import Vertex, Edge, Path +from cassandra.datastax.graph.graphson import GraphSON2Reader # (attr, description, server option) diff --git a/cassandra/graph/types.py b/cassandra/datastax/graph/types.py similarity index 98% rename from cassandra/graph/types.py rename to cassandra/datastax/graph/types.py index 63bd6c7f88..9b06a0609c 100644 --- a/cassandra/graph/types.py +++ b/cassandra/datastax/graph/types.py @@ -144,7 +144,7 @@ class Path(object): def __init__(self, labels, objects): # The Path class should not do any deserialization by itself. To fix in the next major. - from cassandra.graph.query import _graph_object_sequence + from cassandra.datastax.graph import _graph_object_sequence self.labels = labels self.objects = list(_graph_object_sequence(objects)) diff --git a/cassandra/datastax/insights/__init__.py b/cassandra/datastax/insights/__init__.py new file mode 100644 index 0000000000..2c9ca172f8 --- /dev/null +++ b/cassandra/datastax/insights/__init__.py @@ -0,0 +1,13 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/cassandra/insights/registry.py b/cassandra/datastax/insights/registry.py similarity index 98% rename from cassandra/insights/registry.py rename to cassandra/datastax/insights/registry.py index b65b89a268..3dd1d255ae 100644 --- a/cassandra/insights/registry.py +++ b/cassandra/datastax/insights/registry.py @@ -16,7 +16,7 @@ from collections import OrderedDict from warnings import warn -from cassandra.insights.util import namespace +from cassandra.datastax.insights.util import namespace _NOT_SET = object() diff --git a/cassandra/insights/reporter.py b/cassandra/datastax/insights/reporter.py similarity index 97% rename from cassandra/insights/reporter.py rename to cassandra/datastax/insights/reporter.py index 77d4f02ac4..6fb2d8f037 100644 --- a/cassandra/insights/reporter.py +++ b/cassandra/datastax/insights/reporter.py @@ -28,8 +28,8 @@ from cassandra.policies import HostDistance from cassandra.util import ms_timestamp_from_datetime -from cassandra.insights.registry import insights_registry -from cassandra.insights.serializers import initialize_registry +from cassandra.datastax.insights.registry import insights_registry +from cassandra.datastax.insights.serializers import initialize_registry log = logging.getLogger(__name__) @@ -157,7 +157,7 @@ def _get_startup_data(self): }, 'data': { 'driverName': 'DataStax Enterprise Python Driver', - 'driverVersion': sys.modules['dse'].__version__, + 'driverVersion': sys.modules['cassandra'].__version__, 'clientId': str(self._session.cluster.client_id), 'sessionId': str(self._session.session_id), 'applicationName': self._session.cluster.application_name or 'python', diff --git a/cassandra/insights/serializers.py b/cassandra/datastax/insights/serializers.py similarity index 98% rename from cassandra/insights/serializers.py rename to cassandra/datastax/insights/serializers.py index 384a75f4c3..aec4467a6a 100644 --- a/cassandra/insights/serializers.py +++ b/cassandra/datastax/insights/serializers.py @@ -31,9 +31,9 @@ def initialize_registry(insights_registry): EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT, _NOT_SET ) - from cassandra.graph.query import GraphOptions - from cassandra.insights.registry import insights_registry - from cassandra.insights.util import namespace + from cassandra.datastax.graph import GraphOptions + from cassandra.datastax.insights.registry import insights_registry + from cassandra.datastax.insights.util import namespace from cassandra.policies import ( RoundRobinPolicy, DCAwareRoundRobinPolicy, diff --git a/cassandra/insights/util.py b/cassandra/datastax/insights/util.py similarity index 100% rename from cassandra/insights/util.py rename to cassandra/datastax/insights/util.py diff --git a/tests/integration/simulacron/__init__.py b/tests/integration/simulacron/__init__.py index b53e86289d..fee6b39a01 100644 --- a/tests/integration/simulacron/__init__.py +++ b/tests/integration/simulacron/__init__.py @@ -74,7 +74,7 @@ class DseSimulacronCluster(SimulacronBase): @classmethod def setUpClass(cls): - if SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"): + if DSE_VERSION is None and SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"): return cls.simulacron_cluster = start_and_prime_cluster_defaults(dse_version=DSE_VERSION, diff --git a/tests/integration/simulacron/advanced/__init__.py b/tests/integration/simulacron/advanced/__init__.py new file mode 100644 index 0000000000..2c9ca172f8 --- /dev/null +++ b/tests/integration/simulacron/advanced/__init__.py @@ -0,0 +1,13 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/integration/simulacron/advanced/test_insights.py b/tests/integration/simulacron/advanced/test_insights.py new file mode 100644 index 0000000000..3da14659af --- /dev/null +++ b/tests/integration/simulacron/advanced/test_insights.py @@ -0,0 +1,111 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +import time +import json +import re + +from cassandra.cluster import Cluster +from cassandra.datastax.insights.util import version_supports_insights + +from tests.integration import requiressimulacron, requiredse, DSE_VERSION +from tests.integration.simulacron import DseSimulacronCluster, PROTOCOL_VERSION +from tests.integration.simulacron.utils import SimulacronClient, GetLogsQuery, ClearLogsQuery + + +@requiredse +@requiressimulacron +@unittest.skipUnless(DSE_VERSION and version_supports_insights(str(DSE_VERSION)), 'DSE {} does not support insights'.format(DSE_VERSION)) +class InsightsTests(DseSimulacronCluster): + """ + Tests insights integration + + @since 3.18 + @jira_ticket PYTHON-1047 + @expected_result startup and status messages are sent + """ + + connect = False + + def tearDown(self): + if self.cluster: + self.cluster.shutdown() + + @staticmethod + def _get_node_logs(raw_data): + return list(filter(lambda q: q['type'] == 'QUERY' and q['query'].startswith('CALL InsightsRpc.reportInsight'), + json.loads(raw_data)['data_centers'][0]['nodes'][0]['queries'])) + + @staticmethod + def _parse_data(data, index=0): + return json.loads(re.match( + r"CALL InsightsRpc.reportInsight\('(.+)'\)", + data[index]['frame']['message']['query']).group(1)) + + def test_startup_message(self): + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False) + self.session = self.cluster.connect(wait_for_all_pools=True) + + time.sleep(1) # wait the monitor thread is started + response = SimulacronClient().submit_request(GetLogsQuery()) + self.assertTrue('CALL InsightsRpc.reportInsight' in response) + + node_queries = self._get_node_logs(response) + self.assertEqual(1, len(node_queries)) + self.assertTrue(node_queries, "RPC query not found") + + message = self._parse_data(node_queries) + + self.assertEqual(message['metadata']['name'], 'driver.startup') + self.assertEqual(message['data']['initialControlConnection'], + self.cluster.control_connection._connection.host) + self.assertEqual(message['data']['sessionId'], str(self.session.session_id)) + self.assertEqual(message['data']['clientId'], str(self.cluster.client_id)) + self.assertEqual(message['data']['compression'], 'NONE') + + def test_status_message(self): + SimulacronClient().submit_request(ClearLogsQuery()) + + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False, monitor_reporting_interval=1) + self.session = self.cluster.connect(wait_for_all_pools=True) + + time.sleep(1.1) + response = SimulacronClient().submit_request(GetLogsQuery()) + self.assertTrue('CALL InsightsRpc.reportInsight' in response) + + node_queries = self._get_node_logs(response) + self.assertEqual(2, len(node_queries)) + self.assertTrue(node_queries, "RPC query not found") + + message = self._parse_data(node_queries, 1) + + self.assertEqual(message['metadata']['name'], 'driver.status') + self.assertEqual(message['data']['controlConnection'], + self.cluster.control_connection._connection.host) + self.assertEqual(message['data']['sessionId'], str(self.session.session_id)) + self.assertEqual(message['data']['clientId'], str(self.cluster.client_id)) + self.assertEqual(message['metadata']['insightType'], 'EVENT') + + def test_monitor_disabled(self): + SimulacronClient().submit_request(ClearLogsQuery()) + + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False, monitor_reporting_enabled=False) + self.session = self.cluster.connect(wait_for_all_pools=True) + + response = SimulacronClient().submit_request(GetLogsQuery()) + self.assertFalse('CALL InsightsRpc.reportInsight' in response) From 4b1ef56b05f93b2d0b38b104e8f5648eefe81cc7 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 10 Sep 2019 13:18:18 -0400 Subject: [PATCH 1601/2005] Disable insights for some cluster tests --- tests/integration/standard/test_cluster.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index d3636bea88..b45b65cd89 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -745,7 +745,8 @@ def _warning_are_issued_when_auth(self, auth_provider): def test_idle_heartbeat(self): interval = 2 - cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=interval) + cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=interval, + monitor_reporting_enabled=False) if PROTOCOL_VERSION < 3: cluster.set_core_connections_per_host(HostDistance.LOCAL, 1) session = cluster.connect(wait_for_all_pools=True) @@ -867,7 +868,7 @@ def test_profile_load_balancing(self): RoundRobinPolicy(), lambda host: host.address == CASSANDRA_IP ) ) - with Cluster(execution_profiles={'node1': node1}) as cluster: + with Cluster(execution_profiles={'node1': node1}, monitor_reporting_enabled=False) as cluster: session = cluster.connect(wait_for_all_pools=True) # default is DCA RR for all hosts From e0cef14ecba429ff08f827e79566a07f92c6af3d Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 10 Sep 2019 12:24:13 -0500 Subject: [PATCH 1602/2005] Most non-advanced unit tests --- tests/unit/io/test_asyncioreactor.py | 1 + tests/unit/io/test_asyncorereactor.py | 4 +- tests/unit/io/utils.py | 13 +- tests/unit/test_cluster.py | 76 +++- tests/unit/test_control_connection.py | 80 ++-- tests/unit/test_metadata.py | 131 +++++- tests/unit/test_parameter_binding.py | 1 + tests/unit/test_policies.py | 6 +- tests/unit/test_protocol.py | 66 ++- tests/unit/test_response_future.py | 67 +-- tests/unit/test_resultset.py | 10 +- tests/unit/test_timestamps.py | 10 +- tests/unit/test_types.py | 612 +++++++++++++++++++++++--- tests/unit/util.py | 30 ++ 14 files changed, 936 insertions(+), 171 deletions(-) create mode 100644 tests/unit/util.py diff --git a/tests/unit/io/test_asyncioreactor.py b/tests/unit/io/test_asyncioreactor.py index be3c2bcd2e..aa00a32943 100644 --- a/tests/unit/io/test_asyncioreactor.py +++ b/tests/unit/io/test_asyncioreactor.py @@ -1,3 +1,4 @@ +AsyncioConnection, ASYNCIO_AVAILABLE = None, False try: from cassandra.io.asyncioreactor import AsyncioConnection import asynctest diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index 7e55059daa..4e0e540327 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -18,7 +18,7 @@ from mock import patch import socket -import cassandra.io.asyncorereactor +import cassandra.io.asyncorereactor as asyncorereactor from cassandra.io.asyncorereactor import AsyncoreConnection from tests import is_monkey_patched from tests.unit.io.utils import ReactorTestMixin, TimerTestMixin, noop_if_monkey_patched @@ -77,7 +77,7 @@ def create_timer(self): @property def _timers(self): - return cassandra.io.asyncorereactor._global_loop._timers + return asyncorereactor._global_loop._timers def setUp(self): if is_monkey_patched(): diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index 2856b9dfe8..168a5e8b76 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -12,13 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from cassandra.connection import (ConnectionException, ProtocolError, - HEADER_DIRECTION_TO_CLIENT) -from cassandra.marshal import int32_pack, uint8_pack, uint32_pack -from cassandra.protocol import (write_stringmultimap, write_int, write_string, - SupportedMessage, ReadyMessage, ServerError) +from cassandra.connection import ( + ConnectionException, ProtocolError, HEADER_DIRECTION_TO_CLIENT +) +from cassandra.marshal import uint8_pack, uint32_pack +from cassandra.protocol import ( + write_stringmultimap, write_int, write_string, SupportedMessage, ReadyMessage, ServerError +) from cassandra.connection import DefaultEndPoint - from tests import is_monkey_patched import io diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index fd46731bf0..66799fb393 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -22,13 +22,12 @@ from mock import patch, Mock from cassandra import ConsistencyLevel, DriverException, Timeout, Unavailable, RequestExecutionException, ReadTimeout, WriteTimeout, CoordinationFailure, ReadFailure, WriteFailure, FunctionFailure, AlreadyExists,\ - InvalidRequest, Unauthorized, AuthenticationFailed, OperationTimedOut, UnsupportedOperation, RequestValidationException, ConfigurationException -from cassandra.cluster import _Scheduler, Session, Cluster, _NOT_SET, default_lbp_factory, \ - ExecutionProfile, _ConfigMode, EXEC_PROFILE_DEFAULT, NoHostAvailable -from cassandra.policies import HostDistance, RetryPolicy, RoundRobinPolicy, \ - DowngradingConsistencyRetryPolicy, SimpleConvictionPolicy -from cassandra.query import SimpleStatement, named_tuple_factory, tuple_factory + InvalidRequest, Unauthorized, AuthenticationFailed, OperationTimedOut, UnsupportedOperation, RequestValidationException, ConfigurationException, ProtocolVersion +from cassandra.cluster import _Scheduler, Session, Cluster, default_lbp_factory, \ + ExecutionProfile, _ConfigMode, EXEC_PROFILE_DEFAULT from cassandra.pool import Host +from cassandra.policies import HostDistance, RetryPolicy, RoundRobinPolicy, DowngradingConsistencyRetryPolicy, SimpleConvictionPolicy +from cassandra.query import SimpleStatement, named_tuple_factory, tuple_factory from tests.unit.utils import mock_session_pools from tests import connection_class @@ -136,14 +135,43 @@ def setUp(self): # TODO: this suite could be expanded; for now just adding a test covering a PR @mock_session_pools - def test_default_serial_consistency_level(self, *_): + def test_default_serial_consistency_level_ep(self, *_): + """ + Make sure default_serial_consistency_level passes through to a query message using execution profiles. + Also make sure Statement.serial_consistency_level overrides the default. + + PR #510 + """ + c = Cluster(protocol_version=4) + s = Session(c, [Host("127.0.0.1", SimpleConvictionPolicy)]) + + # default is None + default_profile = c.profile_manager.default + self.assertIsNone(default_profile.serial_consistency_level) + + for cl in (None, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL): + s.get_execution_profile(EXEC_PROFILE_DEFAULT).serial_consistency_level = cl + + # default is passed through + f = s.execute_async(query='') + self.assertEqual(f.message.serial_consistency_level, cl) + + # any non-None statement setting takes precedence + for cl_override in (ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL): + f = s.execute_async(SimpleStatement(query_string='', serial_consistency_level=cl_override)) + self.assertEqual(default_profile.serial_consistency_level, cl) + self.assertEqual(f.message.serial_consistency_level, cl_override) + + @mock_session_pools + def test_default_serial_consistency_level_legacy(self, *_): """ - Make sure default_serial_consistency_level passes through to a query message. + Make sure default_serial_consistency_level passes through to a query message using legacy settings. Also make sure Statement.serial_consistency_level overrides the default. PR #510 """ - s = Session(Cluster(protocol_version=4), [Host("127.0.0.1", SimpleConvictionPolicy)]) + c = Cluster(protocol_version=4) + s = Session(c, [Host("127.0.0.1", SimpleConvictionPolicy)]) # default is None self.assertIsNone(s.default_serial_consistency_level) @@ -157,10 +185,6 @@ def test_default_serial_consistency_level(self, *_): for cl in (None, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL): s.default_serial_consistency_level = cl - # default is passed through - f = s.execute_async(query='') - self.assertEqual(f.message.serial_consistency_level, cl) - # any non-None statement setting takes precedence for cl_override in (ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL): f = s.execute_async(SimpleStatement(query_string='', serial_consistency_level=cl_override)) @@ -168,6 +192,24 @@ def test_default_serial_consistency_level(self, *_): self.assertEqual(f.message.serial_consistency_level, cl_override) +class ProtocolVersionTests(unittest.TestCase): + + def test_protocol_downgrade_test(self): + + lower = ProtocolVersion.get_lower_supported(ProtocolVersion.DSE_V1) + self.assertEqual(ProtocolVersion.V4,lower) + lower = ProtocolVersion.get_lower_supported(ProtocolVersion.V4) + self.assertEqual(ProtocolVersion.V3,lower) + lower = ProtocolVersion.get_lower_supported(ProtocolVersion.V3) + self.assertEqual(0,lower) + + self.assertTrue(ProtocolVersion.uses_error_code_map(ProtocolVersion.DSE_V1)) + self.assertTrue(ProtocolVersion.uses_int_query_flags(ProtocolVersion.DSE_V1)) + + self.assertFalse(ProtocolVersion.uses_error_code_map(ProtocolVersion.V4)) + self.assertFalse(ProtocolVersion.uses_int_query_flags(ProtocolVersion.V4)) + + class ExecutionProfileTest(unittest.TestCase): def setUp(self): if connection_class is None: @@ -187,12 +229,18 @@ def test_default_exec_parameters(self): cluster = Cluster() self.assertEqual(cluster._config_mode, _ConfigMode.UNCOMMITTED) self.assertEqual(cluster.load_balancing_policy.__class__, default_lbp_factory().__class__) + self.assertEqual(cluster.profile_manager.default.load_balancing_policy.__class__, default_lbp_factory().__class__) self.assertEqual(cluster.default_retry_policy.__class__, RetryPolicy) + self.assertEqual(cluster.profile_manager.default.retry_policy.__class__, RetryPolicy) session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)]) self.assertEqual(session.default_timeout, 10.0) + self.assertEqual(cluster.profile_manager.default.request_timeout, 10.0) self.assertEqual(session.default_consistency_level, ConsistencyLevel.LOCAL_ONE) + self.assertEqual(cluster.profile_manager.default.consistency_level, ConsistencyLevel.LOCAL_ONE) self.assertEqual(session.default_serial_consistency_level, None) + self.assertEqual(cluster.profile_manager.default.serial_consistency_level, None) self.assertEqual(session.row_factory, named_tuple_factory) + self.assertEqual(cluster.profile_manager.default.row_factory, named_tuple_factory) @mock_session_pools def test_default_legacy(self): @@ -216,7 +264,7 @@ def test_default_profile(self): self.assertEqual(cluster._config_mode, _ConfigMode.PROFILES) - default_profile = session.get_execution_profile(EXEC_PROFILE_DEFAULT) + default_profile = cluster.profile_manager.profiles[EXEC_PROFILE_DEFAULT] rf = session.execute_async("query") self._verify_response_future_profile(rf, default_profile) diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index e76fbd2559..c2b6b7b4c5 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -25,7 +25,7 @@ from cassandra import OperationTimedOut, SchemaTargetType, SchemaChangeType from cassandra.protocol import ResultMessage, RESULT_KIND_ROWS from cassandra.cluster import ControlConnection, _Scheduler, ProfileManager, EXEC_PROFILE_DEFAULT, ExecutionProfile -from cassandra.pool import Host +from cassandra.hosts import Host from cassandra.connection import EndPoint, DefaultEndPoint, DefaultEndPointFactory from cassandra.policies import (SimpleConvictionPolicy, RoundRobinPolicy, ConstantReconnectionPolicy, IdentityTranslator) @@ -43,6 +43,7 @@ def __init__(self): } for host in self.hosts.values(): host.set_up() + host.release_version = "3.11" self.cluster_name = None self.partitioner = None @@ -98,6 +99,21 @@ def on_down(self, host, is_host_addition): self.down_host = host +def _node_meta_results(local_results, peer_results): + """ + creates a pair of ResultMessages from (col_names, parsed_rows) + """ + local_response = ResultMessage(kind=RESULT_KIND_ROWS) + local_response.column_names = local_results[0] + local_response.parsed_rows = local_results[1] + + peer_response = ResultMessage(kind=RESULT_KIND_ROWS) + peer_response.column_names = peer_results[0] + peer_response.parsed_rows = peer_results[1] + + return peer_response, local_response + + class MockConnection(object): is_defunct = False @@ -114,12 +130,7 @@ def __init__(self): [["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"]], ["192.168.1.2", "10.0.0.2", "a", "dc1", "rack1", ["2", "102", "202"]]] ] - local_response = ResultMessage( - kind=RESULT_KIND_ROWS, results=self.local_results) - peer_response = ResultMessage( - kind=RESULT_KIND_ROWS, results=self.peer_results) - - self.wait_for_responses = Mock(return_value=(peer_response, local_response)) + self.wait_for_responses = Mock(return_value=_node_meta_results(self.local_results, self.peer_results)) class FakeTime(object): @@ -136,6 +147,20 @@ def sleep(self, amount): class ControlConnectionTest(unittest.TestCase): + _matching_schema_preloaded_results = _node_meta_results( + local_results=(["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens"], + [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"]]]), + peer_results=(["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens"], + [["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"]], + ["192.168.1.2", "10.0.0.2", "a", "dc1", "rack1", ["2", "102", "202"]]])) + + _nonmatching_schema_preloaded_results = _node_meta_results( + local_results=(["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens"], + [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"]]]), + peer_results=(["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens"], + [["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"]], + ["192.168.1.2", "10.0.0.2", "b", "dc1", "rack1", ["2", "102", "202"]]])) + def setUp(self): self.cluster = MockCluster() self.connection = MockConnection() @@ -145,38 +170,6 @@ def setUp(self): self.control_connection._connection = self.connection self.control_connection._time = self.time - def _get_matching_schema_preloaded_results(self): - local_results = [ - ["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens"], - [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"]]] - ] - local_response = ResultMessage(kind=RESULT_KIND_ROWS, results=local_results) - - peer_results = [ - ["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens"], - [["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"]], - ["192.168.1.2", "10.0.0.2", "a", "dc1", "rack1", ["2", "102", "202"]]] - ] - peer_response = ResultMessage(kind=RESULT_KIND_ROWS, results=peer_results) - - return (peer_response, local_response) - - def _get_nonmatching_schema_preloaded_results(self): - local_results = [ - ["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens"], - [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"]]] - ] - local_response = ResultMessage(kind=RESULT_KIND_ROWS, results=local_results) - - peer_results = [ - ["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens"], - [["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"]], - ["192.168.1.2", "10.0.0.2", "b", "dc1", "rack1", ["2", "102", "202"]]] - ] - peer_response = ResultMessage(kind=RESULT_KIND_ROWS, results=peer_results) - - return (peer_response, local_response) - def test_wait_for_schema_agreement(self): """ Basic test with all schema versions agreeing @@ -189,8 +182,7 @@ def test_wait_for_schema_agreement_uses_preloaded_results_if_given(self): """ wait_for_schema_agreement uses preloaded results if given for shared table queries """ - preloaded_results = self._get_matching_schema_preloaded_results() - + preloaded_results = self._matching_schema_preloaded_results self.assertTrue(self.control_connection.wait_for_schema_agreement(preloaded_results=preloaded_results)) # the control connection should not have slept at all self.assertEqual(self.time.clock, 0) @@ -201,8 +193,7 @@ def test_wait_for_schema_agreement_falls_back_to_querying_if_schemas_dont_match_ """ wait_for_schema_agreement requery if schema does not match using preloaded results """ - preloaded_results = self._get_nonmatching_schema_preloaded_results() - + preloaded_results = self._nonmatching_schema_preloaded_results self.assertTrue(self.control_connection.wait_for_schema_agreement(preloaded_results=preloaded_results)) # the control connection should not have slept at all self.assertEqual(self.time.clock, 0) @@ -281,8 +272,7 @@ def test_refresh_nodes_and_tokens_uses_preloaded_results_if_given(self): """ refresh_nodes_and_tokens uses preloaded results if given for shared table queries """ - preloaded_results = self._get_matching_schema_preloaded_results() - + preloaded_results = self._matching_schema_preloaded_results self.control_connection._refresh_node_list_and_token_map(self.connection, preloaded_results=preloaded_results) meta = self.cluster.metadata self.assertEqual(meta.partitioner, 'Murmur3Partitioner') diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 49b2627c67..44365a9ee1 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -485,7 +485,7 @@ def test_as_cql_query_removes_frozen(self): class UserDefinedAggregateTest(unittest.TestCase): def test_as_cql_query_removes_frozen(self): - aggregate = Aggregate("ks1", "myaggregate", ["frozen>"], "statefunc", "frozen>", "finalfunc", "(0)", "tuple") + aggregate = Aggregate("ks1", "myaggregate", ["frozen>"], "statefunc", "frozen>", "finalfunc", "(0)", "tuple", False) expected_result = ( "CREATE AGGREGATE ks1.myaggregate(tuple) " "SFUNC statefunc " @@ -504,7 +504,7 @@ def test_build_index_as_cql(self): column_meta.table.name = 'table_name_here' column_meta.table.keyspace_name = 'keyspace_name_here' column_meta.table.columns = {column_meta.name: column_meta} - parser = get_schema_parser(Mock(), '2.1.0', 0.1) + parser = get_schema_parser(Mock(), '2.1.0', None, 0.1) row = {'index_name': 'index_name_here', 'index_type': 'index_type_here'} index_meta = parser._build_index_metadata(column_meta, row) @@ -574,11 +574,17 @@ def test_index(self): log.debug(im.export_as_string()) def test_function(self): - fm = Function(self.name, self.name, (u'int', u'int'), (u'x', u'y'), u'int', u'language', self.name, False) + fm = Function(keyspace=self.name, name=self.name, + argument_types=(u'int', u'int'), + argument_names=(u'x', u'y'), + return_type=u'int', language=u'language', + body=self.name, called_on_null_input=False, + deterministic=True, + monotonic=False, monotonic_on=(u'x',)) fm.export_as_string() def test_aggregate(self): - am = Aggregate(self.name, self.name, (u'text',), self.name, u'text', self.name, self.name, u'text') + am = Aggregate(self.name, self.name, (u'text',), self.name, u'text', self.name, self.name, u'text', True) am.export_as_string() def test_user_type(self): @@ -586,6 +592,123 @@ def test_user_type(self): um.export_as_string() +class FunctionToCQLTests(unittest.TestCase): + + base_vars = { + 'keyspace': 'ks_name', + 'name': 'function_name', + 'argument_types': (u'int', u'int'), + 'argument_names': (u'x', u'y'), + 'return_type': u'int', + 'language': u'language', + 'body': 'body', + 'called_on_null_input': False, + 'deterministic': True, + 'monotonic': False, + 'monotonic_on': () + } + + def _function_with_kwargs(self, **kwargs): + return Function(**dict(self.base_vars, + **kwargs) + ) + + def test_non_monotonic(self): + self.assertNotIn( + 'MONOTONIC', + self._function_with_kwargs( + monotonic=False, + monotonic_on=() + ).export_as_string() + ) + + def test_monotonic_all(self): + mono_function = self._function_with_kwargs( + monotonic=True, + monotonic_on=() + ) + self.assertIn( + 'MONOTONIC LANG', + mono_function.as_cql_query(formatted=False) + ) + self.assertIn( + 'MONOTONIC\n LANG', + mono_function.as_cql_query(formatted=True) + ) + + def test_monotonic_one(self): + mono_on_function = self._function_with_kwargs( + monotonic=False, + monotonic_on=('x',) + ) + self.assertIn( + 'MONOTONIC ON x LANG', + mono_on_function.as_cql_query(formatted=False) + ) + self.assertIn( + 'MONOTONIC ON x\n LANG', + mono_on_function.as_cql_query(formatted=True) + ) + + def test_nondeterministic(self): + self.assertNotIn( + 'DETERMINISTIC', + self._function_with_kwargs( + deterministic=False + ).as_cql_query(formatted=False) + ) + + def test_deterministic(self): + self.assertIn( + 'DETERMINISTIC', + self._function_with_kwargs( + deterministic=True + ).as_cql_query(formatted=False) + ) + self.assertIn( + 'DETERMINISTIC\n', + self._function_with_kwargs( + deterministic=True + ).as_cql_query(formatted=True) + ) + + +class AggregateToCQLTests(unittest.TestCase): + base_vars = { + 'keyspace': 'ks_name', + 'name': 'function_name', + 'argument_types': (u'int', u'int'), + 'state_func': 'funcname', + 'state_type': u'int', + 'return_type': u'int', + 'final_func': None, + 'initial_condition': '0', + 'deterministic': True + } + + def _aggregate_with_kwargs(self, **kwargs): + return Aggregate(**dict(self.base_vars, + **kwargs) + ) + + def test_nondeterministic(self): + self.assertNotIn( + 'DETERMINISTIC', + self._aggregate_with_kwargs( + deterministic=False + ).as_cql_query(formatted=True) + ) + + def test_deterministic(self): + for formatted in (True, False): + query = self._aggregate_with_kwargs( + deterministic=True + ).as_cql_query(formatted=formatted) + self.assertTrue(query.endswith('DETERMINISTIC'), + msg="'DETERMINISTIC' not found in {}".format(query) + ) + + class HostsTests(unittest.TestCase): def test_iterate_all_hosts_and_modify(self): """ diff --git a/tests/unit/test_parameter_binding.py b/tests/unit/test_parameter_binding.py index 9c91679bc6..228f3f4432 100644 --- a/tests/unit/test_parameter_binding.py +++ b/tests/unit/test_parameter_binding.py @@ -219,5 +219,6 @@ def test_unset_value(self): self.bound.bind((0, 0, 0, UNSET_VALUE)) self.assertEqual(self.bound.values[-1], UNSET_VALUE) + class BoundStatementTestV5(BoundStatementTestV4): protocol_version = 5 diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 15fa3160b0..5c0c11281b 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -156,8 +156,10 @@ def host_down(): sys.setcheckinterval(0) else: sys.setswitchinterval(0.0001) - map(lambda t: t.start(), threads) - map(lambda t: t.join(), threads) + for t in threads: + t.start() + for t in threads: + t.join() finally: if check: sys.setcheckinterval(original_interval) diff --git a/tests/unit/test_protocol.py b/tests/unit/test_protocol.py index 21223d27d1..b43b21eeff 100644 --- a/tests/unit/test_protocol.py +++ b/tests/unit/test_protocol.py @@ -1,13 +1,35 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + try: import unittest2 as unittest except ImportError: import unittest # noqa from mock import Mock + from cassandra import ProtocolVersion, UnsupportedOperation -from cassandra.protocol import (PrepareMessage, QueryMessage, ExecuteMessage, - BatchMessage) -from cassandra.query import SimpleStatement, BatchType +from cassandra.protocol import ( + PrepareMessage, QueryMessage, ExecuteMessage, UnsupportedOperation, + _PAGING_OPTIONS_FLAG, _WITH_SERIAL_CONSISTENCY_FLAG, + _PAGE_SIZE_FLAG, _WITH_PAGING_STATE_FLAG, + BatchMessage +) +from cassandra.query import BatchType +from cassandra.marshal import uint32_unpack +from cassandra.cluster import ContinuousPagingOptions + class MessageTest(unittest.TestCase): @@ -74,12 +96,48 @@ def _check_calls(self, io, expected): tuple(expected) ) + def test_continuous_paging(self): + """ + Test to check continuous paging throws an Exception if it's not supported and the correct valuesa + are written to the buffer if the option is enabled. + + @since DSE 2.0b3 GRAPH 1.0b1 + @jira_ticket PYTHON-694 + @expected_result the values are correctly written + + @test_category connection + """ + max_pages = 4 + max_pages_per_second = 3 + continuous_paging_options = ContinuousPagingOptions(max_pages=max_pages, + max_pages_per_second=max_pages_per_second) + message = QueryMessage("a", 3, continuous_paging_options=continuous_paging_options) + io = Mock() + for version in [version for version in ProtocolVersion.SUPPORTED_VERSIONS + if not ProtocolVersion.has_continuous_paging_support(version)]: + self.assertRaises(UnsupportedOperation, message.send_body, io, version) + + io.reset_mock() + message.send_body(io, ProtocolVersion.DSE_V1) + + # continuous paging adds two write calls to the buffer + self.assertEqual(len(io.write.mock_calls), 6) + # Check that the appropriate flag is set to True + self.assertEqual(uint32_unpack(io.write.mock_calls[3][1][0]) & _WITH_SERIAL_CONSISTENCY_FLAG, 0) + self.assertEqual(uint32_unpack(io.write.mock_calls[3][1][0]) & _PAGE_SIZE_FLAG, 0) + self.assertEqual(uint32_unpack(io.write.mock_calls[3][1][0]) & _WITH_PAGING_STATE_FLAG, 0) + self.assertEqual(uint32_unpack(io.write.mock_calls[3][1][0]) & _PAGING_OPTIONS_FLAG, _PAGING_OPTIONS_FLAG) + + # Test max_pages and max_pages_per_second are correctly written + self.assertEqual(uint32_unpack(io.write.mock_calls[4][1][0]), max_pages) + self.assertEqual(uint32_unpack(io.write.mock_calls[5][1][0]), max_pages_per_second) + def test_prepare_flag(self): """ Test to check the prepare flag is properly set, This should only happen for V5 at the moment. @since 3.9 - @jira_ticket PYTHON-713 + @jira_ticket PYTHON-694, PYTHON-713 @expected_result the values are correctly written @test_category connection diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index 08a1f8b108..ae8c220669 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -37,7 +37,9 @@ class ResponseFutureTests(unittest.TestCase): def make_basic_session(self): - return Mock(spec=Session, row_factory=lambda *x: list(x)) + s = Mock(spec=Session) + s.cluster._default_row_factory = lambda col_names, rows: [(col_names, rows)] + return s def make_pool(self): pool = Mock() @@ -56,8 +58,8 @@ def make_response_future(self, session): message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) return ResponseFuture(session, message, query, 1) - def make_mock_response(self, results): - return Mock(spec=ResultMessage, kind=RESULT_KIND_ROWS, results=results, paging_state=None, col_types=None) + def make_mock_response(self, col_names, rows): + return Mock(spec=ResultMessage, kind=RESULT_KIND_ROWS, column_names=col_names, parsed_rows=rows, paging_state=None, col_types=None) def test_result_message(self): session = self.make_basic_session() @@ -76,9 +78,10 @@ def test_result_message(self): connection.send_msg.assert_called_once_with(rf.message, 1, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message, result_metadata=[]) - rf._set_result(None, None, None, self.make_mock_response([{'col': 'val'}])) - result = rf.result() - self.assertEqual(result, [{'col': 'val'}]) + expected_result = (object(), object()) + rf._set_result(None, None, None, self.make_mock_response(expected_result[0], expected_result[1])) + result = rf.result()[0] + self.assertEqual(result, expected_result) def test_unknown_result_class(self): session = self.make_session() @@ -112,7 +115,7 @@ def test_schema_change_result(self): 'keyspace': "keyspace1", "table": "table1"} result = Mock(spec=ResultMessage, kind=RESULT_KIND_SCHEMA_CHANGE, - results=event_results) + schema_change_event=event_results) connection = Mock() rf._set_result(None, connection, None, result) session.submit.assert_called_once_with(ANY, ANY, rf, connection, **event_results) @@ -121,9 +124,9 @@ def test_other_result_message_kind(self): session = self.make_session() rf = self.make_response_future(session) rf.send_request() - result = [1, 2, 3] - rf._set_result(None, None, None, Mock(spec=ResultMessage, kind=999, results=result)) - self.assertListEqual(list(rf.result()), result) + result = Mock(spec=ResultMessage, kind=999, results=[1, 2, 3]) + rf._set_result(None, None, None, result) + self.assertEqual(rf.result()[0], result) def test_read_timeout_error_message(self): session = self.make_session() @@ -309,10 +312,11 @@ def test_first_pool_shutdown(self): rf = self.make_response_future(session) rf.send_request() - rf._set_result(None, None, None, self.make_mock_response([{'col': 'val'}])) + expected_result = (object(), object()) + rf._set_result(None, None, None, self.make_mock_response(expected_result[0], expected_result[1])) - result = rf.result() - self.assertEqual(result, [{'col': 'val'}]) + result = rf.result()[0] + self.assertEqual(result, expected_result) def test_timeout_getting_connection_from_pool(self): session = self.make_basic_session() @@ -333,8 +337,9 @@ def test_timeout_getting_connection_from_pool(self): rf = self.make_response_future(session) rf.send_request() - rf._set_result(None, None, None, self.make_mock_response([{'col': 'val'}])) - self.assertEqual(rf.result(), [{'col': 'val'}]) + expected_result = (object(), object()) + rf._set_result(None, None, None, self.make_mock_response(expected_result[0], expected_result[1])) + self.assertEqual(rf.result()[0], expected_result) # make sure the exception is recorded correctly self.assertEqual(rf._errors, {'ip1': exc}) @@ -345,20 +350,20 @@ def test_callback(self): rf.send_request() callback = Mock() - expected_result = [{'col': 'val'}] + expected_result = (object(), object()) arg = "positional" kwargs = {'one': 1, 'two': 2} rf.add_callback(callback, arg, **kwargs) - rf._set_result(None, None, None, self.make_mock_response(expected_result)) + rf._set_result(None, None, None, self.make_mock_response(expected_result[0], expected_result[1])) - result = rf.result() + result = rf.result()[0] self.assertEqual(result, expected_result) - callback.assert_called_once_with(expected_result, arg, **kwargs) + callback.assert_called_once_with([expected_result], arg, **kwargs) # this should get called immediately now that the result is set - rf.add_callback(self.assertEqual, [{'col': 'val'}]) + rf.add_callback(self.assertEqual, [expected_result]) def test_errback(self): session = self.make_session() @@ -390,7 +395,7 @@ def test_multiple_callbacks(self): rf.send_request() callback = Mock() - expected_result = [{'col': 'val'}] + expected_result = (object(), object()) arg = "positional" kwargs = {'one': 1, 'two': 2} rf.add_callback(callback, arg, **kwargs) @@ -400,13 +405,13 @@ def test_multiple_callbacks(self): kwargs2 = {'three': 3, 'four': 4} rf.add_callback(callback2, arg2, **kwargs2) - rf._set_result(None, None, None, self.make_mock_response(expected_result)) + rf._set_result(None, None, None, self.make_mock_response(expected_result[0], expected_result[1])) - result = rf.result() + result = rf.result()[0] self.assertEqual(result, expected_result) - callback.assert_called_once_with(expected_result, arg, **kwargs) - callback2.assert_called_once_with(expected_result, arg2, **kwargs2) + callback.assert_called_once_with([expected_result], arg, **kwargs) + callback2.assert_called_once_with([expected_result], arg2, **kwargs2) def test_multiple_errbacks(self): session = self.make_session() @@ -467,17 +472,17 @@ def test_add_callbacks(self): rf.send_request() callback = Mock() - expected_result = [{'col': 'val'}] + expected_result = (object(), object()) arg = "positional" kwargs = {'one': 1, 'two': 2} rf.add_callbacks( callback=callback, callback_args=(arg,), callback_kwargs=kwargs, errback=self.assertIsInstance, errback_args=(Exception,)) - rf._set_result(None, None, None, self.make_mock_response(expected_result)) - self.assertEqual(rf.result(), expected_result) + rf._set_result(None, None, None, self.make_mock_response(expected_result[0], expected_result[1])) + self.assertEqual(rf.result()[0], expected_result) - callback.assert_called_once_with(expected_result, arg, **kwargs) + callback.assert_called_once_with([expected_result], arg, **kwargs) def test_prepared_query_not_found(self): session = self.make_session() @@ -528,7 +533,9 @@ def test_repeat_orig_query_after_succesful_reprepare(self): session = self.make_session() rf = self.make_response_future(session) - response = Mock(spec=ResultMessage, kind=RESULT_KIND_PREPARED) + response = Mock(spec=ResultMessage, + kind=RESULT_KIND_PREPARED, + result_metadata_id='foo') response.results = (None, None, None, None, None) rf._query = Mock(return_value=True) diff --git a/tests/unit/test_resultset.py b/tests/unit/test_resultset.py index 541ef6fe32..c1a2562360 100644 --- a/tests/unit/test_resultset.py +++ b/tests/unit/test_resultset.py @@ -33,7 +33,7 @@ def test_iter_non_paged(self): def test_iter_paged(self): expected = list(range(10)) - response_future = Mock(has_more_pages=True) + response_future = Mock(has_more_pages=True, _continuous_paging_session=None) response_future.result.side_effect = (ResultSet(Mock(), expected[-5:]), ) # ResultSet is iterable, so it must be protected in order to be returned whole by the Mock rs = ResultSet(response_future, expected[:5]) itr = iter(rs) @@ -52,7 +52,7 @@ def test_list_non_paged(self): def test_list_paged(self): # list access on RS for backwards-compatibility expected = list(range(10)) - response_future = Mock(has_more_pages=True) + response_future = Mock(has_more_pages=True, _continuous_paging_session=None) response_future.result.side_effect = (ResultSet(Mock(), expected[-5:]), ) # ResultSet is iterable, so it must be protected in order to be returned whole by the Mock rs = ResultSet(response_future, expected[:5]) # this is brittle, depends on internal impl details. Would like to find a better way @@ -85,7 +85,7 @@ def test_iterate_then_index(self): self.assertFalse(list(rs)) # RuntimeError if indexing during or after pages - response_future = Mock(has_more_pages=True) + response_future = Mock(has_more_pages=True, _continuous_paging_session=None) response_future.result.side_effect = (ResultSet(Mock(), expected[-5:]), ) # ResultSet is iterable, so it must be protected in order to be returned whole by the Mock rs = ResultSet(response_future, expected[:5]) type(response_future).has_more_pages = PropertyMock(side_effect=(True, False)) @@ -118,7 +118,7 @@ def test_index_list_mode(self): self.assertTrue(rs) # pages - response_future = Mock(has_more_pages=True) + response_future = Mock(has_more_pages=True, _continuous_paging_session=None) response_future.result.side_effect = (ResultSet(Mock(), expected[-5:]), ) # ResultSet is iterable, so it must be protected in order to be returned whole by the Mock rs = ResultSet(response_future, expected[:5]) # this is brittle, depends on internal impl details. Would like to find a better way @@ -146,7 +146,7 @@ def test_eq(self): self.assertTrue(rs) # pages - response_future = Mock(has_more_pages=True) + response_future = Mock(has_more_pages=True, _continuous_paging_session=None) response_future.result.side_effect = (ResultSet(Mock(), expected[-5:]), ) # ResultSet is iterable, so it must be protected in order to be returned whole by the Mock rs = ResultSet(response_future, expected[:5]) type(response_future).has_more_pages = PropertyMock(side_effect=(True, True, True, False)) diff --git a/tests/unit/test_timestamps.py b/tests/unit/test_timestamps.py index bbca3527f6..8903fbc99b 100644 --- a/tests/unit/test_timestamps.py +++ b/tests/unit/test_timestamps.py @@ -20,9 +20,9 @@ import mock from cassandra import timestamps -import time from threading import Thread, Lock + class _TimestampTestMixin(object): @mock.patch('cassandra.timestamps.time') @@ -61,6 +61,8 @@ class TestTimestampGeneratorOutput(unittest.TestCase, _TimestampTestMixin): def test_timestamps_during_and_after_same_system_time(self): """ + Timestamps should increase monotonically over repeated system time. + Test that MonotonicTimestampGenerator's output increases by 1 when the underlying system time is the same, then returns to normal when the system time increases again. @@ -79,13 +81,11 @@ def test_timestamps_during_and_after_same_system_time(self): def test_timestamps_during_and_after_backwards_system_time(self): """ + Timestamps should increase monotonically over system time going backwards. + Test that MonotonicTimestampGenerator's output increases by 1 when the underlying system time goes backward, then returns to normal when the system time increases again. - - @since 3.8.0 - @expected_result Timestamps should increase monotonically over system time going backwards. - @test_category timing """ self._call_and_check_results( system_time_expected_stamp_pairs=( diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index c8f3011d07..e638bd48c9 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -16,27 +16,37 @@ except ImportError: import unittest # noqa -from binascii import unhexlify import datetime import tempfile -import six import time +from binascii import unhexlify + +import six import cassandra -from cassandra.cqltypes import (BooleanType, lookup_casstype_simple, lookup_casstype, - LongType, DecimalType, SetType, cql_typename, - CassandraType, UTF8Type, parse_casstype_args, - SimpleDateType, TimeType, ByteType, ShortType, - EmptyValue, _CassandraType, DateType, int64_pack) +from cassandra import util +from cassandra.cqltypes import ( + CassandraType, DateRangeType, DateType, DecimalType, + EmptyValue, LongType, SetType, UTF8Type, + cql_typename, int8_pack, int64_pack, lookup_casstype, + lookup_casstype_simple, parse_casstype_args +) from cassandra.encoder import cql_quote -from cassandra.protocol import (write_string, read_longstring, write_stringmap, - read_stringmap, read_inet, write_inet, - read_string, write_longstring) -from cassandra.query import named_tuple_factory from cassandra.pool import Host -from cassandra.policies import SimpleConvictionPolicy, ConvictionPolicy -from cassandra.util import Date, Time from cassandra.metadata import Token +from cassandra.policies import ConvictionPolicy, SimpleConvictionPolicy +from cassandra.protocol import ( + read_inet, read_longstring, read_string, + read_stringmap, write_inet, write_longstring, + write_string, write_stringmap +) +from cassandra.query import named_tuple_factory +from cassandra.util import ( + OPEN_BOUND, Date, DateRange, DateRangeBound, + DateRangePrecision, Time, ms_timestamp_from_datetime, + datetime_from_timestamp +) +from tests.unit.util import check_sequence_consistency class TypeTests(unittest.TestCase): @@ -72,6 +82,7 @@ def test_lookup_casstype_simple(self): self.assertEqual(lookup_casstype_simple('ColumnToCollectionType'), cassandra.cqltypes.ColumnToCollectionType) self.assertEqual(lookup_casstype_simple('ReversedType'), cassandra.cqltypes.ReversedType) self.assertEqual(lookup_casstype_simple('DurationType'), cassandra.cqltypes.DurationType) + self.assertEqual(lookup_casstype_simple('DateRangeType'), cassandra.cqltypes.DateRangeType) self.assertEqual(str(lookup_casstype_simple('unknown')), str(cassandra.cqltypes.mkUnrecognizedType('unknown'))) @@ -106,6 +117,7 @@ def test_lookup_casstype(self): self.assertEqual(lookup_casstype('ColumnToCollectionType'), cassandra.cqltypes.ColumnToCollectionType) self.assertEqual(lookup_casstype('ReversedType'), cassandra.cqltypes.ReversedType) self.assertEqual(lookup_casstype('DurationType'), cassandra.cqltypes.DurationType) + self.assertEqual(lookup_casstype('DateRangeType'), cassandra.cqltypes.DateRangeType) self.assertEqual(str(lookup_casstype('unknown')), str(cassandra.cqltypes.mkUnrecognizedType('unknown'))) @@ -163,9 +175,9 @@ class BarType(FooType): ctype = parse_casstype_args(''.join(( 'org.apache.cassandra.db.marshal.FooType(', - '63697479:org.apache.cassandra.db.marshal.UTF8Type,', - 'BarType(61646472657373:org.apache.cassandra.db.marshal.UTF8Type),', - '7a6970:org.apache.cassandra.db.marshal.UTF8Type', + '63697479:org.apache.cassandra.db.marshal.UTF8Type,', + 'BarType(61646472657373:org.apache.cassandra.db.marshal.UTF8Type),', + '7a6970:org.apache.cassandra.db.marshal.UTF8Type', ')'))) self.assertEqual(FooType, ctype.__class__) @@ -254,27 +266,519 @@ def test_cql_quote(self): self.assertEqual(cql_quote(0), '0') -class TestOrdering(unittest.TestCase): - def _check_order_consistency(self, smaller, bigger, equal=False): - self.assertLessEqual(smaller, bigger) - self.assertGreaterEqual(bigger, smaller) - if equal: - self.assertEqual(smaller, bigger) - else: - self.assertNotEqual(smaller, bigger) - self.assertLess(smaller, bigger) - self.assertGreater(bigger, smaller) +ZERO = datetime.timedelta(0) + + +class UTC(datetime.tzinfo): + """UTC""" + + def utcoffset(self, dt): + return ZERO + + def tzname(self, dt): + return "UTC" + + def dst(self, dt): + return ZERO + + +try: + utc_timezone = datetime.timezone.utc +except AttributeError: + utc_timezone = UTC() + + +class DateRangeTypeTests(unittest.TestCase): + dt = datetime.datetime(1990, 2, 3, 13, 58, 45, 777777) + timestamp = 1485963732404 + + def test_month_rounding_creation_failure(self): + """ + @jira_ticket PYTHON-912 + """ + feb_stamp = ms_timestamp_from_datetime( + datetime.datetime(2018, 2, 25, 18, 59, 59, 0) + ) + dr = DateRange(OPEN_BOUND, + DateRangeBound(feb_stamp, DateRangePrecision.MONTH)) + dt = datetime_from_timestamp(dr.upper_bound.milliseconds / 1000) + self.assertEqual(dt.day, 28) + + # Leap year + feb_stamp_leap_year = ms_timestamp_from_datetime( + datetime.datetime(2016, 2, 25, 18, 59, 59, 0) + ) + dr = DateRange(OPEN_BOUND, + DateRangeBound(feb_stamp_leap_year, DateRangePrecision.MONTH)) + dt = datetime_from_timestamp(dr.upper_bound.milliseconds / 1000) + self.assertEqual(dt.day, 29) + + def test_decode_precision(self): + self.assertEqual(DateRangeType._decode_precision(6), 'MILLISECOND') + + def test_decode_precision_error(self): + with self.assertRaises(ValueError): + DateRangeType._decode_precision(-1) + + def test_encode_precision(self): + self.assertEqual(DateRangeType._encode_precision('SECOND'), 5) + + def test_encode_precision_error(self): + with self.assertRaises(ValueError): + DateRangeType._encode_precision('INVALID') + + def test_deserialize_single_value(self): + serialized = (int8_pack(0) + + int64_pack(self.timestamp) + + int8_pack(3)) + self.assertEqual( + DateRangeType.deserialize(serialized, 5), + util.DateRange(value=util.DateRangeBound( + value=datetime.datetime(2017, 2, 1, 15, 42, 12, 404000), + precision='HOUR') + ) + ) + + def test_deserialize_closed_range(self): + serialized = (int8_pack(1) + + int64_pack(self.timestamp) + + int8_pack(2) + + int64_pack(self.timestamp) + + int8_pack(6)) + self.assertEqual( + DateRangeType.deserialize(serialized, 5), + util.DateRange( + lower_bound=util.DateRangeBound( + value=datetime.datetime(2017, 2, 1, 0, 0), + precision='DAY' + ), + upper_bound=util.DateRangeBound( + value=datetime.datetime(2017, 2, 1, 15, 42, 12, 404000), + precision='MILLISECOND' + ) + ) + ) + + def test_deserialize_open_high(self): + serialized = (int8_pack(2) + + int64_pack(self.timestamp) + + int8_pack(3)) + deserialized = DateRangeType.deserialize(serialized, 5) + self.assertEqual( + deserialized, + util.DateRange( + lower_bound=util.DateRangeBound( + value=datetime.datetime(2017, 2, 1, 15, 0), + precision='HOUR' + ), + upper_bound=util.OPEN_BOUND + ) + ) + + def test_deserialize_open_low(self): + serialized = (int8_pack(3) + + int64_pack(self.timestamp) + + int8_pack(4)) + deserialized = DateRangeType.deserialize(serialized, 5) + self.assertEqual( + deserialized, + util.DateRange( + lower_bound=util.OPEN_BOUND, + upper_bound=util.DateRangeBound( + value=datetime.datetime(2017, 2, 1, 15, 42, 20, 1000), + precision='MINUTE' + ) + ) + ) + + def test_deserialize_single_open(self): + self.assertEqual( + util.DateRange(value=util.OPEN_BOUND), + DateRangeType.deserialize(int8_pack(5), 5) + ) + + def test_serialize_single_value(self): + serialized = (int8_pack(0) + + int64_pack(self.timestamp) + + int8_pack(5)) + deserialized = DateRangeType.deserialize(serialized, 5) + self.assertEqual( + deserialized, + util.DateRange( + value=util.DateRangeBound( + value=datetime.datetime(2017, 2, 1, 15, 42, 12), + precision='SECOND' + ) + ) + ) + + def test_serialize_closed_range(self): + serialized = (int8_pack(1) + + int64_pack(self.timestamp) + + int8_pack(5) + + int64_pack(self.timestamp) + + int8_pack(0)) + deserialized = DateRangeType.deserialize(serialized, 5) + self.assertEqual( + deserialized, + util.DateRange( + lower_bound=util.DateRangeBound( + value=datetime.datetime(2017, 2, 1, 15, 42, 12), + precision='SECOND' + ), + upper_bound=util.DateRangeBound( + value=datetime.datetime(2017, 12, 31), + precision='YEAR' + ) + ) + ) + + def test_serialize_open_high(self): + serialized = (int8_pack(2) + + int64_pack(self.timestamp) + + int8_pack(2)) + deserialized = DateRangeType.deserialize(serialized, 5) + self.assertEqual( + deserialized, + util.DateRange( + lower_bound=util.DateRangeBound( + value=datetime.datetime(2017, 2, 1), + precision='DAY' + ), + upper_bound=util.OPEN_BOUND + ) + ) + + def test_serialize_open_low(self): + serialized = (int8_pack(2) + + int64_pack(self.timestamp) + + int8_pack(3)) + deserialized = DateRangeType.deserialize(serialized, 5) + self.assertEqual( + deserialized, + util.DateRange( + lower_bound=util.DateRangeBound( + value=datetime.datetime(2017, 2, 1, 15), + precision='HOUR' + ), + upper_bound=util.OPEN_BOUND + ) + ) + + def test_deserialize_both_open(self): + serialized = (int8_pack(4)) + deserialized = DateRangeType.deserialize(serialized, 5) + self.assertEqual( + deserialized, + util.DateRange( + lower_bound=util.OPEN_BOUND, + upper_bound=util.OPEN_BOUND + ) + ) + + def test_serialize_single_open(self): + serialized = DateRangeType.serialize(util.DateRange( + value=util.OPEN_BOUND, + ), 5) + self.assertEqual(int8_pack(5), serialized) + + def test_serialize_both_open(self): + serialized = DateRangeType.serialize(util.DateRange( + lower_bound=util.OPEN_BOUND, + upper_bound=util.OPEN_BOUND + ), 5) + self.assertEqual(int8_pack(4), serialized) + + def test_failure_to_serialize_no_value_object(self): + self.assertRaises(ValueError, DateRangeType.serialize, object(), 5) + + def test_failure_to_serialize_no_bounds_object(self): + class no_bounds_object(object): + value = lower_bound = None + self.assertRaises(ValueError, DateRangeType.serialize, no_bounds_object, 5) + + def test_serialized_value_round_trip(self): + vals = [six.b('\x01\x00\x00\x01%\xe9a\xf9\xd1\x06\x00\x00\x01v\xbb>o\xff\x00'), + six.b('\x01\x00\x00\x00\xdcm\x03-\xd1\x06\x00\x00\x01v\xbb>o\xff\x00')] + for serialized in vals: + self.assertEqual( + serialized, + DateRangeType.serialize(DateRangeType.deserialize(serialized, 0), 0) + ) + + def test_serialize_zero_datetime(self): + """ + Test serialization where timestamp = 0 + + Companion test for test_deserialize_zero_datetime + + @since 2.0.0 + @jira_ticket PYTHON-729 + @expected_result serialization doesn't raise an error + + @test_category data_types + """ + DateRangeType.serialize(util.DateRange( + lower_bound=(datetime.datetime(1970, 1, 1), 'YEAR'), + upper_bound=(datetime.datetime(1970, 1, 1), 'YEAR') + ), 5) + + def test_deserialize_zero_datetime(self): + """ + Test deserialization where timestamp = 0 + + Reproduces PYTHON-729 + + @since 2.0.0 + @jira_ticket PYTHON-729 + @expected_result deserialization doesn't raise an error + + @test_category data_types + """ + DateRangeType.deserialize( + (int8_pack(1) + + int64_pack(0) + int8_pack(0) + + int64_pack(0) + int8_pack(0)), + 5 + ) + + +class DateRangeDeserializationTests(unittest.TestCase): + """ + These tests iterate over different timestamp values + and assert deserialization gives the expected value + """ + + starting_lower_value = 1514744108923 + """ + Sample starting value for the lower bound for DateRange + """ + starting_upper_value = 2148761288922 + """ + Sample starting value for the upper bound for DateRange + """ + + epoch = datetime.datetime(1970, 1, 1, tzinfo=utc_timezone) + + def test_deserialize_date_range_milliseconds(self): + """ + Test rounding from DateRange for milliseconds + + @since 2.0.0 + @jira_ticket PYTHON-898 + @expected_result + + @test_category data_types + """ + for i in range(1000): + lower_value = self.starting_lower_value + i + upper_value = self.starting_upper_value + i + dr = DateRange(DateRangeBound(lower_value, DateRangePrecision.MILLISECOND), + DateRangeBound(upper_value, DateRangePrecision.MILLISECOND)) + self.assertEqual(lower_value, dr.lower_bound.milliseconds) + self.assertEqual(upper_value, dr.upper_bound.milliseconds) + + def test_deserialize_date_range_seconds(self): + """ + Test rounding from DateRange for milliseconds + @since 2.0.0 + @jira_ticket PYTHON-898 + @expected_result + + @test_category data_types + """ + + def truncate_last_figures(number, n=3): + """ + Truncates last n digits of a number + """ + return int(str(number)[:-n] + '0' * n) + + for i in range(1000): + lower_value = self.starting_lower_value + i * 900 + upper_value = self.starting_upper_value + i * 900 + dr = DateRange(DateRangeBound(lower_value, DateRangePrecision.SECOND), + DateRangeBound(upper_value, DateRangePrecision.SECOND)) + + self.assertEqual(truncate_last_figures(lower_value), dr.lower_bound.milliseconds) + upper_value = truncate_last_figures(upper_value) + 999 + self.assertEqual(upper_value, dr.upper_bound.milliseconds) + + def test_deserialize_date_range_minutes(self): + """ + Test rounding from DateRange for seconds + + @since 2.4.0 + @jira_ticket PYTHON-898 + @expected_result + + @test_category data_types + """ + self._deserialize_date_range({"second": 0, "microsecond": 0}, + DateRangePrecision.MINUTE, + # This lambda function given a truncated date adds + # one day minus one microsecond in microseconds + lambda x: x + 59 * 1000 + 999, + lambda original_value, i: original_value + i * 900 * 50) + + def test_deserialize_date_range_hours(self): + """ + Test rounding from DateRange for hours + + @since 2.4.0 + @jira_ticket PYTHON-898 + @expected_result + + @test_category data_types + """ + self._deserialize_date_range({"minute": 0, "second": 0, "microsecond": 0}, + DateRangePrecision.HOUR, + # This lambda function given a truncated date adds + # one hour minus one microsecond in microseconds + lambda x: x + + 59 * 60 * 1000 + + 59 * 1000 + + 999, + lambda original_value, i: original_value + i * 900 * 50 * 60) + + def test_deserialize_date_range_day(self): + """ + Test rounding from DateRange for hours + + @since 2.4.0 + @jira_ticket PYTHON-898 + @expected_result + + @test_category data_types + """ + self._deserialize_date_range({"hour": 0, "minute": 0, "second": 0, "microsecond": 0}, + DateRangePrecision.DAY, + # This lambda function given a truncated date adds + # one day minus one microsecond in microseconds + lambda x: x + + 23 * 60 * 60 * 1000 + + 59 * 60 * 1000 + + 59 * 1000 + + 999, + lambda original_value, i: original_value + i * 900 * 50 * 60 * 24) + + @unittest.skip("This is currently failig, see PYTHON-912") + def test_deserialize_date_range_month(self): + """ + Test rounding from DateRange for months + + @since 2.4.0 + @jira_ticket PYTHON-898 + @expected_result + + @test_category data_types + """ + def get_upper_bound(seconds): + """ + function that given a truncated date in seconds from the epoch returns that same date + but with the microseconds set to 999999, seconds to 59, minutes to 59, hours to 23 + and days 28, 29, 30 or 31 depending on the month. + The way to do this is to add one month and leave the date at YEAR-MONTH-01 00:00:00 000000. + Then substract one millisecond. + """ + dt = datetime.datetime.fromtimestamp(seconds / 1000.0, tz=utc_timezone) + dt = dt + datetime.timedelta(days=32) + dt = dt.replace(day=1) - datetime.timedelta(microseconds=1) + return int((dt - self.epoch).total_seconds() * 1000) + self._deserialize_date_range({"day": 1, "hour": 0, "minute": 0, "second": 0, "microsecond": 0}, + DateRangePrecision.MONTH, + get_upper_bound, + lambda original_value, i: original_value + i * 900 * 50 * 60 * 24 * 30) + + def test_deserialize_date_range_year(self): + """ + Test rounding from DateRange for year + + @since 2.4.0 + @jira_ticket PYTHON-898 + @expected_result + + @test_category data_types + """ + def get_upper_bound(seconds): + """ + function that given a truncated date in seconds from the epoch returns that same date + but with the microseconds set to 999999, seconds to 59, minutes to 59, hours to 23 + days 28, 29, 30 or 31 depending on the month and months to 12. + The way to do this is to add one year and leave the date at YEAR-01-01 00:00:00 000000. + Then substract one millisecond. + """ + dt = datetime.datetime.fromtimestamp(seconds / 1000.0, tz=utc_timezone) + dt = dt + datetime.timedelta(days=370) + dt = dt.replace(day=1) - datetime.timedelta(microseconds=1) + + diff = time.mktime(dt.timetuple()) - time.mktime(self.epoch.timetuple()) + return diff * 1000 + 999 + # This doesn't work for big values because it loses precision + #return int((dt - self.epoch).total_seconds() * 1000) + self._deserialize_date_range({"month": 1, "day": 1, "hour": 0, "minute": 0, "second": 0, "microsecond": 0}, + DateRangePrecision.YEAR, + get_upper_bound, + lambda original_value, i: original_value + i * 900 * 50 * 60 * 24 * 30 * 12 * 7) + + def _deserialize_date_range(self, truncate_kwargs, precision, + round_up_truncated_upper_value, increment_loop_variable): + """ + This functions iterates over several DateRange objects determined by + lower_value upper_value which are given as a value that represents seconds since the epoch. + We want to make sure the lower_value is correctly rounded down and the upper value is correctly rounded up. + In the case of rounding down we verify that the rounded down value + has the appropriate fields set to the minimum they could possible have. That is + 1 for months, 1 for days, 0 for hours, 0 for minutes, 0 for seconds, 0 for microseconds. + We use the generic function truncate_date which depends on truncate_kwargs for this + + In the case of rounding up we verify that the rounded up value has the appropriate fields set + to the maximum they could possible have. This is calculated by round_up_truncated_upper_value + which input is the truncated value from before. It is passed as an argument as the way + of calculating this is is different for every precision. + + :param truncate_kwargs: determine what values to truncate in truncate_date + :param precision: :class:`~util.DateRangePrecision` + :param round_up_truncated_upper_value: this is a function that gets a truncated date and + returns a new date with some fields set to the maximum possible value + :param increment_loop_variable: this is a function that given a starting value and the iteration + value returns a new date to serve as lower_bound/upper_bound. We need this because the value by which + dates are incremented depends on if the precision is seconds, minutes, hours, days and months + :return: + """ + + def truncate_date(number): + """ + Given a date in seconds since the epoch truncates ups to a certain precision depending on + truncate_kwargs. + The return is the truncated date in seconds since the epoch. + For example if truncate_kwargs = {"hour": 0, "minute": 0, "second": 0, "microsecond": 0} the returned + value will be the original given date but with the hours, minutes, seconds and microseconds set to 0 + """ + dt = datetime.datetime.fromtimestamp(number / 1000.0, tz=utc_timezone) + dt = dt.replace(**truncate_kwargs) + return round((dt - self.epoch).total_seconds() * 1000.0) + + for i in range(1000): + # We increment the lower_value and upper_value according to increment_loop_variable + lower_value = increment_loop_variable(self.starting_lower_value, i) + upper_value = increment_loop_variable(self.starting_upper_value, i) + + # Inside the __init__ for DateRange the rounding up and down should happen + dr = DateRange(DateRangeBound(lower_value, precision), + DateRangeBound(upper_value, precision)) + + # We verify that rounded value corresponds with what we would expect + self.assertEqual(truncate_date(lower_value), dr.lower_bound.milliseconds) + upper_value = round_up_truncated_upper_value(truncate_date(upper_value)) + self.assertEqual(upper_value, dr.upper_bound.milliseconds) + + +class TestOrdering(unittest.TestCase): def _shuffle_lists(self, *args): return [item for sublist in zip(*args) for item in sublist] - def _check_sequence_consistency(self, ordered_sequence, equal=False): - for i, el in enumerate(ordered_sequence): - for previous in ordered_sequence[:i]: - self._check_order_consistency(previous, el, equal) - for posterior in ordered_sequence[i + 1:]: - self._check_order_consistency(el, posterior, equal) - def test_host_order(self): """ Test Host class is ordered consistently @@ -290,9 +794,9 @@ def test_host_order(self): hosts_equal = [Host(addr, SimpleConvictionPolicy) for addr in ("127.0.0.1", "127.0.0.1")] hosts_equal_conviction = [Host("127.0.0.1", SimpleConvictionPolicy), Host("127.0.0.1", ConvictionPolicy)] - self._check_sequence_consistency(hosts) - self._check_sequence_consistency(hosts_equal, equal=True) - self._check_sequence_consistency(hosts_equal_conviction, equal=True) + check_sequence_consistency(self, hosts) + check_sequence_consistency(self, hosts_equal, equal=True) + check_sequence_consistency(self, hosts_equal_conviction, equal=True) def test_date_order(self): """ @@ -306,8 +810,8 @@ def test_date_order(self): """ dates_from_string = [Date("2017-01-01"), Date("2017-01-05"), Date("2017-01-09"), Date("2017-01-13")] dates_from_string_equal = [Date("2017-01-01"), Date("2017-01-01")] - self._check_sequence_consistency(dates_from_string) - self._check_sequence_consistency(dates_from_string_equal, equal=True) + check_sequence_consistency(self, dates_from_string) + check_sequence_consistency(self, dates_from_string_equal, equal=True) date_format = "%Y-%m-%d" @@ -317,15 +821,15 @@ def test_date_order(self): for dtstr in ("2017-01-02", "2017-01-06", "2017-01-10", "2017-01-14") ] dates_from_value_equal = [Date(1), Date(1)] - self._check_sequence_consistency(dates_from_value) - self._check_sequence_consistency(dates_from_value_equal, equal=True) + check_sequence_consistency(self, dates_from_value) + check_sequence_consistency(self, dates_from_value_equal, equal=True) dates_from_datetime = [Date(datetime.datetime.strptime(dtstr, date_format)) for dtstr in ("2017-01-03", "2017-01-07", "2017-01-11", "2017-01-15")] dates_from_datetime_equal = [Date(datetime.datetime.strptime("2017-01-01", date_format)), Date(datetime.datetime.strptime("2017-01-01", date_format))] - self._check_sequence_consistency(dates_from_datetime) - self._check_sequence_consistency(dates_from_datetime_equal, equal=True) + check_sequence_consistency(self, dates_from_datetime) + check_sequence_consistency(self, dates_from_datetime_equal, equal=True) dates_from_date = [ Date(datetime.datetime.strptime(dtstr, date_format).date()) for dtstr in @@ -334,10 +838,10 @@ def test_date_order(self): dates_from_date_equal = [datetime.datetime.strptime(dtstr, date_format) for dtstr in ("2017-01-09", "2017-01-9")] - self._check_sequence_consistency(dates_from_date) - self._check_sequence_consistency(dates_from_date_equal, equal=True) + check_sequence_consistency(self, dates_from_date) + check_sequence_consistency(self, dates_from_date_equal, equal=True) - self._check_sequence_consistency(self._shuffle_lists(dates_from_string, dates_from_value, + check_sequence_consistency(self, self._shuffle_lists(dates_from_string, dates_from_value, dates_from_datetime, dates_from_date)) def test_timer_order(self): @@ -352,23 +856,23 @@ def test_timer_order(self): """ time_from_int = [Time(1000), Time(4000), Time(7000), Time(10000)] time_from_int_equal = [Time(1), Time(1)] - self._check_sequence_consistency(time_from_int) - self._check_sequence_consistency(time_from_int_equal, equal=True) + check_sequence_consistency(self, time_from_int) + check_sequence_consistency(self, time_from_int_equal, equal=True) time_from_datetime = [Time(datetime.time(hour=0, minute=0, second=0, microsecond=us)) for us in (2, 5, 8, 11)] time_from_datetime_equal = [Time(datetime.time(hour=0, minute=0, second=0, microsecond=us)) for us in (1, 1)] - self._check_sequence_consistency(time_from_datetime) - self._check_sequence_consistency(time_from_datetime_equal, equal=True) + check_sequence_consistency(self, time_from_datetime) + check_sequence_consistency(self, time_from_datetime_equal, equal=True) time_from_string = [Time("00:00:00.000003000"), Time("00:00:00.000006000"), Time("00:00:00.000009000"), Time("00:00:00.000012000")] time_from_string_equal = [Time("00:00:00.000004000"), Time("00:00:00.000004000")] - self._check_sequence_consistency(time_from_string) - self._check_sequence_consistency(time_from_string_equal, equal=True) + check_sequence_consistency(self, time_from_string) + check_sequence_consistency(self, time_from_string_equal, equal=True) - self._check_sequence_consistency(self._shuffle_lists(time_from_int, time_from_datetime, time_from_string)) + check_sequence_consistency(self, self._shuffle_lists(time_from_int, time_from_datetime, time_from_string)) def test_token_order(self): """ @@ -382,5 +886,5 @@ def test_token_order(self): """ tokens = [Token(1), Token(2), Token(3), Token(4)] tokens_equal = [Token(1), Token(1)] - self._check_sequence_consistency(tokens) - self._check_sequence_consistency(tokens_equal, equal=True) + check_sequence_consistency(self, tokens) + check_sequence_consistency(self, tokens_equal, equal=True) diff --git a/tests/unit/util.py b/tests/unit/util.py new file mode 100644 index 0000000000..e57fa6c3ee --- /dev/null +++ b/tests/unit/util.py @@ -0,0 +1,30 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +def check_sequence_consistency(unit_test, ordered_sequence, equal=False): + for i, el in enumerate(ordered_sequence): + for previous in ordered_sequence[:i]: + _check_order_consistency(unit_test, previous, el, equal) + for posterior in ordered_sequence[i + 1:]: + _check_order_consistency(unit_test, el, posterior, equal) + + +def _check_order_consistency(unit_test, smaller, bigger, equal=False): + unit_test.assertLessEqual(smaller, bigger) + unit_test.assertGreaterEqual(bigger, smaller) + if equal: + unit_test.assertEqual(smaller, bigger) + else: + unit_test.assertNotEqual(smaller, bigger) + unit_test.assertLess(smaller, bigger) + unit_test.assertGreater(bigger, smaller) From 2c323850ba2c76521ced395feee47cba10bd0d83 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 11 Sep 2019 09:51:21 -0400 Subject: [PATCH 1603/2005] Add unit/advanced/test_insights.py --- cassandra/cluster.py | 3 +- tests/unit/advanced/__init__.py | 0 tests/unit/advanced/test_insights.py | 303 +++++++++++++++++++++++++++ 3 files changed, 305 insertions(+), 1 deletion(-) create mode 100644 tests/unit/advanced/__init__.py create mode 100644 tests/unit/advanced/test_insights.py diff --git a/cassandra/cluster.py b/cassandra/cluster.py index e0457c272f..032399cb8d 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -71,7 +71,8 @@ from cassandra.policies import (TokenAwarePolicy, DCAwareRoundRobinPolicy, SimpleConvictionPolicy, ExponentialReconnectionPolicy, HostDistance, RetryPolicy, IdentityTranslator, NoSpeculativeExecutionPlan, - NoSpeculativeExecutionPolicy, DSELoadBalancingPolicy) + NoSpeculativeExecutionPolicy, DSELoadBalancingPolicy, + NeverRetryPolicy) from cassandra.pool import (Host, _ReconnectionHandler, _HostReconnectionHandler, HostConnectionPool, HostConnection, NoConnectionsAvailable) diff --git a/tests/unit/advanced/__init__.py b/tests/unit/advanced/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit/advanced/test_insights.py b/tests/unit/advanced/test_insights.py new file mode 100644 index 0000000000..b200761a4d --- /dev/null +++ b/tests/unit/advanced/test_insights.py @@ -0,0 +1,303 @@ +# Copyright DataStax, Inc. +# +# Licensed under the DataStax DSE Driver License; +# you may not use this file except in compliance with the License. +# +# You may obtain a copy of the License at +# +# http://www.datastax.com/terms/datastax-dse-driver-license-terms + + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +import logging +from mock import sentinel +import sys + +from cassandra import ConsistencyLevel +from cassandra.cluster import ( + ExecutionProfile, GraphExecutionProfile, ProfileManager, + GraphAnalyticsExecutionProfile, + EXEC_PROFILE_DEFAULT, EXEC_PROFILE_GRAPH_DEFAULT, + EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT, + EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT +) +from cassandra.datastax.graph.query import GraphOptions +from cassandra.datastax.insights.registry import insights_registry +from cassandra.datastax.insights.serializers import initialize_registry +from cassandra.datastax.insights.util import namespace +from cassandra.policies import ( + RoundRobinPolicy, + LoadBalancingPolicy, + DCAwareRoundRobinPolicy, + TokenAwarePolicy, + WhiteListRoundRobinPolicy, + HostFilterPolicy, + ConstantReconnectionPolicy, + ExponentialReconnectionPolicy, + RetryPolicy, + SpeculativeExecutionPolicy, + ConstantSpeculativeExecutionPolicy, + WrapperPolicy +) + + +log = logging.getLogger(__name__) + +initialize_registry(insights_registry) + + +class TestGetConfig(unittest.TestCase): + + def test_invalid_object(self): + class NoConfAsDict(object): + pass + + obj = NoConfAsDict() + + ns = 'tests.unit.advanced.test_insights' + if sys.version_info > (3,): + ns += '.TestGetConfig.test_invalid_object.' + + # no default + # ... as a policy + self.assertEqual(insights_registry.serialize(obj, policy=True), + {'type': 'NoConfAsDict', + 'namespace': ns, + 'options': {}}) + # ... not as a policy (default) + self.assertEqual(insights_registry.serialize(obj), + {'type': 'NoConfAsDict', + 'namespace': ns, + }) + # with default + self.assertIs(insights_registry.serialize(obj, default=sentinel.attr_err_default), + sentinel.attr_err_default) + + def test_successful_return(self): + + class SuperclassSentinel(object): + pass + + class SubclassSentinel(SuperclassSentinel): + pass + + @insights_registry.register_serializer_for(SuperclassSentinel) + def superclass_sentinel_serializer(obj): + return sentinel.serialized_superclass + + self.assertIs(insights_registry.serialize(SuperclassSentinel()), + sentinel.serialized_superclass) + self.assertIs(insights_registry.serialize(SubclassSentinel()), + sentinel.serialized_superclass) + + # with default -- same behavior + self.assertIs(insights_registry.serialize(SubclassSentinel(), default=object()), + sentinel.serialized_superclass) + +class TestConfigAsDict(unittest.TestCase): + + # graph/query.py + def test_graph_options(self): + self.maxDiff = None + + go = GraphOptions(graph_name='name_for_test', + graph_source='source_for_test', + graph_language='lang_for_test', + graph_protocol='protocol_for_test', + graph_read_consistency_level=ConsistencyLevel.ANY, + graph_write_consistency_level=ConsistencyLevel.ONE, + graph_invalid_option='invalid') + + log.debug(go._graph_options) + + self.assertEqual( + insights_registry.serialize(go), + {'source': 'source_for_test', + 'language': 'lang_for_test', + 'graphProtocol': 'protocol_for_test', + # no graph_invalid_option + } + ) + + # cluster.py + def test_execution_profile(self): + self.maxDiff = None + self.assertEqual( + insights_registry.serialize(ExecutionProfile()), + {'consistency': 'LOCAL_ONE', + 'continuousPagingOptions': None, + 'loadBalancing': {'namespace': 'cassandra.policies', + 'options': {'child_policy': {'namespace': 'cassandra.policies', + 'options': {'local_dc': '', + 'used_hosts_per_remote_dc': 0}, + 'type': 'DCAwareRoundRobinPolicy'}, + 'shuffle_replicas': False}, + 'type': 'TokenAwarePolicy'}, + 'readTimeout': 10.0, + 'retry': {'namespace': 'cassandra.policies', 'options': {}, 'type': 'RetryPolicy'}, + 'serialConsistency': None, + 'speculativeExecution': {'namespace': 'cassandra.policies', + 'options': {}, 'type': 'NoSpeculativeExecutionPolicy'}, + 'graphOptions': None + } + ) + + def test_graph_execution_profile(self): + self.maxDiff = None + self.assertEqual( + insights_registry.serialize(GraphExecutionProfile()), + {'consistency': 'LOCAL_ONE', + 'continuousPagingOptions': None, + 'loadBalancing': {'namespace': 'cassandra.policies', + 'options': {'child_policy': {'namespace': 'cassandra.policies', + 'options': {'local_dc': '', + 'used_hosts_per_remote_dc': 0}, + 'type': 'DCAwareRoundRobinPolicy'}, + 'shuffle_replicas': False}, + 'type': 'TokenAwarePolicy'}, + 'readTimeout': 30.0, + 'retry': {'namespace': 'cassandra.policies', 'options': {}, 'type': 'NeverRetryPolicy'}, + 'serialConsistency': None, + 'speculativeExecution': {'namespace': 'cassandra.policies', + 'options': {}, 'type': 'NoSpeculativeExecutionPolicy'}, + 'graphOptions': {'graphProtocol': 'graphson-1.0', + 'language': 'gremlin-groovy', + 'source': 'g'}, + } + ) + + def test_graph_analytics_execution_profile(self): + self.maxDiff = None + self.assertEqual( + insights_registry.serialize(GraphAnalyticsExecutionProfile()), + {'consistency': 'LOCAL_ONE', + 'continuousPagingOptions': None, + 'loadBalancing': {'namespace': 'cassandra.policies', + 'options': {'child_policy': {'namespace': 'cassandra.policies', + 'options': {'child_policy': {'namespace': 'cassandra.policies', + 'options': {'local_dc': '', + 'used_hosts_per_remote_dc': 0}, + 'type': 'DCAwareRoundRobinPolicy'}, + 'shuffle_replicas': False}, + 'type': 'TokenAwarePolicy'}}, + 'type': 'DSELoadBalancingPolicy'}, + 'readTimeout': 604800.0, + 'retry': {'namespace': 'cassandra.policies', 'options': {}, 'type': 'NeverRetryPolicy'}, + 'serialConsistency': None, + 'speculativeExecution': {'namespace': 'cassandra.policies', + 'options': {}, 'type': 'NoSpeculativeExecutionPolicy'}, + 'graphOptions': {'graphProtocol': 'graphson-1.0', + 'language': 'gremlin-groovy', + 'source': 'a'}, + } + ) + + # policies.py + def test_DC_aware_round_robin_policy(self): + self.assertEqual( + insights_registry.serialize(DCAwareRoundRobinPolicy()), + {'namespace': 'cassandra.policies', + 'options': {'local_dc': '', 'used_hosts_per_remote_dc': 0}, + 'type': 'DCAwareRoundRobinPolicy'} + ) + self.assertEqual( + insights_registry.serialize(DCAwareRoundRobinPolicy(local_dc='fake_local_dc', + used_hosts_per_remote_dc=15)), + {'namespace': 'cassandra.policies', + 'options': {'local_dc': 'fake_local_dc', 'used_hosts_per_remote_dc': 15}, + 'type': 'DCAwareRoundRobinPolicy'} + ) + + def test_token_aware_policy(self): + self.assertEqual( + insights_registry.serialize(TokenAwarePolicy(child_policy=LoadBalancingPolicy())), + {'namespace': 'cassandra.policies', + 'options': {'child_policy': {'namespace': 'cassandra.policies', + 'options': {}, + 'type': 'LoadBalancingPolicy'}, + 'shuffle_replicas': False}, + 'type': 'TokenAwarePolicy'} + ) + + def test_whitelist_round_robin_policy(self): + self.assertEqual( + insights_registry.serialize(WhiteListRoundRobinPolicy(['123.123'])), + {'namespace': 'cassandra.policies', + 'options': {'allowed_hosts': ('123.123',)}, + 'type': 'WhiteListRoundRobinPolicy'} + ) + + def test_host_filter_policy(self): + def my_predicate(s): + return False + + self.assertEqual( + insights_registry.serialize(HostFilterPolicy(LoadBalancingPolicy(), my_predicate)), + {'namespace': 'cassandra.policies', + 'options': {'child_policy': {'namespace': 'cassandra.policies', + 'options': {}, + 'type': 'LoadBalancingPolicy'}, + 'predicate': 'my_predicate'}, + 'type': 'HostFilterPolicy'} + ) + + def test_constant_reconnection_policy(self): + self.assertEqual( + insights_registry.serialize(ConstantReconnectionPolicy(3, 200)), + {'type': 'ConstantReconnectionPolicy', + 'namespace': 'cassandra.policies', + 'options': {'delay': 3, 'max_attempts': 200} + } + ) + + def test_exponential_reconnection_policy(self): + self.assertEqual( + insights_registry.serialize(ExponentialReconnectionPolicy(4, 100, 10)), + {'type': 'ExponentialReconnectionPolicy', + 'namespace': 'cassandra.policies', + 'options': {'base_delay': 4, 'max_delay': 100, 'max_attempts': 10} + } + ) + + def test_retry_policy(self): + self.assertEqual( + insights_registry.serialize(RetryPolicy()), + {'type': 'RetryPolicy', + 'namespace': 'cassandra.policies', + 'options': {} + } + ) + + def test_spec_exec_policy(self): + self.assertEqual( + insights_registry.serialize(SpeculativeExecutionPolicy()), + {'type': 'SpeculativeExecutionPolicy', + 'namespace': 'cassandra.policies', + 'options': {} + } + ) + + def test_constant_spec_exec_policy(self): + self.assertEqual( + insights_registry.serialize(ConstantSpeculativeExecutionPolicy(100, 101)), + {'type': 'ConstantSpeculativeExecutionPolicy', + 'namespace': 'cassandra.policies', + 'options': {'delay': 100, + 'max_attempts': 101} + } + ) + + def test_wrapper_policy(self): + self.assertEqual( + insights_registry.serialize(WrapperPolicy(LoadBalancingPolicy())), + {'namespace': 'cassandra.policies', + 'options': {'child_policy': {'namespace': 'cassandra.policies', + 'options': {}, + 'type': 'LoadBalancingPolicy'} + }, + 'type': 'WrapperPolicy'} + ) From f845a5b65c206bcfe53f9c6ea819109c9030c5da Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 30 Aug 2019 09:30:46 -0500 Subject: [PATCH 1604/2005] [PYTHON-1009] Support execution profiles in cqlengine --- CHANGELOG.rst | 8 ++ cassandra/cqlengine/connection.py | 25 +++--- .../cqlengine/connections/test_connection.py | 81 +++++++++++++++++-- tests/unit/cqlengine/test_connection.py | 7 +- 4 files changed, 104 insertions(+), 17 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 040ea29028..adbe0879f9 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,11 @@ +3.20.0 +====== +Unreleased + +Bug Fixes +--------- +* Connection setup methods prevent using ExecutionProfile in cqlengine (PYTHON-1009) + 3.19.0 ====== August 26, 2019 diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 9322e4b96a..884e04ed74 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -17,7 +17,7 @@ import six import threading -from cassandra.cluster import Cluster, _NOT_SET, NoHostAvailable, UserTypeDoesNotExist, ConsistencyLevel +from cassandra.cluster import Cluster, _ConfigMode, _NOT_SET, NoHostAvailable, UserTypeDoesNotExist, ConsistencyLevel from cassandra.query import SimpleStatement, dict_factory from cassandra.cqlengine import CQLEngineException @@ -108,9 +108,6 @@ def setup(self): self.lazy_connect = True raise - if self.consistency is not None: - self.session.default_consistency_level = self.consistency - if DEFAULT_CONNECTION in _connections and _connections[DEFAULT_CONNECTION] == self: cluster = _connections[DEFAULT_CONNECTION].cluster session = _connections[DEFAULT_CONNECTION].session @@ -118,7 +115,14 @@ def setup(self): self.setup_session() def setup_session(self): - self.session.row_factory = dict_factory + if self.cluster._config_mode == _ConfigMode.PROFILES: + self.cluster.profile_manager.default.row_factory = dict_factory + if self.consistency is not None: + self.cluster.profile_manager.default.consistency_level = self.consistency + else: + self.session.row_factory = dict_factory + if self.consistency is not None: + self.session.default_consistency_level = self.consistency enc = self.session.encoder enc.mapping[tuple] = enc.cql_encode_tuple _register_known_types(self.session.cluster) @@ -182,10 +186,7 @@ def register_connection(name, hosts=None, consistency=None, lazy_connect=False, "Session configuration arguments and 'session' argument are mutually exclusive" ) conn = Connection.from_session(name, session=session) - conn.setup_session() else: # use hosts argument - if consistency is None: - consistency = ConsistencyLevel.LOCAL_ONE conn = Connection( name, hosts=hosts, consistency=consistency, lazy_connect=lazy_connect, @@ -281,8 +282,12 @@ def set_session(s): if conn.session: log.warning("configuring new default session for cqlengine when one was already set") - if s.row_factory is not dict_factory: - raise CQLEngineException("Failed to initialize: 'Session.row_factory' must be 'dict_factory'.") + if not any([ + s.cluster.profile_manager.default.row_factory is dict_factory and s.cluster._config_mode in [_ConfigMode.PROFILES, _ConfigMode.UNCOMMITTED], + s.row_factory is dict_factory and s.cluster._config_mode in [_ConfigMode.LEGACY, _ConfigMode.UNCOMMITTED], + ]): + raise CQLEngineException("Failed to initialize: row_factory must be 'dict_factory'") + conn.session = s conn.cluster = s.cluster diff --git a/tests/integration/cqlengine/connections/test_connection.py b/tests/integration/cqlengine/connections/test_connection.py index c10a56e66b..bbc0231565 100644 --- a/tests/integration/cqlengine/connections/test_connection.py +++ b/tests/integration/cqlengine/connections/test_connection.py @@ -18,18 +18,17 @@ import unittest # noqa +from cassandra import ConsistencyLevel from cassandra.cqlengine.models import Model -from cassandra.cqlengine import columns, connection +from cassandra.cqlengine import columns, connection, models from cassandra.cqlengine.management import sync_table -from cassandra.cluster import Cluster, _clusters_for_shutdown +from cassandra.cluster import Cluster, ExecutionProfile, _clusters_for_shutdown, _ConfigMode, EXEC_PROFILE_DEFAULT +from cassandra.policies import RoundRobinPolicy from cassandra.query import dict_factory -from tests.integration import PROTOCOL_VERSION, execute_with_long_wait_retry, local +from tests.integration import CASSANDRA_IP, PROTOCOL_VERSION, execute_with_long_wait_retry, local from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration.cqlengine import DEFAULT_KEYSPACE, setup_connection -from cassandra.cqlengine import models - -from mock import patch class TestConnectModel(Model): @@ -127,3 +126,73 @@ def test_connection_session_switch(self): connection.set_session(self.session2) self.assertEqual(1, TestConnectModel.objects.count()) self.assertEqual(TestConnectModel.objects.first(), TCM2) + + +class ConnectionModel(Model): + key = columns.Integer(primary_key=True) + some_data = columns.Text() + + +class ConnectionInitTest(unittest.TestCase): + def test_default_connection_uses_legacy(self): + connection.default() + conn = connection.get_connection() + self.assertEqual(conn.cluster._config_mode, _ConfigMode.LEGACY) + + def test_connection_with_legacy_settings(self): + connection.setup( + hosts=[CASSANDRA_IP], + default_keyspace=DEFAULT_KEYSPACE, + consistency=ConsistencyLevel.LOCAL_ONE + ) + conn = connection.get_connection() + self.assertEqual(conn.cluster._config_mode, _ConfigMode.LEGACY) + + def test_connection_from_session_with_execution_profile(self): + cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=dict_factory)}) + session = cluster.connect() + connection.default() + connection.set_session(session) + conn = connection.get_connection() + self.assertEqual(conn.cluster._config_mode, _ConfigMode.PROFILES) + + def test_connection_from_session_with_legacy_settings(self): + cluster = Cluster(load_balancing_policy=RoundRobinPolicy()) + session = cluster.connect() + session.row_factory = dict_factory + connection.set_session(session) + conn = connection.get_connection() + self.assertEqual(conn.cluster._config_mode, _ConfigMode.LEGACY) + + def test_uncommitted_session_uses_legacy(self): + cluster = Cluster() + session = cluster.connect() + session.row_factory = dict_factory + connection.set_session(session) + conn = connection.get_connection() + self.assertEqual(conn.cluster._config_mode, _ConfigMode.LEGACY) + + def test_legacy_insert_query(self): + connection.setup( + hosts=[CASSANDRA_IP], + default_keyspace=DEFAULT_KEYSPACE, + consistency=ConsistencyLevel.LOCAL_ONE + ) + self.assertEqual(connection.get_connection().cluster._config_mode, _ConfigMode.LEGACY) + + sync_table(ConnectionModel) + ConnectionModel.objects.create(key=0, some_data='text0') + ConnectionModel.objects.create(key=1, some_data='text1') + self.assertEqual(ConnectionModel.objects(key=0)[0].some_data, 'text0') + + def test_execution_profile_insert_query(self): + cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=dict_factory)}) + session = cluster.connect() + connection.default() + connection.set_session(session) + self.assertEqual(connection.get_connection().cluster._config_mode, _ConfigMode.PROFILES) + + sync_table(ConnectionModel) + ConnectionModel.objects.create(key=0, some_data='text0') + ConnectionModel.objects.create(key=1, some_data='text1') + self.assertEqual(ConnectionModel.objects(key=0)[0].some_data, 'text0') diff --git a/tests/unit/cqlengine/test_connection.py b/tests/unit/cqlengine/test_connection.py index 3efff57369..9f8e500c6b 100644 --- a/tests/unit/cqlengine/test_connection.py +++ b/tests/unit/cqlengine/test_connection.py @@ -17,6 +17,7 @@ except ImportError: import unittest # noqa +from cassandra.cluster import _ConfigMode from cassandra.cqlengine import connection from cassandra.query import dict_factory @@ -38,9 +39,13 @@ def test_set_session_without_existing_connection(self): """ Users can set the default session without having a default connection set. """ + mock_cluster = Mock( + _config_mode=_ConfigMode.LEGACY, + ) mock_session = Mock( row_factory=dict_factory, - encoder=Mock(mapping={}) + encoder=Mock(mapping={}), + cluster=mock_cluster, ) connection.set_session(mock_session) From b68e2a6316c50737e5af979351e04f2b82c362ef Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 11 Sep 2019 09:41:49 -0500 Subject: [PATCH 1605/2005] Fix cloud formatting in getting started doc again --- docs/getting_started.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 042a33fe61..487d85aaef 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -407,7 +407,8 @@ level on that: Connecting to DataStax Cloud ---------------------------- 1. Download the secure connect bundle from your DataStax Constellation account. -2. Connect to your cloud cluster with:: +2. Connect to your cloud cluster with + .. code-block:: python from cassandra.cluster import Cluster From 6ebe0d854870866c856ab8b7ee1a7670e8da690a Mon Sep 17 00:00:00 2001 From: nick Date: Wed, 11 Sep 2019 17:13:23 +0200 Subject: [PATCH 1606/2005] Fix integration tests C* 3.11 does not exist, changed to 3.11.4 ccm_options['version'] can somehow get messed up and be str instead of Version object --- tests/integration/__init__.py | 5 ++++- tests/integration/upgrade/test_upgrade.py | 4 ++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 02f9851477..08a4fc98ce 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -126,7 +126,7 @@ def _get_dse_version_from_cass(cass_version): CASSANDRA_IP = os.getenv('CASSANDRA_IP', '127.0.0.1') CASSANDRA_DIR = os.getenv('CASSANDRA_DIR', None) -default_cassandra_version = '3.11' +default_cassandra_version = '3.11.4' cv_string = os.getenv('CASSANDRA_VERSION', default_cassandra_version) mcv_string = os.getenv('MAPPED_CASSANDRA_VERSION', None) try: @@ -366,6 +366,9 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, elif ccm_options is None: ccm_options = CCM_KWARGS.copy() + if 'version' in ccm_options and not isinstance(ccm_options['version'], Version): + ccm_options['version'] = Version(ccm_options['version']) + cassandra_version = ccm_options.get('version', CCM_VERSION) dse_version = ccm_options.get('version', DSE_VERSION) diff --git a/tests/integration/upgrade/test_upgrade.py b/tests/integration/upgrade/test_upgrade.py index 1e4fbb2e92..7fa88a9a3f 100644 --- a/tests/integration/upgrade/test_upgrade.py +++ b/tests/integration/upgrade/test_upgrade.py @@ -27,7 +27,7 @@ two_to_three_path = upgrade_paths([ - UpgradePath("2.2.9-3.11", {"version": "2.2.9"}, {"version": "3.11"}, {}), + UpgradePath("2.2.9-3.11", {"version": "2.2.9"}, {"version": "3.11.4"}, {}), ]) class UpgradeTests(UpgradeBase): @two_to_three_path @@ -176,7 +176,7 @@ def _assert_same_token_map(self, original, new): two_to_three_with_auth_path = upgrade_paths([ - UpgradePath("2.2.9-3.11-auth", {"version": "2.2.9"}, {"version": "3.11"}, + UpgradePath("2.2.9-3.11-auth", {"version": "2.2.9"}, {"version": "3.11.4"}, {'authenticator': 'PasswordAuthenticator', 'authorizer': 'CassandraAuthorizer'}), ]) From ae20796bcf25cd79ee7725afff97b01e657c428e Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 11 Sep 2019 10:34:21 -0500 Subject: [PATCH 1607/2005] adding cqlengine --- cassandra/cqlengine/models.py | 2 +- cassandra/cqlengine/usertype.py | 14 ++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 9fe5d3e642..b3c7c9e37f 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -504,7 +504,7 @@ def _can_update(self): if not self._is_persisted: return False - return all(not self._values[k].changed for k in self._primary_keys) + return all([not self._values[k].changed for k in self._primary_keys]) @classmethod def _get_keyspace(cls): diff --git a/cassandra/cqlengine/usertype.py b/cassandra/cqlengine/usertype.py index adf3f5e95b..155068d99e 100644 --- a/cassandra/cqlengine/usertype.py +++ b/cassandra/cqlengine/usertype.py @@ -1,3 +1,17 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import re import six From bb8e82783de92fdeb9beb1a7168287945b6b4c0a Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 11 Sep 2019 12:01:15 -0500 Subject: [PATCH 1608/2005] More unit tests --- setup.py | 2 +- tests/unit/test_cluster.py | 9 +++++++-- tests/unit/test_control_connection.py | 2 +- tests/unit/test_metadata.py | 5 ++++- tests/unit/test_response_future.py | 2 +- 5 files changed, 14 insertions(+), 6 deletions(-) diff --git a/setup.py b/setup.py index 1e1eb0adb3..51cbffa3c3 100644 --- a/setup.py +++ b/setup.py @@ -307,7 +307,7 @@ def _setup_extensions(self): try: from Cython.Build import cythonize cython_candidates = ['cluster', 'concurrent', 'connection', 'cqltypes', 'metadata', - 'pool', 'hosts', 'protocol', 'query', 'util'] + 'pool', 'protocol', 'query', 'util'] compile_args = [] if is_windows else ['-Wno-unused-function'] self.extensions.extend(cythonize( [Extension('cassandra.%s' % m, ['cassandra/%s.py' % m], diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 66799fb393..9478575cbf 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -195,13 +195,18 @@ def test_default_serial_consistency_level_legacy(self, *_): class ProtocolVersionTests(unittest.TestCase): def test_protocol_downgrade_test(self): - + lower = ProtocolVersion.get_lower_supported(ProtocolVersion.DSE_V2) + self.assertEqual(ProtocolVersion.DSE_V1, lower) lower = ProtocolVersion.get_lower_supported(ProtocolVersion.DSE_V1) self.assertEqual(ProtocolVersion.V4,lower) lower = ProtocolVersion.get_lower_supported(ProtocolVersion.V4) self.assertEqual(ProtocolVersion.V3,lower) lower = ProtocolVersion.get_lower_supported(ProtocolVersion.V3) - self.assertEqual(0,lower) + self.assertEqual(ProtocolVersion.V2,lower) + lower = ProtocolVersion.get_lower_supported(ProtocolVersion.V2) + self.assertEqual(ProtocolVersion.V1, lower) + lower = ProtocolVersion.get_lower_supported(ProtocolVersion.V1) + self.assertEqual(0, lower) self.assertTrue(ProtocolVersion.uses_error_code_map(ProtocolVersion.DSE_V1)) self.assertTrue(ProtocolVersion.uses_int_query_flags(ProtocolVersion.DSE_V1)) diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index c2b6b7b4c5..db194fe0c7 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -25,7 +25,7 @@ from cassandra import OperationTimedOut, SchemaTargetType, SchemaChangeType from cassandra.protocol import ResultMessage, RESULT_KIND_ROWS from cassandra.cluster import ControlConnection, _Scheduler, ProfileManager, EXEC_PROFILE_DEFAULT, ExecutionProfile -from cassandra.hosts import Host +from cassandra.pool import Host from cassandra.connection import EndPoint, DefaultEndPoint, DefaultEndPointFactory from cassandra.policies import (SimpleConvictionPolicy, RoundRobinPolicy, ConstantReconnectionPolicy, IdentityTranslator) diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 44365a9ee1..0ab64a4fcc 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -472,7 +472,10 @@ def test_as_cql_query_name_escaping(self): class UserDefinedFunctionTest(unittest.TestCase): def test_as_cql_query_removes_frozen(self): - func = Function("ks1", "myfunction", ["frozen>"], ["a"], "int", "java", "return 0;", True) + func = Function( + "ks1", "myfunction", ["frozen>"], ["a"], + "int", "java", "return 0;", True, False, False, False + ) expected_result = ( "CREATE FUNCTION ks1.myfunction(a tuple) " "CALLED ON NULL INPUT " diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index ae8c220669..c8d65a0f2a 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -38,7 +38,7 @@ class ResponseFutureTests(unittest.TestCase): def make_basic_session(self): s = Mock(spec=Session) - s.cluster._default_row_factory = lambda col_names, rows: [(col_names, rows)] + s.row_factory = lambda col_names, rows: [(col_names, rows)] return s def make_pool(self): From 5d94357a1c404b99392d6153b88f6029de8b126f Mon Sep 17 00:00:00 2001 From: nick Date: Thu, 12 Sep 2019 09:55:39 +0200 Subject: [PATCH 1609/2005] Added unit test --- tests/unit/test_response_future.py | 35 +++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index 08a1f8b108..36916c130c 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -19,7 +19,7 @@ from mock import Mock, MagicMock, ANY -from cassandra import ConsistencyLevel, Unavailable, SchemaTargetType, SchemaChangeType +from cassandra import ConsistencyLevel, Unavailable, SchemaTargetType, SchemaChangeType, OperationTimedOut from cassandra.cluster import Session, ResponseFuture, NoHostAvailable, ProtocolVersion from cassandra.connection import Connection, ConnectionException from cassandra.protocol import (ReadTimeoutErrorMessage, WriteTimeoutErrorMessage, @@ -125,6 +125,39 @@ def test_other_result_message_kind(self): rf._set_result(None, None, None, Mock(spec=ResultMessage, kind=999, results=result)) self.assertListEqual(list(rf.result()), result) + def test_heartbeat_defunct_deadlock(self): + """ + Heartbeat defuncts all connections and clears request queues. Response future times out and even + if it has been removed from request queue, timeout exception must be thrown. Otherwise event loop + will deadlock on eventual ResponseFuture.result() call. + + PYTHON-1044 + """ + + connection = MagicMock(spec=Connection) + connection._requests = {} + + pool = Mock() + pool.is_shutdown = False + pool.borrow_connection.return_value = [connection, 1] + + session = self.make_basic_session() + session.cluster._default_load_balancing_policy.make_query_plan.return_value = [Mock(), Mock()] + session._pools.get.return_value = pool + + query = SimpleStatement("SELECT * FROM foo") + message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) + + rf = ResponseFuture(session, message, query, 1) + rf.send_request() + + # Simulate Connection.error_all_requests() after heartbeat defuncts + connection._requests = {} + + # Simulate ResponseFuture timing out + rf._on_timeout() + self.assertRaises(OperationTimedOut, rf.result) + def test_read_timeout_error_message(self): session = self.make_session() query = SimpleStatement("SELECT * FROM foo") From 94736318efc47e68d27e7fb3b04830749c193468 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 12 Sep 2019 16:23:56 -0400 Subject: [PATCH 1610/2005] Add graph tests --- cassandra/cluster.py | 12 +- cassandra/datastax/graph/graphson.py | 5 +- cassandra/datastax/graph/query.py | 6 + cassandra/datastax/graph/types.py | 5 +- cassandra/graph/__init__.py | 16 + cassandra/graph/graphson.py | 16 + cassandra/graph/query.py | 16 + cassandra/graph/types.py | 16 + tests/integration/advanced/__init__.py | 692 +++++++++++++++++++++ tests/integration/advanced/test_graph.py | 746 +++++++++++++++++++++++ tests/unit/advanced/test_graph.py | 400 ++++++++++++ 11 files changed, 1927 insertions(+), 3 deletions(-) create mode 100644 cassandra/graph/__init__.py create mode 100644 cassandra/graph/graphson.py create mode 100644 cassandra/graph/query.py create mode 100644 cassandra/graph/types.py create mode 100644 tests/integration/advanced/__init__.py create mode 100644 tests/integration/advanced/test_graph.py create mode 100644 tests/unit/advanced/test_graph.py diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 032399cb8d..cf92a9cfa2 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1162,6 +1162,7 @@ def __init__(self, Session._default_serial_consistency_level, Session._default_timeout, Session._row_factory) + # legacy mode if either of these is not default if load_balancing_policy or default_retry_policy: if execution_profiles: @@ -1173,10 +1174,19 @@ def __init__(self, "execution profiles.", DeprecationWarning) else: + profiles = self.profile_manager.profiles if execution_profiles: - self.profile_manager.profiles.update(execution_profiles) + profiles.update(execution_profiles) self._config_mode = _ConfigMode.PROFILES + # TODO should be changed to Default... + lbp = DSELoadBalancingPolicy(self.profile_manager.default.load_balancing_policy) + profiles.setdefault(EXEC_PROFILE_GRAPH_DEFAULT, GraphExecutionProfile(load_balancing_policy=lbp)) + profiles.setdefault(EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT, + GraphExecutionProfile(load_balancing_policy=lbp, request_timeout=60. * 3.)) + profiles.setdefault(EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT, + GraphAnalyticsExecutionProfile(load_balancing_policy=lbp)) + if self._contact_points_explicit: if self._config_mode is _ConfigMode.PROFILES: default_lbp_profiles = self.profile_manager._profiles_without_explicit_lbps() diff --git a/cassandra/datastax/graph/graphson.py b/cassandra/datastax/graph/graphson.py index 8cc5561ba8..620adf045e 100644 --- a/cassandra/datastax/graph/graphson.py +++ b/cassandra/datastax/graph/graphson.py @@ -30,7 +30,10 @@ __all__ = ['GraphSON1Serializer', 'GraphSON1Deserializer', 'GraphSON1TypeDeserializer', 'GraphSON2Serializer', 'GraphSON2Deserializer', - 'GraphSON2Reader'] + 'GraphSON2Reader', 'BooleanTypeIO', 'Int16TypeIO', 'Int32TypeIO', 'DoubleTypeIO', + 'FloatTypeIO', 'UUIDTypeIO', 'BigDecimalTypeIO', 'DurationTypeIO', 'InetTypeIO', + 'InstantTypeIO', 'LocalDateTypeIO', 'LocalTimeTypeIO', 'Int64TypeIO', 'BigIntegerTypeIO', + 'LocalDateTypeIO', 'PolygonTypeIO', 'PointTypeIO', 'LineStringTypeIO', 'BlobTypeIO'] """ Supported types: diff --git a/cassandra/datastax/graph/query.py b/cassandra/datastax/graph/query.py index 657068fbff..0ebeeb28d3 100644 --- a/cassandra/datastax/graph/query.py +++ b/cassandra/datastax/graph/query.py @@ -23,6 +23,12 @@ from cassandra.datastax.graph.graphson import GraphSON2Reader +__all__ = [ + 'GraphProtocol', 'GraphOptions', 'GraphStatement', 'SimpleGraphStatement', + 'single_object_row_factory', 'graph_result_row_factory', 'graph_object_row_factory', + 'graph_graphson2_row_factory', 'Result' +] + # (attr, description, server option) _graph_options = ( ('graph_name', 'name of the targeted graph.', 'graph-name'), diff --git a/cassandra/datastax/graph/types.py b/cassandra/datastax/graph/types.py index 9b06a0609c..ae22cd4bfe 100644 --- a/cassandra/datastax/graph/types.py +++ b/cassandra/datastax/graph/types.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +__all__ = ['Element', 'Vertex', 'Edge', 'VertexProperty', 'Path'] + class Element(object): @@ -143,8 +145,9 @@ class Path(object): """ def __init__(self, labels, objects): + # TODO fix next major # The Path class should not do any deserialization by itself. To fix in the next major. - from cassandra.datastax.graph import _graph_object_sequence + from cassandra.datastax.graph.query import _graph_object_sequence self.labels = labels self.objects = list(_graph_object_sequence(objects)) diff --git a/cassandra/graph/__init__.py b/cassandra/graph/__init__.py new file mode 100644 index 0000000000..51bd1de16a --- /dev/null +++ b/cassandra/graph/__init__.py @@ -0,0 +1,16 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This is only for backward compatibility when migrating from dse-driver. +from cassandra.datastax.graph import * \ No newline at end of file diff --git a/cassandra/graph/graphson.py b/cassandra/graph/graphson.py new file mode 100644 index 0000000000..d37c172a6b --- /dev/null +++ b/cassandra/graph/graphson.py @@ -0,0 +1,16 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This is only for backward compatibility when migrating from dse-driver. +from cassandra.datastax.graph.graphson import * diff --git a/cassandra/graph/query.py b/cassandra/graph/query.py new file mode 100644 index 0000000000..50eef72ad0 --- /dev/null +++ b/cassandra/graph/query.py @@ -0,0 +1,16 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This is only for backward compatibility when migrating from dse-driver. +from cassandra.datastax.graph.query import * diff --git a/cassandra/graph/types.py b/cassandra/graph/types.py new file mode 100644 index 0000000000..c8b613f8e4 --- /dev/null +++ b/cassandra/graph/types.py @@ -0,0 +1,16 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This is only for backward compatibility when migrating from dse-driver. +from cassandra.datastax.graph.types import * diff --git a/tests/integration/advanced/__init__.py b/tests/integration/advanced/__init__.py new file mode 100644 index 0000000000..119a9faa39 --- /dev/null +++ b/tests/integration/advanced/__init__.py @@ -0,0 +1,692 @@ +# Copyright DataStax, Inc. +# +# Licensed under the DataStax DSE Driver License; +# you may not use this file except in compliance with the License. +# +# You may obtain a copy of the License at +# +# http://www.datastax.com/terms/datastax-dse-driver-license-terms + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +from six.moves.urllib.request import build_opener, Request, HTTPHandler +import sys +import re +import os +import time +from os.path import expanduser +from uuid import UUID +from decimal import Decimal +from ccmlib import common +import datetime +import six +from packaging.version import Version + +from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_DEFAULT, EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT + +from tests.integration import PROTOCOL_VERSION, DSE_VERSION, get_server_versions, BasicKeyspaceUnitTestCase, \ + drop_keyspace_shutdown_cluster, get_node, USE_CASS_EXTERNAL +from tests.integration import use_singledc, use_single_node, wait_for_node_socket +from cassandra.protocol import ServerError +from cassandra.util import Point, LineString, Polygon +from cassandra.graph import Edge, Vertex, Path +from cassandra.graph import GraphSON1Deserializer +from cassandra.graph.graphson import InetTypeIO +from cassandra.datastax.graph.query import _graphson2_reader +from cassandra.cluster import (GraphAnalyticsExecutionProfile, GraphExecutionProfile, + EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT, default_lbp_factory) +from cassandra.policies import DSELoadBalancingPolicy +from cassandra.graph.query import GraphOptions, GraphProtocol, graph_graphson2_row_factory + +home = expanduser('~') + + +# Home directory of the Embedded Apache Directory Server to use +ADS_HOME = os.getenv('ADS_HOME', home) +MAKE_STRICT = "schema.config().option('graph.schema_mode').set('production')" +MAKE_NON_STRICT = "schema.config().option('graph.schema_mode').set('development')" +ALLOW_SCANS = "schema.config().option('graph.allow_scan').set('true')" + +# A map of common types and their corresponding groovy declaration for use in schema creation and insertion +MAX_LONG = 9223372036854775807 +MIN_LONG = -9223372036854775808 +ZERO_LONG = 0 + +if sys.version_info < (3, 0): + MAX_LONG = long(MAX_LONG) + MIN_LONG = long(MIN_LONG) + ZERO_LONG = long(ZERO_LONG) + +deserializers = GraphSON1Deserializer()._deserializers + +TYPE_MAP = {"point1": ["Point()", Point(.5, .13), GraphSON1Deserializer.deserialize_point], + "point2": ["Point()", Point(-5, .0), GraphSON1Deserializer.deserialize_point], + + "linestring1": ["Linestring()", LineString(((1.0, 2.0), (3.0, 4.0), (-89.0, 90.0))), + GraphSON1Deserializer.deserialize_linestring], + "polygon1": ["Polygon()", Polygon([(10.0, 10.0), (80.0, 10.0), (80., 88.0), (10., 89.0), (10., 10.0)], + [[(20., 20.0), (20., 30.0), (30., 30.0), (30., 20.0), (20., 20.0)], + [(40., 20.0), (40., 30.0), (50., 30.0), (50., 20.0), (40., 20.0)]]), + GraphSON1Deserializer.deserialize_polygon], + "smallint1": ["Smallint()", 1, GraphSON1Deserializer.deserialize_smallint], + "varint1": ["Varint()", 2147483647, GraphSON1Deserializer.deserialize_varint], + + "bigint1": ["Bigint()", MAX_LONG, GraphSON1Deserializer.deserialize_bigint], + "bigint2": ["Bigint()", MIN_LONG, GraphSON1Deserializer.deserialize_bigint], + "bigint3": ["Bigint()", ZERO_LONG, GraphSON1Deserializer.deserialize_bigint], + + "int1": ["Int()", 100, GraphSON1Deserializer.deserialize_int], + "float1": ["Float()", .5, GraphSON1Deserializer.deserialize_float], + "double1": ["Double()", .3415681, GraphSON1Deserializer.deserialize_double], + "uuid1": ["Uuid()", UUID('12345678123456781234567812345678'), GraphSON1Deserializer.deserialize_uuid], + "decimal1": ["Decimal()", Decimal(10), GraphSON1Deserializer.deserialize_decimal], + "blob1": ["Blob()", bytearray(b"Hello World"), GraphSON1Deserializer.deserialize_blob], + + "timestamp1": ["Timestamp()", datetime.datetime.now().replace(microsecond=0), + GraphSON1Deserializer.deserialize_timestamp], + "timestamp2": ["Timestamp()", datetime.datetime.max.replace(microsecond=0), + GraphSON1Deserializer.deserialize_timestamp], + # These are valid values but are pending for DSP-14093 to be fixed + #"timestamp3": ["Timestamp()", datetime.datetime(159, 1, 1, 23, 59, 59), + # GraphSON1TypeDeserializer.deserialize_timestamp], + #"timestamp4": ["Timestamp()", datetime.datetime.min, + # GraphSON1TypeDeserializer.deserialize_timestamp], + + "duration1": ["Duration()", datetime.timedelta(1, 16, 0), + GraphSON1Deserializer.deserialize_duration], + "duration2": ["Duration()", datetime.timedelta(days=1, seconds=16, milliseconds=15), + GraphSON1Deserializer.deserialize_duration], + } + + +if six.PY2: + TYPE_MAP["blob3"] = ["Blob()", buffer(b"Hello World"), GraphSON1Deserializer.deserialize_blob] + + TYPE_MAP["inet1"] = ["Inet()", "127.0.0.1", GraphSON1Deserializer.deserialize_inet] + TYPE_MAP["inet2"] = ["Inet()", "2001:db8:85a3:8d3:1319:8a2e:370:7348", GraphSON1Deserializer.deserialize_inet] + +else: + TYPE_MAP["blob4"] = ["Blob()", bytes(b"Hello World Again"), GraphSON1Deserializer.deserialize_blob] + TYPE_MAP["blob5"] = ["Blob()", memoryview(b"And Again Hello World"), GraphSON1Deserializer.deserialize_blob] + + import ipaddress + deserializer_plus_to_ipaddressv4 = lambda x: ipaddress.IPv4Address(GraphSON1Deserializer.deserialize_inet(x)) + deserializer_plus_to_ipaddressv6 = lambda x: ipaddress.IPv6Address(GraphSON1Deserializer.deserialize_inet(x)) + + def generic_ip_deserializer(string_ip_adress): + if ":" in string_ip_adress: + return deserializer_plus_to_ipaddressv6(string_ip_adress) + return deserializer_plus_to_ipaddressv4(string_ip_adress) + + class GenericIpAddressIO(InetTypeIO): + @classmethod + def deserialize(cls, value, reader=None): + return generic_ip_deserializer(value) + + _graphson2_reader.deserializers[GenericIpAddressIO.graphson_type] = GenericIpAddressIO + + TYPE_MAP["inet1"] = ["Inet()", ipaddress.IPv4Address("127.0.0.1"), deserializer_plus_to_ipaddressv4] + TYPE_MAP["inet2"] = ["Inet()", ipaddress.IPv6Address("2001:db8:85a3:8d3:1319:8a2e:370:7348"), + deserializer_plus_to_ipaddressv6] + +if DSE_VERSION >= Version("5.1"): + TYPE_MAP["datetime1"]= ["Date()", datetime.date.today(), GraphSON1Deserializer.deserialize_date] + TYPE_MAP["time1"] = ["Time()", datetime.time(12, 6, 12, 444), GraphSON1Deserializer.deserialize_time] + TYPE_MAP["time2"] = ["Time()", datetime.time(12, 6, 12), GraphSON1Deserializer.deserialize_time] + TYPE_MAP["time3"] = ["Time()", datetime.time(12, 6), GraphSON1Deserializer.deserialize_time] + TYPE_MAP["time4"] = ["Time()", datetime.time.min, GraphSON1Deserializer.deserialize_time] + TYPE_MAP["time5"] = ["Time()", datetime.time.max, GraphSON1Deserializer.deserialize_time] + TYPE_MAP["blob2"] = ["Blob()", bytearray(b"AKDLIElksadlaswqA" * 100000), GraphSON1Deserializer.deserialize_blob] + TYPE_MAP["datetime1"]= ["Date()", datetime.date.today(), GraphSON1Deserializer.deserialize_date] + TYPE_MAP["datetime2"]= ["Date()", datetime.date(159, 1, 3), GraphSON1Deserializer.deserialize_date] + TYPE_MAP["datetime3"]= ["Date()", datetime.date.min, GraphSON1Deserializer.deserialize_date] + TYPE_MAP["datetime4"]= ["Date()", datetime.date.max, GraphSON1Deserializer.deserialize_date] + TYPE_MAP["time1"] = ["Time()", datetime.time(12, 6, 12, 444), GraphSON1Deserializer.deserialize_time] + TYPE_MAP["time2"] = ["Time()", datetime.time(12, 6, 12), GraphSON1Deserializer.deserialize_time] + TYPE_MAP["time3"] = ["Time()", datetime.time(12, 6), GraphSON1Deserializer.deserialize_time] + TYPE_MAP["time4"] = ["Time()", datetime.time.min, GraphSON1Deserializer.deserialize_time] + TYPE_MAP["time5"] = ["Time()", datetime.time.max, GraphSON1Deserializer.deserialize_time] + TYPE_MAP["blob2"] = ["Blob()", bytearray(b"AKDLIElksadlaswqA" * 100000), GraphSON1Deserializer.deserialize_blob] + +def find_spark_master(session): + + # Itterate over the nodes the one with port 7080 open is the spark master + for host in session.hosts: + ip = host.address + port = 7077 + spark_master = (ip, port) + if common.check_socket_listening(spark_master, timeout=3): + return spark_master[0] + return None + + +def wait_for_spark_workers(num_of_expected_workers, timeout): + """ + This queries the spark master and checks for the expected number of workers + """ + start_time = time.time() + while True: + opener = build_opener(HTTPHandler) + request = Request("http://{0}:7080".format(DSE_IP)) + request.get_method = lambda: 'GET' + connection = opener.open(request) + match = re.search('Alive Workers:.*(\d+)', connection.read().decode('utf-8')) + num_workers = int(match.group(1)) + if num_workers == num_of_expected_workers: + match = True + break + elif time.time() - start_time > timeout: + match = True + break + time.sleep(1) + return match + + +def use_single_node_with_graph(start=True, options={}, dse_options={}): + use_single_node(start=start, workloads=['graph'], configuration_options=options, dse_options=dse_options) + + +def use_single_node_with_graph_and_spark(start=True, options={}): + use_single_node(start=start, workloads=['graph', 'spark'], configuration_options=options) + + +def use_single_node_with_graph_and_solr(start=True, options={}): + use_single_node(start=start, workloads=['graph', 'solr'], configuration_options=options) + + +def use_singledc_wth_graph(start=True): + use_singledc(start=start, workloads=['graph']) + + +def use_singledc_wth_graph_and_spark(start=True): + use_cluster_with_graph(3) + + +def use_cluster_with_graph(num_nodes): + """ + This is a work around to account for the fact that spark nodes will conflict over master assignment + when started all at once. + """ + if USE_CASS_EXTERNAL: + set_default_dse_ip() + return + + # Create the cluster but don't start it. + use_singledc(start=False, workloads=['graph', 'spark']) + # Start first node. + get_node(1).start(wait_for_binary_proto=True) + # Wait binary protocol port to open + wait_for_node_socket(get_node(1), 120) + # Wait for spark master to start up + spark_master_http = ("localhost", 7080) + common.check_socket_listening(spark_master_http, timeout=60) + tmp_cluster = Cluster(protocol_version=PROTOCOL_VERSION) + + # Start up remaining nodes. + try: + session = tmp_cluster.connect() + statement = "ALTER KEYSPACE dse_leases WITH REPLICATION = {'class': 'NetworkTopologyStrategy', 'dc1': '%d'}" % (num_nodes) + session.execute(statement) + finally: + tmp_cluster.shutdown() + + for i in range(1, num_nodes+1): + if i is not 1: + node = get_node(i) + node.start(wait_for_binary_proto=True) + wait_for_node_socket(node, 120) + + # Wait for workers to show up as Alive on master + wait_for_spark_workers(3, 120) + + +def reset_graph(session, graph_name): + session.execute_graph('system.graph(name).ifNotExists().create()', {'name': graph_name}, + execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) + wait_for_graph_inserted(session, graph_name) + + +def wait_for_graph_inserted(session, graph_name): + count = 0 + exists = session.execute_graph('system.graph(name).exists()', {'name': graph_name}, + execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)[0].value + while not exists and count < 50: + time.sleep(1) + exists = session.execute_graph('system.graph(name).exists()', {'name': graph_name}, + execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)[0].value + return exists + + +class BasicGraphUnitTestCase(BasicKeyspaceUnitTestCase): + """ + This is basic graph unit test case that provides various utility methods that can be leveraged for testcase setup and tear + down + """ + @property + def graph_name(self): + return self._testMethodName.lower() + + def session_setup(self): + lbp = DSELoadBalancingPolicy(default_lbp_factory()) + + ep_graphson2 = GraphExecutionProfile( + request_timeout=60, + load_balancing_policy=lbp, + graph_options=GraphOptions( + graph_name=self.graph_name, + graph_protocol=GraphProtocol.GRAPHSON_2_0 + ), + row_factory=graph_graphson2_row_factory) + + ep_graphson1 = GraphExecutionProfile( + request_timeout=60, + load_balancing_policy=lbp, + graph_options=GraphOptions( + graph_name=self.graph_name + ) + ) + + ep_analytics = GraphAnalyticsExecutionProfile( + request_timeout=60, + load_balancing_policy=lbp, + graph_options = GraphOptions( + graph_language=b'gremlin-groovy', + graph_name=self.graph_name + ) + ) + + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles={ + EXEC_PROFILE_GRAPH_DEFAULT: ep_graphson1, + EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT: ep_analytics, + "graphson2": ep_graphson2 + }) + self.session = self.cluster.connect() + self.ks_name = self._testMethodName.lower() + self.cass_version, self.cql_version = get_server_versions() + + def setUp(self): + self.session_setup() + self.reset_graph() + + self.clear_schema() + + def tearDown(self): + self.cluster.shutdown() + + def clear_schema(self): + self.session.execute_graph('schema.clear()') + + def reset_graph(self): + reset_graph(self.session, self.graph_name) + + + def wait_for_graph_inserted(self): + wait_for_graph_inserted(self.session, self.graph_name) + + +class BasicSharedGraphUnitTestCase(BasicKeyspaceUnitTestCase): + """ + This is basic graph unit test case that provides various utility methods that can be leveraged for testcase setup and tear + down + """ + + @classmethod + def session_setup(cls): + cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.session = cls.cluster.connect() + cls.ks_name = cls.__name__.lower() + cls.cass_version, cls.cql_version = get_server_versions() + cls.graph_name = cls.__name__.lower() + + @classmethod + def setUpClass(cls): + cls.session_setup() + cls.reset_graph() + profiles = cls.cluster.profile_manager.profiles + profiles[EXEC_PROFILE_GRAPH_DEFAULT].request_timeout = 60 + profiles[EXEC_PROFILE_GRAPH_DEFAULT].graph_options.graph_name = cls.graph_name + profiles[EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT].request_timeout = 60 + profiles[EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT].graph_options.graph_name = cls.graph_name + cls.clear_schema() + + @classmethod + def tearDownClass(cls): + cls.cluster.shutdown() + + @classmethod + def clear_schema(self): + self.session.execute_graph('schema.clear()') + + @classmethod + def reset_graph(self): + reset_graph(self.session, self.graph_name) + + def wait_for_graph_inserted(self): + wait_for_graph_inserted(self.session, self.graph_name) + + +def fetchCustomGeoType(type): + if type.lower().startswith("point"): + return getPointType() + elif type.lower().startswith("line"): + return getLineType() + elif type.lower().startswith("poly"): + return getPolygonType() + else: + return None + + +geo_condition = DSE_VERSION < Version('5.1') +def getPointType(): + if geo_condition: + return "Point()" + + return "Point().withGeoBounds()" + +def getPointTypeWithBounds(lowerX, lowerY, upperX, upperY): + if geo_condition: + return "Point()" + + return "Point().withBounds({0}, {1}, {2}, {3})".format(lowerX, lowerY, upperX, upperY) + +def getLineType(): + if geo_condition: + return "Linestring()" + + return "Linestring().withGeoBounds()" + +def getPolygonType(): + if geo_condition: + return "Polygon()" + + return "Polygon().withGeoBounds()" + + + +class BasicGeometricUnitTestCase(BasicKeyspaceUnitTestCase): + """ + This base test class is used by all the geomteric tests. It contains class level teardown and setup + methods. It also contains the test fixtures used by those tests + """ + @classmethod + def common_dse_setup(cls, rf, keyspace_creation=True): + cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.session = cls.cluster.connect() + cls.ks_name = cls.__name__.lower() + if keyspace_creation: + cls.create_keyspace(rf) + cls.cass_version, cls.cql_version = get_server_versions() + cls.session.set_keyspace(cls.ks_name) + + @classmethod + def setUpClass(cls): + cls.common_dse_setup(1) + cls.initalizeTables() + + @classmethod + def tearDownClass(cls): + drop_keyspace_shutdown_cluster(cls.ks_name, cls.session, cls.cluster) + + @classmethod + def initalizeTables(cls): + udt_type = "CREATE TYPE udt1 (g {0})".format(cls.cql_type_name) + large_table = "CREATE TABLE tbl (k uuid PRIMARY KEY, g {0}, l list<{0}>, s set<{0}>, m0 map<{0},int>, m1 map, t tuple<{0},{0},{0}>, u frozen)".format(cls.cql_type_name) + simple_table = "CREATE TABLE tblpk (k {0} primary key, v int)".format( cls.cql_type_name) + cluster_table = "CREATE TABLE tblclustering (k0 int, k1 {0}, v int, primary key (k0, k1))".format(cls.cql_type_name) + cls.session.execute(udt_type) + cls.session.execute(large_table) + cls.session.execute(simple_table) + cls.session.execute(cluster_table) + + +def generate_line_graph(length): + query_parts = [] + query_parts.append(ALLOW_SCANS+';') + query_parts.append("schema.propertyKey('index').Int().ifNotExists().create();") + query_parts.append("schema.propertyKey('distance').Int().ifNotExists().create();") + query_parts.append("schema.vertexLabel('lp').properties('index').ifNotExists().create();") + query_parts.append("schema.edgeLabel('goesTo').properties('distance').connection('lp', 'lp').ifNotExists().create();") + for index in range(0, length): + query_parts.append('''Vertex vertex{0} = graph.addVertex(label, 'lp', 'index', {0}); '''.format(index)) + if index is not 0: + query_parts.append('''vertex{0}.addEdge('goesTo', vertex{1}, 'distance', 5); '''.format(index-1, index)) + final_graph_generation_statement = "".join(query_parts) + return final_graph_generation_statement + + +def generate_classic(session): + to_run = [MAKE_STRICT, ALLOW_SCANS, '''schema.propertyKey('name').Text().ifNotExists().create(); + schema.propertyKey('age').Int().ifNotExists().create(); + schema.propertyKey('lang').Text().ifNotExists().create(); + schema.propertyKey('weight').Float().ifNotExists().create(); + schema.vertexLabel('person').properties('name', 'age').ifNotExists().create(); + schema.vertexLabel('software').properties('name', 'lang').ifNotExists().create(); + schema.edgeLabel('created').properties('weight').connection('person', 'software').ifNotExists().create(); + schema.edgeLabel('created').connection('software', 'software').add(); + schema.edgeLabel('knows').properties('weight').connection('person', 'person').ifNotExists().create();''', + '''Vertex marko = graph.addVertex(label, 'person', 'name', 'marko', 'age', 29); + Vertex vadas = graph.addVertex(label, 'person', 'name', 'vadas', 'age', 27); + Vertex lop = graph.addVertex(label, 'software', 'name', 'lop', 'lang', 'java'); + Vertex josh = graph.addVertex(label, 'person', 'name', 'josh', 'age', 32); + Vertex ripple = graph.addVertex(label, 'software', 'name', 'ripple', 'lang', 'java'); + Vertex peter = graph.addVertex(label, 'person', 'name', 'peter', 'age', 35); + marko.addEdge('knows', vadas, 'weight', 0.5f); + marko.addEdge('knows', josh, 'weight', 1.0f); + marko.addEdge('created', lop, 'weight', 0.4f); + josh.addEdge('created', ripple, 'weight', 1.0f); + josh.addEdge('created', lop, 'weight', 0.4f); + peter.addEdge('created', lop, 'weight', 0.2f);'''] + + for run in to_run: + succeed = False + count = 0 + # Retry up to 10 times this is an issue for + # Graph Mult-NodeClusters + while count < 10 and not succeed: + try: + session.execute_graph(run) + succeed = True + except (ServerError): + print("error creating classic graph retrying") + time.sleep(.5) + count += 1 + + +def generate_multi_field_graph(session): + to_run = [ALLOW_SCANS, + '''schema.propertyKey('shortvalue').Smallint().ifNotExists().create(); + schema.vertexLabel('shortvertex').properties('shortvalue').ifNotExists().create(); + short s1 = 5000; graph.addVertex(label, "shortvertex", "shortvalue", s1);''', + '''schema.propertyKey('intvalue').Int().ifNotExists().create(); + schema.vertexLabel('intvertex').properties('intvalue').ifNotExists().create(); + int i1 = 1000000000; graph.addVertex(label, "intvertex", "intvalue", i1);''', + '''schema.propertyKey('intvalue2').Int().ifNotExists().create(); + schema.vertexLabel('intvertex2').properties('intvalue2').ifNotExists().create(); + Integer i2 = 100000000; graph.addVertex(label, "intvertex2", "intvalue2", i2);''', + '''schema.propertyKey('longvalue').Bigint().ifNotExists().create(); + schema.vertexLabel('longvertex').properties('longvalue').ifNotExists().create(); + long l1 = 9223372036854775807; graph.addVertex(label, "longvertex", "longvalue", l1);''', + '''schema.propertyKey('longvalue2').Bigint().ifNotExists().create(); + schema.vertexLabel('longvertex2').properties('longvalue2').ifNotExists().create(); + Long l2 = 100000000000000000L; graph.addVertex(label, "longvertex2", "longvalue2", l2);''', + '''schema.propertyKey('floatvalue').Float().ifNotExists().create(); + schema.vertexLabel('floatvertex').properties('floatvalue').ifNotExists().create(); + float f1 = 3.5f; graph.addVertex(label, "floatvertex", "floatvalue", f1);''', + '''schema.propertyKey('doublevalue').Double().ifNotExists().create(); + schema.vertexLabel('doublevertex').properties('doublevalue').ifNotExists().create(); + double d1 = 3.5e40; graph.addVertex(label, "doublevertex", "doublevalue", d1);''', + '''schema.propertyKey('doublevalue2').Double().ifNotExists().create(); + schema.vertexLabel('doublevertex2').properties('doublevalue2').ifNotExists().create(); + Double d2 = 3.5e40d; graph.addVertex(label, "doublevertex2", "doublevalue2", d2);'''] + + + for run in to_run: + session.execute_graph(run) + + if DSE_VERSION >= Version('5.1'): + to_run_51=['''schema.propertyKey('datevalue1').Date().ifNotExists().create(); + schema.vertexLabel('datevertex1').properties('datevalue1').ifNotExists().create();''', + '''schema.propertyKey('negdatevalue2').Date().ifNotExists().create(); + schema.vertexLabel('negdatevertex2').properties('negdatevalue2').ifNotExists().create();'''] + for i in range(1,4): + to_run_51.append('''schema.propertyKey('timevalue{0}').Time().ifNotExists().create(); + schema.vertexLabel('timevertex{0}').properties('timevalue{0}').ifNotExists().create();'''.format(i)) + + for run in to_run_51: + session.execute_graph(run) + + session.execute_graph('''graph.addVertex(label, "datevertex1", "datevalue1", date1);''', + {'date1': '1999-07-29' }) + session.execute_graph('''graph.addVertex(label, "negdatevertex2", "negdatevalue2", date2);''', + {'date2': '-1999-07-28' }) + + session.execute_graph('''graph.addVertex(label, "timevertex1", "timevalue1", time1);''', + {'time1': '14:02'}) + session.execute_graph('''graph.addVertex(label, "timevertex2", "timevalue2", time2);''', + {'time2': '14:02:20'}) + session.execute_graph('''graph.addVertex(label, "timevertex3", "timevalue3", time3);''', + {'time3': '14:02:20.222'}) + + +def generate_type_graph_schema(session, prime_schema=True): + """ + This method will prime the schema for all types in the TYPE_MAP + """ + session.execute_graph(ALLOW_SCANS) + if(prime_schema): + create_vertex= "schema.vertexLabel('{0}').ifNotExists().create();".\ + format(generate_type_graph_schema.single_vertex) + session.execute_graph(create_vertex) + for key in TYPE_MAP.keys(): + prop_type = fetchCustomGeoType(key) + if prop_type is None: + prop_type=TYPE_MAP[key][0] + vertex_label = key + prop_name = key+"value" + insert_string = "" + insert_string += "schema.propertyKey('{0}').{1}.ifNotExists().create();".format(prop_name, prop_type) + insert_string += "schema.vertexLabel('{}').properties('{}').add();".\ + format(generate_type_graph_schema.single_vertex, prop_name) + session.execute_graph(insert_string) + else: + session.execute_graph(MAKE_NON_STRICT) +generate_type_graph_schema.single_vertex = "single_vertex_label" + +def generate_address_book_graph(session, size): + to_run = [ALLOW_SCANS, + "schema.propertyKey('name').Text().create()\n" + + "schema.propertyKey('pointPropWithBoundsWithSearchIndex')." + getPointTypeWithBounds(-100, -100, 100, 100) + ".create()\n" + + "schema.propertyKey('pointPropWithBounds')." + getPointTypeWithBounds(-100, -100, 100, 100) + ".create()\n" + + "schema.propertyKey('pointPropWithGeoBoundsWithSearchIndex')." + getPointType() + ".create()\n" + + "schema.propertyKey('pointPropWithGeoBounds')." + getPointType() + ".create()\n" + + "schema.propertyKey('city').Text().create()\n" + + "schema.propertyKey('state').Text().create()\n" + + "schema.propertyKey('description').Text().create()\n" + + "schema.vertexLabel('person').properties('name', 'city', 'state', 'description', 'pointPropWithBoundsWithSearchIndex', 'pointPropWithBounds', 'pointPropWithGeoBoundsWithSearchIndex', 'pointPropWithGeoBounds').create()", + "schema.vertexLabel('person').index('searchPointWithBounds').secondary().by('pointPropWithBounds').add()", + "schema.vertexLabel('person').index('searchPointWithGeoBounds').secondary().by('pointPropWithGeoBounds').add()", + + "g.addV('person').property('name', 'Paul Thomas Joe').property('city', 'Rochester').property('state', 'MN').property('pointPropWithBoundsWithSearchIndex', Geo.point(-92.46295, 44.0234)).property('pointPropWithBounds', Geo.point(-92.46295, 44.0234)).property('pointPropWithGeoBoundsWithSearchIndex', Geo.point(-92.46295, 44.0234)).property('pointPropWithGeoBounds', Geo.point(-92.46295, 44.0234)).property('description', 'Lives by the hospital')", + "g.addV('person').property('name', 'George Bill Steve').property('city', 'Minneapolis').property('state', 'MN').property('pointPropWithBoundsWithSearchIndex', Geo.point(-93.266667, 44.093333)).property('pointPropWithBounds', Geo.point(-93.266667, 44.093333)).property('pointPropWithGeoBoundsWithSearchIndex', Geo.point(-93.266667, 44.093333)).property('pointPropWithGeoBounds', Geo.point(-93.266667, 44.093333)).property('description', 'A cold dude')", + "g.addV('person').property('name', 'James Paul Smith').property('city', 'Chicago').property('state', 'IL').property('pointPropWithBoundsWithSearchIndex', Geo.point(-87.684722, 41.836944)).property('description', 'Likes to hang out')", + "g.addV('person').property('name', 'Jill Alice').property('city', 'Atlanta').property('state', 'GA').property('pointPropWithBoundsWithSearchIndex', Geo.point(-84.39, 33.755)).property('description', 'Enjoys a nice cold coca cola')", + ] + + if not Version('5.0') <= DSE_VERSION < Version('5.1'): + to_run.append("schema.vertexLabel('person').index('search').search().by('pointPropWithBoundsWithSearchIndex').withError(0.00001, 0.0).by('pointPropWithGeoBoundsWithSearchIndex').withError(0.00001, 0.0).add()") + + for run in to_run: + session.execute_graph(run) + + +def generate_large_complex_graph(session, size): + to_run = '''schema.config().option('graph.schema_mode').set('development'); + int size = 2000; + List ids = new ArrayList(); + schema.propertyKey('ts').Int().single().ifNotExists().create(); + schema.propertyKey('sin').Int().single().ifNotExists().create(); + schema.propertyKey('cos').Int().single().ifNotExists().create(); + schema.propertyKey('ii').Int().single().ifNotExists().create(); + schema.vertexLabel('lcg').properties('ts', 'sin', 'cos', 'ii').ifNotExists().create(); + schema.edgeLabel('linked').connection('lcg', 'lcg').ifNotExists().create(); + Vertex v = graph.addVertex(label, 'lcg'); + v.property("ts", 100001); + v.property("sin", 0); + v.property("cos", 1); + v.property("ii", 0); + ids.add(v.id()); + Random rand = new Random(); + for (int ii = 1; ii < size; ii++) { + v = graph.addVertex(label, 'lcg'); + v.property("ii", ii); + v.property("ts", 100001 + ii); + v.property("sin", Math.sin(ii/5.0)); + v.property("cos", Math.cos(ii/5.0)); + Vertex u = g.V(ids.get(rand.nextInt(ids.size()))).next(); + v.addEdge("linked", u); + ids.add(u.id()); + ids.add(v.id()); + } + g.V().count();''' + prof = session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, request_timeout=32) + session.execute_graph(to_run, execution_profile=prof) + + +def validate_classic_vertex(test, vertex): + vertex_props = vertex.properties.keys() + test.assertEqual(len(vertex_props), 2) + test.assertIn('name', vertex_props) + test.assertTrue('lang' in vertex_props or 'age' in vertex_props) + + +def validate_classic_vertex_return_type(test, vertex): + validate_generic_vertex_result_type(vertex) + vertex_props = vertex.properties + test.assertIn('name', vertex_props) + test.assertTrue('lang' in vertex_props or 'age' in vertex_props) + + +def validate_generic_vertex_result_type(test, vertex): + test.assertIsInstance(vertex, Vertex) + for attr in ('id', 'type', 'label', 'properties'): + test.assertIsNotNone(getattr(vertex, attr)) + + +def validate_classic_edge_properties(test, edge_properties): + test.assertEqual(len(edge_properties.keys()), 1) + test.assertIn('weight', edge_properties) + test.assertIsInstance(edge_properties, dict) + + +def validate_classic_edge(test, edge): + validate_generic_edge_result_type(test, edge) + validate_classic_edge_properties(test, edge.properties) + + +def validate_line_edge(test, edge): + validate_generic_edge_result_type(test, edge) + edge_props = edge.properties + test.assertEqual(len(edge_props.keys()), 1) + test.assertIn('distance', edge_props) + + +def validate_generic_edge_result_type(test, edge): + test.assertIsInstance(edge, Edge) + for attr in ('properties', 'outV', 'outVLabel', 'inV', 'inVLabel', 'label', 'type', 'id'): + test.assertIsNotNone(getattr(edge, attr)) + + +def validate_path_result_type(test, path): + test.assertIsInstance(path, Path) + test.assertIsNotNone(path.labels) + for obj in path.objects: + if isinstance(obj, Edge): + validate_classic_edge(test, obj) + elif isinstance(obj, Vertex): + validate_classic_vertex(test, obj) + else: + test.fail("Invalid object found in path " + str(object.type)) diff --git a/tests/integration/advanced/test_graph.py b/tests/integration/advanced/test_graph.py new file mode 100644 index 0000000000..d143e570a7 --- /dev/null +++ b/tests/integration/advanced/test_graph.py @@ -0,0 +1,746 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa# http://www.datastax.com/terms/datastax-dse-driver-license-terms + +from copy import copy +from itertools import chain +import json +import six +import time +import sys + +from packaging.version import Version + +from cassandra import OperationTimedOut, ConsistencyLevel, InvalidRequest +from cassandra.protocol import SyntaxException +from cassandra.query import QueryTrace +from cassandra.policies import WhiteListRoundRobinPolicy +from cassandra.cluster import NoHostAvailable + +from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT, GraphExecutionProfile, Cluster +from cassandra.graph import (SimpleGraphStatement, graph_object_row_factory, single_object_row_factory,\ + graph_result_row_factory, Result, Vertex, GraphOptions, GraphProtocol, + graph_graphson2_row_factory) +from cassandra.datastax.graph.query import _graph_options +from cassandra.graph.types import VertexProperty +from cassandra.util import SortedSet + +from tests.integration.advanced import BasicGraphUnitTestCase, use_single_node_with_graph, generate_classic, \ + generate_line_graph, generate_multi_field_graph, generate_large_complex_graph, validate_classic_vertex, \ + validate_classic_edge, validate_path_result_type, validate_line_edge, validate_generic_vertex_result_type, \ + fetchCustomGeoType, generate_type_graph_schema, TYPE_MAP +from tests.integration import PROTOCOL_VERSION, greaterthanorequaldse51, DSE_VERSION + + +def setup_module(): + dse_options = {'graph': {'realtime_evaluation_timeout_in_seconds': 60}} + use_single_node_with_graph(dse_options=dse_options) + + +class BasicGraphTest(BasicGraphUnitTestCase): + + def test_basic_query(self): + """ + Test to validate that basic graph query results can be executed with a sane result set. + + Creates a simple classic tinkerpot graph, and attempts to find all vertices + related the vertex marco, that have a label of knows. + See reference graph here + http://www.tinkerpop.com/docs/3.0.0.M1/ + + @since 1.0.0 + @jira_ticket PYTHON-457 + @expected_result graph should find two vertices related to marco via 'knows' edges. + + @test_category dse graph + """ + generate_classic(self.session) + rs = self.session.execute_graph('''g.V().has('name','marko').out('knows').values('name')''') + self.assertFalse(rs.has_more_pages) + results_list = [result.value for result in rs.current_rows] + self.assertEqual(len(results_list), 2) + self.assertIn('vadas', results_list) + self.assertIn('josh', results_list) + + def test_classic_graph(self): + """ + Test to validate that basic graph generation, and vertex and edges are surfaced correctly + + Creates a simple classic tinkerpot graph, and iterates over the the vertices and edges + ensureing that each one is correct. See reference graph here + http://www.tinkerpop.com/docs/3.0.0.M1/ + + @since 1.0.0 + @jira_ticket PYTHON-457 + @expected_result graph should generate and all vertices and edge results should be + + @test_category dse graph + """ + generate_classic(self.session) + rs = self.session.execute_graph('g.V()') + for vertex in rs: + validate_classic_vertex(self, vertex) + rs = self.session.execute_graph('g.E()') + for edge in rs: + validate_classic_edge(self, edge) + + def test_edge_properties_with_graphson2(self): + """ + Test that the edge property deserializer return a dict, and not a set. + + @since 1.5 + @jira_ticket PYTHON-1033 + @expected_result the properties are stored in a dict + + @test_category dse graph + """ + generate_classic(self.session) + epg2 = self.session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT) + epg2.graph_options.graph_protocol = GraphProtocol.GRAPHSON_2_0 + epg2.row_factory = graph_graphson2_row_factory + rs = self.session.execute_graph('g.E()', execution_profile=epg2) + for edge in rs: + validate_classic_edge(self, edge) + + def test_graph_classic_path(self): + """ + Test to validate that the path version of the result type is generated correctly. It also + tests basic path results as that is not covered elsewhere + + @since 1.0.0 + @jira_ticket PYTHON-479 + @expected_result path object should be unpacked correctly including all nested edges and verticies + @test_category dse graph + """ + generate_classic(self.session) + + rs = self.session.execute_graph("g.V().hasLabel('person').has('name', 'marko').as('a').outE('knows').inV().as('c', 'd').outE('created').as('e', 'f', 'g').inV().path()") + rs_list = list(rs) + self.assertEqual(len(rs_list), 2) + for result in rs_list: + path = result.as_path() + validate_path_result_type(self, path) + + def test_large_create_script(self): + """ + Test to validate that server errors due to large groovy scripts are properly surfaced + + Creates a very large line graph script and executes it. Then proceeds to create a line graph script + that is to large for the server to handle expects a server error to be returned + + @since 1.0.0 + @jira_ticket PYTHON-457 + @expected_result graph should generate and all vertices and edge results should be + + @test_category dse graph + """ + query_to_run = generate_line_graph(150) + self.session.execute_graph(query_to_run) + query_to_run = generate_line_graph(300) + self.assertRaises(SyntaxException, self.session.execute_graph, query_to_run) + + def test_range_query(self): + """ + Test to validate range queries are handled correctly. + + Creates a very large line graph script and executes it. Then proceeds to to a range + limited query against it, and ensure that the results are formated correctly and that + the result set is properly sized. + + @since 1.0.0 + @jira_ticket PYTHON-457 + @expected_result result set should be properly formated and properly sized + + @test_category dse graph + """ + query_to_run = generate_line_graph(150) + self.session.execute_graph(query_to_run) + rs = self.session.execute_graph("g.E().range(0,10)") + self.assertFalse(rs.has_more_pages) + results = list(rs) + self.assertEqual(len(results), 10) + for result in results: + validate_line_edge(self, result) + + def test_large_result_set(self): + """ + Test to validate that large result sets return correctly. + + Creates a very large graph. Ensures that large result sets are handled appropriately. + + @since 1.0.0 + @jira_ticket PYTHON-457 + @expected_result when limits of result sets are hit errors should be surfaced appropriately + + @test_category dse graph + """ + generate_large_complex_graph(self.session, 5000) + rs = self.session.execute_graph("g.V()") + for result in rs: + validate_generic_vertex_result_type(self, result) + + def test_parameter_passing(self): + """ + Test to validate that parameter passing works as expected + + @since 1.0.0 + @jira_ticket PYTHON-457 + @expected_result parameters work as expected + + @test_category dse graph + """ + + s = self.session + # unused parameters are passed, but ignored + s.execute_graph("null", {"doesn't": "matter", "what's": "passed"}) + + # multiple params + results = s.execute_graph("[a, b]", {'a': 0, 'b': 1}) + self.assertEqual(results[0].value, 0) + self.assertEqual(results[1].value, 1) + + # different value types + for param in (None, "string", 1234, 5.678, True, False): + result = s.execute_graph('x', {'x': param})[0] + self.assertEqual(result.value, param) + + def test_consistency_passing(self): + """ + Test to validated that graph consistency levels are properly surfaced to the base driver + + @since 1.0.0 + @jira_ticket PYTHON-509 + @expected_result graph consistency levels are surfaced correctly + @test_category dse graph + """ + cl_attrs = ('graph_read_consistency_level', 'graph_write_consistency_level') + + # Iterates over the graph options and constructs an array containing + # The graph_options that correlate to graoh read and write consistency levels + graph_params = [a[2] for a in _graph_options if a[0] in cl_attrs] + + s = self.session + default_profile = s.cluster.profile_manager.profiles[EXEC_PROFILE_GRAPH_DEFAULT] + default_graph_opts = default_profile.graph_options + try: + # Checks the default graph attributes and ensures that both graph_read_consistency_level and graph_write_consistency_level + # Are None by default + for attr in cl_attrs: + self.assertIsNone(getattr(default_graph_opts, attr)) + + res = s.execute_graph("null") + for param in graph_params: + self.assertNotIn(param, res.response_future.message.custom_payload) + + # session defaults are passed + opts = GraphOptions() + opts.update(default_graph_opts) + cl = {0: ConsistencyLevel.ONE, 1: ConsistencyLevel.LOCAL_QUORUM} + for k, v in cl.items(): + setattr(opts, cl_attrs[k], v) + default_profile.graph_options = opts + + res = s.execute_graph("null") + + for k, v in cl.items(): + self.assertEqual(res.response_future.message.custom_payload[graph_params[k]], six.b(ConsistencyLevel.value_to_name[v])) + + # passed profile values override session defaults + cl = {0: ConsistencyLevel.ALL, 1: ConsistencyLevel.QUORUM} + opts = GraphOptions() + opts.update(default_graph_opts) + for k, v in cl.items(): + attr_name = cl_attrs[k] + setattr(opts, attr_name, v) + self.assertNotEqual(getattr(default_profile.graph_options, attr_name), getattr(opts, attr_name)) + tmp_profile = s.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, graph_options=opts) + res = s.execute_graph("null", execution_profile=tmp_profile) + + for k, v in cl.items(): + self.assertEqual(res.response_future.message.custom_payload[graph_params[k]], six.b(ConsistencyLevel.value_to_name[v])) + finally: + default_profile.graph_options = default_graph_opts + + def test_additional_custom_payload(self): + s = self.session + custom_payload = {'some': 'example'.encode('utf-8'), 'items': 'here'.encode('utf-8')} + sgs = SimpleGraphStatement("null", custom_payload=custom_payload) + future = s.execute_graph_async(sgs) + + default_profile = s.cluster.profile_manager.profiles[EXEC_PROFILE_GRAPH_DEFAULT] + default_graph_opts = default_profile.graph_options + for k, v in chain(custom_payload.items(), default_graph_opts.get_options_map().items()): + self.assertEqual(future.message.custom_payload[k], v) + + def test_geometric_graph_types(self): + """ + Test to validate that geometric types function correctly + + Creates a very simple graph, and tries to insert a simple point type + + @since 1.0.0 + @jira_ticket DSP-8087 + @expected_result json types associated with insert is parsed correctly + + @test_category dse graph + """ + self.session.execute_graph('''import org.apache.cassandra.db.marshal.geometry.Point; + schema.propertyKey('pointP').{0}.ifNotExists().create(); + schema.vertexLabel('PointV').properties('pointP').ifNotExists().create();'''.format(fetchCustomGeoType("point"))) + + rs = self.session.execute_graph('''g.addV('PointV').property('pointP', 'POINT(0 1)');''') + + # if result set is not parsed correctly this will throw an exception + self.assertIsNotNone(rs) + + def test_result_forms(self): + """ + Test to validate that geometric types function correctly + + Creates a very simple graph, and tries to insert a simple point type + + @since 1.0.0 + @jira_ticket DSP-8087 + @expected_result json types assoicated with insert is parsed correctly + + @test_category dse graph + """ + generate_classic(self.session) + rs = list(self.session.execute_graph('g.V()')) + self.assertGreater(len(rs), 0, "Result set was empty this was not expected") + for v in rs: + validate_classic_vertex(self, v) + + rs = list(self.session.execute_graph('g.E()')) + self.assertGreater(len(rs), 0, "Result set was empty this was not expected") + for e in rs: + validate_classic_edge(self, e) + + def test_vertex_multiple_properties(self): + """ + Test verifying vertex property form for various Cardinality + + All key types are encoded as a list, regardless of cardinality + + Single cardinality properties have only one value -- the last one added + + Default is single (this is config dependent) + + @since 1.0.0 + @jira_ticket PYTHON-487 + + @test_category dse graph + """ + s = self.session + s.execute_graph('''Schema schema = graph.schema(); + schema.propertyKey('mult_key').Text().multiple().ifNotExists().create(); + schema.propertyKey('single_key').Text().single().ifNotExists().create(); + schema.vertexLabel('MPW1').properties('mult_key').ifNotExists().create(); + schema.vertexLabel('SW1').properties('single_key').ifNotExists().create();''') + + v = s.execute_graph('''v = graph.addVertex('MPW1') + v.property('mult_key', 'value') + v''')[0] + self.assertEqual(len(v.properties), 1) + self.assertEqual(len(v.properties['mult_key']), 1) + self.assertEqual(v.properties['mult_key'][0].label, 'mult_key') + self.assertEqual(v.properties['mult_key'][0].value, 'value') + + # multiple_with_two_values + v = s.execute_graph('''g.addV('MPW1').property('mult_key', 'value0').property('mult_key', 'value1')''')[0] + self.assertEqual(len(v.properties), 1) + self.assertEqual(len(v.properties['mult_key']), 2) + self.assertEqual(v.properties['mult_key'][0].label, 'mult_key') + self.assertEqual(v.properties['mult_key'][1].label, 'mult_key') + self.assertEqual(v.properties['mult_key'][0].value, 'value0') + self.assertEqual(v.properties['mult_key'][1].value, 'value1') + + # single_with_one_value + v = s.execute_graph('''v = graph.addVertex('SW1') + v.property('single_key', 'value') + v''')[0] + self.assertEqual(len(v.properties), 1) + self.assertEqual(len(v.properties['single_key']), 1) + self.assertEqual(v.properties['single_key'][0].label, 'single_key') + self.assertEqual(v.properties['single_key'][0].value, 'value') + + # single_with_two_values + with self.assertRaises(InvalidRequest): + v = s.execute_graph('''v = graph.addVertex('SW1') + v.property('single_key', 'value0').property('single_key', 'value1') + v''')[0] + + def test_vertex_property_properties(self): + """ + Test verifying vertex property properties + + @since 1.0.0 + @jira_ticket PYTHON-487 + + @test_category dse graph + """ + s = self.session + s.execute_graph("schema.propertyKey('k0').Text().ifNotExists().create();") + s.execute_graph("schema.propertyKey('k1').Text().ifNotExists().create();") + s.execute_graph("schema.propertyKey('key').Text().properties('k0', 'k1').ifNotExists().create();") + s.execute_graph("schema.vertexLabel('MLP').properties('key').ifNotExists().create();") + v = s.execute_graph('''v = graph.addVertex('MLP') + v.property('key', 'value', 'k0', 'v0', 'k1', 'v1') + v''')[0] + self.assertEqual(len(v.properties), 1) + self.assertEqual(len(v.properties['key']), 1) + p = v.properties['key'][0] + self.assertEqual(p.label, 'key') + self.assertEqual(p.value, 'value') + self.assertEqual(p.properties, {'k0': 'v0', 'k1': 'v1'}) + + def test_profile_graph_options(self): + s = self.session + statement = SimpleGraphStatement("true") + ep = self.session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT) + self.assertTrue(s.execute_graph(statement, execution_profile=ep)[0].value) + + # bad graph name to verify it's passed + ep.graph_options = ep.graph_options.copy() + ep.graph_options.graph_name = "definitely_not_correct" + try: + s.execute_graph(statement, execution_profile=ep) + except NoHostAvailable: # it's a ServerError + self.assertTrue(DSE_VERSION >= Version("6.0")) + except InvalidRequest: + self.assertTrue(DSE_VERSION < Version("6.0")) + else: + self.fail("Should have risen ServerError or InvalidRequest") + + def test_execute_graph_timeout(self): + s = self.session + + value = [1, 2, 3] + query = "[%r]" % (value,) + + # default is passed down + default_graph_profile = s.cluster.profile_manager.profiles[EXEC_PROFILE_GRAPH_DEFAULT] + rs = self.session.execute_graph(query) + self.assertEqual(rs[0].value, value) + self.assertEqual(rs.response_future.timeout, default_graph_profile.request_timeout) + + # tiny timeout times out as expected + tmp_profile = copy(default_graph_profile) + tmp_profile.request_timeout = sys.float_info.min + + max_retry_count = 10 + for _ in range(max_retry_count): + start = time.time() + try: + with self.assertRaises(OperationTimedOut): + s.execute_graph(query, execution_profile=tmp_profile) + break + except: + end = time.time() + self.assertAlmostEqual(start, end, 1) + else: + raise Exception("session.execute_graph didn't time out in {0} tries".format(max_retry_count)) + + + def test_execute_graph_trace(self): + s = self.session + + value = [1, 2, 3] + query = "[%r]" % (value,) + + # default is no trace + rs = s.execute_graph(query) + self.assertEqual(rs[0].value, value) + self.assertIsNone(rs.get_query_trace()) + + # request trace + rs = s.execute_graph(query, trace=True) + self.assertEqual(rs[0].value, value) + qt = rs.get_query_trace(max_wait_sec=10) + self.assertIsInstance(qt, QueryTrace) + self.assertIsNotNone(qt.duration) + + def test_execute_graph_row_factory(self): + s = self.session + + # default Results + default_profile = s.cluster.profile_manager.profiles[EXEC_PROFILE_GRAPH_DEFAULT] + self.assertEqual(default_profile.row_factory, graph_object_row_factory) + result = s.execute_graph("123")[0] + self.assertIsInstance(result, Result) + self.assertEqual(result.value, 123) + + # other via parameter + prof = s.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, row_factory=single_object_row_factory) + rs = s.execute_graph("123", execution_profile=prof) + self.assertEqual(rs.response_future.row_factory, single_object_row_factory) + self.assertEqual(json.loads(rs[0]), {'result': 123}) + + +class GraphTypesTests(BasicGraphUnitTestCase): + + def test_result_types(self): + """ + Test to validate that the edge and vertex version of results are constructed correctly. + + @since 1.0.0 + @jira_ticket PYTHON-479 + @expected_result edge/vertex result types should be unpacked correctly. + @test_category dse graph + """ + generate_multi_field_graph(self.session) # TODO: we could just make a single vertex with properties of all types, or even a simple query that just uses a sequence of groovy expressions + + prof = self.session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, row_factory=graph_result_row_factory) # requires simplified row factory to avoid shedding id/~type information used for validation below + rs = self.session.execute_graph("g.V()", execution_profile=prof) + + for result in rs: + self._validate_type(result) + + def test_all_types_graphson1(self): + s = self.session + generate_type_graph_schema(s) + + for key in TYPE_MAP.keys(): + vertex_label = generate_type_graph_schema.single_vertex + property_name = key + "value" + _, value, deserializer = TYPE_MAP[key] + s.execute_graph("g.addV('{0}').property('{1}', type_value)".format(vertex_label, property_name), + {'type_value' : value}) + read_results = s.execute_graph("g.V().hasLabel('{0}').has('{1}')".format(vertex_label, property_name)) + row = next(read_results.current_rows) + + self.assertEqual(len(row.properties), 1) + self.assertEqual(len(list(row.properties.values())[0]), 1) + self.assertIsInstance(list(row.properties.values())[0][0], VertexProperty) + + deserialized_value = deserializer(list(row.properties.values())[0][0].value) + + self.assertEqual(deserialized_value, value) + + self.assertRaises(StopIteration, next, read_results.current_rows) + + prof = s.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, + row_factory=graph_result_row_factory) + rs = s.execute_graph("g.V()", execution_profile=prof) + for result in rs: + self._validate_type(result) + + def test_all_types_graphson2(self): + """ + Sends and reads back all the available types verifying they were written correctly + + @since 2.3.0 + @jira_ticket PYTHON-775 + + @test_category dse graph + """ + s = self.session + generate_type_graph_schema(s) + + for key in TYPE_MAP.keys(): + vertex_label = generate_type_graph_schema.single_vertex + property_name = key + "value" + _, value, deserializer = TYPE_MAP[key] + s.execute_graph("g.addV('{0}').property('{1}', type_value)".format(vertex_label, property_name), + {'type_value' : value}, execution_profile="graphson2") + read_results = s.execute_graph("g.V().hasLabel('{0}').has('{1}')". + format(vertex_label, property_name), execution_profile="graphson2") + first_vertex = read_results.current_rows[0] + + self.assertIsInstance(first_vertex, Vertex) + + vertex_property = first_vertex.properties[property_name][0] + self.assertIsInstance(vertex_property, VertexProperty) + self.assertEqual(vertex_property.label, property_name) + self.assertEqual(vertex_property.value, value) + self.assertEqual(vertex_property.properties, {}) + + def _validate_type(self, vertex): + for properties in vertex.properties.values(): + prop = properties[0] + + if DSE_VERSION >= Version("5.1"): + type_indicator = prop['id']['~label'] + else: + type_indicator = prop['id']['~type'] + + if any(type_indicator.startswith(t) for t in ('int', 'short', 'long', 'bigint', 'decimal', 'smallint', 'varint')): + typ = six.integer_types + elif any(type_indicator.startswith(t) for t in ('float', 'double')): + typ = float + elif any(type_indicator.startswith(t) for t in ('duration', 'date', 'negdate', 'time', + 'blob', 'timestamp', 'point', 'linestring', 'polygon', 'inet', 'uuid')): + typ = six.text_type + else: + pass + self.fail("Received unexpected type: %s" % type_indicator) + self.assertIsInstance(prop['value'], typ) + + +class GraphTimeoutTests(BasicGraphUnitTestCase): + + def test_should_wait_indefinitely_by_default(self): + """ + Tests that by default the client should wait indefinitely for server timeouts + + @since 1.0.0 + @jira_ticket PYTHON-589 + + @test_category dse graph + """ + desired_timeout = 1000 + + graph_source = "test_timeout_1" + ep_name = graph_source + ep = self.session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT) + ep.graph_options = ep.graph_options.copy() + ep.graph_options.graph_source = graph_source + self.cluster.add_execution_profile(ep_name, ep) + + to_run = '''graph.schema().config().option("graph.traversal_sources.{0}.evaluation_timeout").set('{1} ms')'''.format(graph_source, desired_timeout) + self.session.execute_graph(to_run, execution_profile=ep_name) + with self.assertRaises(InvalidRequest) as ir: + self.session.execute_graph("java.util.concurrent.TimeUnit.MILLISECONDS.sleep(35000L);1+1", execution_profile=ep_name) + self.assertTrue("evaluation exceeded the configured threshold of 1000" in str(ir.exception) or + "evaluation exceeded the configured threshold of evaluation_timeout at 1000" in str(ir.exception)) + + def test_request_timeout_less_then_server(self): + """ + Tests that with explicit request_timeouts set, that a server timeout is honored if it's relieved prior to the + client timeout + + @since 1.0.0 + @jira_ticket PYTHON-589 + + @test_category dse graph + """ + desired_timeout = 1000 + graph_source = "test_timeout_2" + ep_name = graph_source + ep = self.session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, request_timeout=32) + ep.graph_options = ep.graph_options.copy() + ep.graph_options.graph_source = graph_source + self.cluster.add_execution_profile(ep_name, ep) + + + + to_run = '''graph.schema().config().option("graph.traversal_sources.{0}.evaluation_timeout").set('{1} ms')'''.format(graph_source, desired_timeout) + self.session.execute_graph(to_run, execution_profile=ep_name) + with self.assertRaises(InvalidRequest) as ir: + self.session.execute_graph("java.util.concurrent.TimeUnit.MILLISECONDS.sleep(35000L);1+1", execution_profile=ep_name) + print(ir.exception) + self.assertTrue("evaluation exceeded the configured threshold of 1000" in str(ir.exception) or + "evaluation exceeded the configured threshold of evaluation_timeout at 1000" in str(ir.exception)) + + def test_server_timeout_less_then_request(self): + """ + Tests that with explicit request_timeouts set, that a client timeout is honored if it's triggered prior to the + server sending a timeout. + + @since 1.0.0 + @jira_ticket PYTHON-589 + + @test_category dse graph + """ + graph_source = "test_timeout_3" + ep_name = graph_source + ep = self.session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, request_timeout=1) + ep.graph_options = ep.graph_options.copy() + ep.graph_options.graph_source = graph_source + self.cluster.add_execution_profile(ep_name, ep) + server_timeout = 10000 + to_run = '''graph.schema().config().option("graph.traversal_sources.{0}.evaluation_timeout").set('{1} ms')'''.format(graph_source, server_timeout) + self.session.execute_graph(to_run, execution_profile=ep_name) + + with self.assertRaises(Exception) as e: + self.session.execute_graph("java.util.concurrent.TimeUnit.MILLISECONDS.sleep(35000L);1+1", execution_profile=ep_name) + self.assertTrue(isinstance(e, InvalidRequest) or isinstance(e, OperationTimedOut)) + + +class GraphProfileTests(BasicGraphUnitTestCase): + def test_graph_profile(self): + """ + Test verifying various aspects of graph config properties. + + @since 1.0.0 + @jira_ticket PYTHON-570 + + @test_category dse graph + """ + hosts = self.cluster.metadata.all_hosts() + first_host = hosts[0].address + second_hosts = "1.2.3.4" + + generate_classic(self.session) + # Create variou execution policies + exec_dif_factory = GraphExecutionProfile(row_factory=single_object_row_factory) + exec_dif_factory.graph_options.graph_name = self.graph_name + exec_dif_lbp = GraphExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy([first_host])) + exec_dif_lbp.graph_options.graph_name = self.graph_name + exec_bad_lbp = GraphExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy([second_hosts])) + exec_dif_lbp.graph_options.graph_name = self.graph_name + exec_short_timeout = GraphExecutionProfile(request_timeout=1, load_balancing_policy=WhiteListRoundRobinPolicy([first_host])) + exec_short_timeout.graph_options.graph_name = self.graph_name + + # Add a single exection policy on cluster creation + local_cluster = Cluster(protocol_version=PROTOCOL_VERSION, execution_profiles={"exec_dif_factory": exec_dif_factory}) + local_session = local_cluster.connect() + self.addCleanup(local_cluster.shutdown) + + rs1 = self.session.execute_graph('g.V()') + rs2 = local_session.execute_graph('g.V()', execution_profile='exec_dif_factory') + + # Verify default and non default policy works + self.assertFalse(isinstance(rs2[0], Vertex)) + self.assertTrue(isinstance(rs1[0], Vertex)) + # Add other policies validate that lbp are honored + local_cluster.add_execution_profile("exec_dif_ldp", exec_dif_lbp) + local_session.execute_graph('g.V()', execution_profile="exec_dif_ldp") + local_cluster.add_execution_profile("exec_bad_lbp", exec_bad_lbp) + with self.assertRaises(NoHostAvailable): + local_session.execute_graph('g.V()', execution_profile="exec_bad_lbp") + + # Try with missing EP + with self.assertRaises(ValueError): + local_session.execute_graph('g.V()', execution_profile='bad_exec_profile') + + # Validate that timeout is honored + local_cluster.add_execution_profile("exec_short_timeout", exec_short_timeout) + with self.assertRaises(Exception) as e: + self.assertTrue(isinstance(e, InvalidRequest) or isinstance(e, OperationTimedOut)) + local_session.execute_graph('java.util.concurrent.TimeUnit.MILLISECONDS.sleep(2000L);', execution_profile='exec_short_timeout') + + +class GraphMetadataTest(BasicGraphUnitTestCase): + + @greaterthanorequaldse51 + def test_dse_workloads(self): + """ + Test to ensure dse_workloads is populated appropriately. + Field added in DSE 5.1 + + @since DSE 2.0 + @jira_ticket PYTHON-667 + @expected_result dse_workloads set is set on host model + + @test_category metadata + """ + for host in self.cluster.metadata.all_hosts(): + self.assertIsInstance(host.dse_workloads, SortedSet) + self.assertIn("Cassandra", host.dse_workloads) + self.assertIn("Graph", host.dse_workloads) \ No newline at end of file diff --git a/tests/unit/advanced/test_graph.py b/tests/unit/advanced/test_graph.py new file mode 100644 index 0000000000..702b3c376b --- /dev/null +++ b/tests/unit/advanced/test_graph.py @@ -0,0 +1,400 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings +import json + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +import six + +from cassandra import ConsistencyLevel +from cassandra.policies import RetryPolicy +from cassandra.graph import (SimpleGraphStatement, GraphOptions, GraphProtocol, Result, + graph_result_row_factory, single_object_row_factory, + Vertex, Edge, Path, VertexProperty) +from cassandra.datastax.graph.query import _graph_options + + +class GraphResultTests(unittest.TestCase): + + _values = (None, 1, 1.2, True, False, [1, 2, 3], {'x': 1, 'y': 2}) + + def test_result_value(self): + for v in self._values: + result = self._make_result(v) + self.assertEqual(result.value, v) + + def test_result_attr(self): + # value is not a dict + result = self._make_result(123) + with self.assertRaises(ValueError): + result.something + + expected = {'a': 1, 'b': 2} + result = self._make_result(expected) + self.assertEqual(result.a, 1) + self.assertEqual(result.b, 2) + with self.assertRaises(AttributeError): + result.not_present + + def test_result_item(self): + # value is not a dict, list + result = self._make_result(123) + with self.assertRaises(ValueError): + result['something'] + with self.assertRaises(ValueError): + result[0] + + # dict key access + expected = {'a': 1, 'b': 2} + result = self._make_result(expected) + self.assertEqual(result['a'], 1) + self.assertEqual(result['b'], 2) + with self.assertRaises(KeyError): + result['not_present'] + with self.assertRaises(ValueError): + result[0] + + # list index access + expected = [0, 1] + result = self._make_result(expected) + self.assertEqual(result[0], 0) + self.assertEqual(result[1], 1) + with self.assertRaises(IndexError): + result[2] + with self.assertRaises(ValueError): + result['something'] + + def test_as_vertex(self): + prop_name = 'name' + prop_val = 'val' + vertex_dict = {'id': object(), + 'label': object(), + 'type': 'vertex', + 'properties': {prop_name: [{'value': prop_val, 'whatever': object()}]} + } + required_attrs = [k for k in vertex_dict if k != 'properties'] + result = self._make_result(vertex_dict) + vertex = result.as_vertex() + for attr in required_attrs: + self.assertEqual(getattr(vertex, attr), vertex_dict[attr]) + self.assertEqual(len(vertex.properties), 1) + self.assertEqual(vertex.properties[prop_name][0].value, prop_val) + + # no props + modified_vertex_dict = vertex_dict.copy() + del modified_vertex_dict['properties'] + vertex = self._make_result(modified_vertex_dict).as_vertex() + self.assertEqual(vertex.properties, {}) + + # wrong 'type' + modified_vertex_dict = vertex_dict.copy() + modified_vertex_dict['type'] = 'notavertex' + result = self._make_result(modified_vertex_dict) + self.assertRaises(TypeError, result.as_vertex) + + # missing required properties + for attr in required_attrs: + modified_vertex_dict = vertex_dict.copy() + del modified_vertex_dict[attr] + result = self._make_result(modified_vertex_dict) + self.assertRaises(TypeError, result.as_vertex) + + def test_as_edge(self): + prop_name = 'name' + prop_val = 'val' + edge_dict = {'id': object(), + 'label': object(), + 'type': 'edge', + 'inV': object(), + 'inVLabel': object(), + 'outV': object(), + 'outVLabel': object(), + 'properties': {prop_name: prop_val} + } + required_attrs = [k for k in edge_dict if k != 'properties'] + result = self._make_result(edge_dict) + edge = result.as_edge() + for attr in required_attrs: + self.assertEqual(getattr(edge, attr), edge_dict[attr]) + self.assertEqual(len(edge.properties), 1) + self.assertEqual(edge.properties[prop_name], prop_val) + + # no props + modified_edge_dict = edge_dict.copy() + del modified_edge_dict['properties'] + edge = self._make_result(modified_edge_dict).as_edge() + self.assertEqual(edge.properties, {}) + + # wrong 'type' + modified_edge_dict = edge_dict.copy() + modified_edge_dict['type'] = 'notanedge' + result = self._make_result(modified_edge_dict) + self.assertRaises(TypeError, result.as_edge) + + # missing required properties + for attr in required_attrs: + modified_edge_dict = edge_dict.copy() + del modified_edge_dict[attr] + result = self._make_result(modified_edge_dict) + self.assertRaises(TypeError, result.as_edge) + + def test_as_path(self): + vertex_dict = {'id': object(), + 'label': object(), + 'type': 'vertex', + 'properties': {'name': [{'value': 'val', 'whatever': object()}]} + } + edge_dict = {'id': object(), + 'label': object(), + 'type': 'edge', + 'inV': object(), + 'inVLabel': object(), + 'outV': object(), + 'outVLabel': object(), + 'properties': {'name': 'val'} + } + path_dict = {'labels': [['a', 'b'], ['c']], + 'objects': [vertex_dict, edge_dict] + } + result = self._make_result(path_dict) + path = result.as_path() + self.assertEqual(path.labels, path_dict['labels']) + + # make sure inner objects are bound correctly + self.assertIsInstance(path.objects[0], Vertex) + self.assertIsInstance(path.objects[1], Edge) + + # missing required properties + for attr in path_dict: + modified_path_dict = path_dict.copy() + del modified_path_dict[attr] + result = self._make_result(modified_path_dict) + self.assertRaises(TypeError, result.as_path) + + def test_str(self): + for v in self._values: + self.assertEqual(str(self._make_result(v)), str(v)) + + def test_repr(self): + for v in self._values: + result = self._make_result(v) + self.assertEqual(eval(repr(result)), result) + + def _make_result(self, value): + # direct pass-through now + return Result(value) + + +class GraphTypeTests(unittest.TestCase): + # see also: GraphResultTests.test_as_* + + def test_vertex_str_repr(self): + prop_name = 'name' + prop_val = 'val' + kwargs = {'id': 'id_val', 'label': 'label_val', 'type': 'vertex', 'properties': {prop_name: [{'value': prop_val}]}} + vertex = Vertex(**kwargs) + transformed = kwargs.copy() + transformed['properties'] = {prop_name: [VertexProperty(prop_name, prop_val)]} + self.assertEqual(eval(str(vertex)), transformed) + self.assertEqual(eval(repr(vertex)), vertex) + + def test_edge_str_repr(self): + prop_name = 'name' + prop_val = 'val' + kwargs = {'id': 'id_val', 'label': 'label_val', 'type': 'edge', + 'inV': 'inV_val', 'inVLabel': 'inVLabel_val', + 'outV': 'outV_val', 'outVLabel': 'outVLabel_val', + 'properties': {prop_name: prop_val}} + edge = Edge(**kwargs) + self.assertEqual(eval(str(edge)), kwargs) + self.assertEqual(eval(repr(edge)), edge) + + def test_path_str_repr(self): + kwargs = {'labels': [['a', 'b'], ['c']], 'objects': range(10)} + path = Path(**kwargs) + transformed = kwargs.copy() + transformed['objects'] = [Result(o) for o in kwargs['objects']] + self.assertEqual(eval(str(path)), transformed) + self.assertEqual(eval(repr(path)), path) + + +class GraphOptionTests(unittest.TestCase): + + opt_mapping = dict((t[0], t[2]) for t in _graph_options if not t[0].endswith('consistency_level')) # cl excluded from general tests because it requires mapping to names + + api_params = dict((p, str(i)) for i, p in enumerate(opt_mapping)) + + def test_init(self): + opts = GraphOptions(**self.api_params) + self._verify_api_params(opts, self.api_params) + self._verify_api_params(GraphOptions(), { + 'graph_source': 'g', + 'graph_language': 'gremlin-groovy', + 'graph_protocol': GraphProtocol.GRAPHSON_1_0 + }) + + def test_init_unknown_kwargs(self): + with warnings.catch_warnings(record=True) as w: + GraphOptions(unknown_param=42) + self.assertEqual(len(w), 1) + self.assertRegexpMatches(str(w[0].message), r"^Unknown keyword.*GraphOptions.*") + + def test_update(self): + opts = GraphOptions(**self.api_params) + new_params = dict((k, str(int(v) + 1)) for k, v in self.api_params.items()) + opts.update(GraphOptions(**new_params)) + self._verify_api_params(opts, new_params) + + def test_get_options(self): + # nothing set --> base map + base = GraphOptions(**self.api_params) + self.assertEqual(GraphOptions().get_options_map(base), base._graph_options) + + # something set overrides + kwargs = self.api_params.copy() # this test concept got strange after we added default values for a couple GraphOption attrs + kwargs['graph_name'] = 'unit_test' + other = GraphOptions(**kwargs) + options = base.get_options_map(other) + updated = self.opt_mapping['graph_name'] + self.assertEqual(options[updated], six.b('unit_test')) + for name in (n for n in self.opt_mapping.values() if n != updated): + self.assertEqual(options[name], base._graph_options[name]) + + # base unchanged + self._verify_api_params(base, self.api_params) + + def test_set_attr(self): + expected = 'test@@@@' + opts = GraphOptions(graph_name=expected) + self.assertEqual(opts.graph_name, six.b(expected)) + expected = 'somethingelse####' + opts.graph_name = expected + self.assertEqual(opts.graph_name, six.b(expected)) + + # will update options with set value + another = GraphOptions() + self.assertIsNone(another.graph_name) + another.update(opts) + self.assertEqual(another.graph_name, six.b(expected)) + + opts.graph_name = None + self.assertIsNone(opts.graph_name) + # will not update another with its set-->unset value + another.update(opts) + self.assertEqual(another.graph_name, six.b(expected)) # remains unset + opt_map = another.get_options_map(opts) + self.assertEqual(opt_map, another._graph_options) + + def test_del_attr(self): + opts = GraphOptions(**self.api_params) + test_params = self.api_params.copy() + del test_params['graph_source'] + del opts.graph_source + self._verify_api_params(opts, test_params) + + def _verify_api_params(self, opts, api_params): + self.assertEqual(len(opts._graph_options), len(api_params)) + for name, value in api_params.items(): + value = six.b(value) + self.assertEqual(getattr(opts, name), value) + self.assertEqual(opts._graph_options[self.opt_mapping[name]], value) + + def test_consistency_levels(self): + read_cl = ConsistencyLevel.ONE + write_cl = ConsistencyLevel.LOCAL_QUORUM + + # set directly + opts = GraphOptions(graph_read_consistency_level=read_cl, graph_write_consistency_level=write_cl) + self.assertEqual(opts.graph_read_consistency_level, read_cl) + self.assertEqual(opts.graph_write_consistency_level, write_cl) + + # mapping from base + opt_map = opts.get_options_map() + self.assertEqual(opt_map['graph-read-consistency'], six.b(ConsistencyLevel.value_to_name[read_cl])) + self.assertEqual(opt_map['graph-write-consistency'], six.b(ConsistencyLevel.value_to_name[write_cl])) + + # empty by default + new_opts = GraphOptions() + opt_map = new_opts.get_options_map() + self.assertNotIn('graph-read-consistency', opt_map) + self.assertNotIn('graph-write-consistency', opt_map) + + # set from other + opt_map = new_opts.get_options_map(opts) + self.assertEqual(opt_map['graph-read-consistency'], six.b(ConsistencyLevel.value_to_name[read_cl])) + self.assertEqual(opt_map['graph-write-consistency'], six.b(ConsistencyLevel.value_to_name[write_cl])) + + def test_graph_source_convenience_attributes(self): + opts = GraphOptions() + self.assertEqual(opts.graph_source, b'g') + self.assertFalse(opts.is_analytics_source) + self.assertTrue(opts.is_graph_source) + self.assertFalse(opts.is_default_source) + + opts.set_source_default() + self.assertIsNotNone(opts.graph_source) + self.assertFalse(opts.is_analytics_source) + self.assertFalse(opts.is_graph_source) + self.assertTrue(opts.is_default_source) + + opts.set_source_analytics() + self.assertIsNotNone(opts.graph_source) + self.assertTrue(opts.is_analytics_source) + self.assertFalse(opts.is_graph_source) + self.assertFalse(opts.is_default_source) + + opts.set_source_graph() + self.assertIsNotNone(opts.graph_source) + self.assertFalse(opts.is_analytics_source) + self.assertTrue(opts.is_graph_source) + self.assertFalse(opts.is_default_source) + +class GraphStatementTests(unittest.TestCase): + + def test_init(self): + # just make sure Statement attributes are accepted + kwargs = {'query_string': object(), + 'retry_policy': RetryPolicy(), + 'consistency_level': object(), + 'fetch_size': object(), + 'keyspace': object(), + 'custom_payload': object()} + statement = SimpleGraphStatement(**kwargs) + for k, v in kwargs.items(): + self.assertIs(getattr(statement, k), v) + + # but not a bogus parameter + kwargs['bogus'] = object() + self.assertRaises(TypeError, SimpleGraphStatement, **kwargs) + + +class GraphRowFactoryTests(unittest.TestCase): + + def test_object_row_factory(self): + col_names = [] # unused + rows = [object() for _ in range(10)] + self.assertEqual(single_object_row_factory(col_names, ((o,) for o in rows)), rows) + + def test_graph_result_row_factory(self): + col_names = [] # unused + rows = [json.dumps({'result': i}) for i in range(10)] + results = graph_result_row_factory(col_names, ((o,) for o in rows)) + for i, res in enumerate(results): + self.assertIsInstance(res, Result) + self.assertEqual(res.value, i) From a976380a53f963c4427e9eb994c8f6eaa8e9ec9d Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 12 Sep 2019 15:50:19 -0500 Subject: [PATCH 1611/2005] Include host_id in peers schema query and update tests --- cassandra/cluster.py | 2 +- tests/integration/advanced/cloud/__init__.py | 19 ++- .../advanced/cloud/test_cloud_schema.py | 118 ++++++++++++++++++ 3 files changed, 127 insertions(+), 12 deletions(-) create mode 100644 tests/integration/advanced/cloud/test_cloud_schema.py diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 90b0dfd8c7..c74558be60 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2928,7 +2928,7 @@ class ControlConnection(object): # Used only when token_metadata_enabled is set to False _SELECT_LOCAL_NO_TOKENS_RPC_ADDRESS = "SELECT rpc_address FROM system.local WHERE key='local'" - _SELECT_SCHEMA_PEERS = "SELECT peer, rpc_address, schema_version FROM system.peers" + _SELECT_SCHEMA_PEERS = "SELECT peer, host_id, rpc_address, schema_version FROM system.peers" _SELECT_SCHEMA_LOCAL = "SELECT schema_version FROM system.local WHERE key='local'" _is_shutdown = False diff --git a/tests/integration/advanced/cloud/__init__.py b/tests/integration/advanced/cloud/__init__.py index ce1e6cda54..271259e608 100644 --- a/tests/integration/advanced/cloud/__init__.py +++ b/tests/integration/advanced/cloud/__init__.py @@ -22,11 +22,17 @@ from cassandra.cluster import Cluster -from tests.integration import CLOUD_PROXY_PATH +from tests.integration import CLOUD_PROXY_PATH, USE_CASS_EXTERNAL + + +def setup_package(): + if CLOUD_PROXY_PATH and not USE_CASS_EXTERNAL: + start_cloud_proxy() def teardown_package(): - stop_cloud_proxy() + if not USE_CASS_EXTERNAL: + stop_cloud_proxy() class CloudProxyCluster(unittest.TestCase): @@ -39,11 +45,6 @@ class CloudProxyCluster(unittest.TestCase): cluster, connect = None, False session = None - @classmethod - def setUpClass(cls): - if CLOUD_PROXY_PATH is not None: - start_cloud_proxy() - @classmethod def connect(cls, creds, **kwargs): cloud_config = { @@ -52,10 +53,6 @@ def connect(cls, creds, **kwargs): cls.cluster = Cluster(cloud=cloud_config, protocol_version=4, **kwargs) cls.session = cls.cluster.connect(wait_for_all_pools=True) - @classmethod - def tearDownClass(cls): - stop_cloud_proxy() - def tearDown(self): if self.cluster: self.cluster.shutdown() diff --git a/tests/integration/advanced/cloud/test_cloud_schema.py b/tests/integration/advanced/cloud/test_cloud_schema.py new file mode 100644 index 0000000000..0a70fd0c2a --- /dev/null +++ b/tests/integration/advanced/cloud/test_cloud_schema.py @@ -0,0 +1,118 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License +""" +This is mostly copypasta from integration/long/test_schema.py + +TODO: Come up with way to run cloud and local tests without duplication +""" + +import logging +import time + +from cassandra import ConsistencyLevel +from cassandra.cluster import Cluster +from cassandra.query import SimpleStatement + +from tests.integration import execute_until_pass +from tests.integration.advanced.cloud import CloudProxyCluster + +log = logging.getLogger(__name__) + + +class CloudSchemaTests(CloudProxyCluster): + def test_recreates(self): + """ + Basic test for repeated schema creation and use, using many different keyspaces + """ + self.connect(self.creds) + session = self.session + + for _ in self.cluster.metadata.all_hosts(): + for keyspace_number in range(5): + keyspace = "ks_{0}".format(keyspace_number) + + if keyspace in self.cluster.metadata.keyspaces.keys(): + drop = "DROP KEYSPACE {0}".format(keyspace) + log.debug(drop) + execute_until_pass(session, drop) + + create = "CREATE KEYSPACE {0} WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': 3}}".format( + keyspace) + log.debug(create) + execute_until_pass(session, create) + + create = "CREATE TABLE {0}.cf (k int PRIMARY KEY, i int)".format(keyspace) + log.debug(create) + execute_until_pass(session, create) + + use = "USE {0}".format(keyspace) + log.debug(use) + execute_until_pass(session, use) + + insert = "INSERT INTO cf (k, i) VALUES (0, 0)" + log.debug(insert) + ss = SimpleStatement(insert, consistency_level=ConsistencyLevel.QUORUM) + execute_until_pass(session, ss) + + def test_for_schema_disagreement_attribute(self): + """ + Tests to ensure that schema disagreement is properly surfaced on the response future. + + Creates and destroys keyspaces/tables with various schema agreement timeouts set. + First part runs cql create/drop cmds with schema agreement set in such away were it will be impossible for agreement to occur during timeout. + It then validates that the correct value is set on the result. + Second part ensures that when schema agreement occurs, that the result set reflects that appropriately + + @since 3.1.0 + @jira_ticket PYTHON-458 + @expected_result is_schema_agreed is set appropriately on response thefuture + + @test_category schema + """ + # This should yield a schema disagreement + cloud_config = {'secure_connect_bundle': self.creds} + cluster = Cluster(max_schema_agreement_wait=0.001, protocol_version=4, cloud=cloud_config) + session = cluster.connect(wait_for_all_pools=True) + + rs = session.execute( + "CREATE KEYSPACE test_schema_disagreement WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}") + self.check_and_wait_for_agreement(session, rs, False) + rs = session.execute( + SimpleStatement("CREATE TABLE test_schema_disagreement.cf (key int PRIMARY KEY, value int)", + consistency_level=ConsistencyLevel.ALL)) + self.check_and_wait_for_agreement(session, rs, False) + rs = session.execute("DROP KEYSPACE test_schema_disagreement") + self.check_and_wait_for_agreement(session, rs, False) + cluster.shutdown() + + # These should have schema agreement + cluster = Cluster(protocol_version=4, max_schema_agreement_wait=100, cloud=cloud_config) + session = cluster.connect() + rs = session.execute( + "CREATE KEYSPACE test_schema_disagreement WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}") + self.check_and_wait_for_agreement(session, rs, True) + rs = session.execute( + SimpleStatement("CREATE TABLE test_schema_disagreement.cf (key int PRIMARY KEY, value int)", + consistency_level=ConsistencyLevel.ALL)) + self.check_and_wait_for_agreement(session, rs, True) + rs = session.execute("DROP KEYSPACE test_schema_disagreement") + self.check_and_wait_for_agreement(session, rs, True) + cluster.shutdown() + + def check_and_wait_for_agreement(self, session, rs, exepected): + # Wait for RESULT_KIND_SCHEMA_CHANGE message to arrive + time.sleep(1) + self.assertEqual(rs.response_future.is_schema_agreed, exepected) + if not rs.response_future.is_schema_agreed: + session.cluster.control_connection.wait_for_schema_agreement(wait_time=1000) \ No newline at end of file From d261b4eda510a53328f367bb987c77c31ff18991 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 13 Sep 2019 04:32:03 -0400 Subject: [PATCH 1612/2005] Skip dse graph tests for c* clusters --- tests/integration/advanced/__init__.py | 4 ++-- tests/integration/advanced/test_graph.py | 12 +++++++++--- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/tests/integration/advanced/__init__.py b/tests/integration/advanced/__init__.py index 119a9faa39..1316e3e747 100644 --- a/tests/integration/advanced/__init__.py +++ b/tests/integration/advanced/__init__.py @@ -132,7 +132,7 @@ def deserialize(cls, value, reader=None): TYPE_MAP["inet2"] = ["Inet()", ipaddress.IPv6Address("2001:db8:85a3:8d3:1319:8a2e:370:7348"), deserializer_plus_to_ipaddressv6] -if DSE_VERSION >= Version("5.1"): +if DSE_VERSION and DSE_VERSION >= Version("5.1"): TYPE_MAP["datetime1"]= ["Date()", datetime.date.today(), GraphSON1Deserializer.deserialize_date] TYPE_MAP["time1"] = ["Time()", datetime.time(12, 6, 12, 444), GraphSON1Deserializer.deserialize_time] TYPE_MAP["time2"] = ["Time()", datetime.time(12, 6, 12), GraphSON1Deserializer.deserialize_time] @@ -380,7 +380,7 @@ def fetchCustomGeoType(type): return None -geo_condition = DSE_VERSION < Version('5.1') +geo_condition = DSE_VERSION and DSE_VERSION < Version('5.1') def getPointType(): if geo_condition: return "Point()" diff --git a/tests/integration/advanced/test_graph.py b/tests/integration/advanced/test_graph.py index d143e570a7..7f977e6cea 100644 --- a/tests/integration/advanced/test_graph.py +++ b/tests/integration/advanced/test_graph.py @@ -44,14 +44,16 @@ generate_line_graph, generate_multi_field_graph, generate_large_complex_graph, validate_classic_vertex, \ validate_classic_edge, validate_path_result_type, validate_line_edge, validate_generic_vertex_result_type, \ fetchCustomGeoType, generate_type_graph_schema, TYPE_MAP -from tests.integration import PROTOCOL_VERSION, greaterthanorequaldse51, DSE_VERSION +from tests.integration import PROTOCOL_VERSION, greaterthanorequaldse51, DSE_VERSION, requiredse def setup_module(): - dse_options = {'graph': {'realtime_evaluation_timeout_in_seconds': 60}} - use_single_node_with_graph(dse_options=dse_options) + if DSE_VERSION: + dse_options = {'graph': {'realtime_evaluation_timeout_in_seconds': 60}} + use_single_node_with_graph(dse_options=dse_options) +@requiredse class BasicGraphTest(BasicGraphUnitTestCase): def test_basic_query(self): @@ -492,6 +494,7 @@ def test_execute_graph_row_factory(self): self.assertEqual(json.loads(rs[0]), {'result': 123}) +@requiredse class GraphTypesTests(BasicGraphUnitTestCase): def test_result_types(self): @@ -592,6 +595,7 @@ def _validate_type(self, vertex): self.assertIsInstance(prop['value'], typ) +@requiredse class GraphTimeoutTests(BasicGraphUnitTestCase): def test_should_wait_indefinitely_by_default(self): @@ -672,6 +676,7 @@ def test_server_timeout_less_then_request(self): self.assertTrue(isinstance(e, InvalidRequest) or isinstance(e, OperationTimedOut)) +@requiredse class GraphProfileTests(BasicGraphUnitTestCase): def test_graph_profile(self): """ @@ -726,6 +731,7 @@ def test_graph_profile(self): local_session.execute_graph('java.util.concurrent.TimeUnit.MILLISECONDS.sleep(2000L);', execution_profile='exec_short_timeout') +@requiredse class GraphMetadataTest(BasicGraphUnitTestCase): @greaterthanorequaldse51 From eba2cc0ae6f9e9a46d16c44a784ee7dbd0d23f60 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 13 Sep 2019 04:49:14 -0400 Subject: [PATCH 1613/2005] build.yaml: run dse advanced tests --- build.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/build.yaml b/build.yaml index 2ba6d42151..7e6ee2f842 100644 --- a/build.yaml +++ b/build.yaml @@ -258,6 +258,11 @@ build: PROTOCOL_VERSION=1 EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results_v1.xml tests/integration/standard/ || true fi + if [ -n "$DSE_VERSION" ] && ! [[ $DSE_VERSION == "4.8"* ]]; then + echo "==========RUNNING DSE INTEGRATION TESTS==========" + EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_DIR=$CCM_INSTALL_DIR DSE_VERSION=$CCM_VERSION ADS_HOME=$HOME/ VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=dse_results.xml tests/integration/advanced/ || true + fi + if [ -z "$EXCLUDE_LONG" ]; then echo "==========RUNNING LONG INTEGRATION TESTS==========" EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true From 1b1a482bf1b4a9447b5a73f845340dd586f0bd91 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 13 Sep 2019 05:04:11 -0400 Subject: [PATCH 1614/2005] Add test_geometry.py --- tests/integration/advanced/__init__.py | 39 ++-- tests/integration/advanced/test_geometry.py | 247 ++++++++++++++++++++ 2 files changed, 269 insertions(+), 17 deletions(-) create mode 100644 tests/integration/advanced/test_geometry.py diff --git a/tests/integration/advanced/__init__.py b/tests/integration/advanced/__init__.py index 1316e3e747..bb01526c57 100644 --- a/tests/integration/advanced/__init__.py +++ b/tests/integration/advanced/__init__.py @@ -309,13 +309,15 @@ def session_setup(self): self.cass_version, self.cql_version = get_server_versions() def setUp(self): - self.session_setup() - self.reset_graph() + if DSE_VERSION: + self.session_setup() + self.reset_graph() - self.clear_schema() + self.clear_schema() def tearDown(self): - self.cluster.shutdown() + if DSE_VERSION: + self.cluster.shutdown() def clear_schema(self): self.session.execute_graph('schema.clear()') @@ -344,18 +346,20 @@ def session_setup(cls): @classmethod def setUpClass(cls): - cls.session_setup() - cls.reset_graph() - profiles = cls.cluster.profile_manager.profiles - profiles[EXEC_PROFILE_GRAPH_DEFAULT].request_timeout = 60 - profiles[EXEC_PROFILE_GRAPH_DEFAULT].graph_options.graph_name = cls.graph_name - profiles[EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT].request_timeout = 60 - profiles[EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT].graph_options.graph_name = cls.graph_name - cls.clear_schema() + if DSE_VERSION: + cls.session_setup() + cls.reset_graph() + profiles = cls.cluster.profile_manager.profiles + profiles[EXEC_PROFILE_GRAPH_DEFAULT].request_timeout = 60 + profiles[EXEC_PROFILE_GRAPH_DEFAULT].graph_options.graph_name = cls.graph_name + profiles[EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT].request_timeout = 60 + profiles[EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT].graph_options.graph_name = cls.graph_name + cls.clear_schema() @classmethod def tearDownClass(cls): - cls.cluster.shutdown() + if DSE_VERSION: + cls.cluster.shutdown() @classmethod def clear_schema(self): @@ -406,7 +410,6 @@ def getPolygonType(): return "Polygon().withGeoBounds()" - class BasicGeometricUnitTestCase(BasicKeyspaceUnitTestCase): """ This base test class is used by all the geomteric tests. It contains class level teardown and setup @@ -424,12 +427,14 @@ def common_dse_setup(cls, rf, keyspace_creation=True): @classmethod def setUpClass(cls): - cls.common_dse_setup(1) - cls.initalizeTables() + if DSE_VERSION: + cls.common_dse_setup(1) + cls.initalizeTables() @classmethod def tearDownClass(cls): - drop_keyspace_shutdown_cluster(cls.ks_name, cls.session, cls.cluster) + if DSE_VERSION: + drop_keyspace_shutdown_cluster(cls.ks_name, cls.session, cls.cluster) @classmethod def initalizeTables(cls): diff --git a/tests/integration/advanced/test_geometry.py b/tests/integration/advanced/test_geometry.py new file mode 100644 index 0000000000..e731d8bbc3 --- /dev/null +++ b/tests/integration/advanced/test_geometry.py @@ -0,0 +1,247 @@ +# Copyright DataStax, Inc. +# +# Licensed under the DataStax DSE Driver License; +# you may not use this file except in compliance with the License. +# +# You may obtain a copy of the License at +# +# http://www.datastax.com/terms/datastax-dse-driver-license-terms + + +from tests.integration import DSE_VERSION, requiredse +from tests.integration.advanced import BasicGeometricUnitTestCase, use_single_node_with_graph +from cassandra.util import OrderedMap, sortedset +from collections import namedtuple + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa +from uuid import uuid1 +from cassandra.util import Point, LineString, Polygon +from cassandra.cqltypes import LineStringType, PointType, PolygonType + + +def setup_module(): + if DSE_VERSION: + use_single_node_with_graph() + + +class AbstractGeometricTypeTest(): + + original_value = "" + + def test_should_insert_simple(self): + """ + This tests will attempt to insert a point, polygon, or line, using simple inline formating. + @since 1.0.0 + @jira_ticket PYTHON-456 + @test_category dse geometric + @expected_result geometric types should be able to be inserted and queried. + """ + uuid_key = uuid1() + self.session.execute("INSERT INTO tbl (k, g) VALUES (%s, %s)", [uuid_key, self.original_value]) + self.validate('g', uuid_key, self.original_value) + + def test_should_insert_simple_prepared(self): + """ + This tests will attempt to insert a point, polygon, or line, using prepared statements. + @since 1.0.0 + @jira_ticket PYTHON-456 + @test_category dse geometric + @expected_result geometric types should be able to be inserted and queried. + """ + uuid_key = uuid1() + prepared = self.session.prepare("INSERT INTO tbl (k, g) VALUES (?, ?)") + self.session.execute(prepared, (uuid_key, self.original_value)) + self.validate('g', uuid_key, self.original_value) + + def test_should_insert_simple_prepared_with_bound(self): + """ + This tests will attempt to insert a point, polygon, or line, using prepared statements and bind. + @since 1.0.0 + @jira_ticket PYTHON-456 + @test_category dse geometric + @expected_result geometric types should be able to be inserted and queried. + """ + uuid_key = uuid1() + prepared = self.session.prepare("INSERT INTO tbl (k, g) VALUES (?, ?)") + bound_statement = prepared.bind((uuid_key, self.original_value)) + self.session.execute(bound_statement) + self.validate('g', uuid_key, self.original_value) + + def test_should_insert_as_list(self): + """ + This tests will attempt to insert a point, polygon, or line, as values of list. + @since 1.0.0 + @jira_ticket PYTHON-456 + @test_category dse geometric + @expected_result geometric types should be able to be inserted and queried as a list. + """ + uuid_key = uuid1() + prepared = self.session.prepare("INSERT INTO tbl (k, l) VALUES (?, ?)") + bound_statement = prepared.bind((uuid_key, [self.original_value])) + self.session.execute(bound_statement) + self.validate('l', uuid_key, [self.original_value]) + + def test_should_insert_as_set(self): + """ + This tests will attempt to insert a point, polygon, or line, as values of set. + @since 1.0.0 + @jira_ticket PYTHON-456 + @test_category dse geometric + @expected_result geometric types should be able to be inserted and queried as a set. + """ + uuid_key = uuid1() + prepared = self.session.prepare("INSERT INTO tbl (k, s) VALUES (?, ?)") + bound_statement = prepared.bind((uuid_key, sortedset([self.original_value]))) + self.session.execute(bound_statement) + self.validate('s', uuid_key, sortedset([self.original_value])) + + def test_should_insert_as_map_keys(self): + """ + This tests will attempt to insert a point, polygon, or line, as keys of a map. + @since 1.0.0 + @jira_ticket PYTHON-456 + @test_category dse geometric + @expected_result geometric types should be able to be inserted and queried as keys of a map. + """ + uuid_key = uuid1() + prepared = self.session.prepare("INSERT INTO tbl (k, m0) VALUES (?, ?)") + bound_statement = prepared.bind((uuid_key, OrderedMap(zip([self.original_value], [1])))) + self.session.execute(bound_statement) + self.validate('m0', uuid_key, OrderedMap(zip([self.original_value], [1]))) + + def test_should_insert_as_map_values(self): + """ + This tests will attempt to insert a point, polygon, or line, as values of a map. + @since 1.0.0 + @jira_ticket PYTHON-456 + @test_category dse geometric + @expected_result geometric types should be able to be inserted and queried as values of a map. + """ + uuid_key = uuid1() + prepared = self.session.prepare("INSERT INTO tbl (k, m1) VALUES (?, ?)") + bound_statement = prepared.bind((uuid_key, OrderedMap(zip([1], [self.original_value])))) + self.session.execute(bound_statement) + self.validate('m1', uuid_key, OrderedMap(zip([1], [self.original_value]))) + + def test_should_insert_as_tuple(self): + """ + This tests will attempt to insert a point, polygon, or line, as values of a tuple. + @since 1.0.0 + @jira_ticket PYTHON-456 + @test_category dse geometric + @expected_result geometric types should be able to be inserted and queried as values of a tuple. + """ + uuid_key = uuid1() + prepared = self.session.prepare("INSERT INTO tbl (k, t) VALUES (?, ?)") + bound_statement = prepared.bind((uuid_key, (self.original_value, self.original_value, self.original_value))) + self.session.execute(bound_statement) + self.validate('t', uuid_key, (self.original_value, self.original_value, self.original_value)) + + def test_should_insert_as_udt(self): + """ + This tests will attempt to insert a point, polygon, or line, as members of a udt. + @since 1.0.0 + @jira_ticket PYTHON-456 + @test_category dse geometric + @expected_result geometric types should be able to be inserted and queried as members of a udt. + """ + UDT1 = namedtuple('udt1', ('g')) + self.cluster.register_user_type(self.ks_name, 'udt1', UDT1) + uuid_key = uuid1() + prepared = self.session.prepare("INSERT INTO tbl (k, u) values (?, ?)") + bound_statement = prepared.bind((uuid_key, UDT1(self.original_value))) + self.session.execute(bound_statement) + rs = self.session.execute("SELECT {0} from {1} where k={2}".format('u', 'tbl', uuid_key)) + retrieved_udt = rs[0]._asdict()['u'] + + self.assertEqual(retrieved_udt.g, self.original_value) + + def test_should_accept_as_partition_key(self): + """ + This tests will attempt to insert a point, polygon, or line, as a partition key. + @since 1.0.0 + @jira_ticket PYTHON-456 + @test_category dse geometric + @expected_result geometric types should be able to be inserted and queried as a partition key. + """ + prepared = self.session.prepare("INSERT INTO tblpk (k, v) VALUES (?, ?)") + bound_statement = prepared.bind((self.original_value, 1)) + self.session.execute(bound_statement) + rs = self.session.execute("SELECT k, v FROM tblpk") + foundpk = rs[0]._asdict()['k'] + self.assertEqual(foundpk, self.original_value) + + def validate(self, value, key, expected): + """ + Simple utility method used for validation of inserted types. + """ + rs = self.session.execute("SELECT {0} from tbl where k={1}".format(value, key)) + retrieved = rs[0]._asdict()[value] + self.assertEqual(expected, retrieved) + + def test_insert_empty_with_string(self): + """ + This tests will attempt to insert a point, polygon, or line, as Empty + @since 1.0.0 + @jira_ticket PYTHON-481 + @test_category dse geometric + @expected_result EMPTY as a keyword should be honored + """ + uuid_key = uuid1() + self.session.execute("INSERT INTO tbl (k, g) VALUES (%s, %s)", [uuid_key, self.empty_statement]) + self.validate('g', uuid_key, self.empty_value) + + def test_insert_empty_with_object(self): + """ + This tests will attempt to insert a point, polygon, or line, as Empty + @since 1.0.0 + @jira_ticket PYTHON-481 + @test_category dse geometric + @expected_result EMPTY as a keyword should be used with empty objects + """ + uuid_key = uuid1() + prepared = self.session.prepare("INSERT INTO tbl (k, g) VALUES (?, ?)") + self.session.execute(prepared, (uuid_key, self.empty_value)) + self.validate('g', uuid_key, self.empty_value) + + +@requiredse +class BasicGeometricPointTypeTest(AbstractGeometricTypeTest, BasicGeometricUnitTestCase): + """ + Runs all the geometric tests against PointType + """ + cql_type_name = "'{0}'".format(PointType.typename) + original_value = Point(.5, .13) + + @unittest.skip("Empty String") + def test_insert_empty_with_string(self): + pass + + @unittest.skip("Empty String") + def test_insert_empty_with_object(self): + pass + + +@requiredse +class BasicGeometricLineStringTypeTest(AbstractGeometricTypeTest, BasicGeometricUnitTestCase): + """ + Runs all the geometric tests against LineStringType + """ + cql_type_name = cql_type_name = "'{0}'".format(LineStringType.typename) + original_value = LineString(((1, 2), (3, 4), (9871234, 1235487215))) + empty_statement = 'LINESTRING EMPTY' + empty_value = LineString() + + +@requiredse +class BasicGeometricPolygonTypeTest(AbstractGeometricTypeTest, BasicGeometricUnitTestCase): + """ + Runs all the geometric tests against PolygonType + """ + cql_type_name = cql_type_name = "'{0}'".format(PolygonType.typename) + original_value = Polygon([(10.0, 10.0), (110.0, 10.0), (110., 110.0), (10., 110.0), (10., 10.0)], [[(20., 20.0), (20., 30.0), (30., 30.0), (30., 20.0), (20., 20.0)], [(40., 20.0), (40., 30.0), (50., 30.0), (50., 20.0), (40., 20.0)]]) + empty_statement = 'POLYGON EMPTY' + empty_value = Polygon() From 82e3529404a20b4be48bdbf1816fe3c123e50a93 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 13 Sep 2019 05:47:06 -0400 Subject: [PATCH 1615/2005] Add advanced/test_auth.py --- tests/integration/advanced/test_auth.py | 530 ++++++++++++++++++++++++ 1 file changed, 530 insertions(+) create mode 100644 tests/integration/advanced/test_auth.py diff --git a/tests/integration/advanced/test_auth.py b/tests/integration/advanced/test_auth.py new file mode 100644 index 0000000000..5db3033664 --- /dev/null +++ b/tests/integration/advanced/test_auth.py @@ -0,0 +1,530 @@ +# Copyright DataStax, Inc. +# +# Licensed under the DataStax DSE Driver License; +# you may not use this file except in compliance with the License. +# +# You may obtain a copy of the License at +# +# http://www.datastax.com/terms/datastax-dse-driver-license-terms +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa +import logging +import os +import subprocess +import time + +from ccmlib.dse_cluster import DseCluster +from nose.plugins.attrib import attr +from packaging.version import Version + +from cassandra.auth import (DSEGSSAPIAuthProvider, DSEPlainTextAuthProvider, + SaslAuthProvider, TransitionalModePlainTextAuthProvider) +from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT, Cluster, NoHostAvailable +from cassandra.protocol import Unauthorized +from cassandra.query import SimpleStatement +from tests.integration import (get_cluster, greaterthanorequaldse51, + remove_cluster, requiredse) +from tests.integration.advanced import (ADS_HOME, DSE_VERSION, + generate_classic, reset_graph, + use_single_node_with_graph) + + +log = logging.getLogger(__name__) + + +def setup_module(): + if DSE_VERSION: + use_single_node_with_graph() + + +def teardown_module(): + if DSE_VERSION: + remove_cluster() # this test messes with config + + +def wait_role_manager_setup_then_execute(session, statements): + for s in statements: + e = None + for attempt in range(3): + try: + session.execute(s) + break + except Exception as e: + time.sleep(5) + else: # if we didn't reach `break` + if e is not None: + raise e + + +@attr('long') +@requiredse +class BasicDseAuthTest(unittest.TestCase): + + @classmethod + def setUpClass(self): + """ + This will setup the necessary infrastructure to run our authentication tests. It requres the ADS_HOME environment variable + and our custom embedded apache directory server jar in order to run. + """ + if not DSE_VERSION: + return + + clear_kerberos_tickets() + self.cluster = None + + # Setup variables for various keytab and other files + self.conf_file_dir = os.path.join(ADS_HOME, "conf/") + self.krb_conf = os.path.join(self.conf_file_dir, "krb5.conf") + self.dse_keytab = os.path.join(self.conf_file_dir, "dse.keytab") + self.dseuser_keytab = os.path.join(self.conf_file_dir, "dseuser.keytab") + self.cassandra_keytab = os.path.join(self.conf_file_dir, "cassandra.keytab") + self.bob_keytab = os.path.join(self.conf_file_dir, "bob.keytab") + self.charlie_keytab = os.path.join(self.conf_file_dir, "charlie.keytab") + actual_jar = os.path.join(ADS_HOME, "embedded-ads.jar") + + # Create configuration directories if they don't already exists + if not os.path.exists(self.conf_file_dir): + os.makedirs(self.conf_file_dir) + if not os.path.exists(actual_jar): + raise RuntimeError('could not find {}'.format(actual_jar)) + log.warning("Starting adserver") + # Start the ADS, this will create the keytab con configuration files listed above + self.proc = subprocess.Popen(['java', '-jar', actual_jar, '-k', '--confdir', self.conf_file_dir], shell=False) + time.sleep(10) + # TODO poll for server to come up + + log.warning("Starting adserver started") + ccm_cluster = get_cluster() + log.warning("fetching tickets") + # Stop cluster if running and configure it with the correct options + ccm_cluster.stop() + if isinstance(ccm_cluster, DseCluster): + # Setup kerberos options in cassandra.yaml + config_options = {'kerberos_options': {'keytab': self.dse_keytab, + 'service_principal': 'dse/_HOST@DATASTAX.COM', + 'qop': 'auth'}, + 'authentication_options': {'enabled': 'true', + 'default_scheme': 'kerberos', + 'scheme_permissions': 'true', + 'allow_digest_with_kerberos': 'true', + 'plain_text_without_ssl': 'warn', + 'transitional_mode': 'disabled'}, + 'authorization_options': {'enabled': 'true'}} + + krb5java = "-Djava.security.krb5.conf=" + self.krb_conf + # Setup dse authenticator in cassandra.yaml + ccm_cluster.set_configuration_options({ + 'authenticator': 'com.datastax.bdp.cassandra.auth.DseAuthenticator', + 'authorizer': 'com.datastax.bdp.cassandra.auth.DseAuthorizer' + }) + ccm_cluster.set_dse_configuration_options(config_options) + ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True, jvm_args=[krb5java]) + else: + log.error("Cluster is not dse cluster test will fail") + + @classmethod + def tearDownClass(self): + """ + Terminates running ADS (Apache directory server). + """ + if not DSE_VERSION: + return + + self.proc.terminate() + + def tearDown(self): + """ + This will clear any existing kerberos tickets by using kdestroy + """ + clear_kerberos_tickets() + if self.cluster: + self.cluster.shutdown() + + def refresh_kerberos_tickets(self, keytab_file, user_name, krb_conf): + """ + Fetches a new ticket for using the keytab file and username provided. + """ + self.ads_pid = subprocess.call(['kinit', '-t', keytab_file, user_name], env={'KRB5_CONFIG': krb_conf}, shell=False) + + def connect_and_query(self, auth_provider, query=None): + """ + Runs a simple system query with the auth_provided specified. + """ + os.environ['KRB5_CONFIG'] = self.krb_conf + self.cluster = Cluster(auth_provider=auth_provider) + self.session = self.cluster.connect() + query = query if query else "SELECT * FROM system.local" + statement = SimpleStatement(query) + rs = self.session.execute(statement) + return rs + + def test_should_not_authenticate_with_bad_user_ticket(self): + """ + This tests will attempt to authenticate with a user that has a valid ticket, but is not a valid dse user. + @since 1.0.0 + @jira_ticket PYTHON-457 + @test_category dse auth + @expected_result NoHostAvailable exception should be thrown + + """ + self.refresh_kerberos_tickets(self.dseuser_keytab, "dseuser@DATASTAX.COM", self.krb_conf) + auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"]) + self.assertRaises(NoHostAvailable, self.connect_and_query, auth_provider) + + def test_should_not_athenticate_without_ticket(self): + """ + This tests will attempt to authenticate with a user that is valid but has no ticket + @since 1.0.0 + @jira_ticket PYTHON-457 + @test_category dse auth + @expected_result NoHostAvailable exception should be thrown + + """ + auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"]) + self.assertRaises(NoHostAvailable, self.connect_and_query, auth_provider) + + def test_connect_with_kerberos(self): + """ + This tests will attempt to authenticate with a user that is valid and has a ticket + @since 1.0.0 + @jira_ticket PYTHON-457 + @test_category dse auth + @expected_result Client should be able to connect and run a basic query + + """ + self.refresh_kerberos_tickets(self.cassandra_keytab, "cassandra@DATASTAX.COM", self.krb_conf) + auth_provider = DSEGSSAPIAuthProvider() + rs = self.connect_and_query(auth_provider) + self.assertIsNotNone(rs) + connections = [c for holders in self.cluster.get_connection_holders() for c in holders.get_connections()] + # Check to make sure our server_authenticator class is being set appropriate + for connection in connections: + self.assertTrue('DseAuthenticator' in connection.authenticator.server_authenticator_class) + + def test_connect_with_kerberos_and_graph(self): + """ + This tests will attempt to authenticate with a user and execute a graph query + @since 1.0.0 + @jira_ticket PYTHON-457 + @test_category dse auth + @expected_result Client should be able to connect and run a basic graph query with authentication + + """ + self.refresh_kerberos_tickets(self.cassandra_keytab, "cassandra@DATASTAX.COM", self.krb_conf) + + auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"]) + rs = self.connect_and_query(auth_provider) + self.assertIsNotNone(rs) + reset_graph(self.session, self._testMethodName.lower()) + profiles = self.cluster.profile_manager.profiles + profiles[EXEC_PROFILE_GRAPH_DEFAULT].graph_options.graph_name = self._testMethodName.lower() + generate_classic(self.session) + + rs = self.session.execute_graph('g.V()') + self.assertIsNotNone(rs) + + def test_connect_with_kerberos_host_not_resolved(self): + """ + This tests will attempt to authenticate with IP, this will fail on osx. + The success or failure of this test is dependent on a reverse dns lookup which can be impacted by your environment + if it fails don't panic. + @since 1.0.0 + @jira_ticket PYTHON-566 + @test_category dse auth + @expected_result Client should error when ip is used + + """ + self.refresh_kerberos_tickets(self.cassandra_keytab, "cassandra@DATASTAX.COM", self.krb_conf) + DSEGSSAPIAuthProvider(service='dse', qops=["auth"], resolve_host_name=False) + + def test_connect_with_explicit_principal(self): + """ + This tests will attempt to authenticate using valid and invalid user principals + @since 1.0.0 + @jira_ticket PYTHON-574 + @test_category dse auth + @expected_result Client principals should be used by the underlying mechanism + + """ + + # Connect with valid principal + self.refresh_kerberos_tickets(self.cassandra_keytab, "cassandra@DATASTAX.COM", self.krb_conf) + auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal="cassandra@DATASTAX.COM") + self.connect_and_query(auth_provider) + connections = [c for holders in self.cluster.get_connection_holders() for c in holders.get_connections()] + + # Check to make sure our server_authenticator class is being set appropriate + for connection in connections: + self.assertTrue('DseAuthenticator' in connection.authenticator.server_authenticator_class) + + # Use invalid principal + auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal="notauser@DATASTAX.COM") + self.assertRaises(NoHostAvailable, self.connect_and_query, auth_provider) + + @greaterthanorequaldse51 + def test_proxy_login_with_kerberos(self): + """ + Test that the proxy login works with kerberos. + """ + # Set up users for proxy login test + self._setup_for_proxy() + + query = "select * from testkrbproxy.testproxy" + + # Try normal login with Charlie + self.refresh_kerberos_tickets(self.charlie_keytab, "charlie@DATASTAX.COM", self.krb_conf) + auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal="charlie@DATASTAX.COM") + self.connect_and_query(auth_provider, query=query) + + # Try proxy login with bob + self.refresh_kerberos_tickets(self.bob_keytab, "bob@DATASTAX.COM", self.krb_conf) + auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal="bob@DATASTAX.COM", + authorization_id='charlie@DATASTAX.COM') + self.connect_and_query(auth_provider, query=query) + + # Try logging with bob without mentioning charlie + self.refresh_kerberos_tickets(self.bob_keytab, "bob@DATASTAX.COM", self.krb_conf) + auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal="bob@DATASTAX.COM") + self.assertRaises(Unauthorized, self.connect_and_query, auth_provider, query=query) + + self._remove_proxy_setup() + + @greaterthanorequaldse51 + def test_proxy_login_with_kerberos_forbidden(self): + """ + Test that the proxy login fail when proxy role is not granted + """ + # Set up users for proxy login test + self._setup_for_proxy(False) + query = "select * from testkrbproxy.testproxy" + + # Try normal login with Charlie + self.refresh_kerberos_tickets(self.bob_keytab, "bob@DATASTAX.COM", self.krb_conf) + auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal="bob@DATASTAX.COM", + authorization_id='charlie@DATASTAX.COM') + self.assertRaises(NoHostAvailable, self.connect_and_query, auth_provider, query=query) + + self.refresh_kerberos_tickets(self.bob_keytab, "bob@DATASTAX.COM", self.krb_conf) + auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal="bob@DATASTAX.COM") + self.assertRaises(Unauthorized, self.connect_and_query, auth_provider, query=query) + + self._remove_proxy_setup() + + def _remove_proxy_setup(self): + os.environ['KRB5_CONFIG'] = self.krb_conf + self.refresh_kerberos_tickets(self.cassandra_keytab, "cassandra@DATASTAX.COM", self.krb_conf) + auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal='cassandra@DATASTAX.COM') + cluster = Cluster(auth_provider=auth_provider) + session = cluster.connect() + + session.execute("REVOKE PROXY.LOGIN ON ROLE '{0}' FROM '{1}'".format('charlie@DATASTAX.COM', 'bob@DATASTAX.COM')) + + session.execute("DROP ROLE IF EXISTS '{0}';".format('bob@DATASTAX.COM')) + session.execute("DROP ROLE IF EXISTS '{0}';".format('charlie@DATASTAX.COM')) + + # Create a keyspace and allow only charlie to query it. + + session.execute("DROP KEYSPACE testkrbproxy") + + cluster.shutdown() + + def _setup_for_proxy(self, grant=True): + os.environ['KRB5_CONFIG'] = self.krb_conf + self.refresh_kerberos_tickets(self.cassandra_keytab, "cassandra@DATASTAX.COM", self.krb_conf) + auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal='cassandra@DATASTAX.COM') + cluster = Cluster(auth_provider=auth_provider) + session = cluster.connect() + + stmts = [ + "CREATE ROLE IF NOT EXISTS '{0}' WITH LOGIN = TRUE;".format('bob@DATASTAX.COM'), + "CREATE ROLE IF NOT EXISTS '{0}' WITH LOGIN = TRUE;".format('bob@DATASTAX.COM'), + "GRANT EXECUTE ON ALL AUTHENTICATION SCHEMES to 'bob@DATASTAX.COM'", + "CREATE ROLE IF NOT EXISTS '{0}' WITH LOGIN = TRUE;".format('charlie@DATASTAX.COM'), + "GRANT EXECUTE ON ALL AUTHENTICATION SCHEMES to 'charlie@DATASTAX.COM'", + # Create a keyspace and allow only charlie to query it. + "CREATE KEYSPACE testkrbproxy WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}", + "CREATE TABLE testkrbproxy.testproxy (id int PRIMARY KEY, value text)", + "GRANT ALL PERMISSIONS ON KEYSPACE testkrbproxy to '{0}'".format('charlie@DATASTAX.COM'), + ] + + if grant: + stmts.append("GRANT PROXY.LOGIN ON ROLE '{0}' to '{1}'".format('charlie@DATASTAX.COM', 'bob@DATASTAX.COM')) + + wait_role_manager_setup_then_execute(session, stmts) + + cluster.shutdown() + + +def clear_kerberos_tickets(): + subprocess.call(['kdestroy'], shell=False) + + +@attr('long') +@requiredse +class BaseDseProxyAuthTest(unittest.TestCase): + + @classmethod + def setUpClass(self): + """ + This will setup the necessary infrastructure to run unified authentication tests. + """ + if not DSE_VERSION or DSE_VERSION < Version('5.1'): + return + self.cluster = None + + ccm_cluster = get_cluster() + # Stop cluster if running and configure it with the correct options + ccm_cluster.stop() + if isinstance(ccm_cluster, DseCluster): + # Setup dse options in dse.yaml + config_options = {'authentication_options': {'enabled': 'true', + 'default_scheme': 'internal', + 'scheme_permissions': 'true', + 'transitional_mode': 'normal'}, + 'authorization_options': {'enabled': 'true'} + } + + # Setup dse authenticator in cassandra.yaml + ccm_cluster.set_configuration_options({ + 'authenticator': 'com.datastax.bdp.cassandra.auth.DseAuthenticator', + 'authorizer': 'com.datastax.bdp.cassandra.auth.DseAuthorizer' + }) + ccm_cluster.set_dse_configuration_options(config_options) + ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True) + else: + log.error("Cluster is not dse cluster test will fail") + + # Create users and test keyspace + self.user_role = 'user1' + self.server_role = 'server' + self.root_cluster = Cluster(auth_provider=DSEPlainTextAuthProvider('cassandra', 'cassandra')) + self.root_session = self.root_cluster.connect() + + stmts = [ + "CREATE USER {0} WITH PASSWORD '{1}'".format(self.server_role, self.server_role), + "CREATE USER {0} WITH PASSWORD '{1}'".format(self.user_role, self.user_role), + "CREATE KEYSPACE testproxy WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}", + "CREATE TABLE testproxy.testproxy (id int PRIMARY KEY, value text)", + "GRANT ALL PERMISSIONS ON KEYSPACE testproxy to {0}".format(self.user_role) + ] + + wait_role_manager_setup_then_execute(self.root_session, stmts) + + @classmethod + def tearDownClass(self): + """ + Shutdown the root session. + """ + if not DSE_VERSION or DSE_VERSION < Version('5.1'): + return + self.root_session.execute('DROP KEYSPACE testproxy;') + self.root_session.execute('DROP USER {0}'.format(self.user_role)) + self.root_session.execute('DROP USER {0}'.format(self.server_role)) + self.root_cluster.shutdown() + + def tearDown(self): + """ + Shutdown the cluster and reset proxy permissions + """ + self.cluster.shutdown() + + self.root_session.execute("REVOKE PROXY.LOGIN ON ROLE {0} from {1}".format(self.user_role, self.server_role)) + self.root_session.execute("REVOKE PROXY.EXECUTE ON ROLE {0} from {1}".format(self.user_role, self.server_role)) + + def grant_proxy_login(self): + """ + Grant PROXY.LOGIN permission on a role to a specific user. + """ + self.root_session.execute("GRANT PROXY.LOGIN on role {0} to {1}".format(self.user_role, self.server_role)) + + def grant_proxy_execute(self): + """ + Grant PROXY.EXECUTE permission on a role to a specific user. + """ + self.root_session.execute("GRANT PROXY.EXECUTE on role {0} to {1}".format(self.user_role, self.server_role)) + + +@attr('long') +@greaterthanorequaldse51 +class DseProxyAuthTest(BaseDseProxyAuthTest): + """ + Tests Unified Auth. Proxy Login using SASL and Proxy Execute. + """ + + @classmethod + def get_sasl_options(self, mechanism='PLAIN'): + sasl_options = { + "service": 'dse', + "username": 'server', + "mechanism": mechanism, + 'password': self.server_role, + 'authorization_id': self.user_role + } + return sasl_options + + def connect_and_query(self, auth_provider, execute_as=None, query="SELECT * FROM testproxy.testproxy"): + self.cluster = Cluster(auth_provider=auth_provider) + self.session = self.cluster.connect() + rs = self.session.execute(query, execute_as=execute_as) + return rs + + def test_proxy_login_forbidden(self): + """ + Test that a proxy login is forbidden by default for a user. + @since 2.0.0 + @jira_ticket PYTHON-662 + @test_category dse auth + @expected_result connect and query should not be allowed + """ + auth_provider = SaslAuthProvider(**self.get_sasl_options()) + with self.assertRaises(Unauthorized): + self.connect_and_query(auth_provider) + + def test_proxy_login_allowed(self): + """ + Test that a proxy login is allowed with proper permissions. + @since 2.0.0 + @jira_ticket PYTHON-662 + @test_category dse auth + @expected_result connect and query should be allowed + """ + auth_provider = SaslAuthProvider(**self.get_sasl_options()) + self.grant_proxy_login() + self.connect_and_query(auth_provider) + + def test_proxy_execute_forbidden(self): + """ + Test that a proxy execute is forbidden by default for a user. + @since 2.0.0 + @jira_ticket PYTHON-662 + @test_category dse auth + @expected_result connect and query should not be allowed + """ + auth_provider = DSEPlainTextAuthProvider(self.server_role, self.server_role) + with self.assertRaises(Unauthorized): + self.connect_and_query(auth_provider, execute_as=self.user_role) + + def test_proxy_execute_allowed(self): + """ + Test that a proxy execute is allowed with proper permissions. + @since 2.0.0 + @jira_ticket PYTHON-662 + @test_category dse auth + @expected_result connect and query should be allowed + """ + auth_provider = DSEPlainTextAuthProvider(self.server_role, self.server_role) + self.grant_proxy_execute() + self.connect_and_query(auth_provider, execute_as=self.user_role) + + def test_connection_with_transitional_mode(self): + """ + Test that the driver can connect using TransitionalModePlainTextAuthProvider + @since 3.12 + @jira_ticket PYTHON-831 + @test_category dse auth + @expected_result connect and query should be allowed + """ + auth_provider = TransitionalModePlainTextAuthProvider() + self.assertIsNotNone(self.connect_and_query(auth_provider, query="SELECT * from system.local")) From 037862d8d2ae886d71edd5ad565d071dc4daaedf Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 13 Sep 2019 06:40:40 -0400 Subject: [PATCH 1616/2005] Rename protocol v1/v2 tests results file --- build.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build.yaml b/build.yaml index 7e6ee2f842..afff107164 100644 --- a/build.yaml +++ b/build.yaml @@ -253,9 +253,9 @@ build: # TODO: Temporary, don't merge this 2.1 stuff... if [ "$CCM_CASSANDRA_VERSION" == "2.1.21" ]; then echo "Testing cassandra 2.1 with protocol V2" - PROTOCOL_VERSION=2 EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results_v2.xml tests/integration/standard/ || true + PROTOCOL_VERSION=2 EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_v2_results.xml tests/integration/standard/ || true echo "Testing cassandra 2.1 with protocol V1" - PROTOCOL_VERSION=1 EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results_v1.xml tests/integration/standard/ || true + PROTOCOL_VERSION=1 EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_v1_results.xml tests/integration/standard/ || true fi if [ -n "$DSE_VERSION" ] && ! [[ $DSE_VERSION == "4.8"* ]]; then From 164e1695ff1aa2484b3b5cea871ebaf0633b22b5 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 13 Sep 2019 10:01:35 -0500 Subject: [PATCH 1617/2005] Add cassandra.cloud to setup.py --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index b79774476b..2712897112 100644 --- a/setup.py +++ b/setup.py @@ -416,7 +416,7 @@ def run_setup(extensions): url='http://github.com/datastax/python-driver', author='Tyler Hobbs', author_email='tyler@datastax.com', - packages=['cassandra', 'cassandra.io', 'cassandra.cqlengine'], + packages=['cassandra', 'cassandra.io', 'cassandra.cqlengine', 'cassandra.cloud'], keywords='cassandra,cql,orm', include_package_data=True, install_requires=dependencies, From 9ce13c170e229ef1f85c839879d53c5de8681a21 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 13 Sep 2019 12:01:30 -0500 Subject: [PATCH 1618/2005] Rest of the unit tests --- tests/unit/advanced/__init__.py | 13 + tests/unit/advanced/test_auth.py | 41 +++ tests/unit/advanced/test_execution_profile.py | 34 +++ tests/unit/advanced/test_geometry.py | 281 ++++++++++++++++++ tests/unit/advanced/test_policies.py | 103 +++++++ tests/unit/utils.py | 15 + 6 files changed, 487 insertions(+) create mode 100644 tests/unit/advanced/test_auth.py create mode 100644 tests/unit/advanced/test_execution_profile.py create mode 100644 tests/unit/advanced/test_geometry.py create mode 100644 tests/unit/advanced/test_policies.py diff --git a/tests/unit/advanced/__init__.py b/tests/unit/advanced/__init__.py index e69de29bb2..2c9ca172f8 100644 --- a/tests/unit/advanced/__init__.py +++ b/tests/unit/advanced/__init__.py @@ -0,0 +1,13 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/unit/advanced/test_auth.py b/tests/unit/advanced/test_auth.py new file mode 100644 index 0000000000..0a3d644246 --- /dev/null +++ b/tests/unit/advanced/test_auth.py @@ -0,0 +1,41 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from puresasl import QOP + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +from cassandra.auth import DSEGSSAPIAuthProvider + + +class TestGSSAPI(unittest.TestCase): + + def test_host_resolution(self): + # resolves by default + provider = DSEGSSAPIAuthProvider(service='test', qops=QOP.all) + authenticator = provider.new_authenticator('127.0.0.1') + self.assertEqual(authenticator.sasl.host, 'localhost') + + # numeric fallback okay + authenticator = provider.new_authenticator('192.0.2.1') + self.assertEqual(authenticator.sasl.host, '192.0.2.1') + + # disable explicitly + provider = DSEGSSAPIAuthProvider(service='test', qops=QOP.all, resolve_host_name=False) + authenticator = provider.new_authenticator('127.0.0.1') + self.assertEqual(authenticator.sasl.host, '127.0.0.1') + diff --git a/tests/unit/advanced/test_execution_profile.py b/tests/unit/advanced/test_execution_profile.py new file mode 100644 index 0000000000..8592f56a44 --- /dev/null +++ b/tests/unit/advanced/test_execution_profile.py @@ -0,0 +1,34 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +from cassandra.cluster import GraphExecutionProfile, GraphAnalyticsExecutionProfile +from cassandra.graph import GraphOptions + + +class GraphExecutionProfileTest(unittest.TestCase): + + def test_graph_source_can_be_set_with_graph_execution_profile(self): + options = GraphOptions(graph_source='a') + ep = GraphExecutionProfile(graph_options=options) + self.assertEqual(ep.graph_options.graph_source, b'a') + + def test_graph_source_is_preserve_with_graph_analytics_execution_profile(self): + options = GraphOptions(graph_source='doesnt_matter') + ep = GraphAnalyticsExecutionProfile(graph_options=options) + self.assertEqual(ep.graph_options.graph_source, b'a') # graph source is set automatically diff --git a/tests/unit/advanced/test_geometry.py b/tests/unit/advanced/test_geometry.py new file mode 100644 index 0000000000..7cd8f666e6 --- /dev/null +++ b/tests/unit/advanced/test_geometry.py @@ -0,0 +1,281 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +import struct +import math +from cassandra.cqltypes import lookup_casstype +from cassandra.protocol import ProtocolVersion +from cassandra.cqltypes import PointType, LineStringType, PolygonType, WKBGeometryType +from cassandra.util import Point, LineString, Polygon, _LinearRing, Distance + +wkb_be = 0 +wkb_le = 1 + +protocol_versions = ProtocolVersion.SUPPORTED_VERSIONS + + +class GeoTypes(unittest.TestCase): + + samples = (Point(1, 2), LineString(((1, 2), (3, 4), (5, 6))), Polygon([(10.1, 10.0), (110.0, 10.0), (110., 110.0), (10., 110.0), (10., 10.0)], [[(20., 20.0), (20., 30.0), (30., 30.0), (30., 20.0), (20., 20.0)], [(40., 20.0), (40., 30.0), (50., 30.0), (50., 20.0), (40., 20.0)]])) + + def test_marshal_platform(self): + for proto_ver in protocol_versions: + for geo in self.samples: + cql_type = lookup_casstype(geo.__class__.__name__ + 'Type') + self.assertEqual(cql_type.from_binary(cql_type.to_binary(geo, proto_ver), proto_ver), geo) + + def _verify_both_endian(self, typ, body_fmt, params, expected): + for proto_ver in protocol_versions: + self.assertEqual(typ.from_binary(struct.pack(">BI" + body_fmt, wkb_be, *params), proto_ver), expected) + self.assertEqual(typ.from_binary(struct.pack(" Date: Fri, 13 Sep 2019 15:07:10 -0400 Subject: [PATCH 1619/2005] more integration advanced tests --- tests/integration/advanced/__init__.py | 11 +- .../integration/advanced/test_adv_metadata.py | 282 ++++++++++++++++++ tests/integration/advanced/test_auth.py | 11 +- .../integration/advanced/test_cont_paging.py | 247 +++++++++++++++ .../test_cqlengine_where_operators.py | 114 +++++++ tests/integration/advanced/test_geometry.py | 11 +- tests/integration/advanced/test_spark.py | 49 +++ tests/unit/advanced/test_insights.py | 11 +- 8 files changed, 724 insertions(+), 12 deletions(-) create mode 100644 tests/integration/advanced/test_adv_metadata.py create mode 100644 tests/integration/advanced/test_cont_paging.py create mode 100644 tests/integration/advanced/test_cqlengine_where_operators.py create mode 100644 tests/integration/advanced/test_spark.py diff --git a/tests/integration/advanced/__init__.py b/tests/integration/advanced/__init__.py index bb01526c57..66660a1f10 100644 --- a/tests/integration/advanced/__init__.py +++ b/tests/integration/advanced/__init__.py @@ -1,11 +1,16 @@ # Copyright DataStax, Inc. # -# Licensed under the DataStax DSE Driver License; +# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. -# # You may obtain a copy of the License at # -# http://www.datastax.com/terms/datastax-dse-driver-license-terms +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. try: import unittest2 as unittest diff --git a/tests/integration/advanced/test_adv_metadata.py b/tests/integration/advanced/test_adv_metadata.py new file mode 100644 index 0000000000..f7a29d3d3b --- /dev/null +++ b/tests/integration/advanced/test_adv_metadata.py @@ -0,0 +1,282 @@ +# Copyright DataStax, Inc. +# +# Licensed under the DataStax DSE Driver License; +# you may not use this file except in compliance with the License. +# +# You may obtain a copy of the License at +# +# http://www.datastax.com/terms/datastax-dse-driver-license-terms +# from tests.integration import BasicSharedKeyspaceUnitTestCase, use_single_node + +from tests.integration import (BasicSharedKeyspaceUnitTestCase, + BasicSharedKeyspaceUnitTestCaseRF1, + greaterthanorequaldse51, + greaterthanorequaldse60, use_single_node, + DSE_VERSION, requiredse) + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +import logging +import time + + +log = logging.getLogger(__name__) + + +def setup_module(): + if DSE_VERSION: + use_single_node() + + +@requiredse +@greaterthanorequaldse60 +class FunctionAndAggregateMetadataTests(BasicSharedKeyspaceUnitTestCaseRF1): + + @classmethod + def setUpClass(cls): + if DSE_VERSION: + super(FunctionAndAggregateMetadataTests, cls).setUpClass() + + @classmethod + def tearDownClass(cls): + if DSE_VERSION: + super(FunctionAndAggregateMetadataTests, cls).setUpClass() + + def setUp(self): + self.func_name = self.function_table_name + '_func' + self.agg_name = self.function_table_name + '_agg(int)' + + def _populated_ks_meta_attr(self, attr_name): + val, start_time = None, time.time() + while not val: + self.cluster.refresh_schema_metadata() + val = getattr(self.cluster.metadata.keyspaces[self.keyspace_name], + attr_name) + self.assertLess(time.time(), start_time + 30, + 'did not see func in metadata in 30s') + log.debug('done blocking; dict is populated: {}'.format(val)) + return val + + def test_monotonic_on_and_deterministic_function(self): + self.session.execute(""" + CREATE FUNCTION {ksn}.{ftn}(key int, val int) + RETURNS NULL ON NULL INPUT + RETURNS int + DETERMINISTIC + MONOTONIC ON val + LANGUAGE java AS 'return key+val;'; + """.format(ksn=self.keyspace_name, + ftn=self.func_name)) + fn = self._populated_ks_meta_attr('functions')[ + '{}(int,int)'.format(self.func_name) + ] + self.assertEqual(fn.monotonic_on, ['val']) + # monotonic is not set by MONOTONIC ON + self.assertFalse(fn.monotonic) + self.assertTrue(fn.deterministic) + self.assertEqual('CREATE FUNCTION {ksn}.{ftn}(key int, val int) ' + 'RETURNS NULL ON NULL INPUT ' + 'RETURNS int DETERMINISTIC MONOTONIC ON val ' + 'LANGUAGE java AS $$return key+val;$$' + ''.format(ksn=self.keyspace_name, + ftn=self.func_name), + fn.as_cql_query()) + self.session.execute('DROP FUNCTION {}.{}'.format(self.keyspace_name, + self.func_name)) + self.session.execute(fn.as_cql_query()) + + def test_monotonic_all_and_nondeterministic_function(self): + self.session.execute(""" + CREATE FUNCTION {ksn}.{ftn}(key int, val int) + RETURNS NULL ON NULL INPUT + RETURNS int + MONOTONIC + LANGUAGE java AS 'return key+val;'; + """.format(ksn=self.keyspace_name, + ftn=self.func_name)) + fn = self._populated_ks_meta_attr('functions')[ + '{}(int,int)'.format(self.func_name) + ] + self.assertEqual(set(fn.monotonic_on), {'key', 'val'}) + self.assertTrue(fn.monotonic) + self.assertFalse(fn.deterministic) + self.assertEqual('CREATE FUNCTION {ksn}.{ftn}(key int, val int) ' + 'RETURNS NULL ON NULL INPUT RETURNS int MONOTONIC ' + 'LANGUAGE java AS $$return key+val;$$' + ''.format(ksn=self.keyspace_name, + ftn=self.func_name), + fn.as_cql_query()) + self.session.execute('DROP FUNCTION {}.{}'.format(self.keyspace_name, + self.func_name)) + self.session.execute(fn.as_cql_query()) + + def _create_func_for_aggregate(self): + self.session.execute(""" + CREATE FUNCTION {ksn}.{ftn}(key int, val int) + RETURNS NULL ON NULL INPUT + RETURNS int + DETERMINISTIC + LANGUAGE java AS 'return key+val;'; + """.format(ksn=self.keyspace_name, + ftn=self.func_name)) + + def test_deterministic_aggregate(self): + self._create_func_for_aggregate() + self.session.execute(""" + CREATE AGGREGATE {ksn}.{an} + SFUNC {ftn} + STYPE int + INITCOND 0 + DETERMINISTIC + """.format(ksn=self.keyspace_name, + ftn=self.func_name, + an=self.agg_name)) + ag = self._populated_ks_meta_attr('aggregates')[self.agg_name] + self.assertTrue(ag.deterministic) + self.assertEqual( + 'CREATE AGGREGATE {ksn}.{an} SFUNC ' + '{ftn} STYPE int INITCOND 0 DETERMINISTIC' + ''.format(ksn=self.keyspace_name, + ftn=self.func_name, + an=self.agg_name), + ag.as_cql_query()) + self.session.execute('DROP AGGREGATE {}.{}'.format(self.keyspace_name, + self.agg_name)) + self.session.execute(ag.as_cql_query()) + + def test_nondeterministic_aggregate(self): + self._create_func_for_aggregate() + self.session.execute(""" + CREATE AGGREGATE {ksn}.{an} + SFUNC {ftn} + STYPE int + INITCOND 0 + """.format(ksn=self.keyspace_name, + ftn=self.func_name, + an=self.agg_name)) + ag = self._populated_ks_meta_attr('aggregates')[self.agg_name] + self.assertFalse(ag.deterministic) + self.assertEqual( + 'CREATE AGGREGATE {ksn}.{an} SFUNC ' + '{ftn} STYPE int INITCOND 0' + ''.format(ksn=self.keyspace_name, + ftn=self.func_name, + an=self.agg_name), + ag.as_cql_query()) + self.session.execute('DROP AGGREGATE {}.{}'.format(self.keyspace_name, + self.agg_name)) + self.session.execute(ag.as_cql_query()) + + +@requiredse +class RLACMetadataTests(BasicSharedKeyspaceUnitTestCase): + + @classmethod + def setUpClass(cls): + if DSE_VERSION: + super(RLACMetadataTests, cls).setUpClass() + + @classmethod + def tearDownClass(cls): + if DSE_VERSION: + super(RLACMetadataTests, cls).setUpClass() + + @greaterthanorequaldse51 + def test_rlac_on_table(self): + """ + Checks to ensure that the RLAC table extension appends the proper cql on export to tables + + @since 2.0 + @jira_ticket PYTHON-638 + @expected_result Invalid hosts on the contact list should be excluded + + @test_category metadata + """ + self.session.execute("CREATE TABLE {0}.reports (" + " report_user text, " + " report_number int, " + " report_month int, " + " report_year int, " + " report_text text," + " PRIMARY KEY (report_user, report_number))".format(self.keyspace_name)) + restrict_cql = "RESTRICT ROWS ON {0}.reports USING report_user".format(self.keyspace_name) + self.session.execute(restrict_cql) + table_meta = self.cluster.metadata.keyspaces[self.keyspace_name].tables['reports'] + self.assertTrue(restrict_cql in table_meta.export_as_string()) + + @unittest.skip("Dse 5.1 doesn't current MV and RLAC remove after update") + @greaterthanorequaldse51 + def test_rlac_on_mv(self): + """ + Checks to ensure that the RLAC table extension appends the proper cql to export on mV's + + @since 2.0 + @jira_ticket PYTHON-682 + @expected_result Invalid hosts on the contact list should be excluded + + @test_category metadata + """ + self.session.execute("CREATE TABLE {0}.reports2 (" + " report_user text, " + " report_number int, " + " report_month int, " + " report_year int, " + " report_text text," + " PRIMARY KEY (report_user, report_number))".format(self.keyspace_name)) + self.session.execute("CREATE MATERIALIZED VIEW {0}.reports_by_year AS " + " SELECT report_year, report_user, report_number, report_text FROM {0}.reports2 " + " WHERE report_user IS NOT NULL AND report_number IS NOT NULL AND report_year IS NOT NULL " + " PRIMARY KEY ((report_year, report_user), report_number)".format(self.keyspace_name)) + + restrict_cql_table = "RESTRICT ROWS ON {0}.reports2 USING report_user".format(self.keyspace_name) + self.session.execute(restrict_cql_table) + restrict_cql_view = "RESTRICT ROWS ON {0}.reports_by_year USING report_user".format(self.keyspace_name) + self.session.execute(restrict_cql_view) + table_cql = self.cluster.metadata.keyspaces[self.keyspace_name].tables['reports2'].export_as_string() + view_cql = self.cluster.metadata.keyspaces[self.keyspace_name].tables['reports2'].views["reports_by_year"].export_as_string() + self.assertTrue(restrict_cql_table in table_cql) + self.assertTrue(restrict_cql_view in table_cql) + self.assertTrue(restrict_cql_view in view_cql) + self.assertTrue(restrict_cql_table not in view_cql) + + +@requiredse +class NodeSyncMetadataTests(BasicSharedKeyspaceUnitTestCase): + + @classmethod + def setUpClass(cls): + if DSE_VERSION: + super(NodeSyncMetadataTests, cls).setUpClass() + + @classmethod + def tearDownClass(cls): + if DSE_VERSION: + super(NodeSyncMetadataTests, cls).setUpClass() + + @greaterthanorequaldse60 + def test_nodesync_on_table(self): + """ + Checks to ensure that nodesync is visible through driver metadata + + @since DSE6.0 + @jira_ticket PYTHON-799 + @expected_result nodesync should be enabled + + @test_category metadata + """ + self.session.execute("CREATE TABLE {0}.reports (" + " report_user text PRIMARY KEY" + ") WITH nodesync = {{" + "'enabled': 'true', 'deadline_target_sec' : 86400 }};".format( + self.keyspace_name + )) + table_meta = self.cluster.metadata.keyspaces[self.keyspace_name].tables['reports'] + self.assertIn('nodesync =', table_meta.export_as_string()) + self.assertIn('nodesync', table_meta.options) + + table_3rf = self.cluster.metadata.keyspaces["test3rf"].tables['test'] + self.assertNotIn('nodesync =', table_3rf.export_as_string()) + self.assertIsNone(table_3rf.options['nodesync']) diff --git a/tests/integration/advanced/test_auth.py b/tests/integration/advanced/test_auth.py index 5db3033664..fb8cbd1efd 100644 --- a/tests/integration/advanced/test_auth.py +++ b/tests/integration/advanced/test_auth.py @@ -1,11 +1,16 @@ # Copyright DataStax, Inc. # -# Licensed under the DataStax DSE Driver License; +# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. -# # You may obtain a copy of the License at # -# http://www.datastax.com/terms/datastax-dse-driver-license-terms +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. try: import unittest2 as unittest except ImportError: diff --git a/tests/integration/advanced/test_cont_paging.py b/tests/integration/advanced/test_cont_paging.py new file mode 100644 index 0000000000..47cd02bb55 --- /dev/null +++ b/tests/integration/advanced/test_cont_paging.py @@ -0,0 +1,247 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from tests.integration import use_singledc, greaterthanorequaldse51, BasicSharedKeyspaceUnitTestCaseRF3WM, \ + DSE_VERSION, ProtocolVersion, greaterthanorequaldse60, requiredse + +import logging +log = logging.getLogger(__name__) + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +from itertools import cycle, count +from six.moves import range +from packaging.version import Version +import time + +from cassandra.cluster import Cluster, ExecutionProfile, ContinuousPagingOptions +from cassandra.concurrent import execute_concurrent +from cassandra.query import SimpleStatement + + +def setup_module(): + if DSE_VERSION: + use_singledc() + + +@requiredse +class BaseContPagingTests(): + @classmethod + def setUpClass(cls): + if not DSE_VERSION or DSE_VERSION < cls.required_dse_version: + return + + cls.execution_profiles = {"CONTDEFAULT": ExecutionProfile(continuous_paging_options=ContinuousPagingOptions()), + "ONEPAGE": ExecutionProfile( + continuous_paging_options=ContinuousPagingOptions(max_pages=1)), + "MANYPAGES": ExecutionProfile( + continuous_paging_options=ContinuousPagingOptions(max_pages=10)), + "BYTES": ExecutionProfile(continuous_paging_options=ContinuousPagingOptions( + page_unit=ContinuousPagingOptions.PagingUnit.BYTES)), + "SLOW": ExecutionProfile( + continuous_paging_options=ContinuousPagingOptions(max_pages_per_second=1)), } + cls.sane_eps = ["CONTDEFAULT", "BYTES"] + + @classmethod + def tearDownClass(cls): + if not DSE_VERSION or DSE_VERSION < cls.required_dse_version: + return + + @classmethod + def create_cluster(cls): + + cls.cluster_with_profiles = Cluster(protocol_version=cls.protocol_version, execution_profiles=cls.execution_profiles) + + cls.session_with_profiles = cls.cluster_with_profiles.connect(wait_for_all_pools=True) + statements_and_params = zip( + cycle(["INSERT INTO " + cls.ks_name + "." + cls.ks_name + " (k, v) VALUES (%s, 0)"]), + [(i,) for i in range(150)]) + execute_concurrent(cls.session_with_profiles, list(statements_and_params)) + + cls.select_all_statement = "SELECT * FROM {0}.{0}".format(cls.ks_name) + + def test_continous_paging(self): + """ + Test to ensure that various continuous paging schemes return the full set of results. + @since DSE 2.0 + @jira_ticket PYTHON-615 + @expected_result various continous paging options should fetch all the results + + @test_category queries + """ + for ep in self.execution_profiles.keys(): + results = list(self.session_with_profiles.execute(self.select_all_statement, execution_profile= ep)) + self.assertEqual(len(results), 150) + + + def test_page_fetch_size(self): + """ + Test to ensure that continuous paging works appropriately with fetch size. + @since DSE 2.0 + @jira_ticket PYTHON-615 + @expected_result continuous paging options should work sensibly with various fetch size + + @test_category queries + """ + + # Since we fetch one page at a time results should match fetch size + for fetch_size in (2, 3, 7, 10, 99, 100, 101, 150): + self.session_with_profiles.default_fetch_size = fetch_size + results = list(self.session_with_profiles.execute(self.select_all_statement, execution_profile= "ONEPAGE")) + self.assertEqual(len(results), fetch_size) + + # Since we fetch ten pages at a time results should match fetch size * 10 + for fetch_size in (2, 3, 7, 10, 15): + self.session_with_profiles.default_fetch_size = fetch_size + results = list(self.session_with_profiles.execute(self.select_all_statement, execution_profile= "MANYPAGES")) + self.assertEqual(len(results), fetch_size*10) + + # Default settings for continuous paging should be able to fetch all results regardless of fetch size + # Changing the units should, not affect the number of results, if max_pages is not set + for profile in self.sane_eps: + for fetch_size in (2, 3, 7, 10, 15): + self.session_with_profiles.default_fetch_size = fetch_size + results = list(self.session_with_profiles.execute(self.select_all_statement, execution_profile= profile)) + self.assertEqual(len(results), 150) + + # This should take around 3 seconds to fetch but should still complete with all results + self.session_with_profiles.default_fetch_size = 50 + results = list(self.session_with_profiles.execute(self.select_all_statement, execution_profile= "SLOW")) + self.assertEqual(len(results), 150) + + def test_paging_cancel(self): + """ + Test to ensure we can cancel a continuous paging session once it's started + @since DSE 2.0 + @jira_ticket PYTHON-615 + @expected_result This query should be canceled before any sizable amount of results can be returned + @test_category queries + """ + + self.session_with_profiles.default_fetch_size = 1 + # This combination should fetch one result a second. We should see a very few results + results = self.session_with_profiles.execute_async(self.select_all_statement, execution_profile= "SLOW") + result_set =results.result() + result_set.cancel_continuous_paging() + result_lst =list(result_set) + self.assertLess(len(result_lst), 2, "Cancel should have aborted fetch immediately") + + def test_con_paging_verify_writes(self): + """ + Test to validate results with a few continuous paging options + @since DSE 2.0 + @jira_ticket PYTHON-615 + @expected_result all results should be returned correctly + @test_category queries + """ + prepared = self.session_with_profiles.prepare(self.select_all_statement) + + + for ep in self.sane_eps: + for fetch_size in (2, 3, 7, 10, 99, 100, 101, 10000): + self.session_with_profiles.default_fetch_size = fetch_size + results = self.session_with_profiles.execute(self.select_all_statement, execution_profile=ep) + result_array = set() + result_set = set() + for result in results: + result_array.add(result.k) + result_set.add(result.v) + + self.assertEqual(set(range(150)), result_array) + self.assertEqual(set([0]), result_set) + + statement = SimpleStatement(self.select_all_statement) + results = self.session_with_profiles.execute(statement, execution_profile=ep) + result_array = set() + result_set = set() + for result in results: + result_array.add(result.k) + result_set.add(result.v) + + self.assertEqual(set(range(150)), result_array) + self.assertEqual(set([0]), result_set) + + results = self.session_with_profiles.execute(prepared, execution_profile=ep) + result_array = set() + result_set = set() + for result in results: + result_array.add(result.k) + result_set.add(result.v) + + self.assertEqual(set(range(150)), result_array) + self.assertEqual(set([0]), result_set) + + def test_can_get_results_when_no_more_pages(self): + """ + Test to validate that the resutls can be fetched when + has_more_pages is False + @since DSE 2.5 + @jira_ticket PYTHON-946 + @expected_result the results can be fetched + @test_category queries + """ + generator_expanded = [] + def get_all_rows(generator, future, generator_expanded): + self.assertFalse(future.has_more_pages) + + generator_expanded.extend(list(generator)) + print("Setting generator_expanded to True") + + future = self.session_with_profiles.execute_async("SELECT * from system.local LIMIT 10", + execution_profile="CONTDEFAULT") + future.add_callback(get_all_rows, future, generator_expanded) + time.sleep(5) + self.assertTrue(generator_expanded) + + +@requiredse +@greaterthanorequaldse51 +class ContPagingTestsDSEV1(BaseContPagingTests, BasicSharedKeyspaceUnitTestCaseRF3WM): + @classmethod + def setUpClass(cls): + cls.required_dse_version = BaseContPagingTests.required_dse_version = Version('5.1') + if not DSE_VERSION or DSE_VERSION < cls.required_dse_version: + return + + BasicSharedKeyspaceUnitTestCaseRF3WM.setUpClass() + BaseContPagingTests.setUpClass() + + cls.protocol_version = ProtocolVersion.DSE_V1 + cls.create_cluster() + + +@requiredse +@greaterthanorequaldse60 +class ContPagingTestsDSEV2(BaseContPagingTests, BasicSharedKeyspaceUnitTestCaseRF3WM): + @classmethod + def setUpClass(cls): + cls.required_dse_version = BaseContPagingTests.required_dse_version = Version('6.0') + if not DSE_VERSION or DSE_VERSION < cls.required_dse_version: + return + + BasicSharedKeyspaceUnitTestCaseRF3WM.setUpClass() + BaseContPagingTests.setUpClass() + + more_profiles = { + "SMALL_QUEUE": ExecutionProfile(continuous_paging_options=ContinuousPagingOptions(max_queue_size=2)), + "BIG_QUEUE": ExecutionProfile(continuous_paging_options=ContinuousPagingOptions(max_queue_size=400)) + } + cls.sane_eps += ["SMALL_QUEUE", "BIG_QUEUE"] + cls.execution_profiles.update(more_profiles) + + cls.protocol_version = ProtocolVersion.DSE_V2 + cls.create_cluster() diff --git a/tests/integration/advanced/test_cqlengine_where_operators.py b/tests/integration/advanced/test_cqlengine_where_operators.py new file mode 100644 index 0000000000..06913db60f --- /dev/null +++ b/tests/integration/advanced/test_cqlengine_where_operators.py @@ -0,0 +1,114 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +import os +import time + +from cassandra.cluster import Cluster +from cassandra.cqlengine import columns, connection, models +from cassandra.cqlengine.management import (CQLENG_ALLOW_SCHEMA_MANAGEMENT, + create_keyspace_simple, drop_table, + sync_table) +from cassandra.cqlengine.statements import IsNotNull +from tests.integration import DSE_VERSION, requiredse, CASSANDRA_IP, greaterthanorequaldse60 +from tests.integration.advanced import use_single_node_with_graph_and_solr +from tests.integration.cqlengine import DEFAULT_KEYSPACE + + +class SimpleNullableModel(models.Model): + __keyspace__ = DEFAULT_KEYSPACE + partition = columns.Integer(primary_key=True) + nullable = columns.Integer(required=False) + # nullable = columns.Integer(required=False, custom_index=True) + + +def setup_module(): + if DSE_VERSION: + os.environ[CQLENG_ALLOW_SCHEMA_MANAGEMENT] = '1' + use_single_node_with_graph_and_solr() + setup_connection(DEFAULT_KEYSPACE) + create_keyspace_simple(DEFAULT_KEYSPACE, 1) + sync_table(SimpleNullableModel) + + +def setup_connection(keyspace_name): + connection.setup([CASSANDRA_IP], + # consistency=ConsistencyLevel.ONE, + # protocol_version=PROTOCOL_VERSION, + default_keyspace=keyspace_name) + + +def teardown_module(): + if DSE_VERSION: + drop_table(SimpleNullableModel) + + +@requiredse +class IsNotNullTests(unittest.TestCase): + + @classmethod + def setUpClass(cls): + if DSE_VERSION: + cls.cluster = Cluster() + + @greaterthanorequaldse60 + def test_is_not_null_execution(self): + """ + Verify that CQL statements have correct syntax when executed + If we wanted them to return something meaningful and not a InvalidRequest + we'd have to create an index in search for the column we are using + IsNotNull + + @since 2.5 + @jira_ticket PYTHON-968 + @expected_result InvalidRequest is arisen + + @test_category cqlengine + """ + cluster = Cluster() + self.addCleanup(cluster.shutdown) + session = cluster.connect() + + SimpleNullableModel.create(partition=1, nullable=2) + SimpleNullableModel.create(partition=2, nullable=None) + + self.addCleanup(session.execute, "DROP SEARCH INDEX ON {}".format( + SimpleNullableModel.column_family_name())) + create_index_stmt = ( + "CREATE SEARCH INDEX ON {} WITH COLUMNS nullable " + "".format(SimpleNullableModel.column_family_name())) + session.execute(create_index_stmt) + + SimpleNullableModel.create(partition=1, nullable=1) + SimpleNullableModel.create(partition=2, nullable=None) + + # TODO: block on indexing more precisely + time.sleep(5) + + self.assertEqual(len(list(SimpleNullableModel.objects.all())), 2) + self.assertEqual( + len(list( + SimpleNullableModel.filter(IsNotNull("nullable"), partition__eq=2) + )), + 0) + self.assertEqual( + len(list( + SimpleNullableModel.filter(IsNotNull("nullable"), partition__eq=1) + )), + 1) diff --git a/tests/integration/advanced/test_geometry.py b/tests/integration/advanced/test_geometry.py index e731d8bbc3..f10900bbfd 100644 --- a/tests/integration/advanced/test_geometry.py +++ b/tests/integration/advanced/test_geometry.py @@ -1,11 +1,16 @@ # Copyright DataStax, Inc. # -# Licensed under the DataStax DSE Driver License; +# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. -# # You may obtain a copy of the License at # -# http://www.datastax.com/terms/datastax-dse-driver-license-terms +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from tests.integration import DSE_VERSION, requiredse diff --git a/tests/integration/advanced/test_spark.py b/tests/integration/advanced/test_spark.py new file mode 100644 index 0000000000..587521c868 --- /dev/null +++ b/tests/integration/advanced/test_spark.py @@ -0,0 +1,49 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from cassandra.cluster import EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT +from cassandra.graph import SimpleGraphStatement +from tests.integration import DSE_VERSION, requiredse +from tests.integration.advanced import BasicGraphUnitTestCase, use_singledc_wth_graph_and_spark, generate_classic, find_spark_master +log = logging.getLogger(__name__) + + +def setup_module(): + if DSE_VERSION: + use_singledc_wth_graph_and_spark() + + +@requiredse +class SparkLBTests(BasicGraphUnitTestCase): + """ + Test to validate that analtics query can run in a multi-node enviroment. Also check to to ensure + that the master spark node is correctly targeted when OLAP queries are run + + @since 1.0.0 + @jira_ticket PYTHON-510 + @expected_result OLAP results should come back correctly, master spark coordinator should always be picked. + @test_category dse graph + """ + def test_spark_analytic_query(self): + generate_classic(self.session) + spark_master = find_spark_master(self.session) + + # Run multipltle times to ensure we don't round robin + for i in range(3): + to_run = SimpleGraphStatement("g.V().count()") + rs = self.session.execute_graph(to_run, execution_profile=EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT) + self.assertEqual(rs[0].value, 6) + self.assertEqual(rs.response_future._current_host.address, spark_master) diff --git a/tests/unit/advanced/test_insights.py b/tests/unit/advanced/test_insights.py index b200761a4d..7b210d40da 100644 --- a/tests/unit/advanced/test_insights.py +++ b/tests/unit/advanced/test_insights.py @@ -1,11 +1,16 @@ # Copyright DataStax, Inc. # -# Licensed under the DataStax DSE Driver License; +# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. -# # You may obtain a copy of the License at # -# http://www.datastax.com/terms/datastax-dse-driver-license-terms +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. try: From 5f4d0db59f2b9a59a685cefb158bd2d677717f6d Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 13 Sep 2019 15:18:12 -0400 Subject: [PATCH 1620/2005] integration/cqlengine/advanced/test_cont_paging.py --- .../cqlengine/advanced/test_cont_paging.py | 170 ++++++++++++++++++ 1 file changed, 170 insertions(+) create mode 100644 tests/integration/cqlengine/advanced/test_cont_paging.py diff --git a/tests/integration/cqlengine/advanced/test_cont_paging.py b/tests/integration/cqlengine/advanced/test_cont_paging.py new file mode 100644 index 0000000000..ec7b196f1a --- /dev/null +++ b/tests/integration/cqlengine/advanced/test_cont_paging.py @@ -0,0 +1,170 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +from packaging.version import Version + +from cassandra.cluster import (EXEC_PROFILE_DEFAULT, Cluster, + ContinuousPagingOptions, ExecutionProfile, + ProtocolVersion) +from cassandra.cqlengine import columns, connection, models +from cassandra.cqlengine.management import drop_table, sync_table +from tests.integration import (DSE_VERSION, greaterthanorequaldse51, + greaterthanorequaldse60, requiredse) + + +class TestMultiKeyModel(models.Model): + partition = columns.Integer(primary_key=True) + cluster = columns.Integer(primary_key=True) + count = columns.Integer(required=False) + text = columns.Text(required=False) + + +def setup_module(): + if DSE_VERSION: + sync_table(TestMultiKeyModel) + for i in range(1000): + TestMultiKeyModel.create(partition=i, cluster=i, count=5, text="text to write") + + +def teardown_module(): + if DSE_VERSION: + drop_table(TestMultiKeyModel) + + +@requiredse +class BasicConcurrentTests(): + required_dse_version = None + protocol_version = None + connections = set() + sane_connections = {"CONTDEFAULT"} + + @classmethod + def setUpClass(cls): + if DSE_VERSION: + cls._create_cluster_with_cp_options("CONTDEFAULT", ContinuousPagingOptions()) + cls._create_cluster_with_cp_options("ONEPAGE", ContinuousPagingOptions(max_pages=1)) + cls._create_cluster_with_cp_options("MANYPAGES", ContinuousPagingOptions(max_pages=10)) + cls._create_cluster_with_cp_options("SLOW", ContinuousPagingOptions(max_pages_per_second=1)) + + @classmethod + def tearDownClass(cls): + if not DSE_VERSION or DSE_VERSION < cls.required_dse_version: + return + + cls.cluster_default.shutdown() + connection.set_default_connection("default") + + @classmethod + def _create_cluster_with_cp_options(cls, name, cp_options): + execution_profiles = {EXEC_PROFILE_DEFAULT: + ExecutionProfile(continuous_paging_options=cp_options)} + cls.cluster_default = Cluster(protocol_version=cls.protocol_version, + execution_profiles=execution_profiles) + cls.session_default = cls.cluster_default.connect(wait_for_all_pools=True) + connection.register_connection(name, default=True, session=cls.session_default) + cls.connections.add(name) + + def test_continuous_paging_basic(self): + """ + Test to ensure that various continuous paging works with cqlengine + for session + @since DSE 2.4 + @jira_ticket PYTHON-872 + @expected_result various continous paging options should fetch all the results + + @test_category queries + """ + for connection_name in self.sane_connections: + connection.set_default_connection(connection_name) + row = TestMultiKeyModel.get(partition=0, cluster=0) + self.assertEqual(row.partition, 0) + self.assertEqual(row.cluster, 0) + rows = TestMultiKeyModel.objects().allow_filtering() + self.assertEqual(len(rows), 1000) + + def test_fetch_size(self): + """ + Test to ensure that various continuous paging works with different fetch sizes + for session + @since DSE 2.4 + @jira_ticket PYTHON-872 + @expected_result various continous paging options should fetch all the results + + @test_category queries + """ + for connection_name in self.connections: + conn = connection._connections[connection_name] + initial_default = conn.session.default_fetch_size + self.addCleanup( + setattr, + conn.session, + "default_fetch_size", + initial_default + ) + + connection.set_default_connection("ONEPAGE") + for fetch_size in (2, 3, 7, 10, 99, 100, 101, 150): + connection._connections["ONEPAGE"].session.default_fetch_size = fetch_size + rows = TestMultiKeyModel.objects().allow_filtering() + self.assertEqual(fetch_size, len(rows)) + + connection.set_default_connection("MANYPAGES") + for fetch_size in (2, 3, 7, 10, 15): + connection._connections["MANYPAGES"].session.default_fetch_size = fetch_size + rows = TestMultiKeyModel.objects().allow_filtering() + self.assertEqual(fetch_size * 10, len(rows)) + + for connection_name in self.sane_connections: + connection.set_default_connection(connection_name) + for fetch_size in (2, 3, 7, 10, 99, 100, 101, 150): + connection._connections[connection_name].session.default_fetch_size = fetch_size + rows = TestMultiKeyModel.objects().allow_filtering() + self.assertEqual(1000, len(rows)) + + +@requiredse +@greaterthanorequaldse51 +class ContPagingTestsDSEV1(BasicConcurrentTests, unittest.TestCase): + @classmethod + def setUpClass(cls): + BasicConcurrentTests.required_dse_version = Version('5.1') + if not DSE_VERSION or DSE_VERSION < BasicConcurrentTests.required_dse_version: + return + + BasicConcurrentTests.protocol_version = ProtocolVersion.DSE_V1 + BasicConcurrentTests.setUpClass() + +@requiredse +@greaterthanorequaldse60 +class ContPagingTestsDSEV2(BasicConcurrentTests, unittest.TestCase): + @classmethod + def setUpClass(cls): + BasicConcurrentTests.required_dse_version = Version('6.0') + if not DSE_VERSION or DSE_VERSION < BasicConcurrentTests.required_dse_version: + return + BasicConcurrentTests.protocol_version = ProtocolVersion.DSE_V2 + BasicConcurrentTests.setUpClass() + + cls.connections = cls.connections.union({"SMALL_QUEUE", "BIG_QUEUE"}) + cls.sane_connections = cls.sane_connections.union({"SMALL_QUEUE", "BIG_QUEUE"}) + + cls._create_cluster_with_cp_options("SMALL_QUEUE", ContinuousPagingOptions(max_queue_size=2)) + cls._create_cluster_with_cp_options("BIG_QUEUE", ContinuousPagingOptions(max_queue_size=400)) From ae222a3ec7ea1660daa867e7366a068569ecac28 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 13 Sep 2019 12:50:23 -0500 Subject: [PATCH 1621/2005] long tests --- tests/integration/long/test_consistency.py | 64 +++++++++---------- tests/integration/long/test_failure_types.py | 48 ++++++++------ tests/integration/long/test_large_data.py | 9 ++- .../long/test_loadbalancingpolicies.py | 37 ++++++----- tests/integration/long/test_policies.py | 2 +- tests/integration/long/test_schema.py | 4 ++ tests/integration/long/test_ssl.py | 13 +++- tests/integration/long/utils.py | 58 +++++++---------- 8 files changed, 124 insertions(+), 111 deletions(-) diff --git a/tests/integration/long/test_consistency.py b/tests/integration/long/test_consistency.py index bb6828aa3b..a4507a9bf0 100644 --- a/tests/integration/long/test_consistency.py +++ b/tests/integration/long/test_consistency.py @@ -12,32 +12,33 @@ # See the License for the specific language governing permissions and # limitations under the License. -import struct, time, traceback, sys, logging +import logging +import struct +import sys +import time +import traceback -from random import randint from cassandra import ConsistencyLevel, OperationTimedOut, ReadTimeout, WriteTimeout, Unavailable -from cassandra.cluster import Cluster +from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.policies import TokenAwarePolicy, RoundRobinPolicy, DowngradingConsistencyRetryPolicy from cassandra.query import SimpleStatement from tests.integration import use_singledc, PROTOCOL_VERSION, execute_until_pass -from tests.integration.long.utils import (force_stop, create_schema, wait_for_down, wait_for_up, - start, CoordinatorStats) +from tests.integration.long.utils import ( + force_stop, create_schema, wait_for_down, wait_for_up, start, CoordinatorStats +) try: import unittest2 as unittest except ImportError: import unittest # noqa -ALL_CONSISTENCY_LEVELS = set([ - ConsistencyLevel.ANY, ConsistencyLevel.ONE, ConsistencyLevel.TWO, - ConsistencyLevel.QUORUM, ConsistencyLevel.THREE, - ConsistencyLevel.ALL, ConsistencyLevel.LOCAL_QUORUM, - ConsistencyLevel.EACH_QUORUM]) - -MULTI_DC_CONSISTENCY_LEVELS = set([ - ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM]) - +ALL_CONSISTENCY_LEVELS = { + ConsistencyLevel.ANY, ConsistencyLevel.ONE, ConsistencyLevel.TWO, ConsistencyLevel.QUORUM, + ConsistencyLevel.THREE, ConsistencyLevel.ALL, ConsistencyLevel.LOCAL_QUORUM, + ConsistencyLevel.EACH_QUORUM +} +MULTI_DC_CONSISTENCY_LEVELS = {ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM} SINGLE_DC_CONSISTENCY_LEVELS = ALL_CONSISTENCY_LEVELS - MULTI_DC_CONSISTENCY_LEVELS log = logging.getLogger(__name__) @@ -128,9 +129,8 @@ def _assert_reads_fail(self, session, keyspace, consistency_levels): pass def _test_tokenaware_one_node_down(self, keyspace, rf, accepted): - cluster = Cluster( - load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()), - protocol_version=PROTOCOL_VERSION) + cluster = Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(TokenAwarePolicy(RoundRobinPolicy()))}) session = cluster.connect(wait_for_all_pools=True) wait_for_up(cluster, 1) wait_for_up(cluster, 2) @@ -180,9 +180,8 @@ def test_rfthree_tokenaware_one_node_down(self): def test_rfthree_tokenaware_none_down(self): keyspace = 'test_rfthree_tokenaware_none_down' - cluster = Cluster( - load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()), - protocol_version=PROTOCOL_VERSION) + cluster = Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(TokenAwarePolicy(RoundRobinPolicy()))}) session = cluster.connect(wait_for_all_pools=True) wait_for_up(cluster, 1) wait_for_up(cluster, 2) @@ -204,10 +203,9 @@ def test_rfthree_tokenaware_none_down(self): cluster.shutdown() def _test_downgrading_cl(self, keyspace, rf, accepted): - cluster = Cluster( - load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()), - default_retry_policy=DowngradingConsistencyRetryPolicy(), - protocol_version=PROTOCOL_VERSION) + cluster = Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(TokenAwarePolicy(RoundRobinPolicy()), + DowngradingConsistencyRetryPolicy())}) session = cluster.connect(wait_for_all_pools=True) create_schema(cluster, session, keyspace, replication_factor=rf) @@ -248,19 +246,17 @@ def test_rftwo_downgradingcl(self): def test_rfthree_roundrobin_downgradingcl(self): keyspace = 'test_rfthree_roundrobin_downgradingcl' - cluster = Cluster( - load_balancing_policy=RoundRobinPolicy(), - default_retry_policy=DowngradingConsistencyRetryPolicy(), - protocol_version=PROTOCOL_VERSION) - self.rfthree_downgradingcl(cluster, keyspace, True) + with Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(RoundRobinPolicy(), + DowngradingConsistencyRetryPolicy())}) as cluster: + self.rfthree_downgradingcl(cluster, keyspace, True) def test_rfthree_tokenaware_downgradingcl(self): keyspace = 'test_rfthree_tokenaware_downgradingcl' - cluster = Cluster( - load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()), - default_retry_policy=DowngradingConsistencyRetryPolicy(), - protocol_version=PROTOCOL_VERSION) - self.rfthree_downgradingcl(cluster, keyspace, False) + with Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(TokenAwarePolicy(RoundRobinPolicy()), + DowngradingConsistencyRetryPolicy())}) as cluster: + self.rfthree_downgradingcl(cluster, keyspace, False) def rfthree_downgradingcl(self, cluster, keyspace, roundrobin): session = cluster.connect(wait_for_all_pools=True) diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py index a67c05aa57..a63b36649b 100644 --- a/tests/integration/long/test_failure_types.py +++ b/tests/integration/long/test_failure_types.py @@ -12,17 +12,25 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys,logging, traceback, time, re +import logging +import sys +import traceback +import time +from mock import Mock -from cassandra import (ConsistencyLevel, OperationTimedOut, ReadTimeout, WriteTimeout, ReadFailure, WriteFailure, - FunctionFailure, ProtocolVersion) -from cassandra.cluster import Cluster, NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.policies import HostFilterPolicy, RoundRobinPolicy +from cassandra import ( + ConsistencyLevel, OperationTimedOut, ReadTimeout, WriteTimeout, ReadFailure, WriteFailure, + FunctionFailure, ProtocolVersion, +) +from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.concurrent import execute_concurrent_with_args from cassandra.query import SimpleStatement -from tests.integration import use_singledc, PROTOCOL_VERSION, get_cluster, setup_keyspace, remove_cluster, get_node, \ - requiresmallclockgranularity -from mock import Mock +from tests.integration import ( + use_singledc, PROTOCOL_VERSION, get_cluster, setup_keyspace, remove_cluster, + get_node, start_cluster_wait_for_up, requiresmallclockgranularity, +) + try: import unittest2 as unittest @@ -46,7 +54,7 @@ def setup_module(): ccm_cluster.stop() config_options = {'tombstone_failure_threshold': 2000, 'tombstone_warn_threshold': 1000} ccm_cluster.set_configuration_options(config_options) - ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True) + start_cluster_wait_for_up(ccm_cluster) setup_keyspace() @@ -317,15 +325,17 @@ def setUp(self): """ Setup sessions and pause node1 """ - - # self.node1, self.node2, self.node3 = get_cluster().nodes.values() - - node1 = ExecutionProfile( - load_balancing_policy=HostFilterPolicy( - RoundRobinPolicy(), lambda host: host.address == "127.0.0.1" - ) + self.cluster = Cluster( + protocol_version=PROTOCOL_VERSION, + execution_profiles={ + EXEC_PROFILE_DEFAULT: ExecutionProfile( + load_balancing_policy=HostFilterPolicy( + RoundRobinPolicy(), lambda host: host.address == "127.0.0.1" + ) + ) + } ) - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, execution_profiles={EXEC_PROFILE_DEFAULT: node1}) + self.session = self.cluster.connect(wait_for_all_pools=True) self.control_connection_host_number = 1 @@ -372,13 +382,14 @@ def test_async_timeouts(self): future.result() end_time = time.time() total_time = end_time-start_time - expected_time = self.session.default_timeout + expected_time = self.cluster.profile_manager.default.request_timeout # check timeout and ensure it's within a reasonable range self.assertAlmostEqual(expected_time, total_time, delta=.05) # Test with user defined timeout (Should be 1) + expected_time = 1 start_time = time.time() - future = self.session.execute_async(ss, timeout=1) + future = self.session.execute_async(ss, timeout=expected_time) mock_callback = Mock(return_value=None) mock_errorback = Mock(return_value=None) future.add_callback(mock_callback) @@ -388,7 +399,6 @@ def test_async_timeouts(self): future.result() end_time = time.time() total_time = end_time-start_time - expected_time = 1 # check timeout and ensure it's within a reasonable range self.assertAlmostEqual(expected_time, total_time, delta=.05) self.assertTrue(mock_errorback.called) diff --git a/tests/integration/long/test_large_data.py b/tests/integration/long/test_large_data.py index 76cafa03b7..071268d86c 100644 --- a/tests/integration/long/test_large_data.py +++ b/tests/integration/long/test_large_data.py @@ -21,7 +21,7 @@ import logging, sys, traceback, time from cassandra import ConsistencyLevel, OperationTimedOut, WriteTimeout -from cassandra.cluster import Cluster +from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.query import dict_factory from cassandra.query import SimpleStatement from tests.integration import use_singledc, PROTOCOL_VERSION @@ -61,11 +61,10 @@ def setUp(self): self.keyspace = 'large_data' def make_session_and_keyspace(self): - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(request_timeout=20, + row_factory=dict_factory)}) session = cluster.connect() - session.default_timeout = 20.0 # increase the default timeout - session.row_factory = dict_factory - create_schema(cluster, session, self.keyspace) return session diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index 2cc3d8e7f1..8a5b7fe4c8 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -19,12 +19,14 @@ from cassandra import ConsistencyLevel, Unavailable, OperationTimedOut, ReadTimeout, ReadFailure, \ WriteTimeout, WriteFailure -from cassandra.cluster import Cluster, NoHostAvailable +from cassandra.cluster import Cluster, NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.concurrent import execute_concurrent_with_args from cassandra.metadata import murmur3 -from cassandra.policies import (RoundRobinPolicy, DCAwareRoundRobinPolicy, - TokenAwarePolicy, WhiteListRoundRobinPolicy, - HostFilterPolicy) +from cassandra.policies import ( + RoundRobinPolicy, DCAwareRoundRobinPolicy, + TokenAwarePolicy, WhiteListRoundRobinPolicy, + HostFilterPolicy +) from cassandra.query import SimpleStatement from tests.integration import use_singledc, use_multidc, remove_cluster, PROTOCOL_VERSION @@ -60,8 +62,8 @@ def teardown_class(cls): def _connect_probe_cluster(self): if not self.probe_cluster: # distinct cluster so we can see the status of nodes ignored by the LBP being tested - self.probe_cluster = Cluster(load_balancing_policy=RoundRobinPolicy(), - schema_metadata_enabled=False, token_metadata_enabled=False) + self.probe_cluster = Cluster(schema_metadata_enabled=False, token_metadata_enabled=False, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=RoundRobinPolicy())}) self.probe_session = self.probe_cluster.connect() def _wait_for_nodes_up(self, nodes, cluster=None): @@ -87,8 +89,9 @@ def _wait_for_nodes_down(self, nodes, cluster=None): def _cluster_session_with_lbp(self, lbp): # create a cluster with no delay on events - cluster = Cluster(load_balancing_policy=lbp, protocol_version=PROTOCOL_VERSION, - topology_event_refresh_window=0, status_event_refresh_window=0) + + cluster = Cluster(protocol_version=PROTOCOL_VERSION, topology_event_refresh_window=0, status_event_refresh_window=0, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=lbp)}) session = cluster.connect() return cluster, session @@ -164,7 +167,7 @@ def _query(self, session, keyspace, count=12, def test_token_aware_is_used_by_default(self): """ - Test for default loadbalacing policy + Test for default load balancing policy test_token_aware_is_used_by_default tests that the default loadbalancing policy is policies.TokenAwarePolicy. It creates a simple Cluster and verifies that the default loadbalancing policy is TokenAwarePolicy if the @@ -180,9 +183,9 @@ def test_token_aware_is_used_by_default(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION) if murmur3 is not None: - self.assertTrue(isinstance(cluster.load_balancing_policy, TokenAwarePolicy)) + self.assertTrue(isinstance(cluster.profile_manager.default.load_balancing_policy, TokenAwarePolicy)) else: - self.assertTrue(isinstance(cluster.load_balancing_policy, DCAwareRoundRobinPolicy)) + self.assertTrue(isinstance(cluster.profile_manager.default.load_balancing_policy, DCAwareRoundRobinPolicy)) cluster.shutdown() @@ -656,9 +659,11 @@ def test_white_list(self): use_singledc() keyspace = 'test_white_list' - cluster = Cluster(('127.0.0.2',), load_balancing_policy=WhiteListRoundRobinPolicy((IP_FORMAT % 2,)), - protocol_version=PROTOCOL_VERSION, topology_event_refresh_window=0, - status_event_refresh_window=0) + cluster = Cluster(('127.0.0.2',), protocol_version=PROTOCOL_VERSION, + topology_event_refresh_window=0, status_event_refresh_window=0, + execution_profiles={EXEC_PROFILE_DEFAULT: + ExecutionProfile(load_balancing_policy= + WhiteListRoundRobinPolicy((IP_FORMAT % 2,)))}) session = cluster.connect() self._wait_for_nodes_up([1, 2, 3]) @@ -706,10 +711,10 @@ def test_black_list_with_host_filter_policy(self): ) cluster = Cluster( (IP_FORMAT % 1,), - load_balancing_policy=hfp, protocol_version=PROTOCOL_VERSION, topology_event_refresh_window=0, - status_event_refresh_window=0 + status_event_refresh_window=0, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=hfp)} ) self.addCleanup(cluster.shutdown) session = cluster.connect() diff --git a/tests/integration/long/test_policies.py b/tests/integration/long/test_policies.py index f720548ea2..d694476fb5 100644 --- a/tests/integration/long/test_policies.py +++ b/tests/integration/long/test_policies.py @@ -32,7 +32,7 @@ class RetryPolicyTests(unittest.TestCase): @classmethod def tearDownClass(cls): cluster = get_cluster() - cluster.start() # make sure other nodes are restarted + cluster.start(wait_for_binary_proto=True) # make sure other nodes are restarted def test_should_rethrow_on_unvailable_with_default_policy_if_cas(self): """ diff --git a/tests/integration/long/test_schema.py b/tests/integration/long/test_schema.py index 5163066e43..2ad854688d 100644 --- a/tests/integration/long/test_schema.py +++ b/tests/integration/long/test_schema.py @@ -20,6 +20,8 @@ from tests.integration import use_singledc, PROTOCOL_VERSION, execute_until_pass +import time + try: import unittest2 as unittest except ImportError: @@ -156,6 +158,8 @@ def test_for_schema_disagreement_attribute(self): cluster.shutdown() def check_and_wait_for_agreement(self, session, rs, exepected): + # Wait for RESULT_KIND_SCHEMA_CHANGE message to arrive + time.sleep(1) self.assertEqual(rs.response_future.is_schema_agreed, exepected) if not rs.response_future.is_schema_agreed: session.cluster.control_connection.wait_for_schema_agreement(wait_time=1000) diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index 7f0a870718..db44b76592 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -21,7 +21,16 @@ from cassandra.cluster import Cluster, NoHostAvailable from cassandra import ConsistencyLevel from cassandra.query import SimpleStatement -from tests.integration import PROTOCOL_VERSION, get_cluster, remove_cluster, use_single_node, EVENT_LOOP_MANAGER +from tests.integration import ( + PROTOCOL_VERSION, get_cluster, remove_cluster, use_single_node, start_cluster_wait_for_up, EVENT_LOOP_MANAGER, +) + +if not hasattr(ssl, 'match_hostname'): + try: + from backports.ssl_match_hostname import match_hostname + ssl.match_hostname = match_hostname + except ImportError: + pass # tests will fail log = logging.getLogger(__name__) @@ -77,7 +86,7 @@ def setup_cluster_ssl(client_auth=False): client_encyrption_options['truststore_password'] = DEFAULT_PASSWORD ccm_cluster.set_configuration_options(config_options) - ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True) + start_cluster_wait_for_up(ccm_cluster) def validate_ssl_options(**kwargs): diff --git a/tests/integration/long/utils.py b/tests/integration/long/utils.py index 07652b6ac6..1829d05cd0 100644 --- a/tests/integration/long/utils.py +++ b/tests/integration/long/utils.py @@ -17,13 +17,11 @@ import time from collections import defaultdict -from ccmlib.node import Node, ToolError +from packaging.version import Version -from nose.tools import assert_in -from cassandra.query import named_tuple_factory -from cassandra.cluster import ConsistencyLevel +from tests.integration import (get_node, get_cluster, wait_for_node_socket, + DSE_VERSION, CASSANDRA_VERSION) -from tests.integration import get_node, get_cluster, wait_for_node_socket IP_FORMAT = '127.0.0.%s' @@ -60,10 +58,6 @@ def assert_query_count_equals(self, testcase, node, expected): def create_schema(cluster, session, keyspace, simple_strategy=True, replication_factor=1, replication_strategy=None): - row_factory = session.row_factory - session.row_factory = named_tuple_factory - session.default_consistency_level = ConsistencyLevel.QUORUM - if keyspace in cluster.metadata.keyspaces.keys(): session.execute('DROP KEYSPACE %s' % keyspace, timeout=20) @@ -83,9 +77,6 @@ def create_schema(cluster, session, keyspace, simple_strategy=True, session.execute(ddl % keyspace, timeout=10) session.execute('USE %s' % keyspace) - session.row_factory = row_factory - session.default_consistency_level = ConsistencyLevel.LOCAL_ONE - def start(node): get_node(node).start() @@ -102,17 +93,11 @@ def force_stop(node): def decommission(node): - try: - get_node(node).decommission() - except ToolError as e: - expected_errs = (('Not enough live nodes to maintain replication ' - 'factor in keyspace system_distributed'), - 'Perform a forceful decommission to ignore.') - for err in expected_errs: - assert_in(err, e.stdout) - # in this case, we're running against a C* version with CASSANDRA-12510 - # applied and need to decommission with `--force` + if DSE_VERSION >= Version("5.1") or CASSANDRA_VERSION >= Version("4.0"): + # CASSANDRA-12510 get_node(node).decommission(force=True) + else: + get_node(node).decommission() get_node(node).stop() @@ -120,24 +105,29 @@ def bootstrap(node, data_center=None, token=None): log.debug('called bootstrap(' 'node={node}, data_center={data_center}, ' 'token={token})') - node_instance = Node('node%s' % node, - get_cluster(), - auto_bootstrap=False, - thrift_interface=(IP_FORMAT % node, 9160), - storage_interface=(IP_FORMAT % node, 7000), - binary_interface=(IP_FORMAT % node, 9042), - jmx_port=str(7000 + 100 * node), - remote_debug_port=0, - initial_token=token if token else node * 10) - get_cluster().add(node_instance, is_seed=False, data_center=data_center) + cluster = get_cluster() + # for now assumes cluster has at least one node + node_type = type(next(iter(cluster.nodes.values()))) + node_instance = node_type( + 'node%s' % node, + cluster, + auto_bootstrap=False, + thrift_interface=(IP_FORMAT % node, 9160), + storage_interface=(IP_FORMAT % node, 7000), + binary_interface=(IP_FORMAT % node, 9042), + jmx_port=str(7000 + 100 * node), + remote_debug_port=0, + initial_token=token if token else node * 10 + ) + cluster.add(node_instance, is_seed=False, data_center=data_center) try: - start(node) + node_instance.start() except Exception as e0: log.debug('failed 1st bootstrap attempt with: \n{}'.format(e0)) # Try only twice try: - start(node) + node_instance.start() except Exception as e1: log.debug('failed 2nd bootstrap attempt with: \n{}'.format(e1)) log.error('Added node failed to start twice.') From 0eb25874c46d3c44f0be84f99cc653684719cca7 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 13 Sep 2019 15:10:42 -0500 Subject: [PATCH 1622/2005] some license updates --- cassandra/auth.py | 7 ++++++- tests/integration/advanced/test_adv_metadata.py | 12 ++++++++---- tests/integration/advanced/test_graph.py | 2 +- 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/cassandra/auth.py b/cassandra/auth.py index 276646a63b..38030b912d 100644 --- a/cassandra/auth.py +++ b/cassandra/auth.py @@ -6,7 +6,12 @@ # # http://www.apache.org/licenses/LICENSE-2.0 # -# http://www.datastax.com/terms/datastax-dse-driver-license-terms +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import socket import logging diff --git a/tests/integration/advanced/test_adv_metadata.py b/tests/integration/advanced/test_adv_metadata.py index f7a29d3d3b..7fd1ccbc1b 100644 --- a/tests/integration/advanced/test_adv_metadata.py +++ b/tests/integration/advanced/test_adv_metadata.py @@ -1,12 +1,16 @@ # Copyright DataStax, Inc. # -# Licensed under the DataStax DSE Driver License; +# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. -# # You may obtain a copy of the License at # -# http://www.datastax.com/terms/datastax-dse-driver-license-terms -# from tests.integration import BasicSharedKeyspaceUnitTestCase, use_single_node +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from tests.integration import (BasicSharedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCaseRF1, diff --git a/tests/integration/advanced/test_graph.py b/tests/integration/advanced/test_graph.py index 7f977e6cea..efb0a02838 100644 --- a/tests/integration/advanced/test_graph.py +++ b/tests/integration/advanced/test_graph.py @@ -15,7 +15,7 @@ try: import unittest2 as unittest except ImportError: - import unittest # noqa# http://www.datastax.com/terms/datastax-dse-driver-license-terms + import unittest # noqa from copy import copy from itertools import chain From e0bb12a95c8bfbeac77d274be35556eae9aa4f53 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 13 Sep 2019 18:45:17 -0400 Subject: [PATCH 1623/2005] Fix test_spark and disable protocol v1/v2 tests for now --- build.yaml | 12 ++++++------ tests/integration/advanced/__init__.py | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/build.yaml b/build.yaml index afff107164..4ea2394a20 100644 --- a/build.yaml +++ b/build.yaml @@ -251,12 +251,12 @@ build: EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true # TODO: Temporary, don't merge this 2.1 stuff... - if [ "$CCM_CASSANDRA_VERSION" == "2.1.21" ]; then - echo "Testing cassandra 2.1 with protocol V2" - PROTOCOL_VERSION=2 EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_v2_results.xml tests/integration/standard/ || true - echo "Testing cassandra 2.1 with protocol V1" - PROTOCOL_VERSION=1 EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_v1_results.xml tests/integration/standard/ || true - fi + # if [ "$CCM_CASSANDRA_VERSION" == "2.1.21" ]; then + # echo "Testing cassandra 2.1 with protocol V2" + # PROTOCOL_VERSION=2 EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_v2_results.xml tests/integration/standard/ || true + # echo "Testing cassandra 2.1 with protocol V1" + # PROTOCOL_VERSION=1 EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_v1_results.xml tests/integration/standard/ || true + # fi if [ -n "$DSE_VERSION" ] && ! [[ $DSE_VERSION == "4.8"* ]]; then echo "==========RUNNING DSE INTEGRATION TESTS==========" diff --git a/tests/integration/advanced/__init__.py b/tests/integration/advanced/__init__.py index 66660a1f10..07f390397d 100644 --- a/tests/integration/advanced/__init__.py +++ b/tests/integration/advanced/__init__.py @@ -33,7 +33,7 @@ from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_DEFAULT, EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT from tests.integration import PROTOCOL_VERSION, DSE_VERSION, get_server_versions, BasicKeyspaceUnitTestCase, \ - drop_keyspace_shutdown_cluster, get_node, USE_CASS_EXTERNAL + drop_keyspace_shutdown_cluster, get_node, USE_CASS_EXTERNAL, CASSANDRA_IP from tests.integration import use_singledc, use_single_node, wait_for_node_socket from cassandra.protocol import ServerError from cassandra.util import Point, LineString, Polygon @@ -175,7 +175,7 @@ def wait_for_spark_workers(num_of_expected_workers, timeout): start_time = time.time() while True: opener = build_opener(HTTPHandler) - request = Request("http://{0}:7080".format(DSE_IP)) + request = Request("http://{0}:7080".format(CASSANDRA_IP)) request.get_method = lambda: 'GET' connection = opener.open(request) match = re.search('Alive Workers:.*(\d+)', connection.read().decode('utf-8')) From 70beac2273b0c220c2d507887a959a293bd744f2 Mon Sep 17 00:00:00 2001 From: nick Date: Mon, 16 Sep 2019 10:24:15 +0200 Subject: [PATCH 1624/2005] Second fix for PYTHON-1044 --- cassandra/cluster.py | 1 + tests/unit/test_response_future.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 680510e07b..2e8c47dff4 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3730,6 +3730,7 @@ def _on_timeout(self, _attempts=0): key = "Connection defunct by heartbeat" errors = {key: "Client request timeout. See Session.execute[_async](timeout)"} self._set_final_exception(OperationTimedOut(errors, self._current_host)) + return pool = self.session._pools.get(self._current_host) if pool and not pool.is_shutdown: diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index 36916c130c..87f1bd683e 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -156,7 +156,7 @@ def test_heartbeat_defunct_deadlock(self): # Simulate ResponseFuture timing out rf._on_timeout() - self.assertRaises(OperationTimedOut, rf.result) + self.assertRaisesRegexp(OperationTimedOut, "Connection defunct by heartbeat", rf.result) def test_read_timeout_error_message(self): session = self.make_session() From 5049dfdf365447009f1483c5cb62e5bc8c86327c Mon Sep 17 00:00:00 2001 From: nick Date: Mon, 16 Sep 2019 10:24:34 +0200 Subject: [PATCH 1625/2005] Added integration test --- .../integration/simulacron/test_connection.py | 75 ++++++++++++++++++- tests/integration/util.py | 11 +++ 2 files changed, 85 insertions(+), 1 deletion(-) diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index 4bc52d58de..33e68e6a9e 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -28,7 +28,7 @@ from cassandra.io.asyncorereactor import AsyncoreConnection from tests import connection_class, thread_pool_executor_class from tests.integration import (PROTOCOL_VERSION, requiressimulacron) -from tests.integration.util import assert_quiescent_pool_state +from tests.integration.util import assert_quiescent_pool_state, late from tests.integration.simulacron import SimulacronBase from tests.integration.simulacron.utils import (NO_THEN, PrimeOptions, prime_query, prime_request, @@ -178,6 +178,79 @@ def test_callbacks_and_pool_when_oto(self): errback.assert_called_once() callback.assert_not_called() + def test_heartbeat_defunct_deadlock(self): + """ + Ensure that there is no deadlock when request is in-flight and heartbeat defuncts connection + @since 3.16 + @jira_ticket PYTHON-1044 + @expected_result an OperationTimeout is raised and no deadlock occurs + + @test_category connection + """ + start_and_prime_singledc() + + # This is all about timing. We will need the QUERY response future to time out and the heartbeat to defunct + # at the same moment. The latter will schedule a QUERY retry to another node in case the pool is not + # already shut down. If and only if the response future timeout falls in between the retry scheduling and + # its execution the deadlock occurs. The odds are low, so we need to help fate a bit: + # 1) Make one heartbeat messages be sent to every node + # 2) Our QUERY goes always to the same host + # 3) This host needs to defunct first + # 4) Open a small time window for the response future timeout, i.e. block executor threads for retry + # execution and last connection to defunct + query_to_prime = "SELECT * from testkesypace.testtable" + query_host = "127.0.0.2" + heartbeat_interval = 1 + heartbeat_timeout = 1 + lag = 0.05 + never = 9999 + + class PatchedRoundRobinPolicy(RoundRobinPolicy): + # Send always to same host + def make_query_plan(self, working_keyspace=None, query=None): + print query + print self._live_hosts + if query and query.query_string == query_to_prime: + return filter(lambda h: h == query_host, self._live_hosts) + else: + return super(PatchedRoundRobinPolicy, self).make_query_plan() + + class PatchedCluster(Cluster): + # Make sure that QUERY connection will timeout first + def get_connection_holders(self): + holders = super(PatchedCluster, self).get_connection_holders() + return sorted(holders, reverse=True, key=lambda v: int(v._connection.host == query_host)) + + # Block executor thread like closing a dead socket could do + def connection_factory(self, *args, **kwargs): + conn = super(PatchedCluster, self).connection_factory(*args, **kwargs) + conn.defunct = late(seconds=2*lag)(conn.defunct) + return conn + + cluster = PatchedCluster( + protocol_version=PROTOCOL_VERSION, + compression=False, + idle_heartbeat_interval=heartbeat_interval, + idle_heartbeat_timeout=heartbeat_timeout, + load_balancing_policy=PatchedRoundRobinPolicy() + ) + session = cluster.connect() + self.addCleanup(cluster.shutdown) + + prime_query(query_to_prime, then={"delay_in_ms": never}) + + # Make heartbeat due + time.sleep(heartbeat_interval) + + future = session.execute_async(query_to_prime, timeout=heartbeat_interval+heartbeat_timeout+3*lag) + # Delay thread execution like kernel could do + future._retry_task = late(seconds=4*lag)(future._retry_task) + + prime_request(PrimeOptions(then={"result": "no_result", "delay_in_ms": never})) + prime_request(RejectConnections("unbind")) + + self.assertRaisesRegexp(OperationTimedOut, "Connection defunct by heartbeat", future.result) + def test_close_when_query(self): """ Test to ensure the driver behaves correctly if the connection is closed diff --git a/tests/integration/util.py b/tests/integration/util.py index 4e7afc5618..a2ce9d5c3f 100644 --- a/tests/integration/util.py +++ b/tests/integration/util.py @@ -13,6 +13,7 @@ # limitations under the License. from tests.integration import PROTOCOL_VERSION +from functools import wraps import time @@ -96,3 +97,13 @@ def wrapped_condition(): # last attempt, let the exception raise condition() + + +def late(seconds=1): + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + time.sleep(seconds) + func(*args, **kwargs) + return wrapper + return decorator From fcaee5e7191365292c953901b4f843f7f5431dec Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 16 Sep 2019 09:24:43 -0400 Subject: [PATCH 1626/2005] Add mising tests/integration/cqlengine/advanced/__init__.py --- tests/integration/cqlengine/advanced/__init__.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 tests/integration/cqlengine/advanced/__init__.py diff --git a/tests/integration/cqlengine/advanced/__init__.py b/tests/integration/cqlengine/advanced/__init__.py new file mode 100644 index 0000000000..386372eb4a --- /dev/null +++ b/tests/integration/cqlengine/advanced/__init__.py @@ -0,0 +1,14 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + From 9db5a17e5c1a98a9d78e6f43f890a389b06758d1 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 16 Sep 2019 13:43:41 -0400 Subject: [PATCH 1627/2005] Some minor tests fixes --- build.yaml | 12 ++++++------ tests/integration/standard/test_metadata.py | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/build.yaml b/build.yaml index 4ea2394a20..d0af7604b5 100644 --- a/build.yaml +++ b/build.yaml @@ -234,21 +234,21 @@ build: "tests/integration/standard/test_query.py" "tests/integration/simulacron/test_endpoint.py" ) - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml ${EVENT_LOOP_TESTS[@]} || true + EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" DSE_VERSION=$DSE_VERSION CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml ${EVENT_LOOP_TESTS[@]} || true exit 0 fi echo "Running with event loop manager: $EVENT_LOOP_MANAGER" echo "==========RUNNING SIMULACRON TESTS==========" SIMULACRON_JAR="$HOME/simulacron.jar" - SIMULACRON_JAR=$SIMULACRON_JAR EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_DIR=$CCM_INSTALL_DIR CCM_ARGS="$CCM_ARGS" DSE_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true + SIMULACRON_JAR=$SIMULACRON_JAR EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_DIR=$CCM_INSTALL_DIR CCM_ARGS="$CCM_ARGS" DSE_VERSION=$DSE_VERSION CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true echo "Running with event loop manager: $EVENT_LOOP_MANAGER" echo "==========RUNNING CQLENGINE TESTS==========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true + EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" DSE_VERSION=$DSE_VERSION CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true echo "==========RUNNING INTEGRATION TESTS==========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true + EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" DSE_VERSION=$DSE_VERSION CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true # TODO: Temporary, don't merge this 2.1 stuff... # if [ "$CCM_CASSANDRA_VERSION" == "2.1.21" ]; then @@ -260,12 +260,12 @@ build: if [ -n "$DSE_VERSION" ] && ! [[ $DSE_VERSION == "4.8"* ]]; then echo "==========RUNNING DSE INTEGRATION TESTS==========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_DIR=$CCM_INSTALL_DIR DSE_VERSION=$CCM_VERSION ADS_HOME=$HOME/ VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=dse_results.xml tests/integration/advanced/ || true + EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_DIR=$CCM_INSTALL_DIR DSE_VERSION=$DSE_VERSION ADS_HOME=$HOME/ VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=dse_results.xml tests/integration/advanced/ || true fi if [ -z "$EXCLUDE_LONG" ]; then echo "==========RUNNING LONG INTEGRATION TESTS==========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true + EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" DSE_VERSION=$DSE_VERSION CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true fi - xunit: - "*_results.xml" diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index ef269b00fe..a5038672d4 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -82,7 +82,7 @@ def test_broadcast_listen_address(self): self.assertTrue(local_host in rpc_addrs) @unittest.skipUnless( - os.getenv('MAPPED_CASSANDRA_VERSION', None) is None, + os.getenv('MAPPED_CASSANDRA_VERSION', None) is not None, "Don't check the host version for test-dse") def test_host_release_version(self): """ From 40aef8e8b5a56bfbe9132ffac9fda9bf018647cb Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 16 Sep 2019 16:06:24 -0400 Subject: [PATCH 1628/2005] Merging various root files --- CHANGELOG.rst | 58 +++++++++++++++++++++++ README-dev.rst | 122 ++++++++++++++++++++++++++++++++++++++++++++----- setup.py | 15 ++++-- tox.ini | 2 + 4 files changed, 181 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index adbe0879f9..3f6dbe4e8f 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,61 @@ +3.21.0 +====== +Unreleased + +Merged from dse-driver: + +Features +-------- + +* Insights integration (PYTHON-1047) +* Graph execution profiles should preserve their graph_source when graph_options is overridden (PYTHON-1021) +* Add NodeSync metadata (PYTHON-799) +* Add new NodeSync failure values (PYTHON-934) +* DETERMINISTIC and MONOTONIC Clauses for Functions and Aggregates (PYTHON-955) +* GraphOptions should show a warning for unknown parameters (PYTHON-819) +* DSE protocol version 2 and continous paging backpressure (PYTHON-798) +* GraphSON2 Serialization/Deserialization Support (PYTHON-775) +* Add graph-results payload option for GraphSON format (PYTHON-773) +* Create an AuthProvider for the DSE transitional mode (PYTHON-831) +* Implement serializers for the Graph String API (PYTHON-778) +* Provide deserializers for GraphSON types (PYTHON-782) +* Add Graph DurationType support (PYTHON-607) +* Support DSE DateRange type (PYTHON-668) +* RLAC CQL output for materialized views (PYTHON-682) +* Add Geom Types wkt deserializer +* DSE Graph Client timeouts in custom payload (PYTHON-589) +* Make DSEGSSAPIAuthProvider accept principal name (PYTHON-574) +* Add config profiles to DSE graph execution (PYTHON-570) +* DSE Driver version checking (PYTHON-568) +* Distinct default timeout for graph queries (PYTHON-477) +* Graph result parsing for known types (PYTHON-479,487) +* Distinct read/write CL for graph execution (PYTHON-509) +* Target graph analytics query to spark master when available (PYTHON-510) + +Bug Fixes +--------- + +* Continuous paging sessions raise RuntimeError when results are not entirely consumed (PYTHON-1054) +* GraphSON Property deserializer should return a dict instead of a set (PYTHON-1033) +* ResponseFuture.has_more_pages may hold the wrong value (PYTHON-946) +* DETERMINISTIC clause in AGGREGATE misplaced in CQL generation (PYTHON-963) +* graph module import cause a DLL issue on Windows due to its cythonizing failure (PYTHON-900) +* Update date serialization to isoformat in graph (PYTHON-805) +* DateRange Parse Error (PYTHON-729) +* MontonicTimestampGenerator.__init__ ignores class defaults (PYTHON-728) +* metadata.get_host returning None unexpectedly (PYTHON-709) +* Sockets associated with sessions not getting cleaned up on session.shutdown() (PYTHON-673) +* Resolve FQDN from ip address and use that as host passed to SASLClient (PYTHON-566) +* Geospatial type implementations don't handle 'EMPTY' values. (PYTHON-481) +* Correctly handle other types in geo type equality (PYTHON-508) + +Other +----- +* Add tests around cqlengine and continuous paging (PYTHON-872) +* Add an abstract GraphStatement to handle different graph statements (PYTHON-789) +* Write documentation examples for DSE 2.0 features (PYTHON-732) +* DSE_V1 protocol should not include all of protocol v5 (PYTHON-694) + 3.20.0 ====== Unreleased diff --git a/README-dev.rst b/README-dev.rst index c10aed2330..8feaf14a0d 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -2,23 +2,33 @@ Releasing ========= * Run the tests and ensure they all pass * Update CHANGELOG.rst - * Check for any missing entries * Add today's date to the release section * Update the version in ``cassandra/__init__.py`` - * For beta releases, use a version like ``(2, 1, '0b1')`` * For release candidates, use a version like ``(2, 1, '0rc1')`` * When in doubt, follow PEP 440 versioning * Add the new version in ``docs.yaml`` - * Commit the changelog and version changes, e.g. ``git commit -m'version 1.0.0'`` * Tag the release. For example: ``git tag -a 1.0.0 -m 'version 1.0.0'`` * Push the tag and new ``master``: ``git push origin 1.0.0 ; git push origin master`` -* Upload the package to pypi:: +* Update the `python-driver` submodule of `python-driver-wheels`, + commit then push. This will trigger TravisCI and the wheels building. +* For a GA release, upload the package to pypi:: + + # Clean the working directory + python setup.py clean + rm dist/* + + # Build the source distribution + python setup.py sdist - python setup.py register - python setup.py sdist upload + # Download all wheels from the jfrog repository and copy them in + # the dist/ directory + cp /path/to/wheels/*.whl dist/ + + # Upload all files + twine upload dist/* * On pypi, make the latest GA the only visible version * Update the docs (see below) @@ -27,6 +37,12 @@ Releasing * After a beta or rc release, this should look like ``(2, 1, '0b1', 'post0')`` +* After the release has been tagged, add a section to docs.yaml with the new tag ref:: + + versions: + - name: + ref: + * Commit and push * Update 'cassandra-test' branch to reflect new release @@ -80,6 +96,39 @@ directory and build from scratch:: rm -rf docs/_build/* +Documentor +========== +We now also use another tool called Documentor with Sphinx source to build docs. +This gives us versioned docs with nice integrated search. This is a private tool +of DataStax. + +Dependencies +------------ +Sphinx +~~~~~~ +Installed as described above + +Documentor +~~~~~~~~~~ +Clone and setup Documentor as specified in `the project `_. +This tool assumes Ruby, bundler, and npm are present. + +Building +-------- +The setup script expects documentor to be in the system path. You can either add it permanently or run with something +like this:: + + PATH=$PATH:/bin python setup.py doc + +The docs will not display properly just browsing the filesystem in a browser. To view the docs as they would be in most +web servers, use the SimpleHTTPServer module:: + + cd docs/_build/ + python -m SimpleHTTPServer + +Then, browse to `localhost:8000 `_. + + Running the Tests ================= In order for the extensions to be built and used in the test, run:: @@ -104,19 +153,40 @@ Use tee to capture logs and see them on your terminal:: nosetests -w tests/unit/ --nocapture --nologcapture 2>&1 | tee test.log -Specifying a Cassandra Version for Integration Tests +Specifying a Cassandra/DSE Version for Integration Tests ---------------------------------------------------- -You can specify a cassandra version with the ``CASSANDRA_VERSION`` environment variable:: +You can specify a cassandra version with the ``CASSANDRA_VERSION`` or ``DSE_VERSION` environment variable:: CASSANDRA_VERSION=2.0.9 nosetests -w tests/integration/standard + DSE_VERSION=6.7.4 nosetests -w tests/integration/standard You can also specify a cassandra directory (to test unreleased versions):: CASSANDRA_DIR=/home/thobbs/cassandra nosetests -w tests/integration/standard -Specifying the usage of an already running Cassandra cluster ----------------------------------------------------- -The test will start the appropriate Cassandra clusters when necessary but if you don't want this to happen because a Cassandra cluster is already running the flag ``USE_CASS_EXTERNAL`` can be used, for example: +For this to work with DSE, you have to build it before. Once the appropriate commit is checked out, inside the ``bdp`` folder: + + ./gradlew clean dist + +Running the advanced authentication tests for DSE +------------------------------------------------- +These tests are in the file ``tests/integration/advanced/test_auth.py``. These tests are run the same way +as the rest but first the we have to set the variable ADS_HOME: + + git clone https://github.com/riptano/testeng-devtools.git + cd testeng-devtools/EmbeddedAds + mvn clean install + cp target/embedded-ads-1.0.1-SNAPSHOT-*.jar embedded-ads.jar + export ADS_HOME=`pwd` + +After this we can run the tests normally from the appropriate folder: + + DSE_VERSION=6.7.4 nosetests -w tests/integration/advanced/test_auth.py + +Specifying the usage of an already running cluster +-------------------------------------------------- +The test will start the appropriate Cassandra clusters when necessary but if you don't want this to happen +because a Cassandra cluster is already running the flag ``USE_CASS_EXTERNAL`` can be used, for example: USE_CASS_EXTERNAL=1 python setup.py nosetests -w tests/integration/standard @@ -148,6 +218,10 @@ There needs to be a version of cassandra running locally so before running the b ccm create benchmark_cluster -v 3.0.1 -n 1 -s +If testing against DSE: + + ccm create 6.7.4 --dse --dse-username=your_username@datastax.com --dse-password=your_password -v 6.7.4 -n 1 -s + To run the benchmarks, pick one of the files under the ``benchmarks/`` dir and run it:: python benchmarks/future_batches.py @@ -168,4 +242,28 @@ name to specify the built version:: python setup.py egg_info -b-`git rev-parse --short HEAD` sdist --formats=zip -The file (``dist/cassandra-driver-.zip``) is packaged with Cassandra in ``cassandra/lib/cassandra-driver-internal-only*zip``. +The file (``dist/cassandra-driver-.zip``) is packaged with Cassandra in ``cassandra/lib/cassandra-driver-internal-only.zip``. + +Releasing an EAP +================ + +An EAP release is only uploaded on a private server and it is not published on pypi. + +* Clean the environment:: + + python setup.py clean + +* Package the source distribution:: + + python setup.py sdist + +* Test the source distribution:: + + pip install dist/cassandra-driver-.tar.gz + +* Upload the package on the EAP download server. +* Build the documentation:: + + python setup.py doc + +* Upload the docs on the EAP download server. \ No newline at end of file diff --git a/setup.py b/setup.py index 51cbffa3c3..c339d8bf90 100644 --- a/setup.py +++ b/setup.py @@ -412,12 +412,19 @@ def run_setup(extensions): setup( name='cassandra-driver', version=__version__, - description='Python driver for Cassandra', + description=' DataStax Driver for Apache Cassandra and DataStax products', long_description=long_description, url='http://github.com/datastax/python-driver', - author='Tyler Hobbs', - author_email='tyler@datastax.com', - packages=['cassandra', 'cassandra.io', 'cassandra.cqlengine', 'cassandra.graph', 'cassandra.insights'], + project_urls={ + 'Documentation': 'https://docs.datastax.com/en/developer/python-driver/latest/', + 'Source': 'https://github.com/datastax/python-driver/', + 'Issues': 'https://datastax-oss.atlassian.net/browse/PYTHON', + }, + author='DataStax', + packages=[ + 'cassandra', 'cassandra.io', 'cassandra.cqlengine', 'cassandra.graph', + 'cassandra.insights', 'cassandra.datastax' + ], keywords='cassandra,cql,orm,dse,graph', include_package_data=True, install_requires=dependencies, diff --git a/tox.ini b/tox.ini index 629c3f2261..ff04577dac 100644 --- a/tox.ini +++ b/tox.ini @@ -9,6 +9,8 @@ deps = nose cython eventlet twisted <15.5.0 + pure-sasl + kerberos futurist [testenv] From 1074040b9fcd828970bec6421e0e1793c8c5ce0f Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 16 Sep 2019 16:20:09 -0400 Subject: [PATCH 1629/2005] Simulacron should rely on PROTOCOL_VERSION --- tests/integration/simulacron/__init__.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/integration/simulacron/__init__.py b/tests/integration/simulacron/__init__.py index fee6b39a01..fa940d2289 100644 --- a/tests/integration/simulacron/__init__.py +++ b/tests/integration/simulacron/__init__.py @@ -16,7 +16,7 @@ except ImportError: import unittest # noqa -from tests.integration import requiredse, CASSANDRA_VERSION, DSE_VERSION, SIMULACRON_JAR +from tests.integration import requiredse, CASSANDRA_VERSION, DSE_VERSION, SIMULACRON_JAR, PROTOCOL_VERSION from tests.integration.simulacron.utils import ( clear_queries, start_and_prime_singledc, @@ -29,7 +29,8 @@ from packaging.version import Version -PROTOCOL_VERSION = 4 if (DSE_VERSION is None or DSE_VERSION >= Version('5.0')) else 3 +PROTOCOL_VERSION = PROTOCOL_VERSION if (DSE_VERSION is None or DSE_VERSION >= Version('5.0')) else 3 + def teardown_package(): stop_simulacron() From ec2a8016d16519f117e0faed045e4b8dc63e8af8 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 16 Sep 2019 17:49:04 -0400 Subject: [PATCH 1630/2005] Simulacron fix for protocol_version > 4 --- tests/integration/simulacron/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/simulacron/__init__.py b/tests/integration/simulacron/__init__.py index fa940d2289..6543265db2 100644 --- a/tests/integration/simulacron/__init__.py +++ b/tests/integration/simulacron/__init__.py @@ -29,7 +29,7 @@ from packaging.version import Version -PROTOCOL_VERSION = PROTOCOL_VERSION if (DSE_VERSION is None or DSE_VERSION >= Version('5.0')) else 3 +PROTOCOL_VERSION = min(4, PROTOCOL_VERSION if (DSE_VERSION is None or DSE_VERSION >= Version('5.0')) else 3) def teardown_package(): From d98d90a474aa438b6cabca5cabf53780191d0088 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 16 Sep 2019 23:19:38 -0500 Subject: [PATCH 1631/2005] Fixing the auth test failures --- tests/integration/standard/test_cluster.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index b45b65cd89..e1ccda0bb2 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -740,8 +740,12 @@ def _warning_are_issued_when_auth(self, auth_provider): self.assertIsNotNone(session.execute("SELECT * from system.local")) # Three conenctions to nodes plus the control connection - self.assertEqual(4, mock_handler.get_message_count('warning', - "An authentication challenge was not sent")) + auth_warning = mock_handler.get_message_count('warning', "An authentication challenge was not sent") + self.assertGreaterEqual(auth_warning, 4) + self.assertEqual( + auth_warning, + mock_handler.get_message_count("debug", "Got ReadyMessage on new connection") + ) def test_idle_heartbeat(self): interval = 2 From 9e509d9402adafade28881b8c710dafb3a0b5042 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 17 Sep 2019 09:31:49 -0400 Subject: [PATCH 1632/2005] Add test_protocol_divergence_v5_fail_by_continuous_paging --- .../standard/test_custom_protocol_handler.py | 37 ++++++++++++++++++- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index 20e3100b1a..2ab847677e 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -19,11 +19,13 @@ from cassandra.protocol import ProtocolHandler, ResultMessage, QueryMessage, UUIDType, read_int from cassandra.query import tuple_factory, SimpleStatement -from cassandra.cluster import Cluster, ResponseFuture, ExecutionProfile, EXEC_PROFILE_DEFAULT +from cassandra.cluster import (Cluster, ResponseFuture, ExecutionProfile, EXEC_PROFILE_DEFAULT, + ContinuousPagingOptions, NoHostAvailable) from cassandra import ProtocolVersion, ConsistencyLevel from tests.integration import use_singledc, PROTOCOL_VERSION, drop_keyspace_shutdown_cluster, \ - greaterthanorequalcass30, execute_with_long_wait_retry, greaterthanorequaldse51, greaterthanorequalcass3_10 + greaterthanorequalcass30, execute_with_long_wait_retry, greaterthanorequaldse51, greaterthanorequalcass3_10, \ + greaterthanorequalcass31 from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES from tests.integration.standard.utils import create_table_with_all_types, get_all_primitive_params from six import binary_type @@ -120,6 +122,37 @@ def test_custom_raw_row_results_all_types(self): self.assertEqual(len(CustomResultMessageTracked.checked_rev_row_set), len(PRIMITIVE_DATATYPES)-1) cluster.shutdown() + @greaterthanorequalcass31 + def test_protocol_divergence_v5_fail_by_continuous_paging(self): + """ + Test to validate that V5 and DSE_V1 diverge. ContinuousPagingOptions is not supported by V5 + + @since DSE 2.0b3 GRAPH 1.0b1 + @jira_ticket PYTHON-694 + @expected_result NoHostAvailable will be risen when the continuous_paging_options parameter is set + + @test_category connection + """ + cluster = Cluster(protocol_version=ProtocolVersion.V5, allow_beta_protocol_version=True) + session = cluster.connect() + + max_pages = 4 + max_pages_per_second = 3 + continuous_paging_options = ContinuousPagingOptions(max_pages=max_pages, + max_pages_per_second=max_pages_per_second) + + future = self._send_query_message(session, timeout=session.default_timeout, + consistency_level=ConsistencyLevel.ONE, + continuous_paging_options=continuous_paging_options) + + # This should raise NoHostAvailable because continuous paging is not supported under ProtocolVersion.DSE_V1 + with self.assertRaises(NoHostAvailable) as context: + future.result() + self.assertIn("Continuous paging may only be used with protocol version ProtocolVersion.DSE_V1 or higher", + str(context.exception)) + + cluster.shutdown() + @greaterthanorequalcass30 def test_protocol_divergence_v4_fail_by_flag_uses_int(self): """ From e7aaaa5b4476d1fa9858124a46ec8764e11b349b Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 17 Sep 2019 13:17:50 -0400 Subject: [PATCH 1633/2005] Replaced a bunch of wrong @since tag in tests --- cassandra/auth.py | 4 +- .../integration/advanced/test_adv_metadata.py | 6 +-- tests/integration/advanced/test_auth.py | 22 +++++----- .../integration/advanced/test_cont_paging.py | 10 ++--- .../test_cqlengine_where_operators.py | 2 +- tests/integration/advanced/test_geometry.py | 24 +++++------ tests/integration/advanced/test_graph.py | 40 +++++++++---------- tests/integration/advanced/test_spark.py | 2 +- 8 files changed, 55 insertions(+), 55 deletions(-) diff --git a/cassandra/auth.py b/cassandra/auth.py index 38030b912d..4e33761144 100644 --- a/cassandra/auth.py +++ b/cassandra/auth.py @@ -144,8 +144,8 @@ class TransitionalModePlainTextAuthProvider(object): Example usage:: - from dse.cluster import Cluster - from dse.auth import TransitionalModePlainTextAuthProvider + from cassandra.cluster import Cluster + from cassandra.auth import TransitionalModePlainTextAuthProvider auth_provider = TransitionalModePlainTextAuthProvider() cluster = Cluster(auth_provider=auth_provider) diff --git a/tests/integration/advanced/test_adv_metadata.py b/tests/integration/advanced/test_adv_metadata.py index 7fd1ccbc1b..25df0323a1 100644 --- a/tests/integration/advanced/test_adv_metadata.py +++ b/tests/integration/advanced/test_adv_metadata.py @@ -193,7 +193,7 @@ def test_rlac_on_table(self): """ Checks to ensure that the RLAC table extension appends the proper cql on export to tables - @since 2.0 + @since 3.20 @jira_ticket PYTHON-638 @expected_result Invalid hosts on the contact list should be excluded @@ -217,7 +217,7 @@ def test_rlac_on_mv(self): """ Checks to ensure that the RLAC table extension appends the proper cql to export on mV's - @since 2.0 + @since 3.20 @jira_ticket PYTHON-682 @expected_result Invalid hosts on the contact list should be excluded @@ -265,7 +265,7 @@ def test_nodesync_on_table(self): """ Checks to ensure that nodesync is visible through driver metadata - @since DSE6.0 + @since 3.20 @jira_ticket PYTHON-799 @expected_result nodesync should be enabled diff --git a/tests/integration/advanced/test_auth.py b/tests/integration/advanced/test_auth.py index fb8cbd1efd..533a9e70ed 100644 --- a/tests/integration/advanced/test_auth.py +++ b/tests/integration/advanced/test_auth.py @@ -168,7 +168,7 @@ def connect_and_query(self, auth_provider, query=None): def test_should_not_authenticate_with_bad_user_ticket(self): """ This tests will attempt to authenticate with a user that has a valid ticket, but is not a valid dse user. - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-457 @test_category dse auth @expected_result NoHostAvailable exception should be thrown @@ -181,7 +181,7 @@ def test_should_not_authenticate_with_bad_user_ticket(self): def test_should_not_athenticate_without_ticket(self): """ This tests will attempt to authenticate with a user that is valid but has no ticket - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-457 @test_category dse auth @expected_result NoHostAvailable exception should be thrown @@ -193,7 +193,7 @@ def test_should_not_athenticate_without_ticket(self): def test_connect_with_kerberos(self): """ This tests will attempt to authenticate with a user that is valid and has a ticket - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-457 @test_category dse auth @expected_result Client should be able to connect and run a basic query @@ -211,7 +211,7 @@ def test_connect_with_kerberos(self): def test_connect_with_kerberos_and_graph(self): """ This tests will attempt to authenticate with a user and execute a graph query - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-457 @test_category dse auth @expected_result Client should be able to connect and run a basic graph query with authentication @@ -235,7 +235,7 @@ def test_connect_with_kerberos_host_not_resolved(self): This tests will attempt to authenticate with IP, this will fail on osx. The success or failure of this test is dependent on a reverse dns lookup which can be impacted by your environment if it fails don't panic. - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-566 @test_category dse auth @expected_result Client should error when ip is used @@ -247,7 +247,7 @@ def test_connect_with_kerberos_host_not_resolved(self): def test_connect_with_explicit_principal(self): """ This tests will attempt to authenticate using valid and invalid user principals - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-574 @test_category dse auth @expected_result Client principals should be used by the underlying mechanism @@ -478,7 +478,7 @@ def connect_and_query(self, auth_provider, execute_as=None, query="SELECT * FROM def test_proxy_login_forbidden(self): """ Test that a proxy login is forbidden by default for a user. - @since 2.0.0 + @since 3.20 @jira_ticket PYTHON-662 @test_category dse auth @expected_result connect and query should not be allowed @@ -490,7 +490,7 @@ def test_proxy_login_forbidden(self): def test_proxy_login_allowed(self): """ Test that a proxy login is allowed with proper permissions. - @since 2.0.0 + @since 3.20 @jira_ticket PYTHON-662 @test_category dse auth @expected_result connect and query should be allowed @@ -502,7 +502,7 @@ def test_proxy_login_allowed(self): def test_proxy_execute_forbidden(self): """ Test that a proxy execute is forbidden by default for a user. - @since 2.0.0 + @since 3.20 @jira_ticket PYTHON-662 @test_category dse auth @expected_result connect and query should not be allowed @@ -514,7 +514,7 @@ def test_proxy_execute_forbidden(self): def test_proxy_execute_allowed(self): """ Test that a proxy execute is allowed with proper permissions. - @since 2.0.0 + @since 3.20 @jira_ticket PYTHON-662 @test_category dse auth @expected_result connect and query should be allowed @@ -526,7 +526,7 @@ def test_proxy_execute_allowed(self): def test_connection_with_transitional_mode(self): """ Test that the driver can connect using TransitionalModePlainTextAuthProvider - @since 3.12 + @since 3.20 @jira_ticket PYTHON-831 @test_category dse auth @expected_result connect and query should be allowed diff --git a/tests/integration/advanced/test_cont_paging.py b/tests/integration/advanced/test_cont_paging.py index 47cd02bb55..82b3fe2960 100644 --- a/tests/integration/advanced/test_cont_paging.py +++ b/tests/integration/advanced/test_cont_paging.py @@ -77,7 +77,7 @@ def create_cluster(cls): def test_continous_paging(self): """ Test to ensure that various continuous paging schemes return the full set of results. - @since DSE 2.0 + @since 3.20 @jira_ticket PYTHON-615 @expected_result various continous paging options should fetch all the results @@ -91,7 +91,7 @@ def test_continous_paging(self): def test_page_fetch_size(self): """ Test to ensure that continuous paging works appropriately with fetch size. - @since DSE 2.0 + @since 3.20 @jira_ticket PYTHON-615 @expected_result continuous paging options should work sensibly with various fetch size @@ -126,7 +126,7 @@ def test_page_fetch_size(self): def test_paging_cancel(self): """ Test to ensure we can cancel a continuous paging session once it's started - @since DSE 2.0 + @since 3.20 @jira_ticket PYTHON-615 @expected_result This query should be canceled before any sizable amount of results can be returned @test_category queries @@ -143,7 +143,7 @@ def test_paging_cancel(self): def test_con_paging_verify_writes(self): """ Test to validate results with a few continuous paging options - @since DSE 2.0 + @since 3.20 @jira_ticket PYTHON-615 @expected_result all results should be returned correctly @test_category queries @@ -189,7 +189,7 @@ def test_can_get_results_when_no_more_pages(self): """ Test to validate that the resutls can be fetched when has_more_pages is False - @since DSE 2.5 + @since 3.20 @jira_ticket PYTHON-946 @expected_result the results can be fetched @test_category queries diff --git a/tests/integration/advanced/test_cqlengine_where_operators.py b/tests/integration/advanced/test_cqlengine_where_operators.py index 06913db60f..9497feabd7 100644 --- a/tests/integration/advanced/test_cqlengine_where_operators.py +++ b/tests/integration/advanced/test_cqlengine_where_operators.py @@ -75,7 +75,7 @@ def test_is_not_null_execution(self): we'd have to create an index in search for the column we are using IsNotNull - @since 2.5 + @since 3.20 @jira_ticket PYTHON-968 @expected_result InvalidRequest is arisen diff --git a/tests/integration/advanced/test_geometry.py b/tests/integration/advanced/test_geometry.py index f10900bbfd..8bee144d19 100644 --- a/tests/integration/advanced/test_geometry.py +++ b/tests/integration/advanced/test_geometry.py @@ -39,7 +39,7 @@ class AbstractGeometricTypeTest(): def test_should_insert_simple(self): """ This tests will attempt to insert a point, polygon, or line, using simple inline formating. - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-456 @test_category dse geometric @expected_result geometric types should be able to be inserted and queried. @@ -51,7 +51,7 @@ def test_should_insert_simple(self): def test_should_insert_simple_prepared(self): """ This tests will attempt to insert a point, polygon, or line, using prepared statements. - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-456 @test_category dse geometric @expected_result geometric types should be able to be inserted and queried. @@ -64,7 +64,7 @@ def test_should_insert_simple_prepared(self): def test_should_insert_simple_prepared_with_bound(self): """ This tests will attempt to insert a point, polygon, or line, using prepared statements and bind. - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-456 @test_category dse geometric @expected_result geometric types should be able to be inserted and queried. @@ -78,7 +78,7 @@ def test_should_insert_simple_prepared_with_bound(self): def test_should_insert_as_list(self): """ This tests will attempt to insert a point, polygon, or line, as values of list. - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-456 @test_category dse geometric @expected_result geometric types should be able to be inserted and queried as a list. @@ -92,7 +92,7 @@ def test_should_insert_as_list(self): def test_should_insert_as_set(self): """ This tests will attempt to insert a point, polygon, or line, as values of set. - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-456 @test_category dse geometric @expected_result geometric types should be able to be inserted and queried as a set. @@ -106,7 +106,7 @@ def test_should_insert_as_set(self): def test_should_insert_as_map_keys(self): """ This tests will attempt to insert a point, polygon, or line, as keys of a map. - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-456 @test_category dse geometric @expected_result geometric types should be able to be inserted and queried as keys of a map. @@ -120,7 +120,7 @@ def test_should_insert_as_map_keys(self): def test_should_insert_as_map_values(self): """ This tests will attempt to insert a point, polygon, or line, as values of a map. - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-456 @test_category dse geometric @expected_result geometric types should be able to be inserted and queried as values of a map. @@ -134,7 +134,7 @@ def test_should_insert_as_map_values(self): def test_should_insert_as_tuple(self): """ This tests will attempt to insert a point, polygon, or line, as values of a tuple. - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-456 @test_category dse geometric @expected_result geometric types should be able to be inserted and queried as values of a tuple. @@ -148,7 +148,7 @@ def test_should_insert_as_tuple(self): def test_should_insert_as_udt(self): """ This tests will attempt to insert a point, polygon, or line, as members of a udt. - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-456 @test_category dse geometric @expected_result geometric types should be able to be inserted and queried as members of a udt. @@ -167,7 +167,7 @@ def test_should_insert_as_udt(self): def test_should_accept_as_partition_key(self): """ This tests will attempt to insert a point, polygon, or line, as a partition key. - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-456 @test_category dse geometric @expected_result geometric types should be able to be inserted and queried as a partition key. @@ -190,7 +190,7 @@ def validate(self, value, key, expected): def test_insert_empty_with_string(self): """ This tests will attempt to insert a point, polygon, or line, as Empty - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-481 @test_category dse geometric @expected_result EMPTY as a keyword should be honored @@ -202,7 +202,7 @@ def test_insert_empty_with_string(self): def test_insert_empty_with_object(self): """ This tests will attempt to insert a point, polygon, or line, as Empty - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-481 @test_category dse geometric @expected_result EMPTY as a keyword should be used with empty objects diff --git a/tests/integration/advanced/test_graph.py b/tests/integration/advanced/test_graph.py index efb0a02838..0d967d67eb 100644 --- a/tests/integration/advanced/test_graph.py +++ b/tests/integration/advanced/test_graph.py @@ -65,7 +65,7 @@ def test_basic_query(self): See reference graph here http://www.tinkerpop.com/docs/3.0.0.M1/ - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-457 @expected_result graph should find two vertices related to marco via 'knows' edges. @@ -87,7 +87,7 @@ def test_classic_graph(self): ensureing that each one is correct. See reference graph here http://www.tinkerpop.com/docs/3.0.0.M1/ - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-457 @expected_result graph should generate and all vertices and edge results should be @@ -105,7 +105,7 @@ def test_edge_properties_with_graphson2(self): """ Test that the edge property deserializer return a dict, and not a set. - @since 1.5 + @since 3.20 @jira_ticket PYTHON-1033 @expected_result the properties are stored in a dict @@ -124,7 +124,7 @@ def test_graph_classic_path(self): Test to validate that the path version of the result type is generated correctly. It also tests basic path results as that is not covered elsewhere - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-479 @expected_result path object should be unpacked correctly including all nested edges and verticies @test_category dse graph @@ -145,7 +145,7 @@ def test_large_create_script(self): Creates a very large line graph script and executes it. Then proceeds to create a line graph script that is to large for the server to handle expects a server error to be returned - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-457 @expected_result graph should generate and all vertices and edge results should be @@ -164,7 +164,7 @@ def test_range_query(self): limited query against it, and ensure that the results are formated correctly and that the result set is properly sized. - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-457 @expected_result result set should be properly formated and properly sized @@ -185,7 +185,7 @@ def test_large_result_set(self): Creates a very large graph. Ensures that large result sets are handled appropriately. - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-457 @expected_result when limits of result sets are hit errors should be surfaced appropriately @@ -200,7 +200,7 @@ def test_parameter_passing(self): """ Test to validate that parameter passing works as expected - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-457 @expected_result parameters work as expected @@ -225,7 +225,7 @@ def test_consistency_passing(self): """ Test to validated that graph consistency levels are properly surfaced to the base driver - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-509 @expected_result graph consistency levels are surfaced correctly @test_category dse graph @@ -295,7 +295,7 @@ def test_geometric_graph_types(self): Creates a very simple graph, and tries to insert a simple point type - @since 1.0.0 + @since 3.20 @jira_ticket DSP-8087 @expected_result json types associated with insert is parsed correctly @@ -316,7 +316,7 @@ def test_result_forms(self): Creates a very simple graph, and tries to insert a simple point type - @since 1.0.0 + @since 3.20 @jira_ticket DSP-8087 @expected_result json types assoicated with insert is parsed correctly @@ -343,7 +343,7 @@ def test_vertex_multiple_properties(self): Default is single (this is config dependent) - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-487 @test_category dse graph @@ -391,7 +391,7 @@ def test_vertex_property_properties(self): """ Test verifying vertex property properties - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-487 @test_category dse graph @@ -501,7 +501,7 @@ def test_result_types(self): """ Test to validate that the edge and vertex version of results are constructed correctly. - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-479 @expected_result edge/vertex result types should be unpacked correctly. @test_category dse graph @@ -547,7 +547,7 @@ def test_all_types_graphson2(self): """ Sends and reads back all the available types verifying they were written correctly - @since 2.3.0 + @since 3.20 @jira_ticket PYTHON-775 @test_category dse graph @@ -602,7 +602,7 @@ def test_should_wait_indefinitely_by_default(self): """ Tests that by default the client should wait indefinitely for server timeouts - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-589 @test_category dse graph @@ -628,7 +628,7 @@ def test_request_timeout_less_then_server(self): Tests that with explicit request_timeouts set, that a server timeout is honored if it's relieved prior to the client timeout - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-589 @test_category dse graph @@ -656,7 +656,7 @@ def test_server_timeout_less_then_request(self): Tests that with explicit request_timeouts set, that a client timeout is honored if it's triggered prior to the server sending a timeout. - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-589 @test_category dse graph @@ -682,7 +682,7 @@ def test_graph_profile(self): """ Test verifying various aspects of graph config properties. - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-570 @test_category dse graph @@ -740,7 +740,7 @@ def test_dse_workloads(self): Test to ensure dse_workloads is populated appropriately. Field added in DSE 5.1 - @since DSE 2.0 + @since 3.20 @jira_ticket PYTHON-667 @expected_result dse_workloads set is set on host model diff --git a/tests/integration/advanced/test_spark.py b/tests/integration/advanced/test_spark.py index 587521c868..8864e9a947 100644 --- a/tests/integration/advanced/test_spark.py +++ b/tests/integration/advanced/test_spark.py @@ -32,7 +32,7 @@ class SparkLBTests(BasicGraphUnitTestCase): Test to validate that analtics query can run in a multi-node enviroment. Also check to to ensure that the master spark node is correctly targeted when OLAP queries are run - @since 1.0.0 + @since 3.20 @jira_ticket PYTHON-510 @expected_result OLAP results should come back correctly, master spark coordinator should always be picked. @test_category dse graph From 128bb084eb26de73b91a116c847c19356dc0108d Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 17 Sep 2019 15:43:53 -0400 Subject: [PATCH 1634/2005] Rename DSELoadBalancingPolicy to DefaultLoadBalancingPolicy --- cassandra/cluster.py | 9 ++++----- cassandra/policies.py | 7 ++++++- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index cf92a9cfa2..100f3010c2 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -71,7 +71,7 @@ from cassandra.policies import (TokenAwarePolicy, DCAwareRoundRobinPolicy, SimpleConvictionPolicy, ExponentialReconnectionPolicy, HostDistance, RetryPolicy, IdentityTranslator, NoSpeculativeExecutionPlan, - NoSpeculativeExecutionPolicy, DSELoadBalancingPolicy, + NoSpeculativeExecutionPolicy, DefaultLoadBalancingPolicy, NeverRetryPolicy) from cassandra.pool import (Host, _ReconnectionHandler, _HostReconnectionHandler, HostConnectionPool, HostConnection, @@ -431,7 +431,7 @@ def __init__(self, load_balancing_policy=None, retry_policy=None, Note: The graph_options.graph_source is set automatically to b'a' (analytics) when using GraphAnalyticsExecutionProfile. This is mandatory to target analytics nodes. """ - load_balancing_policy = load_balancing_policy or DSELoadBalancingPolicy(default_lbp_factory()) + load_balancing_policy = load_balancing_policy or DefaultLoadBalancingPolicy(default_lbp_factory()) graph_options = graph_options or GraphOptions(graph_language=b'gremlin-groovy') super(GraphAnalyticsExecutionProfile, self).__init__(load_balancing_policy, retry_policy, consistency_level, serial_consistency_level, request_timeout, row_factory, @@ -1179,8 +1179,7 @@ def __init__(self, profiles.update(execution_profiles) self._config_mode = _ConfigMode.PROFILES - # TODO should be changed to Default... - lbp = DSELoadBalancingPolicy(self.profile_manager.default.load_balancing_policy) + lbp = DefaultLoadBalancingPolicy(self.profile_manager.default.load_balancing_policy) profiles.setdefault(EXEC_PROFILE_GRAPH_DEFAULT, GraphExecutionProfile(load_balancing_policy=lbp)) profiles.setdefault(EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT, GraphExecutionProfile(load_balancing_policy=lbp, request_timeout=60. * 3.)) @@ -2622,7 +2621,7 @@ def execute_graph_async(self, query, parameters=None, trace=False, execution_pro future.message.query_params = graph_parameters future._protocol_handler = self.client_protocol_handler - if options.is_analytics_source and isinstance(execution_profile.load_balancing_policy, DSELoadBalancingPolicy): + if options.is_analytics_source and isinstance(execution_profile.load_balancing_policy, DefaultLoadBalancingPolicy): self._target_analytics_master(future) else: future.send_request() diff --git a/cassandra/policies.py b/cassandra/policies.py index 2364f2a6c0..693ad9dde4 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -1128,7 +1128,7 @@ def on_remove(self, *args, **kwargs): return self._child_policy.on_remove(*args, **kwargs) -class DSELoadBalancingPolicy(WrapperPolicy): +class DefaultLoadBalancingPolicy(WrapperPolicy): """ A :class:`.LoadBalancingPolicy` wrapper that adds the ability to target a specific host first. @@ -1147,6 +1147,7 @@ def make_query_plan(self, working_keyspace=None, query=None): else: keyspace = working_keyspace + # TODO remove next major since execute(..., host=XXX) is now available addr = getattr(query, 'target_host', None) if query else None target_host = self._cluster_metadata.get_host(addr) @@ -1161,6 +1162,10 @@ def make_query_plan(self, working_keyspace=None, query=None): yield h +# TODO for backward compatibility, remove in next major +DSELoadBalancingPolicy = DefaultLoadBalancingPolicy + + class NeverRetryPolicy(RetryPolicy): def _rethrow(self, *args, **kwargs): return self.RETHROW, None From 948dcbcba3e37aad379d7cd606006f553f387a75 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 17 Sep 2019 16:00:52 -0400 Subject: [PATCH 1635/2005] Deprecate DSELoadBalancingPolicy --- CHANGELOG.rst | 15 +++++++++++++++ cassandra/policies.py | 10 +++++++++- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 3f6dbe4e8f..3681885ff0 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,21 @@ ====== Unreleased +Features +-------- + +Bug Fixes +--------- + +Others +------ + +Deprecations +------------ + +* DSELoadBalancingPolicy will be removed in the next major, consider using + the DefaultLoadBalancingPolicy. + Merged from dse-driver: Features diff --git a/cassandra/policies.py b/cassandra/policies.py index 693ad9dde4..edd91e8120 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -1163,7 +1163,15 @@ def make_query_plan(self, working_keyspace=None, query=None): # TODO for backward compatibility, remove in next major -DSELoadBalancingPolicy = DefaultLoadBalancingPolicy +class DSELoadBalancingPolicy(DefaultLoadBalancingPolicy): + """ + *Deprecated:* This will be removed in the next major release, + consider using :class:`.DefaultLoadBalancingPolicy`. + """ + def __init__(self, *args): + super(DSELoadBalancingPolicy, self).__init__(*args) + warnings.warn("DSELoadBalancingPolicy will be removed in 4.0. Consider using " + "DefaultLoadBalancingPolicy.", DeprecationWarning) class NeverRetryPolicy(RetryPolicy): From fde870a5f9c0bed59f865b6f9580ded5891dab15 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 19 Sep 2019 09:40:41 -0400 Subject: [PATCH 1636/2005] Executing graph queries required Execution Profiles --- cassandra/cluster.py | 5 +++++ setup.py | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 100f3010c2..e56cd10951 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2596,6 +2596,11 @@ def execute_graph_async(self, query, parameters=None, trace=False, execution_pro object which callbacks may be attached to for asynchronous response delivery. You may also call ``ResponseFuture.result()`` to synchronously block for results at any time. """ + if self.cluster._config_mode is _ConfigMode.LEGACY: + raise ValueError(("Cannot execute graph queries using Cluster legacy parameters. " + "Consider using Execution profiles: " + "https://docs.datastax.com/en/developer/python-driver/latest/execution_profiles/#execution-profiles")) + if not isinstance(query, GraphStatement): query = SimpleGraphStatement(query) diff --git a/setup.py b/setup.py index c339d8bf90..bda7b6f630 100644 --- a/setup.py +++ b/setup.py @@ -423,7 +423,7 @@ def run_setup(extensions): author='DataStax', packages=[ 'cassandra', 'cassandra.io', 'cassandra.cqlengine', 'cassandra.graph', - 'cassandra.insights', 'cassandra.datastax' + 'cassandra.datastax' ], keywords='cassandra,cql,orm,dse,graph', include_package_data=True, From 5074b163102ab3c721e2f4a4be250deb769fd9a1 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 19 Sep 2019 12:10:24 -0400 Subject: [PATCH 1637/2005] Add missing cassandra.datastax.* modules in setup --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index bda7b6f630..fa0da4dd5b 100644 --- a/setup.py +++ b/setup.py @@ -423,7 +423,7 @@ def run_setup(extensions): author='DataStax', packages=[ 'cassandra', 'cassandra.io', 'cassandra.cqlengine', 'cassandra.graph', - 'cassandra.datastax' + 'cassandra.datastax', 'cassandra.datastax.insights', 'cassandra.datastax.graph' ], keywords='cassandra,cql,orm,dse,graph', include_package_data=True, From 4d9677e03f7961804b7ab0941f474ae147e0d69c Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 19 Sep 2019 14:47:51 -0500 Subject: [PATCH 1638/2005] docs (#6) --- README-dev.rst | 91 ++++------- README.rst | 9 +- docs/CHANGELOG.rst | 5 + docs/conf.py | 4 +- docs/execution_profiles.rst | 4 +- docs/geo_types.rst | 39 +++++ docs/getting_started.rst | 53 ++++-- docs/graph.rst | 312 ++++++++++++++++++++++++++++++++++++ docs/index.rst | 14 +- docs/installation.rst | 28 +--- docs/security.rst | 89 ++++++++++ 11 files changed, 541 insertions(+), 107 deletions(-) create mode 100644 docs/CHANGELOG.rst create mode 100644 docs/geo_types.rst create mode 100644 docs/graph.rst diff --git a/README-dev.rst b/README-dev.rst index 8feaf14a0d..2296dcd05d 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -26,7 +26,7 @@ Releasing # Download all wheels from the jfrog repository and copy them in # the dist/ directory cp /path/to/wheels/*.whl dist/ - + # Upload all files twine upload dist/* @@ -128,14 +128,12 @@ web servers, use the SimpleHTTPServer module:: Then, browse to `localhost:8000 `_. +Tests +===== -Running the Tests -================= -In order for the extensions to be built and used in the test, run:: - - nosetests - -You can run a specific test module or package like so:: +Running Unit Tests +------------------ +Unit tests can be run like so:: nosetests -w tests/unit/ @@ -143,52 +141,21 @@ You can run a specific test method like so:: nosetests -w tests/unit/test_connection.py:ConnectionTest.test_bad_protocol_version -Seeing Test Logs in Real Time ------------------------------ -Sometimes it's useful to output logs for the tests as they run:: - - nosetests -w tests/unit/ --nocapture --nologcapture - -Use tee to capture logs and see them on your terminal:: - - nosetests -w tests/unit/ --nocapture --nologcapture 2>&1 | tee test.log - -Specifying a Cassandra/DSE Version for Integration Tests ----------------------------------------------------- -You can specify a cassandra version with the ``CASSANDRA_VERSION`` or ``DSE_VERSION` environment variable:: +Running Integration Tests +------------------------- +In order to run integration tests, you must specify a version to run using the ``CASSANDRA_VERSION`` or ``DSE_VERSION`` environment variable:: CASSANDRA_VERSION=2.0.9 nosetests -w tests/integration/standard - DSE_VERSION=6.7.4 nosetests -w tests/integration/standard - -You can also specify a cassandra directory (to test unreleased versions):: - - CASSANDRA_DIR=/home/thobbs/cassandra nosetests -w tests/integration/standard - -For this to work with DSE, you have to build it before. Once the appropriate commit is checked out, inside the ``bdp`` folder: - ./gradlew clean dist +Or you can specify a cassandra directory (to test unreleased versions):: -Running the advanced authentication tests for DSE -------------------------------------------------- -These tests are in the file ``tests/integration/advanced/test_auth.py``. These tests are run the same way -as the rest but first the we have to set the variable ADS_HOME: + CASSANDRA_DIR=/home/thobbs/cassandra nosetests -w tests/integration/standard/ - git clone https://github.com/riptano/testeng-devtools.git - cd testeng-devtools/EmbeddedAds - mvn clean install - cp target/embedded-ads-1.0.1-SNAPSHOT-*.jar embedded-ads.jar - export ADS_HOME=`pwd` +Specifying the usage of an already running Cassandra cluster +------------------------------------------------------------ +The test will start the appropriate Cassandra clusters when necessary but if you don't want this to happen because a Cassandra cluster is already running the flag ``USE_CASS_EXTERNAL`` can be used, for example:: -After this we can run the tests normally from the appropriate folder: - - DSE_VERSION=6.7.4 nosetests -w tests/integration/advanced/test_auth.py - -Specifying the usage of an already running cluster --------------------------------------------------- -The test will start the appropriate Cassandra clusters when necessary but if you don't want this to happen -because a Cassandra cluster is already running the flag ``USE_CASS_EXTERNAL`` can be used, for example: - - USE_CASS_EXTERNAL=1 python setup.py nosetests -w tests/integration/standard + USE_CASS_EXTERNAL=1 CASSANDRA_VERSION=2.0.9 nosetests -w tests/integration/standard Specify a Protocol Version for Tests ------------------------------------ @@ -197,20 +164,24 @@ it with the ``PROTOCOL_VERSION`` environment variable:: PROTOCOL_VERSION=3 nosetests -w tests/integration/standard +Seeing Test Logs in Real Time +----------------------------- +Sometimes it's useful to output logs for the tests as they run:: + + nosetests -w tests/unit/ --nocapture --nologcapture + +Use tee to capture logs and see them on your terminal:: + + nosetests -w tests/unit/ --nocapture --nologcapture 2>&1 | tee test.log + Testing Multiple Python Versions -------------------------------- -If you want to test all of python 2.7, 3.4, 3.5, 3.6 and pypy, use tox (this is what +If you want to test all of python 2.7, 3.4, 3.5, 3.6, 3.7, and pypy, use tox (this is what TravisCI runs):: tox -By default, tox only runs the unit tests because I haven't put in the effort -to get the integration tests to run on TravicCI. However, the integration -tests should work locally. To run them, edit the following line in tox.ini:: - - commands = {envpython} setup.py build_ext --inplace nosetests --verbosity=2 tests/unit/ - -and change ``tests/unit/`` to ``tests/``. +By default, tox only runs the unit tests. Running the Benchmarks ====================== @@ -218,10 +189,6 @@ There needs to be a version of cassandra running locally so before running the b ccm create benchmark_cluster -v 3.0.1 -n 1 -s -If testing against DSE: - - ccm create 6.7.4 --dse --dse-username=your_username@datastax.com --dse-password=your_password -v 6.7.4 -n 1 -s - To run the benchmarks, pick one of the files under the ``benchmarks/`` dir and run it:: python benchmarks/future_batches.py @@ -242,7 +209,7 @@ name to specify the built version:: python setup.py egg_info -b-`git rev-parse --short HEAD` sdist --formats=zip -The file (``dist/cassandra-driver-.zip``) is packaged with Cassandra in ``cassandra/lib/cassandra-driver-internal-only.zip``. +The file (``dist/cassandra-driver-.zip``) is packaged with Cassandra in ``cassandra/lib/cassandra-driver-internal-only*zip``. Releasing an EAP ================ @@ -266,4 +233,4 @@ An EAP release is only uploaded on a private server and it is not published on p python setup.py doc -* Upload the docs on the EAP download server. \ No newline at end of file +* Upload the docs on the EAP download server. diff --git a/README.rst b/README.rst index b2039a957f..c09ba27118 100644 --- a/README.rst +++ b/README.rst @@ -4,12 +4,10 @@ DataStax Python Driver for Apache Cassandra .. image:: https://travis-ci.org/datastax/python-driver.png?branch=master :target: https://travis-ci.org/datastax/python-driver -A modern, `feature-rich `_ and highly-tunable Python client library for Apache Cassandra (2.1+) using exclusively Cassandra's binary protocol and Cassandra Query Language v3. +A modern, `feature-rich `_ and highly-tunable Python client library for Apache Cassandra (2.1+) and DataStax Enterprise (4.7+) using exclusively Cassandra's binary protocol and Cassandra Query Language v3. The driver supports Python 2.7, 3.4, 3.5, 3.6 and 3.7. -If you require compatibility with DataStax Enterprise, use the `DataStax Enterprise Python Driver `_. - **Note:** DataStax products do not support big-endian systems. Feedback Requested @@ -27,6 +25,11 @@ Features * Configurable `load balancing `_ and `retry policies `_ * `Concurrent execution utilities `_ * `Object mapper `_ +* DSE Graph execution API +* DSE Geometric type serialization +* DSE PlainText and GSSAPI authentication + +A fluent API extension for DSE Graph is available in the ``dse-graph`` package. For more information, see `the documentation here `_. Installation ------------ diff --git a/docs/CHANGELOG.rst b/docs/CHANGELOG.rst new file mode 100644 index 0000000000..592a2c0efa --- /dev/null +++ b/docs/CHANGELOG.rst @@ -0,0 +1,5 @@ +********* +CHANGELOG +********* + +.. include:: ../CHANGELOG.rst diff --git a/docs/conf.py b/docs/conf.py index b2bfbe0c00..4c0dfb58d7 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -153,7 +153,7 @@ #html_domain_indices = True # If false, no index is generated. -#html_use_index = True +html_use_index = False # If true, the index is split into individual pages for each letter. #html_split_index = False @@ -223,5 +223,5 @@ # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'cassandra-driver', u'Cassandra Driver Documentation', - [u'Tyler Hobbs'], 1) + [u'DataStax'], 1) ] diff --git a/docs/execution_profiles.rst b/docs/execution_profiles.rst index 698f3db84b..7be1a85e3f 100644 --- a/docs/execution_profiles.rst +++ b/docs/execution_profiles.rst @@ -121,7 +121,7 @@ New profiles can be added constructing from scratch, or deriving from default: locked_execution = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1'])) node1_profile = 'node1_whitelist' cluster.add_execution_profile(node1_profile, locked_execution) - + for _ in cluster.metadata.all_hosts(): print session.execute(local_query, execution_profile=node1_profile)[0] @@ -141,7 +141,7 @@ We also have the ability to pass profile instances to be used for execution, but .. code:: python from cassandra.query import tuple_factory - + tmp = session.execution_profile_clone_update('node1', request_timeout=100, row_factory=tuple_factory) print session.execute(local_query, execution_profile=tmp)[0] diff --git a/docs/geo_types.rst b/docs/geo_types.rst new file mode 100644 index 0000000000..f8750d687c --- /dev/null +++ b/docs/geo_types.rst @@ -0,0 +1,39 @@ +DSE Geometry Types +================== +This section shows how to query and work with the geometric types provided by DSE. + +These types are enabled implicitly by creating the Session from :class:`cassandra.cluster.Cluster`. +This module implicitly registers these types for use in the driver. This extension provides +some simple representative types in :mod:`cassandra.util` for inserting and retrieving data:: + + from cassandra.cluster import Cluster + from cassandra.util import Point, LineString, Polygon + session = Cluster().connect() + + session.execute("INSERT INTO ks.geo (k, point, line, poly) VALUES (%s, %s, %s, %s)", + 0, Point(1, 2), LineString(((1, 2), (3, 4))), Polygon(((1, 2), (3, 4), (5, 6)))) + +Queries returning geometric types return the :mod:`dse.util` types. Note that these can easily be used to construct +types from third-party libraries using the common attributes:: + + from shapely.geometry import LineString + shapely_linestrings = [LineString(res.line.coords) for res in session.execute("SELECT line FROM ks.geo")] + +For prepared statements, shapely geometry types can be used interchangeably with the built-in types because their +defining attributes are the same:: + + from shapely.geometry import Point + prepared = session.prepare("UPDATE ks.geo SET point = ? WHERE k = ?") + session.execute(prepared, (0, Point(1.2, 3.4))) + +In order to use shapely types in a CQL-interpolated (non-prepared) query, one must update the encoder with those types, specifying +the same string encoder as set for the internal types:: + + from cassandra import util + from shapely.geometry import Point, LineString, Polygon + + encoder_func = session.encoder.mapping[util.Point] + for t in (Point, LineString, Polygon): + session.encoder.mapping[t] = encoder_func + + session.execute("UPDATE ks.geo SET point = %s where k = %s", (0, Point(1.2, 3.4))) diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 2dc32e6499..e27f72baf5 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -11,6 +11,7 @@ instance of :class:`~.Cluster` for each Cassandra cluster you want to interact with. The simplest way to create a :class:`~.Cluster` is like this: +First, make sure you have the Cassandra driver properly :doc:`installed `. .. code-block:: python @@ -39,16 +40,48 @@ behavior in some other way, this is the place to do it: .. code-block:: python - from cassandra.cluster import Cluster - from cassandra.policies import DCAwareRoundRobinPolicy + from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT + from cassandra.query import tuple_factory + + profile = ExecutionProfile(row_factory=tuple_factory) + cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: profile}) + session = cluster.connect() + + print session.execute("SELECT release_version FROM system.local")[0] - cluster = Cluster( - ['10.1.1.3', '10.1.1.4', '10.1.1.5'], - load_balancing_policy=DCAwareRoundRobinPolicy(local_dc='US_EAST'), - port=9042) +Profiles are passed in by ``execution_profiles`` dict. +In this case we can construct the base ``ExecutionProfile`` passing all attributes: + +.. code-block:: python + + from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT + from cassandra.policies import WhiteListRoundRobinPolicy, DowngradingConsistencyRetryPolicy + from cassandra.query import tuple_factory + + profile = ExecutionProfile( + load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']), + retry_policy=DowngradingConsistencyRetryPolicy(), + consistency_level=ConsistencyLevel.LOCAL_QUORUM, + serial_consistency_level=ConsistencyLevel.LOCAL_SERIAL, + request_timeout=15, + row_factory=tuple_factory + ) + cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: profile}) + session = cluster.connect() + + print session.execute("SELECT release_version FROM system.local")[0] + +Users are free to setup additional profiles to be used by name: + +.. code-block:: python + + profile_long = ExecutionProfile(request_timeout=30) + cluster = Cluster(execution_profiles={'long': profile_long}) + session = cluster.connect() + session.execute(statement, execution_profile='long') -You can find a more complete list of options in the :class:`~.Cluster` documentation. +Also, parameters passed to ``Session.execute`` or attached to ``Statement``\s are still honored as before. Instantiating a :class:`~.Cluster` does not actually connect us to any nodes. To establish connections and begin executing queries we need a @@ -328,10 +361,8 @@ replicas of the data you are interacting with need to respond for the query to be considered a success. By default, :attr:`.ConsistencyLevel.LOCAL_ONE` will be used for all queries. -You can specify a different default for the session on :attr:`.Session.default_consistency_level` -if the cluster is configured in legacy mode (not using execution profiles). Otherwise this can -be done by setting the :attr:`.ExecutionProfile.consistency_level` for the execution profile with key -:data:`~.cluster.EXEC_PROFILE_DEFAULT`. +You can specify a different default by setting the :attr:`.ExecutionProfile.consistency_level` +for the execution profile with key :data:`~.cluster.EXEC_PROFILE_DEFAULT`. To specify a different consistency level per request, wrap queries in a :class:`~.SimpleStatement`: diff --git a/docs/graph.rst b/docs/graph.rst new file mode 100644 index 0000000000..b76ba76d66 --- /dev/null +++ b/docs/graph.rst @@ -0,0 +1,312 @@ +DSE Graph Queries +================= + +The Cassandra driver executes graph queries over the Cassandra native protocol. Use +:meth:`.Session.execute_graph` or :meth:`.Session.execute_graph_async` for +executing gremlin queries in DSE Graph. + +Three Execution Profiles are provided suitable for graph execution: + +* :data:`~.cluster.EXEC_PROFILE_GRAPH_DEFAULT` +* :data:`~.cluster.EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT` +* :data:`~.cluster.EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT` + +See :doc:`getting_started` and :doc:`execution_profiles` +for more detail on working with profiles. + +Getting Started +~~~~~~~~~~~~~~~ + +First, we need to create a graph in the system. To access the system API, we +use the system execution profile :: + + from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT + + cluster = Cluster() + session = cluster.connect() + + graph_name = 'movies' + session.execute_graph("system.graph(name).ifNotExists().create()", {'name': graph_name}, + execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) + + +To execute requests on our newly created graph, we need to setup an execution +profile. Additionally, we also need to set the schema_mode to `development` +for the schema creation:: + + + from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT + from cassandra.datastax.graph import GraphOptions + + graph_name = 'movies' + ep = GraphExecutionProfile(graph_options=GraphOptions(graph_name=graph_name)) + + cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) + session = cluster.connect() + + session.execute_graph("schema.config().option('graph.schema_mode').set('development')") + + +We are ready to configure our graph schema. We will create a simple one for movies:: + + # properties are used to define a vertex + properties = """ + schema.propertyKey("genreId").Text().create(); + schema.propertyKey("personId").Text().create(); + schema.propertyKey("movieId").Text().create(); + schema.propertyKey("name").Text().create(); + schema.propertyKey("title").Text().create(); + schema.propertyKey("year").Int().create(); + schema.propertyKey("country").Text().create(); + """ + + session.execute_graph(properties) # we can execute multiple statements in a single request + + # A Vertex represents a "thing" in the world. + vertices = """ + schema.vertexLabel("genre").properties("genreId","name").create(); + schema.vertexLabel("person").properties("personId","name").create(); + schema.vertexLabel("movie").properties("movieId","title","year","country").create(); + """ + + session.execute_graph(vertices) + + # An edge represents a relationship between two vertices + edges = """ + schema.edgeLabel("belongsTo").single().connection("movie","genre").create(); + schema.edgeLabel("actor").connection("movie","person").create(); + """ + + session.execute_graph(edges) + + # Indexes to execute graph requests efficiently + indexes = """ + schema.vertexLabel("genre").index("genresById").materialized().by("genreId").add(); + schema.vertexLabel("genre").index("genresByName").materialized().by("name").add(); + schema.vertexLabel("person").index("personsById").materialized().by("personId").add(); + schema.vertexLabel("person").index("personsByName").materialized().by("name").add(); + schema.vertexLabel("movie").index("moviesById").materialized().by("movieId").add(); + schema.vertexLabel("movie").index("moviesByTitle").materialized().by("title").add(); + schema.vertexLabel("movie").index("moviesByYear").secondary().by("year").add(); + """ + +Next, we'll add some data:: + + session.execute_graph(""" + g.addV('genre').property('genreId', 1).property('name', 'Action').next(); + g.addV('genre').property('genreId', 2).property('name', 'Drama').next(); + g.addV('genre').property('genreId', 3).property('name', 'Comedy').next(); + g.addV('genre').property('genreId', 4).property('name', 'Horror').next(); + """) + + session.execute_graph(""" + g.addV('person').property('personId', 1).property('name', 'Mark Wahlberg').next(); + g.addV('person').property('personId', 2).property('name', 'Leonardo DiCaprio').next(); + g.addV('person').property('personId', 3).property('name', 'Iggy Pop').next(); + """) + + session.execute_graph(""" + g.addV('movie').property('movieId', 1).property('title', 'The Happening'). + property('year', 2008).property('country', 'United States').next(); + g.addV('movie').property('movieId', 2).property('title', 'The Italian Job'). + property('year', 2003).property('country', 'United States').next(); + + g.addV('movie').property('movieId', 3).property('title', 'Revolutionary Road'). + property('year', 2008).property('country', 'United States').next(); + g.addV('movie').property('movieId', 4).property('title', 'The Man in the Iron Mask'). + property('year', 1998).property('country', 'United States').next(); + + g.addV('movie').property('movieId', 5).property('title', 'Dead Man'). + property('year', 1995).property('country', 'United States').next(); + """) + +Now that our genre, actor and movie vertices are added, we'll create the relationships (edges) between them:: + + session.execute_graph(""" + genre_horror = g.V().hasLabel('genre').has('name', 'Horror').next(); + genre_drama = g.V().hasLabel('genre').has('name', 'Drama').next(); + genre_action = g.V().hasLabel('genre').has('name', 'Action').next(); + + leo = g.V().hasLabel('person').has('name', 'Leonardo DiCaprio').next(); + mark = g.V().hasLabel('person').has('name', 'Mark Wahlberg').next(); + iggy = g.V().hasLabel('person').has('name', 'Iggy Pop').next(); + + the_happening = g.V().hasLabel('movie').has('title', 'The Happening').next(); + the_italian_job = g.V().hasLabel('movie').has('title', 'The Italian Job').next(); + rev_road = g.V().hasLabel('movie').has('title', 'Revolutionary Road').next(); + man_mask = g.V().hasLabel('movie').has('title', 'The Man in the Iron Mask').next(); + dead_man = g.V().hasLabel('movie').has('title', 'Dead Man').next(); + + the_happening.addEdge('belongsTo', genre_horror); + the_italian_job.addEdge('belongsTo', genre_action); + rev_road.addEdge('belongsTo', genre_drama); + man_mask.addEdge('belongsTo', genre_drama); + man_mask.addEdge('belongsTo', genre_action); + dead_man.addEdge('belongsTo', genre_drama); + + the_happening.addEdge('actor', mark); + the_italian_job.addEdge('actor', mark); + rev_road.addEdge('actor', leo); + man_mask.addEdge('actor', leo); + dead_man.addEdge('actor', iggy); + """) + +We are all set. You can now query your graph. Here are some examples:: + + # Find all movies of the genre Drama + for r in session.execute_graph(""" + g.V().has('genre', 'name', 'Drama').in('belongsTo').valueMap();"""): + print(r) + + # Find all movies of the same genre than the movie 'Dead Man' + for r in session.execute_graph(""" + g.V().has('movie', 'title', 'Dead Man').out('belongsTo').in('belongsTo').valueMap();"""): + print(r) + + # Find all movies of Mark Wahlberg + for r in session.execute_graph(""" + g.V().has('person', 'name', 'Mark Wahlberg').in('actor').valueMap();"""): + print(r) + +To see a more graph examples, see `DataStax Graph Examples `_. + +Graph Types +~~~~~~~~~~~ + +Here are the supported graph types with their python representations: + +========== ================ +DSE Graph Python +========== ================ +boolean bool +bigint long, int (PY3) +int int +smallint int +varint int +float float +double double +uuid uuid.UUID +Decimal Decimal +inet str +timestamp datetime.datetime +date datetime.date +time datetime.time +duration datetime.timedelta +point Point +linestring LineString +polygon Polygon +blob bytearray, buffer (PY2), memoryview (PY3), bytes (PY3) +========== ================ + +Graph Row Factory +~~~~~~~~~~~~~~~~~ + +By default (with :class:`.GraphExecutionProfile.row_factory` set to :func:`.datastax.graph.graph_object_row_factory`), known graph result +types are unpacked and returned as specialized types (:class:`.Vertex`, :class:`.Edge`). If the result is not one of these +types, a :class:`.datastax.graph.Result` is returned, containing the graph result parsed from JSON and removed from its outer dict. +The class has some accessor convenience methods for accessing top-level properties by name (`type`, `properties` above), +or lists by index:: + + # dicts with `__getattr__` or `__getitem__` + result = session.execute_graph("[[key_str: 'value', key_int: 3]]", execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)[0] # Using system exec just because there is no graph defined + result # cassandra.datastax.graph.Result({u'key_str': u'value', u'key_int': 3}) + result.value # {u'key_int': 3, u'key_str': u'value'} (dict) + result.key_str # u'value' + result.key_int # 3 + result['key_str'] # u'value' + result['key_int'] # 3 + + # lists with `__getitem__` + result = session.execute_graph('[[0, 1, 2]]', execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)[0] + result # cassandra.datastax.graph.Result([0, 1, 2]) + result.value # [0, 1, 2] (list) + result[1] # 1 (list[1]) + +You can use a different row factory by setting :attr:`.Session.default_graph_row_factory` or passing it to +:meth:`.Session.execute_graph`. For example, :func:`.datastax.graph.single_object_row_factory` returns the JSON result string`, +unparsed. :func:`.datastax.graph.graph_result_row_factory` returns parsed, but unmodified results (such that all metadata is retained, +unlike :func:`.datastax.graph.graph_object_row_factory`, which sheds some as attributes and properties are unpacked). These results +also provide convenience methods for converting to known types (:meth:`~.Result.as_vertex`, :meth:`~.Result.as_edge`, :meth:`~.Result.as_path`). + +Vertex and Edge properties are never unpacked since their types are unknown. If you know your graph schema and want to +deserialize properties, use the :class:`.datastax.graph.GraphSON1Deserializer`. It provides convenient methods to deserialize by types (e.g. +deserialize_date, deserialize_uuid, deserialize_polygon etc.) Example:: + + # ... + from cassandra.datastax.graph import GraphSON1Deserializer + + row = session.execute_graph("g.V().toList()")[0] + value = row.properties['my_property_key'][0].value # accessing the VertexProperty value + value = GraphSON1Deserializer.deserialize_timestamp(value) + + print value # 2017-06-26 08:27:05 + print type(value) # + + +Named Parameters +~~~~~~~~~~~~~~~~ + +Named parameters are passed in a dict to :meth:`.cluster.Session.execute_graph`:: + + result_set = session.execute_graph('[a, b]', {'a': 1, 'b': 2}, execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) + [r.value for r in result_set] # [1, 2] + +All python types listed in `Graph Types`_ can be passed as named parameters and will be serialized +automatically to their graph representation: + +Example:: + + s.execute_graph(""" + g.addV('person'). + property('name', text_value). + property('age', integer_value). + property('birthday', timestamp_value). + property('house_yard', polygon_value).toList() + """, { + 'text_value': 'Mike Smith', + 'integer_value': 34, + 'timestamp_value': datetime.datetime(1967, 12, 30), + 'polygon_value': Polygon(((30, 10), (40, 40), (20, 40), (10, 20), (30, 10))) + }) + + +As with all Execution Profile parameters, graph options can be set in the cluster default (as shown in the first example) +or specified per execution:: + + ep = session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, + graph_options=GraphOptions(graph_name='something-else')) + session.execute_graph(statement, execution_profile=ep) + +Using GraphSON2 Protocol +~~~~~~~~~~~~~~~~~~~~~~~~ + +The default graph protocol used is GraphSON1. However GraphSON1 may +cause problems of type conversion happening during the serialization +of the query to the DSE Graph server, or the deserialization of the +responses back from a string Gremlin query. GraphSON2 offers better +support for the complex data types handled by DSE Graph. + +DSE >=5.0.4 now offers the possibility to use the GraphSON2 protocol +for graph queries. Enabling GraphSON2 can be done by `changing the +graph protocol of the execution profile` and `setting the graphson2 row factory`:: + + from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT + from cassandra.datastax.graph import GraphOptions, GraphProtocol, graph_graphson2_row_factory + + # Create a GraphSON2 execution profile + ep = GraphExecutionProfile(graph_options=GraphOptions(graph_name='types', + graph_protocol=GraphProtocol.GRAPHSON_2_0), + row_factory=graph_graphson2_row_factory) + + cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) + s = cluster.connect() + s.execute_graph(...) + +Using GraphSON2, all properties will be automatically deserialized to +its Python representation. Note that it may bring significant +behavioral change at runtime. + +It is generally recommended to switch to GraphSON2 as it brings more +consistent support for complex data types in the Graph driver and will +be activated by default in the next major version (Python dse-driver +driver 3.0). diff --git a/docs/index.rst b/docs/index.rst index 577b443a3d..e67da242d9 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -2,7 +2,7 @@ Python Cassandra Driver ======================= A Python client driver for `Apache Cassandra `_. This driver works exclusively with the Cassandra Query Language v3 (CQL3) -and Cassandra's native protocol. Cassandra 2.1+ is supported. +and Cassandra's native protocol. Cassandra 2.1+ is supported, including DSE 4.7+. The driver supports Python 2.7, 3.4, 3.5, 3.6 and 3.7. @@ -53,6 +53,15 @@ Contents :doc:`faq` A collection of Frequently Asked Questions +:doc:`geo_types` + Working with DSE geometry types + +:doc:`graph` + Graph queries with DSE Graph + +:doc:`CHANGELOG` + Log of changes to the driver, organized by version. + .. toctree:: :hidden: @@ -67,6 +76,9 @@ Contents security user_defined_types object_mapper + geo_types + graph + dse_auth dates_and_times faq diff --git a/docs/installation.rst b/docs/installation.rst index a6eedf4eb8..4d16df99ef 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -23,8 +23,8 @@ You can use ``pip install --pre cassandra-driver`` if you need to install a beta Speeding Up Installation ^^^^^^^^^^^^^^^^^^^^^^^^ -By default, installing the driver through ``pip`` uses Cython to compile -certain parts of the driver. +By default, installing the driver through ``pip`` uses a pre-compiled, platform-specific wheel when available. +If using a source distribution rather than a wheel, Cython is used to compile certain parts of the driver. This makes those hot paths faster at runtime, but the Cython compilation process can take a long time -- as long as 10 minutes in some environments. @@ -40,30 +40,6 @@ threads used to build the driver and any C extensions: $ # installing from pip $ CASS_DRIVER_BUILD_CONCURRENCY=8 pip install cassandra-driver -Finally, you can `build a wheel `_ from the driver's source and distribute that to computers -that depend on it. For example: - -.. code-block:: bash - - $ git clone https://github.com/datastax/python-driver.git - $ cd python-driver - $ git checkout 3.14.0 # or other desired tag - $ pip install wheel - $ python setup.py bdist_wheel - $ # build wheel with optional concurrency settings - $ CASS_DRIVER_BUILD_CONCURRENCY=8 python setup.py bdist_wheel - $ scp ./dist/cassandra_driver-3.14.0-cp27-cp27mu-linux_x86_64.whl user@host:/remote_dir - -Then, on the remote machine or machines, simply - -.. code-block:: bash - - $ pip install /remote_dir/cassandra_driver-3.14.0-cp27-cp27mu-linux_x86_64.whl - -Note that the wheel created this way is a `platform wheel -`_ -and as such will not work across platforms or architectures. - OSX Installation Error ^^^^^^^^^^^^^^^^^^^^^^ If you're installing on OSX and have XCode 5.1 installed, you may see an error like this:: diff --git a/docs/security.rst b/docs/security.rst index 0353091eb7..880ba98147 100644 --- a/docs/security.rst +++ b/docs/security.rst @@ -276,3 +276,92 @@ SSL with Twisted In case the twisted event loop is used pyOpenSSL must be installed or an exception will be risen. Also to set the ``ssl_version`` and ``cert_reqs`` in ``ssl_opts`` the appropriate constants from pyOpenSSL are expected. + +DSE Authentication +------------------ +When authenticating against DSE, the Cassandra driver provides two auth providers that work both with legacy kerberos and Cassandra authenticators, +as well as the new DSE Unified Authentication. This allows client to configure this auth provider independently, +and in advance of any server upgrade. These auth providers are configured in the same way as any previous implementation:: + + from cassandra.auth import DSEGSSAPIAuthProvider + auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"]) + cluster = Cluster(auth_provider=auth_provider) + session = cluster.connect() + +Implementations are :attr:`.DSEPlainTextAuthProvider`, :class:`.DSEGSSAPIAuthProvider` and :class:`.SaslAuthProvider`. + +DSE Unified Authentication +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +With DSE (>=5.1), unified Authentication allows you to: + +* Proxy Login: Authenticate using a fixed set of authentication credentials but allow authorization of resources based another user id. +* Proxy Execute: Authenticate using a fixed set of authentication credentials but execute requests based on another user id. + +Proxy Login ++++++++++++ + +Proxy login allows you to authenticate with a user but act as another one. You need to ensure the authenticated user has the permission to use the authorization of resources of the other user. ie. this example will allow the `server` user to authenticate as usual but use the authorization of `user1`: + +.. code-block:: text + + GRANT PROXY.LOGIN on role user1 to server + +then you can do the proxy authentication.... + +.. code-block:: python + + from cassandra.cluster import Cluster + from cassandra.auth import SaslAuthProvider + + sasl_kwargs = { + "service": 'dse', + "mechanism":"PLAIN", + "username": 'server', + 'password': 'server', + 'authorization_id': 'user1' + } + + auth_provider = SaslAuthProvider(**sasl_kwargs) + c = Cluster(auth_provider=auth_provider) + s = c.connect() + s.execute(...) # all requests will be executed as 'user1' + +If you are using kerberos, you can use directly :class:`.DSEGSSAPIAuthProvider` and pass the authorization_id, like this: + +.. code-block:: python + + from cassandra.cluster import Cluster + from cassandra.auth import DSEGSSAPIAuthProvider + + # Ensure the kerberos ticket of the server user is set with the kinit utility. + auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal="server@DATASTAX.COM", + authorization_id='user1@DATASTAX.COM') + c = Cluster(auth_provider=auth_provider) + s = c.connect() + s.execute(...) # all requests will be executed as 'user1' + + +Proxy Execute ++++++++++++++ + +Proxy execute allows you to execute requests as another user than the authenticated one. You need to ensure the authenticated user has the permission to use the authorization of resources of the specified user. ie. this example will allow the `server` user to execute requests as `user1`: + +.. code-block:: text + + GRANT PROXY.EXECUTE on role user1 to server + +then you can do a proxy execute... + +.. code-block:: python + + from cassandra.cluster import Cluster + from cassandra.auth import DSEPlainTextAuthProvider, + + auth_provider = DSEPlainTextAuthProvider('server', 'server') + + c = Cluster(auth_provider=auth_provider) + s = c.connect() + s.execute('select * from k.t;', execute_as='user1') # the request will be executed as 'user1' + +Please see the `official documentation `_ for more details on the feature and configuration process. From e81437e322fe695a54df562c7db7d35b3ae5311d Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 23 Sep 2019 10:32:32 -0400 Subject: [PATCH 1639/2005] Add minor TODO comment --- cassandra/auth.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/auth.py b/cassandra/auth.py index 4e33761144..61cc48b3a6 100644 --- a/cassandra/auth.py +++ b/cassandra/auth.py @@ -215,7 +215,7 @@ def initial_response(self): def evaluate_challenge(self, challenge): return self.sasl.process(challenge) - +# TODO remove me next major DSEPlainTextAuthProvider = PlainTextAuthProvider From 65a4a783368680033349073d6074e5e7053874b4 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 23 Sep 2019 11:11:17 -0400 Subject: [PATCH 1640/2005] Test Cython --- build.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build.yaml b/build.yaml index d0af7604b5..b84546bcf3 100644 --- a/build.yaml +++ b/build.yaml @@ -169,8 +169,8 @@ cassandra: env: CYTHON: -# - CYTHON - - NO_CYTHON + - CYTHON +# - NO_CYTHON build: - script: | From 00072d4f11deff4b2c687c295259d9758eef5238 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 23 Sep 2019 12:17:24 -0400 Subject: [PATCH 1641/2005] Fix insights test abot DSELoadBalancingPolicy --- tests/unit/advanced/test_insights.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/advanced/test_insights.py b/tests/unit/advanced/test_insights.py index 7b210d40da..b63e48ba6a 100644 --- a/tests/unit/advanced/test_insights.py +++ b/tests/unit/advanced/test_insights.py @@ -189,7 +189,7 @@ def test_graph_analytics_execution_profile(self): 'type': 'DCAwareRoundRobinPolicy'}, 'shuffle_replicas': False}, 'type': 'TokenAwarePolicy'}}, - 'type': 'DSELoadBalancingPolicy'}, + 'type': 'DefaultLoadBalancingPolicy'}, 'readTimeout': 604800.0, 'retry': {'namespace': 'cassandra.policies', 'options': {}, 'type': 'NeverRetryPolicy'}, 'serialConsistency': None, From f271d20f8de29c0a65bc55ada463a6ccc65d418b Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 24 Sep 2019 13:11:10 -0500 Subject: [PATCH 1642/2005] [PYTHON-1145] Change default CL if connected to caas (#7) * Change default CL if connected to caas * fix socket issue on windows * Make sure were emulating apollo through the sni endpoint * Always send options message --- CHANGELOG.rst | 1 + cassandra/cluster.py | 41 +++++++-- cassandra/connection.py | 17 ++-- cassandra/cqlengine/connection.py | 2 - cassandra/metadata.py | 4 + tests/integration/advanced/cloud/__init__.py | 2 +- .../integration/advanced/cloud/test_cloud.py | 88 ++++++++++++++++++- 7 files changed, 128 insertions(+), 27 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 738935d6b8..9d93e59153 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -8,6 +8,7 @@ Features * Add a new Endpoint type to support unix sockets (PYTHON-1098) * DataStax Cloud enablement (PYTHON-1075) * Add creds.zip support (PYTHON-1097) +* Detect CaaS and change consistency default (PYTHON-1145) 3.19.0 ====== diff --git a/cassandra/cluster.py b/cassandra/cluster.py index c74558be60..cc8be5ba63 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -129,6 +129,8 @@ def _is_gevent_monkey_patched(): _NOT_SET = object() +PRODUCT_APOLLO = "DATASTAX_APOLLO" + class NoHostAvailable(Exception): """ @@ -288,11 +290,12 @@ class ExecutionProfile(object): Defaults to :class:`.NoSpeculativeExecutionPolicy` if not specified """ - # indicates if lbp was set explicitly or uses default values + # indicates if set explicitly or uses default values _load_balancing_policy_explicit = False + _consistency_level_explicit = False def __init__(self, load_balancing_policy=_NOT_SET, retry_policy=None, - consistency_level=ConsistencyLevel.LOCAL_ONE, serial_consistency_level=None, + consistency_level=_NOT_SET, serial_consistency_level=None, request_timeout=10.0, row_factory=named_tuple_factory, speculative_execution_policy=None): if load_balancing_policy is _NOT_SET: @@ -301,8 +304,15 @@ def __init__(self, load_balancing_policy=_NOT_SET, retry_policy=None, else: self._load_balancing_policy_explicit = True self.load_balancing_policy = load_balancing_policy + + if consistency_level is _NOT_SET: + self._consistency_level_explicit = False + self.consistency_level = ConsistencyLevel.LOCAL_ONE + else: + self._consistency_level_explicit = True + self.consistency_level = consistency_level + self.retry_policy = retry_policy or RetryPolicy() - self.consistency_level = consistency_level if (serial_consistency_level is not None and not ConsistencyLevel.is_serial(serial_consistency_level)): @@ -999,12 +1009,12 @@ def __init__(self, self.timestamp_generator = MonotonicTimestampGenerator() self.profile_manager = ProfileManager() - self.profile_manager.profiles[EXEC_PROFILE_DEFAULT] = ExecutionProfile(self.load_balancing_policy, - self.default_retry_policy, - Session._default_consistency_level, - Session._default_serial_consistency_level, - Session._default_timeout, - Session._row_factory) + self.profile_manager.profiles[EXEC_PROFILE_DEFAULT] = ExecutionProfile( + self.load_balancing_policy, + self.default_retry_policy, + request_timeout=Session._default_timeout, + row_factory=Session._row_factory + ) # legacy mode if either of these is not default if load_balancing_policy or default_retry_policy: if execution_profiles: @@ -1251,6 +1261,7 @@ def add_execution_profile(self, name, profile, pool_wait_timeout=5): profile.load_balancing_policy.on_up(host) futures = set() for session in tuple(self.sessions): + self._set_default_dbaas_consistency(session) futures.update(session.update_created_pools()) _, not_done = wait_futures(futures, pool_wait_timeout) if not_done: @@ -1470,8 +1481,18 @@ def connect(self, keyspace=None, wait_for_all_pools=False): session = self._new_session(keyspace) if wait_for_all_pools: wait_futures(session._initial_connect_futures) + + self._set_default_dbaas_consistency(session) + return session + def _set_default_dbaas_consistency(self, session): + if session.cluster.metadata.dbaas: + for profile in self.profile_manager.profiles.values(): + if not profile._consistency_level_explicit: + profile.consistency_level = ConsistencyLevel.LOCAL_QUORUM + session._default_consistency_level = ConsistencyLevel.LOCAL_QUORUM + def get_connection_holders(self): holders = [] for s in tuple(self.sessions): @@ -2978,6 +2999,8 @@ def connect(self): self._protocol_version = self._cluster.protocol_version self._set_new_connection(self._reconnect_internal()) + self._cluster.metadata.dbaas = self._connection.product_type == PRODUCT_APOLLO + def _set_new_connection(self, conn): """ Replace existing connection (if there is one) and close it. diff --git a/cassandra/connection.py b/cassandra/connection.py index 4e7d53825f..276a462f33 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -105,7 +105,6 @@ def decompress(byts): return snappy.decompress(byts) locally_supported_compressions['snappy'] = (snappy.compress, decompress) - DRIVER_NAME, DRIVER_VERSION = 'DataStax Python Driver', sys.modules['cassandra'].__version__ PROTOCOL_VERSION_MASK = 0x7f @@ -513,6 +512,7 @@ class Connection(object): _ssl_impl = ssl _check_hostname = False + product_type = None def __init__(self, host='127.0.0.1', port=9042, authenticator=None, ssl_options=None, sockopts=None, compression=True, @@ -618,7 +618,7 @@ def factory(cls, endpoint, timeout, *args, **kwargs): def _get_socket_addresses(self): address, port = self.endpoint.resolve() - if self.endpoint.socket_family == socket.AF_UNIX: + if hasattr(socket, 'AF_UNIX') and self.endpoint.socket_family == socket.AF_UNIX: return [(socket.AF_UNIX, socket.SOCK_STREAM, 0, None, address)] addresses = socket.getaddrinfo(address, port, self.endpoint.socket_family, socket.SOCK_STREAM) @@ -920,16 +920,8 @@ def process_msg(self, header, body): @defunct_on_error def _send_options_message(self): - if self.cql_version is None and (not self.compression or not locally_supported_compressions): - log.debug("Not sending options message for new connection(%s) to %s " - "because compression is disabled and a cql version was not " - "specified", id(self), self.endpoint) - self._compressor = None - self.cql_version = DEFAULT_CQL_VERSION - self._send_startup_message(no_compact=self.no_compact) - else: - log.debug("Sending initial options message for new connection (%s) to %s", id(self), self.endpoint) - self.send_msg(OptionsMessage(), self.get_request_id(), self._handle_options_response) + log.debug("Sending initial options message for new connection (%s) to %s", id(self), self.endpoint) + self.send_msg(OptionsMessage(), self.get_request_id(), self._handle_options_response) @defunct_on_error def _handle_options_response(self, options_response): @@ -950,6 +942,7 @@ def _handle_options_response(self, options_response): id(self), self.endpoint) supported_cql_versions = options_response.cql_versions remote_supported_compressions = options_response.options['COMPRESSION'] + self.product_type = options_response.options.get('PRODUCT_TYPE', [None])[0] if self.cql_version: if self.cql_version not in supported_cql_versions: diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 9322e4b96a..f1b49d5048 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -184,8 +184,6 @@ def register_connection(name, hosts=None, consistency=None, lazy_connect=False, conn = Connection.from_session(name, session=session) conn.setup_session() else: # use hosts argument - if consistency is None: - consistency = ConsistencyLevel.LOCAL_ONE conn = Connection( name, hosts=hosts, consistency=consistency, lazy_connect=lazy_connect, diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 1608a114c2..93d63b2e2a 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -110,8 +110,12 @@ class Metadata(object): token_map = None """ A :class:`~.TokenMap` instance describing the ring topology. """ + dbaas = False + """ A boolean indicating if connected to a DBaaS cluster """ + def __init__(self): self.keyspaces = {} + self.dbaas = False self._hosts = {} self._hosts_lock = RLock() diff --git a/tests/integration/advanced/cloud/__init__.py b/tests/integration/advanced/cloud/__init__.py index 271259e608..bd568b69ef 100644 --- a/tests/integration/advanced/cloud/__init__.py +++ b/tests/integration/advanced/cloud/__init__.py @@ -89,7 +89,7 @@ def is_running(self): return self.running def start_node(self, id): - subcommand = 'node{} start --root --wait-for-binary-proto'.format(id) + subcommand = 'node{} start --jvm_arg "-Ddse.product_type=DATASTAX_APOLLO" --root --wait-for-binary-proto'.format(id) subprocess.call( [self.ccm_command.format(subcommand)], shell=True) diff --git a/tests/integration/advanced/cloud/test_cloud.py b/tests/integration/advanced/cloud/test_cloud.py index 73628549d5..17257dadaa 100644 --- a/tests/integration/advanced/cloud/test_cloud.py +++ b/tests/integration/advanced/cloud/test_cloud.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License +from cassandra.query import SimpleStatement try: import unittest2 as unittest @@ -20,8 +21,8 @@ import six from ssl import SSLContext, PROTOCOL_TLSv1 -from cassandra import DriverException -from cassandra.cluster import NoHostAvailable +from cassandra import DriverException, ConsistencyLevel, InvalidRequest +from cassandra.cluster import NoHostAvailable, ExecutionProfile, Cluster from cassandra.connection import SniEndPoint from cassandra.auth import PlainTextAuthProvider from cassandra.policies import TokenAwarePolicy, DCAwareRoundRobinPolicy, ConstantReconnectionPolicy @@ -32,6 +33,18 @@ from tests.integration.util import wait_until_not_raised from tests.integration.advanced.cloud import CloudProxyCluster, CLOUD_PROXY_SERVER +DISALLOWED_CONSISTENCIES = [ + ConsistencyLevel.ANY, + ConsistencyLevel.ONE, + ConsistencyLevel.TWO, + ConsistencyLevel.THREE, + ConsistencyLevel.QUORUM, + ConsistencyLevel.ALL, + ConsistencyLevel.EACH_QUORUM, + ConsistencyLevel.LOCAL_ONE, + ConsistencyLevel.SERIAL +] + @requirescloudproxy class CloudTests(CloudProxyCluster): @@ -103,7 +116,7 @@ def test_resolve_and_reconnect_on_node_down(self): self.connect(self.creds, idle_heartbeat_interval=1, idle_heartbeat_timeout=1, - reconnection_policy=ConstantReconnectionPolicy(50)) + reconnection_policy=ConstantReconnectionPolicy(120)) self.assertEqual(len(self.hosts_up()), 3) CLOUD_PROXY_SERVER.stop_node(1) @@ -130,3 +143,72 @@ def test_metadata_ssl_error(self): self.connect(self.creds, ssl_context=SSLContext(PROTOCOL_TLSv1)) self.assertIn('Unable to connect to the metadata', str(cm.exception)) + + def test_default_consistency(self): + self.connect(self.creds) + self.assertEqual(self.session.default_consistency_level, ConsistencyLevel.LOCAL_QUORUM) + self.assertEqual(self.cluster.profile_manager.default.consistency_level, ConsistencyLevel.LOCAL_QUORUM) + + def test_default_consistency_of_execution_profiles(self): + cloud_config = {'secure_connect_bundle': self.creds} + self.cluster = Cluster(cloud=cloud_config, protocol_version=4, execution_profiles={ + 'pre_create_default_ep': ExecutionProfile(), + 'pre_create_changed_ep': ExecutionProfile( + consistency_level=ConsistencyLevel.LOCAL_ONE, + ), + }) + self.cluster.add_execution_profile('pre_connect_default_ep', ExecutionProfile()) + self.cluster.add_execution_profile( + 'pre_connect_changed_ep', + ExecutionProfile( + consistency_level=ConsistencyLevel.LOCAL_ONE, + ) + ) + session = self.cluster.connect(wait_for_all_pools=True) + + self.cluster.add_execution_profile('post_connect_default_ep', ExecutionProfile()) + self.cluster.add_execution_profile( + 'post_connect_changed_ep', + ExecutionProfile( + consistency_level=ConsistencyLevel.LOCAL_ONE, + ) + ) + + for default in ['pre_create_default_ep', 'pre_connect_default_ep', 'post_connect_default_ep']: + cl = self.cluster.profile_manager.profiles[default].consistency_level + self.assertEqual( + cl, ConsistencyLevel.LOCAL_QUORUM, + "Expecting LOCAL QUORUM for profile {}, but got {} instead".format(default, cl) + ) + for changed in ['pre_create_changed_ep', 'pre_connect_changed_ep', 'post_connect_changed_ep']: + cl = self.cluster.profile_manager.profiles[changed].consistency_level + self.assertEqual( + cl, ConsistencyLevel.LOCAL_ONE, + "Expecting LOCAL ONE for profile {}, but got {} instead".format(default, cl) + ) + + def test_consistency_guardrails(self): + self.connect(self.creds) + self.session.execute( + "CREATE KEYSPACE IF NOT EXISTS test_consistency_guardrails " + "with replication={'class': 'SimpleStrategy', 'replication_factor': 1}" + ) + self.session.execute("CREATE TABLE IF NOT EXISTS test_consistency_guardrails.guardrails (id int primary key)") + for consistency in DISALLOWED_CONSISTENCIES: + statement = SimpleStatement( + "INSERT INTO test_consistency_guardrails.guardrails (id) values (1)", + consistency_level=consistency + ) + with self.assertRaises(InvalidRequest) as e: + self.session.execute(statement) + self.assertIn('not allowed for Write Consistency Level', str(e.exception)) + + # Sanity check to make sure we can do a normal insert + statement = SimpleStatement( + "INSERT INTO test_consistency_guardrails.guardrails (id) values (1)", + consistency_level=ConsistencyLevel.LOCAL_QUORUM + ) + try: + self.session.execute(statement) + except InvalidRequest: + self.fail("InvalidRequest was incorrectly raised for write query at LOCAL QUORUM!") From 931b73db6acfe3fc47fd1669e9ad6bd066a44c14 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 26 Sep 2019 08:44:39 -0500 Subject: [PATCH 1643/2005] Remove unnecessary cloud deps --- build.yaml | 3 --- 1 file changed, 3 deletions(-) diff --git a/build.yaml b/build.yaml index cb7a1043a9..60ba60b837 100644 --- a/build.yaml +++ b/build.yaml @@ -164,9 +164,6 @@ build: pip install nose-exclude pip install service_identity - # Required for DBaaS and https+ssl cert - pip install pyopenssl ndg-httpsclient pyasn1 - FORCE_CYTHON=False if [[ $CYTHON == 'CYTHON' ]]; then FORCE_CYTHON=True From 371911b578e57e512650d93e20a6a8daabb5351a Mon Sep 17 00:00:00 2001 From: Chris Lohfink Date: Sun, 29 Sep 2019 15:17:14 -0500 Subject: [PATCH 1644/2005] Use 4.0 schema parser in 4 alpha and snapshot builds --- cassandra/metadata.py | 2 +- tests/unit/test_util_types.py | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 1608a114c2..cdd1392503 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2770,7 +2770,7 @@ def export_as_string(self): def get_schema_parser(connection, server_version, timeout): version = Version(server_version) - if version >= Version('4.0.0'): + if version >= Version('4-a'): return SchemaParserV4(connection, timeout) if version >= Version('3.0.0'): return SchemaParserV3(connection, timeout) diff --git a/tests/unit/test_util_types.py b/tests/unit/test_util_types.py index e5d50ea036..2cff3362c0 100644 --- a/tests/unit/test_util_types.py +++ b/tests/unit/test_util_types.py @@ -267,6 +267,9 @@ def test_version_compare(self): self.assertTrue(Version('2.3.0.build0') > Version('2.3.0.1')) # 4th part fallback to str cmp self.assertTrue(Version('2.3.0') < Version('2.3.0.build')) + self.assertTrue(Version('4-a') <= Version('4.0.0')) + self.assertTrue(Version('4-a') <= Version('4.0-alpha1')) + self.assertTrue(Version('4-a') <= Version('4.0-beta1')) self.assertTrue(Version('4.0.0') >= Version('4.0.0')) self.assertTrue(Version('4.0.0.421') >= Version('4.0.0')) self.assertTrue(Version('4.0.1') >= Version('4.0.0')) From 04486ed411008724acd0e06118c6d9a597bb8e9c Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 1 Oct 2019 10:08:28 -0500 Subject: [PATCH 1645/2005] Update supported consistencies in our cloud testing --- tests/integration/advanced/cloud/test_cloud.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/integration/advanced/cloud/test_cloud.py b/tests/integration/advanced/cloud/test_cloud.py index 17257dadaa..f33afd1161 100644 --- a/tests/integration/advanced/cloud/test_cloud.py +++ b/tests/integration/advanced/cloud/test_cloud.py @@ -38,11 +38,8 @@ ConsistencyLevel.ONE, ConsistencyLevel.TWO, ConsistencyLevel.THREE, - ConsistencyLevel.QUORUM, - ConsistencyLevel.ALL, ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_ONE, - ConsistencyLevel.SERIAL ] From 0a6025b3c578dc16439dc5cc42f45297e0bda42e Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 1 Oct 2019 14:51:59 -0400 Subject: [PATCH 1646/2005] Fix unassigned variable in insights --- cassandra/datastax/insights/reporter.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cassandra/datastax/insights/reporter.py b/cassandra/datastax/insights/reporter.py index 6fb2d8f037..5402c6a7f6 100644 --- a/cassandra/datastax/insights/reporter.py +++ b/cassandra/datastax/insights/reporter.py @@ -121,6 +121,7 @@ def _get_startup_data(self): try: local_ipaddr = cc._connection._socket.getsockname()[0] except Exception as e: + local_ipaddr = None log.debug('Unable to get local socket addr from {}: {}'.format(cc._connection, e)) hostname = socket.getfqdn() From b563c7bb3c5d7d252ee1ac008ae9eb7868501673 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 24 Sep 2019 06:41:42 -0400 Subject: [PATCH 1647/2005] Initial integration of dse-graph --- cassandra/datastax/graph/__init__.py | 12 ++ cassandra/datastax/graph/_dse_graph.py | 214 +++++++++++++++++++++++ cassandra/datastax/graph/_predicates.py | 167 ++++++++++++++++++ cassandra/datastax/graph/_query.py | 137 +++++++++++++++ cassandra/datastax/graph/_serializers.py | 131 ++++++++++++++ cassandra/datastax/graph/predicates.py | 20 +++ cassandra/datastax/graph/query.py | 143 +++++++++++++++ cassandra/datastax/graph/serializers.py | 20 +++ 8 files changed, 844 insertions(+) create mode 100644 cassandra/datastax/graph/_dse_graph.py create mode 100644 cassandra/datastax/graph/_predicates.py create mode 100644 cassandra/datastax/graph/_query.py create mode 100644 cassandra/datastax/graph/_serializers.py create mode 100644 cassandra/datastax/graph/predicates.py create mode 100644 cassandra/datastax/graph/serializers.py diff --git a/cassandra/datastax/graph/__init__.py b/cassandra/datastax/graph/__init__.py index 0c03c9249d..918a852334 100644 --- a/cassandra/datastax/graph/__init__.py +++ b/cassandra/datastax/graph/__init__.py @@ -20,3 +20,15 @@ graph_result_row_factory, graph_graphson2_row_factory ) from cassandra.datastax.graph.graphson import * + + +HAVE_GREMLIN = False +try: + import gremlin_python + HAVE_GREMLIN = True +except ImportError: + # gremlinpython is not installed. + pass + +if HAVE_GREMLIN: + from cassandra.datastax.graph._dse_graph import * diff --git a/cassandra/datastax/graph/_dse_graph.py b/cassandra/datastax/graph/_dse_graph.py new file mode 100644 index 0000000000..11c04254f4 --- /dev/null +++ b/cassandra/datastax/graph/_dse_graph.py @@ -0,0 +1,214 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging +import copy + +from gremlin_python.structure.graph import Graph +from gremlin_python.driver.remote_connection import RemoteConnection, RemoteTraversal +from gremlin_python.process.traversal import Traverser, TraversalSideEffects +from gremlin_python.process.graph_traversal import GraphTraversal + +from cassandra.cluster import Session, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT +from cassandra.datastax.graph import GraphOptions, GraphProtocol + +from cassandra.datastax.graph.serializers import ( + GremlinGraphSONReader, + deserializers, + gremlin_deserializers +) +from cassandra.datastax.graph.query import _DefaultTraversalBatch, _query_from_traversal + +log = logging.getLogger(__name__) + +__all__ = ['BaseGraphRowFactory', 'dse_graphson_reader', 'graphson_reader', 'graph_traversal_row_factory', + 'graph_traversal_dse_object_row_factory', 'DSESessionRemoteGraphConnection', 'DseGraph'] + + +# Create our custom GraphSONReader/Writer +dse_graphson_reader = GremlinGraphSONReader(deserializer_map=deserializers) +graphson_reader = GremlinGraphSONReader(deserializer_map=gremlin_deserializers) + +# Traversal result keys +_bulk_key = 'bulk' +_result_key = 'result' + + +class BaseGraphRowFactory(object): + """ + Base row factory for graph traversal. This class basically wraps a + graphson reader function to handle additional features of Gremlin/DSE + and is callable as a normal row factory. + + Currently supported: + - bulk results + + :param graphson_reader: The function used to read the graphson. + + Use example:: + + my_custom_row_factory = BaseGraphRowFactory(custom_graphson_reader.readObject) + """ + + def __init__(self, graphson_reader): + self._graphson_reader = graphson_reader + + def __call__(self, column_names, rows): + results = [] + + for row in rows: + parsed_row = self._graphson_reader(row[0]) + bulk = parsed_row.get(_bulk_key, 1) + if bulk > 1: # Avoid deepcopy call if bulk <= 1 + results.extend([copy.deepcopy(parsed_row[_result_key]) + for _ in range(bulk-1)]) + + results.append(parsed_row[_result_key]) + + return results + + +graph_traversal_row_factory = BaseGraphRowFactory(graphson_reader.readObject) +graph_traversal_row_factory.__doc__ = "Row Factory that returns the decoded graphson." + +graph_traversal_dse_object_row_factory = BaseGraphRowFactory(dse_graphson_reader.readObject) +graph_traversal_dse_object_row_factory.__doc__ = "Row Factory that returns the decoded graphson as DSE types." + + +class DSESessionRemoteGraphConnection(RemoteConnection): + """ + A Tinkerpop RemoteConnection to execute traversal queries on DSE. + + :param session: A DSE session + :param graph_name: (Optional) DSE Graph name. + :param execution_profile: (Optional) Execution profile for traversal queries. Default is set to `EXEC_PROFILE_GRAPH_DEFAULT`. + """ + + session = None + graph_name = None + execution_profile = None + + def __init__(self, session, graph_name=None, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT): + super(DSESessionRemoteGraphConnection, self).__init__(None, None) + + if not isinstance(session, Session): + raise ValueError('A DSE Session must be provided to execute graph traversal queries.') + + self.session = session + self.graph_name = graph_name + self.execution_profile = execution_profile + + def submit(self, bytecode): + + query = DseGraph.query_from_traversal(bytecode) + ep = self.session.execution_profile_clone_update(self.execution_profile, row_factory=graph_traversal_row_factory) + graph_options = ep.graph_options.copy() + graph_options.graph_language = DseGraph.DSE_GRAPH_QUERY_LANGUAGE + if self.graph_name: + graph_options.graph_name = self.graph_name + + ep.graph_options = graph_options + + traversers = self.session.execute_graph(query, execution_profile=ep) + traversers = [Traverser(t) for t in traversers] + return RemoteTraversal(iter(traversers), TraversalSideEffects()) + + def __str__(self): + return "".format(self.graph_name) + __repr__ = __str__ + + +class DseGraph(object): + """ + Dse Graph utility class for GraphTraversal construction and execution. + """ + + DSE_GRAPH_QUERY_LANGUAGE = 'bytecode-json' + """ + Graph query language, Default is 'bytecode-json' (GraphSON). + """ + + @staticmethod + def query_from_traversal(traversal): + """ + From a GraphTraversal, return a query string based on the language specified in `DseGraph.DSE_GRAPH_QUERY_LANGUAGE`. + + :param traversal: The GraphTraversal object + """ + + if isinstance(traversal, GraphTraversal): + for strategy in traversal.traversal_strategies.traversal_strategies: + rc = strategy.remote_connection + if (isinstance(rc, DSESessionRemoteGraphConnection) and + rc.session or rc.graph_name or rc.execution_profile): + log.warning("GraphTraversal session, graph_name and execution_profile are " + "only taken into account when executed with TinkerPop.") + + return _query_from_traversal(traversal) + + @staticmethod + def traversal_source(session=None, graph_name=None, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT, traversal_class=None): + """ + Returns a TinkerPop GraphTraversalSource binded to the session and graph_name if provided. + + :param session: (Optional) A DSE session + :param graph_name: (Optional) DSE Graph name + :param execution_profile: (Optional) Execution profile for traversal queries. Default is set to `EXEC_PROFILE_GRAPH_DEFAULT`. + :param traversal_class: (Optional) The GraphTraversalSource class to use (DSL). + + .. code-block:: python + + from dse.cluster import Cluster + from dse_graph import DseGraph + + c = Cluster() + session = c.connect() + + g = DseGraph.traversal_source(session, 'my_graph') + print g.V().valueMap().toList() + + """ + + graph = Graph() + traversal_source = graph.traversal(traversal_class) + + if session: + traversal_source = traversal_source.withRemote( + DSESessionRemoteGraphConnection(session, graph_name, execution_profile)) + + return traversal_source + + @staticmethod + def create_execution_profile(graph_name): + """ + Creates an ExecutionProfile for GraphTraversal execution. You need to register that execution profile to the + cluster by using `cluster.add_execution_profile`. + + :param graph_name: The graph name + """ + + ep = GraphExecutionProfile(row_factory=graph_traversal_dse_object_row_factory, + graph_options=GraphOptions(graph_name=graph_name, + graph_language=DseGraph.DSE_GRAPH_QUERY_LANGUAGE, + graph_protocol=GraphProtocol.GRAPHSON_2_0)) + return ep + + @staticmethod + def batch(*args, **kwargs): + """ + Returns the :class:`dse_graph.query.TraversalBatch` object allowing to + execute multiple traversals in the same transaction. + """ + return _DefaultTraversalBatch(*args, **kwargs) diff --git a/cassandra/datastax/graph/_predicates.py b/cassandra/datastax/graph/_predicates.py new file mode 100644 index 0000000000..b63dd90043 --- /dev/null +++ b/cassandra/datastax/graph/_predicates.py @@ -0,0 +1,167 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math + +from gremlin_python.process.traversal import P + +from cassandra.util import Distance + +__all__ = ['GeoP', 'TextDistanceP', 'Search', 'GeoUnit', 'Geo'] + + +class GeoP(object): + + def __init__(self, operator, value, other=None): + self.operator = operator + self.value = value + self.other = other + + @staticmethod + def inside(*args, **kwargs): + return GeoP("inside", *args, **kwargs) + + def __eq__(self, other): + return isinstance(other, + self.__class__) and self.operator == other.operator and self.value == other.value and self.other == other.other + + def __repr__(self): + return self.operator + "(" + str(self.value) + ")" if self.other is None else self.operator + "(" + str( + self.value) + "," + str(self.other) + ")" + + +class TextDistanceP(object): + + def __init__(self, operator, value, distance): + self.operator = operator + self.value = value + self.distance = distance + + @staticmethod + def fuzzy(*args): + return TextDistanceP("fuzzy", *args) + + @staticmethod + def token_fuzzy(*args): + return TextDistanceP("tokenFuzzy", *args) + + @staticmethod + def phrase(*args): + return TextDistanceP("phrase", *args) + + def __eq__(self, other): + return isinstance(other, + self.__class__) and self.operator == other.operator and self.value == other.value and self.distance == other.distance + + def __repr__(self): + return self.operator + "(" + str(self.value) + "," + str(self.distance) + ")" + + +class Search(object): + + @staticmethod + def token(value): + """ + Search any instance of a certain token within the text property targeted. + :param value: the value to look for. + """ + return P('token', value) + + @staticmethod + def token_prefix(value): + """ + Search any instance of a certain token prefix withing the text property targeted. + :param value: the value to look for. + """ + return P('tokenPrefix', value) + + @staticmethod + def token_regex(value): + """ + Search any instance of the provided regular expression for the targeted property. + :param value: the value to look for. + """ + return P('tokenRegex', value) + + @staticmethod + def prefix(value): + """ + Search for a specific prefix at the beginning of the text property targeted. + :param value: the value to look for. + """ + return P('prefix', value) + + @staticmethod + def regex(value): + """ + Search for this regular expression inside the text property targeted. + :param value: the value to look for. + """ + return P('regex', value) + + @staticmethod + def fuzzy(value, distance): + """ + Search for a fuzzy string inside the text property targeted. + :param value: the value to look for. + :param distance: The distance for the fuzzy search. ie. 1, to allow a one-letter misspellings. + """ + return TextDistanceP.fuzzy(value, distance) + + @staticmethod + def token_fuzzy(value, distance): + """ + Search for a token fuzzy inside the text property targeted. + :param value: the value to look for. + :param distance: The distance for the token fuzzy search. ie. 1, to allow a one-letter misspellings. + """ + return TextDistanceP.token_fuzzy(value, distance) + + @staticmethod + def phrase(value, proximity): + """ + Search for a phrase inside the text property targeted. + :param value: the value to look for. + :param proximity: The proximity for the phrase search. ie. phrase('David Felcey', 2).. to find 'David Felcey' with up to two middle names. + """ + return TextDistanceP.phrase(value, proximity) + + +class GeoUnit(object): + _EARTH_MEAN_RADIUS_KM = 6371.0087714 + _DEGREES_TO_RADIANS = math.pi / 180 + _DEG_TO_KM = _DEGREES_TO_RADIANS * _EARTH_MEAN_RADIUS_KM + _KM_TO_DEG = 1 / _DEG_TO_KM + _MILES_TO_KM = 1.609344001 + + MILES = _MILES_TO_KM * _KM_TO_DEG + KILOMETERS = _KM_TO_DEG + METERS = _KM_TO_DEG / 1000.0 + DEGREES = 1 + + +class Geo(object): + + @staticmethod + def inside(value, units=GeoUnit.DEGREES): + """ + Search any instance of geometry inside the Distance targeted. + :param value: A Distance to look for. + :param units: The units for ``value``. See GeoUnit enum. (Can also + provide an integer to use as a multiplier to convert ``value`` to + degrees.) + """ + return GeoP.inside( + value=Distance(x=value.x, y=value.y, radius=value.radius * units) + ) diff --git a/cassandra/datastax/graph/_query.py b/cassandra/datastax/graph/_query.py new file mode 100644 index 0000000000..e9d1504ae6 --- /dev/null +++ b/cassandra/datastax/graph/_query.py @@ -0,0 +1,137 @@ +# Copyright DataStax, Inc. +# +# The full license terms are available at http://www.datastax.com/terms/datastax-dse-driver-license-terms + +import logging + +from cassandra.graph import SimpleGraphStatement +from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT + +from gremlin_python.process.graph_traversal import GraphTraversal +from gremlin_python.structure.io.graphsonV2d0 import GraphSONWriter + +from cassandra.datastax.graph.serializers import serializers + +log = logging.getLogger(__name__) + +graphson_writer = GraphSONWriter(serializer_map=serializers) + + +def _query_from_traversal(traversal): + """ + From a GraphTraversal, return a query string. + + :param traversal: The GraphTraversal object + """ + try: + query = graphson_writer.writeObject(traversal) + except Exception: + log.exception("Error preparing graphson traversal query:") + raise + + return query + + +class TraversalBatch(object): + """ + A `TraversalBatch` is used to execute multiple graph traversals in a + single transaction. If any traversal in the batch fails, the entire + batch will fail to apply. + + If a TraversalBatch is bounded to a DSE session, it can be executed using + `traversal_batch.execute()`. + """ + + _session = None + _execution_profile = None + + def __init__(self, session=None, execution_profile=None): + """ + :param session: (Optional) A DSE session + :param execution_profile: (Optional) The execution profile to use for the batch execution + """ + self._session = session + self._execution_profile = execution_profile + + def add(self, traversal): + """ + Add a traversal to the batch. + + :param traversal: A gremlin GraphTraversal + """ + raise NotImplementedError() + + def add_all(self, traversals): + """ + Adds a sequence of traversals to the batch. + + :param traversals: A sequence of gremlin GraphTraversal + """ + raise NotImplementedError() + + def execute(self): + """ + Execute the traversal batch if bounded to a `DSE Session`. + """ + raise NotImplementedError() + + def as_graph_statement(self): + """ + Return the traversal batch as GraphStatement. + """ + raise NotImplementedError() + + def clear(self): + """ + Clear a traversal batch for reuse. + """ + raise NotImplementedError() + + def __len__(self): + raise NotImplementedError() + + def __str__(self): + return u''.format(len(self)) + __repr__ = __str__ + + +class _DefaultTraversalBatch(TraversalBatch): + + _traversals = None + + def __init__(self, *args, **kwargs): + super(_DefaultTraversalBatch, self).__init__(*args, **kwargs) + self._traversals = [] + + @property + def _query(self): + return u"[{0}]".format(','.join(self._traversals)) + + def add(self, traversal): + if not isinstance(traversal, GraphTraversal): + raise ValueError('traversal should be a gremlin GraphTraversal') + + query = _query_from_traversal(traversal) + self._traversals.append(query) + + return self + + def add_all(self, traversals): + for traversal in traversals: + self.add(traversal) + + def as_graph_statement(self): + return SimpleGraphStatement(self._query) + + def execute(self): + if self._session is None: + raise ValueError('A DSE Session must be provided to execute the traversal batch.') + + execution_profile = self._execution_profile if self._execution_profile else EXEC_PROFILE_GRAPH_DEFAULT + return self._session.execute_graph(self._query, execution_profile=execution_profile) + + def clear(self): + del self._traversals[:] + + def __len__(self): + return len(self._traversals) diff --git a/cassandra/datastax/graph/_serializers.py b/cassandra/datastax/graph/_serializers.py new file mode 100644 index 0000000000..01616af1f8 --- /dev/null +++ b/cassandra/datastax/graph/_serializers.py @@ -0,0 +1,131 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import OrderedDict + +import six + +from gremlin_python.structure.io.graphsonV2d0 import ( + GraphSONReader, + GraphSONUtil, + VertexDeserializer, + VertexPropertyDeserializer, + PropertyDeserializer, + EdgeDeserializer, + PathDeserializer +) + +from cassandra.datastax.graph.graphson import ( + GraphSON2Serializer, + GraphSON2Deserializer +) + +from cassandra.datastax.graph.predicates import GeoP, TextDistanceP +from cassandra.util import Distance + + +__all__ = ['GremlinGraphSONReader', 'GeoPSerializer', 'TextDistancePSerializer', + 'DistanceIO', 'gremlin_deserializers', 'deserializers', 'serializers'] + + +class _GremlinGraphSONTypeSerializer(object): + + @classmethod + def dictify(cls, v, _): + return GraphSON2Serializer.serialize(v) + + +class _GremlinGraphSONTypeDeserializer(object): + + deserializer = None + + def __init__(self, deserializer): + self.deserializer = deserializer + + def objectify(self, v, reader): + return self.deserializer.deserialize(v, reader=reader) + + +def _make_gremlin_deserializer(graphson_type): + return _GremlinGraphSONTypeDeserializer( + GraphSON2Deserializer.get_deserializer(graphson_type.graphson_type) + ) + + +class GremlinGraphSONReader(GraphSONReader): + """Gremlin GraphSONReader Adapter, required to use gremlin types""" + + def deserialize(self, obj): + return self.toObject(obj) + + +class GeoPSerializer(object): + @classmethod + def dictify(cls, p, writer): + out = { + "predicateType": "Geo", + "predicate": p.operator, + "value": [writer.toDict(p.value), writer.toDict(p.other)] if p.other is not None else writer.toDict(p.value) + } + return GraphSONUtil.typedValue("P", out, prefix='dse') + + +class TextDistancePSerializer(object): + @classmethod + def dictify(cls, p, writer): + out = { + "predicate": p.operator, + "value": { + 'query': writer.toDict(p.value), + 'distance': writer.toDict(p.distance) + } + } + return GraphSONUtil.typedValue("P", out) + + +class DistanceIO(object): + @classmethod + def dictify(cls, v, _): + return GraphSONUtil.typedValue('Distance', six.text_type(v), prefix='dse') + + +serializers = OrderedDict([ + (t, _GremlinGraphSONTypeSerializer) + for t in six.iterkeys(GraphSON2Serializer.get_type_definitions()) +]) + +# Predicates +serializers.update(OrderedDict([ + (Distance, DistanceIO), + (GeoP, GeoPSerializer), + (TextDistanceP, TextDistancePSerializer) +])) + +deserializers = { + k: _make_gremlin_deserializer(v) + for k, v in six.iteritems(GraphSON2Deserializer.get_type_definitions()) +} + +deserializers.update({ + "dse:Distance": DistanceIO, +}) + +gremlin_deserializers = deserializers.copy() +gremlin_deserializers.update({ + 'g:Vertex': VertexDeserializer, + 'g:VertexProperty': VertexPropertyDeserializer, + 'g:Edge': EdgeDeserializer, + 'g:Property': PropertyDeserializer, + 'g:Path': PathDeserializer +}) diff --git a/cassandra/datastax/graph/predicates.py b/cassandra/datastax/graph/predicates.py new file mode 100644 index 0000000000..beba60ef62 --- /dev/null +++ b/cassandra/datastax/graph/predicates.py @@ -0,0 +1,20 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import gremlin_python + from cassandra.datastax.graph._predicates import * +except ImportError: + # gremlinpython is not installed. + pass diff --git a/cassandra/datastax/graph/query.py b/cassandra/datastax/graph/query.py index 0ebeeb28d3..5bc85d3d32 100644 --- a/cassandra/datastax/graph/query.py +++ b/cassandra/datastax/graph/query.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import logging import json from warnings import warn @@ -22,6 +23,14 @@ from cassandra.datastax.graph.types import Vertex, Edge, Path from cassandra.datastax.graph.graphson import GraphSON2Reader +HAVE_GREMLIN = False +try: + import gremlin_python + HAVE_GREMLIN = True +except ImportError: + # gremlinpython is not installed. + pass + __all__ = [ 'GraphProtocol', 'GraphOptions', 'GraphStatement', 'SimpleGraphStatement', @@ -302,3 +311,137 @@ def as_path(self): return Path(self.labels, self.objects) except (AttributeError, ValueError, TypeError): raise TypeError("Could not create Path from %r" % (self,)) + + +# Fluent API +if HAVE_GREMLIN: + from gremlin_python.process.graph_traversal import GraphTraversal + from gremlin_python.structure.io.graphsonV2d0 import GraphSONWriter + + from cassandra.datastax.graph.serializers import serializers + + log = logging.getLogger(__name__) + + graphson_writer = GraphSONWriter(serializer_map=serializers) + + + def _query_from_traversal(traversal): + """ + From a GraphTraversal, return a query string. + + :param traversal: The GraphTraversal object + """ + try: + query = graphson_writer.writeObject(traversal) + except Exception: + log.exception("Error preparing graphson traversal query:") + raise + + return query + + + class TraversalBatch(object): + """ + A `TraversalBatch` is used to execute multiple graph traversals in a + single transaction. If any traversal in the batch fails, the entire + batch will fail to apply. + + If a TraversalBatch is bounded to a DSE session, it can be executed using + `traversal_batch.execute()`. + """ + + _session = None + _execution_profile = None + + def __init__(self, session=None, execution_profile=None): + """ + :param session: (Optional) A DSE session + :param execution_profile: (Optional) The execution profile to use for the batch execution + """ + self._session = session + self._execution_profile = execution_profile + + def add(self, traversal): + """ + Add a traversal to the batch. + + :param traversal: A gremlin GraphTraversal + """ + raise NotImplementedError() + + def add_all(self, traversals): + """ + Adds a sequence of traversals to the batch. + + :param traversals: A sequence of gremlin GraphTraversal + """ + raise NotImplementedError() + + def execute(self): + """ + Execute the traversal batch if bounded to a `DSE Session`. + """ + raise NotImplementedError() + + def as_graph_statement(self): + """ + Return the traversal batch as GraphStatement. + """ + raise NotImplementedError() + + def clear(self): + """ + Clear a traversal batch for reuse. + """ + raise NotImplementedError() + + def __len__(self): + raise NotImplementedError() + + def __str__(self): + return u''.format(len(self)) + + __repr__ = __str__ + + + class _DefaultTraversalBatch(TraversalBatch): + + _traversals = None + + def __init__(self, *args, **kwargs): + super(_DefaultTraversalBatch, self).__init__(*args, **kwargs) + self._traversals = [] + + @property + def _query(self): + return u"[{0}]".format(','.join(self._traversals)) + + def add(self, traversal): + if not isinstance(traversal, GraphTraversal): + raise ValueError('traversal should be a gremlin GraphTraversal') + + query = _query_from_traversal(traversal) + self._traversals.append(query) + + return self + + def add_all(self, traversals): + for traversal in traversals: + self.add(traversal) + + def as_graph_statement(self): + return SimpleGraphStatement(self._query) + + def execute(self): + if self._session is None: + raise ValueError('A DSE Session must be provided to execute the traversal batch.') + + from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT + execution_profile = self._execution_profile if self._execution_profile else EXEC_PROFILE_GRAPH_DEFAULT + return self._session.execute_graph(self._query, execution_profile=execution_profile) + + def clear(self): + del self._traversals[:] + + def __len__(self): + return len(self._traversals) \ No newline at end of file diff --git a/cassandra/datastax/graph/serializers.py b/cassandra/datastax/graph/serializers.py new file mode 100644 index 0000000000..cd9e7e9282 --- /dev/null +++ b/cassandra/datastax/graph/serializers.py @@ -0,0 +1,20 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import gremlin_python + from cassandra.datastax.graph._serializers import * +except ImportError: + # gremlinpython is not installed. + pass From 0526657331b1b9f59800ca1a31bfb4ab037f06fd Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 24 Sep 2019 14:54:15 -0400 Subject: [PATCH 1648/2005] Add DataStax Fluent API module --- build.yaml | 2 +- cassandra/cluster.py | 3 +- cassandra/datastax/graph/__init__.py | 12 - cassandra/datastax/graph/_dse_graph.py | 214 ------ cassandra/datastax/graph/fluent/__init__.py | 224 ++++++ .../graph/{ => fluent}/_predicates.py | 0 .../datastax/graph/{ => fluent}/_query.py | 16 +- .../graph/{ => fluent}/_serializers.py | 2 +- .../datastax/graph/{ => fluent}/predicates.py | 2 +- cassandra/datastax/graph/fluent/query.py | 20 + .../graph/{ => fluent}/serializers.py | 2 +- cassandra/datastax/graph/query.py | 143 ---- tests/integration/advanced/graph/__init__.py | 13 + .../advanced/graph/fluent/__init__.py | 13 + .../advanced/graph/fluent/test_graph.py | 715 ++++++++++++++++++ .../advanced/graph/fluent/test_search.py | 347 +++++++++ 16 files changed, 1352 insertions(+), 376 deletions(-) delete mode 100644 cassandra/datastax/graph/_dse_graph.py create mode 100644 cassandra/datastax/graph/fluent/__init__.py rename cassandra/datastax/graph/{ => fluent}/_predicates.py (100%) rename cassandra/datastax/graph/{ => fluent}/_query.py (84%) rename cassandra/datastax/graph/{ => fluent}/_serializers.py (97%) rename cassandra/datastax/graph/{ => fluent}/predicates.py (91%) create mode 100644 cassandra/datastax/graph/fluent/query.py rename cassandra/datastax/graph/{ => fluent}/serializers.py (91%) create mode 100644 tests/integration/advanced/graph/__init__.py create mode 100644 tests/integration/advanced/graph/fluent/__init__.py create mode 100644 tests/integration/advanced/graph/fluent/test_graph.py create mode 100644 tests/integration/advanced/graph/fluent/test_search.py diff --git a/build.yaml b/build.yaml index b84546bcf3..f94b78e259 100644 --- a/build.yaml +++ b/build.yaml @@ -138,7 +138,7 @@ schedules: schedule: adhoc disable_pull_requests: true branches: - include: ['oss-next'] + include: [/oss-next.*/] env_vars: | EVENT_LOOP_MANAGER='libev' EXCLUDE_LONG=1 diff --git a/cassandra/cluster.py b/cassandra/cluster.py index e56cd10951..1016be72be 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2522,7 +2522,8 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, not yet connected, the query will fail with :class:`NoHostAvailable`. Using this is discouraged except in a few cases, e.g., querying node-local tables and applying schema changes. - `execute_as` the user that will be used on the server to execute the request. + `execute_as` the user that will be used on the server to execute the request. This is only available + on a DSE cluster. """ return self.execute_async(query, parameters, trace, custom_payload, timeout, execution_profile, paging_state, host, execute_as).result() diff --git a/cassandra/datastax/graph/__init__.py b/cassandra/datastax/graph/__init__.py index 918a852334..0c03c9249d 100644 --- a/cassandra/datastax/graph/__init__.py +++ b/cassandra/datastax/graph/__init__.py @@ -20,15 +20,3 @@ graph_result_row_factory, graph_graphson2_row_factory ) from cassandra.datastax.graph.graphson import * - - -HAVE_GREMLIN = False -try: - import gremlin_python - HAVE_GREMLIN = True -except ImportError: - # gremlinpython is not installed. - pass - -if HAVE_GREMLIN: - from cassandra.datastax.graph._dse_graph import * diff --git a/cassandra/datastax/graph/_dse_graph.py b/cassandra/datastax/graph/_dse_graph.py deleted file mode 100644 index 11c04254f4..0000000000 --- a/cassandra/datastax/graph/_dse_graph.py +++ /dev/null @@ -1,214 +0,0 @@ -# Copyright DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import logging -import copy - -from gremlin_python.structure.graph import Graph -from gremlin_python.driver.remote_connection import RemoteConnection, RemoteTraversal -from gremlin_python.process.traversal import Traverser, TraversalSideEffects -from gremlin_python.process.graph_traversal import GraphTraversal - -from cassandra.cluster import Session, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT -from cassandra.datastax.graph import GraphOptions, GraphProtocol - -from cassandra.datastax.graph.serializers import ( - GremlinGraphSONReader, - deserializers, - gremlin_deserializers -) -from cassandra.datastax.graph.query import _DefaultTraversalBatch, _query_from_traversal - -log = logging.getLogger(__name__) - -__all__ = ['BaseGraphRowFactory', 'dse_graphson_reader', 'graphson_reader', 'graph_traversal_row_factory', - 'graph_traversal_dse_object_row_factory', 'DSESessionRemoteGraphConnection', 'DseGraph'] - - -# Create our custom GraphSONReader/Writer -dse_graphson_reader = GremlinGraphSONReader(deserializer_map=deserializers) -graphson_reader = GremlinGraphSONReader(deserializer_map=gremlin_deserializers) - -# Traversal result keys -_bulk_key = 'bulk' -_result_key = 'result' - - -class BaseGraphRowFactory(object): - """ - Base row factory for graph traversal. This class basically wraps a - graphson reader function to handle additional features of Gremlin/DSE - and is callable as a normal row factory. - - Currently supported: - - bulk results - - :param graphson_reader: The function used to read the graphson. - - Use example:: - - my_custom_row_factory = BaseGraphRowFactory(custom_graphson_reader.readObject) - """ - - def __init__(self, graphson_reader): - self._graphson_reader = graphson_reader - - def __call__(self, column_names, rows): - results = [] - - for row in rows: - parsed_row = self._graphson_reader(row[0]) - bulk = parsed_row.get(_bulk_key, 1) - if bulk > 1: # Avoid deepcopy call if bulk <= 1 - results.extend([copy.deepcopy(parsed_row[_result_key]) - for _ in range(bulk-1)]) - - results.append(parsed_row[_result_key]) - - return results - - -graph_traversal_row_factory = BaseGraphRowFactory(graphson_reader.readObject) -graph_traversal_row_factory.__doc__ = "Row Factory that returns the decoded graphson." - -graph_traversal_dse_object_row_factory = BaseGraphRowFactory(dse_graphson_reader.readObject) -graph_traversal_dse_object_row_factory.__doc__ = "Row Factory that returns the decoded graphson as DSE types." - - -class DSESessionRemoteGraphConnection(RemoteConnection): - """ - A Tinkerpop RemoteConnection to execute traversal queries on DSE. - - :param session: A DSE session - :param graph_name: (Optional) DSE Graph name. - :param execution_profile: (Optional) Execution profile for traversal queries. Default is set to `EXEC_PROFILE_GRAPH_DEFAULT`. - """ - - session = None - graph_name = None - execution_profile = None - - def __init__(self, session, graph_name=None, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT): - super(DSESessionRemoteGraphConnection, self).__init__(None, None) - - if not isinstance(session, Session): - raise ValueError('A DSE Session must be provided to execute graph traversal queries.') - - self.session = session - self.graph_name = graph_name - self.execution_profile = execution_profile - - def submit(self, bytecode): - - query = DseGraph.query_from_traversal(bytecode) - ep = self.session.execution_profile_clone_update(self.execution_profile, row_factory=graph_traversal_row_factory) - graph_options = ep.graph_options.copy() - graph_options.graph_language = DseGraph.DSE_GRAPH_QUERY_LANGUAGE - if self.graph_name: - graph_options.graph_name = self.graph_name - - ep.graph_options = graph_options - - traversers = self.session.execute_graph(query, execution_profile=ep) - traversers = [Traverser(t) for t in traversers] - return RemoteTraversal(iter(traversers), TraversalSideEffects()) - - def __str__(self): - return "".format(self.graph_name) - __repr__ = __str__ - - -class DseGraph(object): - """ - Dse Graph utility class for GraphTraversal construction and execution. - """ - - DSE_GRAPH_QUERY_LANGUAGE = 'bytecode-json' - """ - Graph query language, Default is 'bytecode-json' (GraphSON). - """ - - @staticmethod - def query_from_traversal(traversal): - """ - From a GraphTraversal, return a query string based on the language specified in `DseGraph.DSE_GRAPH_QUERY_LANGUAGE`. - - :param traversal: The GraphTraversal object - """ - - if isinstance(traversal, GraphTraversal): - for strategy in traversal.traversal_strategies.traversal_strategies: - rc = strategy.remote_connection - if (isinstance(rc, DSESessionRemoteGraphConnection) and - rc.session or rc.graph_name or rc.execution_profile): - log.warning("GraphTraversal session, graph_name and execution_profile are " - "only taken into account when executed with TinkerPop.") - - return _query_from_traversal(traversal) - - @staticmethod - def traversal_source(session=None, graph_name=None, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT, traversal_class=None): - """ - Returns a TinkerPop GraphTraversalSource binded to the session and graph_name if provided. - - :param session: (Optional) A DSE session - :param graph_name: (Optional) DSE Graph name - :param execution_profile: (Optional) Execution profile for traversal queries. Default is set to `EXEC_PROFILE_GRAPH_DEFAULT`. - :param traversal_class: (Optional) The GraphTraversalSource class to use (DSL). - - .. code-block:: python - - from dse.cluster import Cluster - from dse_graph import DseGraph - - c = Cluster() - session = c.connect() - - g = DseGraph.traversal_source(session, 'my_graph') - print g.V().valueMap().toList() - - """ - - graph = Graph() - traversal_source = graph.traversal(traversal_class) - - if session: - traversal_source = traversal_source.withRemote( - DSESessionRemoteGraphConnection(session, graph_name, execution_profile)) - - return traversal_source - - @staticmethod - def create_execution_profile(graph_name): - """ - Creates an ExecutionProfile for GraphTraversal execution. You need to register that execution profile to the - cluster by using `cluster.add_execution_profile`. - - :param graph_name: The graph name - """ - - ep = GraphExecutionProfile(row_factory=graph_traversal_dse_object_row_factory, - graph_options=GraphOptions(graph_name=graph_name, - graph_language=DseGraph.DSE_GRAPH_QUERY_LANGUAGE, - graph_protocol=GraphProtocol.GRAPHSON_2_0)) - return ep - - @staticmethod - def batch(*args, **kwargs): - """ - Returns the :class:`dse_graph.query.TraversalBatch` object allowing to - execute multiple traversals in the same transaction. - """ - return _DefaultTraversalBatch(*args, **kwargs) diff --git a/cassandra/datastax/graph/fluent/__init__.py b/cassandra/datastax/graph/fluent/__init__.py new file mode 100644 index 0000000000..8cc1d1dd98 --- /dev/null +++ b/cassandra/datastax/graph/fluent/__init__.py @@ -0,0 +1,224 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import copy + +HAVE_GREMLIN = False +try: + import gremlin_python + HAVE_GREMLIN = True +except ImportError: + # gremlinpython is not installed. + pass + +if HAVE_GREMLIN: + from gremlin_python.structure.graph import Graph + from gremlin_python.driver.remote_connection import RemoteConnection, RemoteTraversal + from gremlin_python.process.traversal import Traverser, TraversalSideEffects + from gremlin_python.process.graph_traversal import GraphTraversal + + from cassandra.cluster import Session, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT + from cassandra.datastax.graph import GraphOptions, GraphProtocol + + from cassandra.datastax.graph.fluent.serializers import ( + GremlinGraphSONReader, + deserializers, + gremlin_deserializers + ) + from cassandra.datastax.graph.fluent.query import _DefaultTraversalBatch, _query_from_traversal + + log = logging.getLogger(__name__) + + __all__ = ['BaseGraphRowFactory', 'dse_graphson_reader', 'graphson_reader', 'graph_traversal_row_factory', + 'graph_traversal_dse_object_row_factory', 'DSESessionRemoteGraphConnection', 'DseGraph'] + + # Create our custom GraphSONReader/Writer + dse_graphson_reader = GremlinGraphSONReader(deserializer_map=deserializers) + graphson_reader = GremlinGraphSONReader(deserializer_map=gremlin_deserializers) + + # Traversal result keys + _bulk_key = 'bulk' + _result_key = 'result' + + + class BaseGraphRowFactory(object): + """ + Base row factory for graph traversal. This class basically wraps a + graphson reader function to handle additional features of Gremlin/DSE + and is callable as a normal row factory. + + Currently supported: + - bulk results + + :param graphson_reader: The function used to read the graphson. + + Use example:: + + my_custom_row_factory = BaseGraphRowFactory(custom_graphson_reader.readObject) + """ + + def __init__(self, graphson_reader): + self._graphson_reader = graphson_reader + + def __call__(self, column_names, rows): + results = [] + + for row in rows: + parsed_row = self._graphson_reader(row[0]) + bulk = parsed_row.get(_bulk_key, 1) + if bulk > 1: # Avoid deepcopy call if bulk <= 1 + results.extend([copy.deepcopy(parsed_row[_result_key]) + for _ in range(bulk - 1)]) + + results.append(parsed_row[_result_key]) + + return results + + + graph_traversal_row_factory = BaseGraphRowFactory(graphson_reader.readObject) + graph_traversal_row_factory.__doc__ = "Row Factory that returns the decoded graphson." + + graph_traversal_dse_object_row_factory = BaseGraphRowFactory(dse_graphson_reader.readObject) + graph_traversal_dse_object_row_factory.__doc__ = "Row Factory that returns the decoded graphson as DSE types." + + + class DSESessionRemoteGraphConnection(RemoteConnection): + """ + A Tinkerpop RemoteConnection to execute traversal queries on DSE. + + :param session: A DSE session + :param graph_name: (Optional) DSE Graph name. + :param execution_profile: (Optional) Execution profile for traversal queries. Default is set to `EXEC_PROFILE_GRAPH_DEFAULT`. + """ + + session = None + graph_name = None + execution_profile = None + + def __init__(self, session, graph_name=None, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT): + super(DSESessionRemoteGraphConnection, self).__init__(None, None) + + if not isinstance(session, Session): + raise ValueError('A DSE Session must be provided to execute graph traversal queries.') + + self.session = session + self.graph_name = graph_name + self.execution_profile = execution_profile + + def submit(self, bytecode): + + query = DseGraph.query_from_traversal(bytecode) + ep = self.session.execution_profile_clone_update(self.execution_profile, + row_factory=graph_traversal_row_factory) + graph_options = ep.graph_options.copy() + graph_options.graph_language = DseGraph.DSE_GRAPH_QUERY_LANGUAGE + if self.graph_name: + graph_options.graph_name = self.graph_name + + ep.graph_options = graph_options + + traversers = self.session.execute_graph(query, execution_profile=ep) + traversers = [Traverser(t) for t in traversers] + return RemoteTraversal(iter(traversers), TraversalSideEffects()) + + def __str__(self): + return "".format(self.graph_name) + + __repr__ = __str__ + + + class DseGraph(object): + """ + Dse Graph utility class for GraphTraversal construction and execution. + """ + + DSE_GRAPH_QUERY_LANGUAGE = 'bytecode-json' + """ + Graph query language, Default is 'bytecode-json' (GraphSON). + """ + + @staticmethod + def query_from_traversal(traversal): + """ + From a GraphTraversal, return a query string based on the language specified in `DseGraph.DSE_GRAPH_QUERY_LANGUAGE`. + + :param traversal: The GraphTraversal object + """ + + if isinstance(traversal, GraphTraversal): + for strategy in traversal.traversal_strategies.traversal_strategies: + rc = strategy.remote_connection + if (isinstance(rc, DSESessionRemoteGraphConnection) and + rc.session or rc.graph_name or rc.execution_profile): + log.warning("GraphTraversal session, graph_name and execution_profile are " + "only taken into account when executed with TinkerPop.") + + return _query_from_traversal(traversal) + + @staticmethod + def traversal_source(session=None, graph_name=None, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT, + traversal_class=None): + """ + Returns a TinkerPop GraphTraversalSource binded to the session and graph_name if provided. + + :param session: (Optional) A DSE session + :param graph_name: (Optional) DSE Graph name + :param execution_profile: (Optional) Execution profile for traversal queries. Default is set to `EXEC_PROFILE_GRAPH_DEFAULT`. + :param traversal_class: (Optional) The GraphTraversalSource class to use (DSL). + + .. code-block:: python + + from dse.cluster import Cluster + from dse_graph import DseGraph + + c = Cluster() + session = c.connect() + + g = DseGraph.traversal_source(session, 'my_graph') + print g.V().valueMap().toList() + + """ + + graph = Graph() + traversal_source = graph.traversal(traversal_class) + + if session: + traversal_source = traversal_source.withRemote( + DSESessionRemoteGraphConnection(session, graph_name, execution_profile)) + + return traversal_source + + @staticmethod + def create_execution_profile(graph_name): + """ + Creates an ExecutionProfile for GraphTraversal execution. You need to register that execution profile to the + cluster by using `cluster.add_execution_profile`. + + :param graph_name: The graph name + """ + + ep = GraphExecutionProfile(row_factory=graph_traversal_dse_object_row_factory, + graph_options=GraphOptions(graph_name=graph_name, + graph_language=DseGraph.DSE_GRAPH_QUERY_LANGUAGE, + graph_protocol=GraphProtocol.GRAPHSON_2_0)) + return ep + + @staticmethod + def batch(*args, **kwargs): + """ + Returns the :class:`dse_graph.query.TraversalBatch` object allowing to + execute multiple traversals in the same transaction. + """ + return _DefaultTraversalBatch(*args, **kwargs) diff --git a/cassandra/datastax/graph/_predicates.py b/cassandra/datastax/graph/fluent/_predicates.py similarity index 100% rename from cassandra/datastax/graph/_predicates.py rename to cassandra/datastax/graph/fluent/_predicates.py diff --git a/cassandra/datastax/graph/_query.py b/cassandra/datastax/graph/fluent/_query.py similarity index 84% rename from cassandra/datastax/graph/_query.py rename to cassandra/datastax/graph/fluent/_query.py index e9d1504ae6..b5d24df05b 100644 --- a/cassandra/datastax/graph/_query.py +++ b/cassandra/datastax/graph/fluent/_query.py @@ -1,6 +1,16 @@ # Copyright DataStax, Inc. # -# The full license terms are available at http://www.datastax.com/terms/datastax-dse-driver-license-terms +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import logging @@ -10,12 +20,14 @@ from gremlin_python.process.graph_traversal import GraphTraversal from gremlin_python.structure.io.graphsonV2d0 import GraphSONWriter -from cassandra.datastax.graph.serializers import serializers +from cassandra.datastax.graph.fluent.serializers import serializers log = logging.getLogger(__name__) graphson_writer = GraphSONWriter(serializer_map=serializers) +__all__ = ['TraversalBatch', '_query_from_traversal', '_DefaultTraversalBatch'] + def _query_from_traversal(traversal): """ diff --git a/cassandra/datastax/graph/_serializers.py b/cassandra/datastax/graph/fluent/_serializers.py similarity index 97% rename from cassandra/datastax/graph/_serializers.py rename to cassandra/datastax/graph/fluent/_serializers.py index 01616af1f8..56591603af 100644 --- a/cassandra/datastax/graph/_serializers.py +++ b/cassandra/datastax/graph/fluent/_serializers.py @@ -31,7 +31,7 @@ GraphSON2Deserializer ) -from cassandra.datastax.graph.predicates import GeoP, TextDistanceP +from cassandra.datastax.graph.fluent.predicates import GeoP, TextDistanceP from cassandra.util import Distance diff --git a/cassandra/datastax/graph/predicates.py b/cassandra/datastax/graph/fluent/predicates.py similarity index 91% rename from cassandra/datastax/graph/predicates.py rename to cassandra/datastax/graph/fluent/predicates.py index beba60ef62..6bfd6b3113 100644 --- a/cassandra/datastax/graph/predicates.py +++ b/cassandra/datastax/graph/fluent/predicates.py @@ -14,7 +14,7 @@ try: import gremlin_python - from cassandra.datastax.graph._predicates import * + from cassandra.datastax.graph.fluent._predicates import * except ImportError: # gremlinpython is not installed. pass diff --git a/cassandra/datastax/graph/fluent/query.py b/cassandra/datastax/graph/fluent/query.py new file mode 100644 index 0000000000..c5026cc046 --- /dev/null +++ b/cassandra/datastax/graph/fluent/query.py @@ -0,0 +1,20 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import gremlin_python + from cassandra.datastax.graph.fluent._query import * +except ImportError: + # gremlinpython is not installed. + pass diff --git a/cassandra/datastax/graph/serializers.py b/cassandra/datastax/graph/fluent/serializers.py similarity index 91% rename from cassandra/datastax/graph/serializers.py rename to cassandra/datastax/graph/fluent/serializers.py index cd9e7e9282..680e613edf 100644 --- a/cassandra/datastax/graph/serializers.py +++ b/cassandra/datastax/graph/fluent/serializers.py @@ -14,7 +14,7 @@ try: import gremlin_python - from cassandra.datastax.graph._serializers import * + from cassandra.datastax.graph.fluent._serializers import * except ImportError: # gremlinpython is not installed. pass diff --git a/cassandra/datastax/graph/query.py b/cassandra/datastax/graph/query.py index 5bc85d3d32..0ebeeb28d3 100644 --- a/cassandra/datastax/graph/query.py +++ b/cassandra/datastax/graph/query.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging import json from warnings import warn @@ -23,14 +22,6 @@ from cassandra.datastax.graph.types import Vertex, Edge, Path from cassandra.datastax.graph.graphson import GraphSON2Reader -HAVE_GREMLIN = False -try: - import gremlin_python - HAVE_GREMLIN = True -except ImportError: - # gremlinpython is not installed. - pass - __all__ = [ 'GraphProtocol', 'GraphOptions', 'GraphStatement', 'SimpleGraphStatement', @@ -311,137 +302,3 @@ def as_path(self): return Path(self.labels, self.objects) except (AttributeError, ValueError, TypeError): raise TypeError("Could not create Path from %r" % (self,)) - - -# Fluent API -if HAVE_GREMLIN: - from gremlin_python.process.graph_traversal import GraphTraversal - from gremlin_python.structure.io.graphsonV2d0 import GraphSONWriter - - from cassandra.datastax.graph.serializers import serializers - - log = logging.getLogger(__name__) - - graphson_writer = GraphSONWriter(serializer_map=serializers) - - - def _query_from_traversal(traversal): - """ - From a GraphTraversal, return a query string. - - :param traversal: The GraphTraversal object - """ - try: - query = graphson_writer.writeObject(traversal) - except Exception: - log.exception("Error preparing graphson traversal query:") - raise - - return query - - - class TraversalBatch(object): - """ - A `TraversalBatch` is used to execute multiple graph traversals in a - single transaction. If any traversal in the batch fails, the entire - batch will fail to apply. - - If a TraversalBatch is bounded to a DSE session, it can be executed using - `traversal_batch.execute()`. - """ - - _session = None - _execution_profile = None - - def __init__(self, session=None, execution_profile=None): - """ - :param session: (Optional) A DSE session - :param execution_profile: (Optional) The execution profile to use for the batch execution - """ - self._session = session - self._execution_profile = execution_profile - - def add(self, traversal): - """ - Add a traversal to the batch. - - :param traversal: A gremlin GraphTraversal - """ - raise NotImplementedError() - - def add_all(self, traversals): - """ - Adds a sequence of traversals to the batch. - - :param traversals: A sequence of gremlin GraphTraversal - """ - raise NotImplementedError() - - def execute(self): - """ - Execute the traversal batch if bounded to a `DSE Session`. - """ - raise NotImplementedError() - - def as_graph_statement(self): - """ - Return the traversal batch as GraphStatement. - """ - raise NotImplementedError() - - def clear(self): - """ - Clear a traversal batch for reuse. - """ - raise NotImplementedError() - - def __len__(self): - raise NotImplementedError() - - def __str__(self): - return u''.format(len(self)) - - __repr__ = __str__ - - - class _DefaultTraversalBatch(TraversalBatch): - - _traversals = None - - def __init__(self, *args, **kwargs): - super(_DefaultTraversalBatch, self).__init__(*args, **kwargs) - self._traversals = [] - - @property - def _query(self): - return u"[{0}]".format(','.join(self._traversals)) - - def add(self, traversal): - if not isinstance(traversal, GraphTraversal): - raise ValueError('traversal should be a gremlin GraphTraversal') - - query = _query_from_traversal(traversal) - self._traversals.append(query) - - return self - - def add_all(self, traversals): - for traversal in traversals: - self.add(traversal) - - def as_graph_statement(self): - return SimpleGraphStatement(self._query) - - def execute(self): - if self._session is None: - raise ValueError('A DSE Session must be provided to execute the traversal batch.') - - from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT - execution_profile = self._execution_profile if self._execution_profile else EXEC_PROFILE_GRAPH_DEFAULT - return self._session.execute_graph(self._query, execution_profile=execution_profile) - - def clear(self): - del self._traversals[:] - - def __len__(self): - return len(self._traversals) \ No newline at end of file diff --git a/tests/integration/advanced/graph/__init__.py b/tests/integration/advanced/graph/__init__.py new file mode 100644 index 0000000000..2c9ca172f8 --- /dev/null +++ b/tests/integration/advanced/graph/__init__.py @@ -0,0 +1,13 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/integration/advanced/graph/fluent/__init__.py b/tests/integration/advanced/graph/fluent/__init__.py new file mode 100644 index 0000000000..2c9ca172f8 --- /dev/null +++ b/tests/integration/advanced/graph/fluent/__init__.py @@ -0,0 +1,13 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/integration/advanced/graph/fluent/test_graph.py b/tests/integration/advanced/graph/fluent/test_graph.py new file mode 100644 index 0000000000..623e275352 --- /dev/null +++ b/tests/integration/advanced/graph/fluent/test_graph.py @@ -0,0 +1,715 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import sys +from cassandra.datastax.graph.fluent import DseGraph +from gremlin_python.process.graph_traversal import GraphTraversal, GraphTraversalSource +from gremlin_python.process.traversal import P +from tests.integration import DSE_VERSION, requiredse, greaterthanorequaldse60 +from tests.integration.advanced import BasicGraphUnitTestCase, use_single_node_with_graph_and_solr, \ + use_single_node_with_graph, generate_classic, generate_line_graph, generate_multi_field_graph, \ + generate_large_complex_graph, generate_type_graph_schema, validate_classic_vertex, validate_classic_edge, \ + validate_generic_vertex_result_type, validate_classic_edge_properties, validate_line_edge, \ + validate_generic_edge_result_type, validate_path_result_type, TYPE_MAP + + +from gremlin_python.structure.graph import Edge as TravEdge +from gremlin_python.structure.graph import Vertex as TravVertex +from cassandra.graph import Vertex, Edge +from cassandra.util import Point, Polygon, LineString +import datetime +from six import string_types +import six +if six.PY3: + import ipaddress + +def setup_module(): + if DSE_VERSION: + dse_options = {'graph': {'realtime_evaluation_timeout_in_seconds': 60}} + use_single_node_with_graph(dse_options=dse_options) + + +def check_equality_base(testcase, original, read_value): + if isinstance(original, float): + testcase.assertAlmostEqual(original, read_value, delta=.01) + elif six.PY3 and isinstance(original, ipaddress.IPv4Address): + testcase.assertAlmostEqual(original, ipaddress.IPv4Address(read_value)) + elif six.PY3 and isinstance(original, ipaddress.IPv6Address): + testcase.assertAlmostEqual(original, ipaddress.IPv6Address(read_value)) + else: + testcase.assertEqual(original, read_value) + + +class AbstractTraversalTest(): + + def test_basic_query(self): + """ + Test to validate that basic graph queries works + + Creates a simple classic tinkerpot graph, and attempts to preform a basic query + using Tinkerpop's GLV with both explicit and implicit execution + ensuring that each one is correct. See reference graph here + http://www.tinkerpop.com/docs/3.0.0.M1/ + + @since 1.0.0 + @jira_ticket PYTHON-641 + @expected_result graph should generate and all vertices and edge results should be + + @test_category dse graph + """ + + + g = self.fetch_traversal_source() + generate_classic(self.session) + traversal =g.V().has('name', 'marko').out('knows').values('name') + results_list = self.execute_traversal(traversal) + self.assertEqual(len(results_list), 2) + self.assertIn('vadas', results_list) + self.assertIn('josh', results_list) + + def test_classic_graph(self): + """ + Test to validate that basic graph generation, and vertex and edges are surfaced correctly + + Creates a simple classic tinkerpot graph, and iterates over the the vertices and edges + using Tinkerpop's GLV with both explicit and implicit execution + ensuring that each one iscorrect. See reference graph here + http://www.tinkerpop.com/docs/3.0.0.M1/ + + @since 1.0.0 + @jira_ticket PYTHON-641 + @expected_result graph should generate and all vertices and edge results should be + + @test_category dse graph + """ + + generate_classic(self.session) + g = self.fetch_traversal_source() + traversal = g.V() + vert_list = self.execute_traversal(traversal) + + for vertex in vert_list: + self._validate_classic_vertex(g, vertex) + traversal = g.E() + edge_list = self.execute_traversal(traversal) + for edge in edge_list: + self._validate_classic_edge(g, edge) + + def test_graph_classic_path(self): + """ + Test to validate that the path version of the result type is generated correctly. It also + tests basic path results as that is not covered elsewhere + + @since 1.0.0 + @jira_ticket PYTHON-641 + @expected_result path object should be unpacked correctly including all nested edges and verticies + @test_category dse graph + """ + generate_classic(self.session) + g = self.fetch_traversal_source() + traversal = g.V().hasLabel('person').has('name', 'marko').as_('a').outE('knows').inV().as_('c', 'd').outE('created').as_('e', 'f', 'g').inV().path() + path_list = self.execute_traversal(traversal) + self.assertEqual(len(path_list), 2) + for path in path_list: + self._validate_path_result_type(g, path) + + + def test_range_query(self): + """ + Test to validate range queries are handled correctly. + + Creates a very large line graph script and executes it. Then proceeds to to a range + limited query against it, and ensure that the results are formated correctly and that + the result set is properly sized. + + @since 1.0.0 + @jira_ticket PYTHON-641 + @expected_result result set should be properly formated and properly sized + + @test_category dse graph + """ + + + query_to_run = generate_line_graph(150) + self.session.execute_graph(query_to_run) + g = self.fetch_traversal_source() + + traversal = g.E().range(0,10) + edges = self.execute_traversal(traversal) + self.assertEqual(len(edges), 10) + for edge in edges: + self._validate_line_edge(g, edge) + + def test_result_types(self): + """ + Test to validate that the edge and vertex version of results are constructed correctly. + + @since 1.0.0 + @jira_ticket PYTHON-641 + @expected_result edge/vertex result types should be unpacked correctly. + @test_category dse graph + """ + generate_multi_field_graph(self.session) # TODO: we could just make a single vertex with properties of all types, or even a simple query that just uses a sequence of groovy expressions + g = self.fetch_traversal_source() + traversal = g.V() + vertices = self.execute_traversal(traversal) + for vertex in vertices: + self._validate_type(g, vertex) + + def test_large_result_set(self): + """ + Test to validate that large result sets return correctly. + + Creates a very large graph. Ensures that large result sets are handled appropriately. + + @since 1.0.0 + @jira_ticket PYTHON-641 + @expected_result when limits of result sets are hit errors should be surfaced appropriately + + @test_category dse graph + """ + generate_large_complex_graph(self.session, 5000) + g = self.fetch_traversal_source() + traversal = g.V() + vertices = self.execute_traversal(traversal) + for vertex in vertices: + self._validate_generic_vertex_result_type(g,vertex) + + def test_vertex_meta_properties(self): + """ + Test verifying vertex property properties + + @since 1.0.0 + @jira_ticket PYTHON-641 + + @test_category dse graph + """ + s = self.session + s.execute_graph("schema.propertyKey('k0').Text().ifNotExists().create();") + s.execute_graph("schema.propertyKey('k1').Text().ifNotExists().create();") + s.execute_graph("schema.propertyKey('key').Text().properties('k0', 'k1').ifNotExists().create();") + s.execute_graph("schema.vertexLabel('MLP').properties('key').ifNotExists().create();") + s.execute_graph("schema.config().option('graph.allow_scan').set('true');") + v = s.execute_graph('''v = graph.addVertex('MLP') + v.property('key', 'meta_prop', 'k0', 'v0', 'k1', 'v1') + v''')[0] + + g = self.fetch_traversal_source() + + traversal = g.V() + # This should contain key, and value where value is a property + # This should be a vertex property and should contain sub properties + results = self.execute_traversal(traversal) + self._validate_meta_property(g, results[0]) + + def test_vertex_multiple_properties(self): + """ + Test verifying vertex property form for various Cardinality + + All key types are encoded as a list, regardless of cardinality + + Single cardinality properties have only one value -- the last one added + + Default is single (this is config dependent) + + @since 1.0.0 + @jira_ticket PYTHON-641 + + @test_category dse graph + """ + s = self.session + s.execute_graph('''Schema schema = graph.schema(); + schema.propertyKey('mult_key').Text().multiple().ifNotExists().create(); + schema.propertyKey('single_key').Text().single().ifNotExists().create(); + schema.vertexLabel('MPW1').properties('mult_key').ifNotExists().create(); + schema.vertexLabel('MPW2').properties('mult_key').ifNotExists().create(); + schema.vertexLabel('SW1').properties('single_key').ifNotExists().create();''') + + mpw1v = s.execute_graph('''v = graph.addVertex('MPW1') + v.property('mult_key', 'value') + v''')[0] + + mpw2v = s.execute_graph('''g.addV('MPW2').property('mult_key', 'value0').property('mult_key', 'value1')''')[0] + + g = self.fetch_traversal_source() + traversal = g.V(mpw1v.id).properties() + + vertex_props = self.execute_traversal(traversal) + + self.assertEqual(len(vertex_props), 1) + + self.assertEqual(self.fetch_key_from_prop(vertex_props[0]), "mult_key") + self.assertEqual(vertex_props[0].value, "value") + + # multiple_with_two_values + #v = s.execute_graph('''g.addV(label, 'MPW2', 'mult_key', 'value0', 'mult_key', 'value1')''')[0] + traversal = g.V(mpw2v.id).properties() + + vertex_props = self.execute_traversal(traversal) + + self.assertEqual(len(vertex_props), 2) + self.assertEqual(self.fetch_key_from_prop(vertex_props[0]), 'mult_key') + self.assertEqual(self.fetch_key_from_prop(vertex_props[1]), 'mult_key') + self.assertEqual(vertex_props[0].value, 'value0') + self.assertEqual(vertex_props[1].value, 'value1') + + # single_with_one_value + v = s.execute_graph('''v = graph.addVertex('SW1') + v.property('single_key', 'value') + v''')[0] + traversal = g.V(v.id).properties() + vertex_props = self.execute_traversal(traversal) + self.assertEqual(len(vertex_props), 1) + self.assertEqual(self.fetch_key_from_prop(vertex_props[0]), "single_key") + self.assertEqual(vertex_props[0].value, "value") + + + def should_parse_meta_properties(self): + g = self.fetch_traversal_source() + g.addV("meta_v").property("meta_prop", "hello", "sub_prop", "hi", "sub_prop2", "hi2") + + + def test_all_graph_types_with_schema(self): + """ + Exhaustively goes through each type that is supported by dse_graph. + creates a vertex for each type using a dse-tinkerpop traversal, + It then attempts to fetch it from the server and compares it to what was inserted + Prime the graph with the correct schema first + + @since 1.0.0 + @jira_ticket PYTHON-641 + @expected_result inserted objects are equivalent to those retrieved + + @test_category dse graph + """ + generate_type_graph_schema(self.session) + # if result set is not parsed correctly this will throw an exception + + self._write_and_read_data_types() + + + def test_all_graph_types_without_schema(self): + """ + Exhaustively goes through each type that is supported by dse_graph. + creates a vertex for each type using a dse-tinkerpop traversal, + It then attempts to fetch it from the server and compares it to what was inserted + Do not prime the graph with the correct schema first + + @since 1.0.0 + @jira_ticket PYTHON-641 + @expected_result inserted objects are equivalent to those retrieved + + @test_category dse graph + """ + + # Prime graph using common utilites + generate_type_graph_schema(self.session, prime_schema=False) + self._write_and_read_data_types() + + def test_dsl(self): + """ + The test creates a SocialTraversal and a SocialTraversalSource as part of + a DSL. Then calls it's method and checks the results to verify + we have the expected results + + @since @since 1.1.0a1 + @jira_ticket PYTHON-790 + @expected_result only the vertex corresponding to marko is in the result + + @test_category dse graph + """ + class SocialTraversal(GraphTraversal): + def knows(self, person_name): + return self.out("knows").hasLabel("person").has("name", person_name).in_() + + class SocialTraversalSource(GraphTraversalSource): + def __init__(self, *args, **kwargs): + super(SocialTraversalSource, self).__init__(*args, **kwargs) + self.graph_traversal = SocialTraversal + + def people(self, *names): + return self.get_graph_traversal().V().has("name", P.within(*names)) + + generate_classic(self.session) + g = self.fetch_traversal_source(traversal_class=SocialTraversalSource) + + traversal = g.people("marko", "albert").knows("vadas") + results = self.execute_traversal(traversal) + + self.assertEqual(len(results), 1) + only_vertex = results[0] + self._validate_classic_vertex(g, only_vertex) + + def test_bulked_results(self): + """ + Send a query expecting a bulked result and the driver "undoes" + the bulk and returns the expected list + + @since 1.1.0a1 + @jira_ticket PYTHON-771 + @expected_result the expanded list + + @test_category dse graph + """ + generate_classic(self.session) + g = self.fetch_traversal_source() + barrier_traversal = g.E().label().barrier() + results = self.execute_traversal(barrier_traversal) + self.assertEqual(["created", "created", "created", "created", "knows", "knows"], results) + + def _write_and_read_data_types(self): + g = self.fetch_traversal_source() + for key in TYPE_MAP.keys(): + vertex_label = generate_type_graph_schema.single_vertex + property_name = key + "value" + data_value = TYPE_MAP[key][1] + + write_traversal = g.addV(vertex_label).property(property_name, data_value) + self.execute_traversal(write_traversal) + + read_traversal = g.V().hasLabel(vertex_label).has(property_name).values() + results = self.execute_traversal(read_traversal) + + self._check_equality(g, data_value, results[0]) + + def fetch_edge_props(self, g, edge): + edge_props = g.E(edge.id).properties().toList() + return edge_props + + def fetch_vertex_props(self, g, vertex): + + vertex_props = g.V(vertex.id).properties().toList() + return vertex_props + + def _check_equality(self, g, original, read_value): + return check_equality_base(self, original, read_value) + + +@requiredse +class ImplicitExecutionTest(AbstractTraversalTest, BasicGraphUnitTestCase): + """ + This test class will execute all tests of the AbstractTraversalTestClass using implicit execution + This all traversal will be run directly using toList() + """ + def setUp(self): + super(ImplicitExecutionTest, self).setUp() + if DSE_VERSION: + self.ep = DseGraph().create_execution_profile(self.graph_name) + self.cluster.add_execution_profile(self.graph_name, self.ep) + + def fetch_key_from_prop(self, property): + return property.key + + def fetch_traversal_source(self, **kwargs): + return DseGraph().traversal_source(self.session, self.graph_name, execution_profile=self.ep, **kwargs) + + def execute_traversal(self, traversal): + return traversal.toList() + + def _validate_classic_vertex(self, g, vertex): + # Checks the properties on a classic vertex for correctness + vertex_props = self.fetch_vertex_props(g, vertex) + vertex_prop_keys = [vp.key for vp in vertex_props] + self.assertEqual(len(vertex_prop_keys), 2) + self.assertIn('name', vertex_prop_keys) + self.assertTrue('lang' in vertex_prop_keys or 'age' in vertex_prop_keys) + + def _validate_generic_vertex_result_type(self,g, vertex): + # Checks a vertex object for it's generic properties + properties = self.fetch_vertex_props(g, vertex) + for attr in ('id', 'label'): + self.assertIsNotNone(getattr(vertex, attr)) + self.assertTrue( len(properties)>2) + + def _validate_classic_edge_properties(self, g, edge): + # Checks the properties on a classic edge for correctness + edge_props = self.fetch_edge_props(g, edge) + edge_prop_keys = [ep.key for ep in edge_props] + self.assertEqual(len(edge_prop_keys), 1) + self.assertIn('weight', edge_prop_keys) + + def _validate_classic_edge(self, g, edge): + self._validate_generic_edge_result_type(edge) + self._validate_classic_edge_properties(g, edge) + + def _validate_line_edge(self, g, edge): + self._validate_generic_edge_result_type(edge) + edge_props = self.fetch_edge_props(g, edge) + edge_prop_keys = [ep.key for ep in edge_props] + self.assertEqual(len(edge_prop_keys), 1) + self.assertIn('distance', edge_prop_keys) + + def _validate_generic_edge_result_type(self, edge): + self.assertIsInstance(edge, TravEdge) + + for attr in ('outV', 'inV', 'label', 'id'): + self.assertIsNotNone(getattr(edge, attr)) + + def _validate_path_result_type(self, g, objects_path): + for obj in objects_path: + if isinstance(obj, TravEdge): + self._validate_classic_edge(g, obj) + elif isinstance(obj, TravVertex): + self._validate_classic_vertex(g, obj) + else: + self.fail("Invalid object found in path " + str(object.type)) + + def _validate_meta_property(self, g, vertex): + meta_props = g.V(vertex.id).properties().toList() + self.assertEqual(len(meta_props), 1) + meta_prop = meta_props[0] + self.assertEqual(meta_prop.value,"meta_prop") + self.assertEqual(meta_prop.key,"key") + + nested_props = vertex_props = g.V(vertex.id).properties().properties().toList() + self.assertEqual(len(nested_props), 2) + for nested_prop in nested_props: + self.assertTrue(nested_prop.key in ['k0', 'k1']) + self.assertTrue(nested_prop.value in ['v0', 'v1']) + + def _validate_type(self, g, vertex): + props = self.fetch_vertex_props(g, vertex) + for prop in props: + value = prop.value + key = prop.key + _validate_prop(key, value, self) + + +@requiredse +class ExplicitExecutionBase(BasicGraphUnitTestCase): + def setUp(self): + super(ExplicitExecutionBase, self).setUp() + if DSE_VERSION: + self.ep = DseGraph().create_execution_profile(self.graph_name) + self.cluster.add_execution_profile(self.graph_name, self.ep) + + def fetch_traversal_source(self, **kwargs): + return DseGraph().traversal_source(self.session, self.graph_name, **kwargs) + + def execute_traversal(self, traversal): + query = DseGraph.query_from_traversal(traversal) + #Use an ep that is configured with the correct row factory, and bytecode-json language flat set + result_set = self.session.execute_graph(query, execution_profile=self.ep) + return list(result_set) + + +@requiredse +class ExplicitExecutionTest(ExplicitExecutionBase, AbstractTraversalTest): + """ + This test class will execute all tests of the AbstractTraversalTestClass using Explicit execution + All queries will be run by converting them to byte code, and calling execute graph explicitly with a generated ep. + """ + def fetch_key_from_prop(self, property): + return property.label + + def _validate_classic_vertex(self, g, vertex): + validate_classic_vertex(self, vertex) + + def _validate_generic_vertex_result_type(self,g, vertex): + validate_generic_vertex_result_type(self, vertex) + + def _validate_classic_edge_properties(self, g, edge): + validate_classic_edge_properties(self, edge) + + def _validate_classic_edge(self, g, edge): + validate_classic_edge(self, edge) + + def _validate_line_edge(self, g, edge): + validate_line_edge(self, edge) + + def _validate_generic_edge_result_type(self, edge): + validate_generic_edge_result_type(self, edge) + + def _validate_type(self, g, vertex): + for key in vertex.properties: + value = vertex.properties[key][0].value + _validate_prop(key, value, self) + + def _validate_path_result_type(self, g, path_obj): + # This pre-processing is due to a change in TinkerPop + # properties are not returned automatically anymore + # with some queries. + for obj in path_obj.objects: + if not obj.properties: + props = [] + if isinstance(obj, Edge): + obj.properties = { + p['key']: p['value'] + for p in self.fetch_edge_props(g, obj) + } + elif isinstance(obj, Vertex): + obj.properties = { + p['label']: p['value'] + for p in self.fetch_vertex_props(g, obj) + } + + validate_path_result_type(self, path_obj) + + def _validate_meta_property(self, g, vertex): + + self.assertEqual(len(vertex.properties), 1) + self.assertEqual(len(vertex.properties['key']), 1) + p = vertex.properties['key'][0] + self.assertEqual(p.label, 'key') + self.assertEqual(p.value, 'meta_prop') + self.assertEqual(p.properties, {'k0': 'v0', 'k1': 'v1'}) + + +def _validate_prop(key, value, unittest): + if any(key.startswith(t) for t in ('int', 'short')): + typ = int + + elif any(key.startswith(t) for t in ('long',)): + if sys.version_info >= (3, 0): + typ = int + else: + typ = long + elif any(key.startswith(t) for t in ('float', 'double')): + typ = float + elif any(key.startswith(t) for t in ('polygon',)): + typ = Polygon + elif any(key.startswith(t) for t in ('point',)): + typ = Point + elif any(key.startswith(t) for t in ('Linestring',)): + typ = LineString + elif any(key.startswith(t) for t in ('neg',)): + typ = string_types + elif any(key.startswith(t) for t in ('date',)): + typ = datetime.date + elif any(key.startswith(t) for t in ('time',)): + typ = datetime.time + else: + unittest.fail("Received unexpected type: %s" % key) + unittest.assertIsInstance(value, typ) + + +@requiredse +@greaterthanorequaldse60 +class BatchStatementTests(ExplicitExecutionBase): + + def setUp(self): + super(BatchStatementTests, self).setUp() + self.g = self.fetch_traversal_source() + + if hasattr(self, "batch"): + self.batch.clear() + else: + self.batch = DseGraph.batch(session=self.session, execution_profile=self.ep) + + def test_batch_with_schema(self): + """ + Sends a Batch statement and verifies it has succeeded with a schema created + + @since 1.1.0 + @jira_ticket PYTHON-789 + @expected_result ValueError is arisen + + @test_category dse graph + """ + generate_type_graph_schema(self.session) + self._send_batch_and_read_results() + + def test_batch_without_schema(self): + """ + Sends a Batch statement and verifies it has succeeded without a schema created + + @since 1.1.0 + @jira_ticket PYTHON-789 + @expected_result ValueError is arisen + + @test_category dse graph + """ + generate_type_graph_schema(self.session) + self._send_batch_and_read_results() + + def test_batch_with_schema_add_all(self): + """ + Sends a Batch statement and verifies it has succeeded with a schema created. + Uses :method:`dse_graph.query._BatchGraphStatement.add_all` to add the statements + instead of :method:`dse_graph.query._BatchGraphStatement.add` + + @since 1.1.0 + @jira_ticket PYTHON-789 + @expected_result ValueError is arisen + + @test_category dse graph + """ + generate_type_graph_schema(self.session) + self._send_batch_and_read_results(add_all=True) + + def test_batch_without_schema_add_all(self): + """ + Sends a Batch statement and verifies it has succeeded without a schema created + Uses :method:`dse_graph.query._BatchGraphStatement.add_all` to add the statements + instead of :method:`dse_graph.query._BatchGraphStatement.add` + + @since 1.1.0 + @jira_ticket PYTHON-789 + @expected_result ValueError is arisen + + @test_category dse graph + """ + generate_type_graph_schema(self.session, prime_schema=False) + self._send_batch_and_read_results(add_all=True) + + def test_only_graph_traversals_are_accepted(self): + """ + Verifies that ValueError is risen if the parameter add is not a traversal + + @since 1.1.0 + @jira_ticket PYTHON-789 + @expected_result ValueError is arisen + + @test_category dse graph + """ + self.assertRaises(ValueError, self.batch.add, '{"@value":{"step":[["addV","poc_int"],' + '["property","bigint1value",{"@value":12,"@type":"g:Int32"}]]},' + '"@type":"g:Bytecode"}') + another_batch = DseGraph.batch() + self.assertRaises(ValueError, self.batch.add, another_batch) + + def _send_batch_and_read_results(self, add_all=False): + # For each supported type fetch create a vetex containing that type + vertex_label = generate_type_graph_schema.single_vertex + traversals = [] + for key in TYPE_MAP.keys(): + property_name = key + "value" + traversal = self.g.addV(vertex_label).property(property_name, TYPE_MAP[key][1]) + if not add_all: + self.batch.add(traversal) + traversals.append(traversal) + + if add_all: + self.batch.add_all(traversals) + + self.assertEqual(len(TYPE_MAP), len(self.batch)) + + self.batch.execute() + + traversal = self.g.V() + vertices = self.execute_traversal(traversal) + + self.assertEqual(len(vertices), len(TYPE_MAP), "g.V() returned {}".format(vertices)) + + # Iterate over all the vertices and check that they match the original input + for vertex in vertices: + key = list(vertex.properties.keys())[0].replace("value", "") + original = TYPE_MAP[key][1] + self._check_equality(self.g, original, vertex) + + def _check_equality(self,g, original, vertex): + for key in vertex.properties: + value = vertex.properties[key][0].value + check_equality_base(self, original, value) diff --git a/tests/integration/advanced/graph/fluent/test_search.py b/tests/integration/advanced/graph/fluent/test_search.py new file mode 100644 index 0000000000..dde4e73ab4 --- /dev/null +++ b/tests/integration/advanced/graph/fluent/test_search.py @@ -0,0 +1,347 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from cassandra.datastax.graph.fluent import DseGraph +import time +from cassandra.datastax.graph.fluent.predicates import Search, Geo, GeoUnit +from tests.integration.advanced import BasicSharedGraphUnitTestCase, generate_address_book_graph, use_single_node_with_graph_and_solr +from tests.integration import greaterthanorequaldse51, requiredse, DSE_VERSION +from cassandra.util import Distance +from cassandra import InvalidRequest + + +def setup_module(): + if DSE_VERSION: + use_single_node_with_graph_and_solr() + + +class AbstractSearchTest(): + + def test_search_by_prefix(self): + """ + Test to validate that solr searches by prefix function. + + @since 1.0.0 + @jira_ticket PYTHON-660 + @expected_result all names starting with Paul should be returned + + @test_category dse graph + """ + g = self.fetch_traversal_source() + traversal = g.V().has("person", "name", Search.prefix("Paul")).values("name") + results_list = self.execute_traversal(traversal) + self.assertEqual(len(results_list), 1) + self.assertEqual(results_list[0], "Paul Thomas Joe") + + def test_search_by_regex(self): + """ + Test to validate that solr searches by regex function. + + @since 1.0.0 + @jira_ticket PYTHON-660 + @expected_result all names containing Paul should be returned + + @test_category dse graph + """ + g = self.fetch_traversal_source() + traversal = g.V().has("person", "name", Search.regex(".*Paul.*")).values("name") + results_list = self.execute_traversal(traversal) + self.assertEqual(len(results_list), 2) + self.assertIn("Paul Thomas Joe", results_list ) + self.assertIn("James Paul Smith", results_list ) + + def test_search_by_token(self): + """ + Test to validate that solr searches by token. + + @since 1.0.0 + @jira_ticket PYTHON-660 + @expected_result all names with description containing could shoud be returned + + @test_category dse graph + """ + g = self.fetch_traversal_source() + traversal = g.V().has("person", "description", Search.token("cold")).values("name") + results_list = self.execute_traversal(traversal) + self.assertEqual(len(results_list), 2) + self.assertIn("Jill Alice", results_list ) + self.assertIn("George Bill Steve", results_list) + + + def test_search_by_token_prefix(self): + """ + Test to validate that solr searches by token prefix. + + @since 1.0.0 + @jira_ticket PYTHON-660 + @expected_result all names with description containing a token starting with h are returned + + @test_category dse graph + """ + g = self.fetch_traversal_source() + traversal = g.V().has("person", "description", Search.token_prefix("h")).values("name") + results_list = self.execute_traversal(traversal) + self.assertEqual(len(results_list), 2) + self.assertIn("Paul Thomas Joe", results_list ) + self.assertIn( "James Paul Smith", results_list ) + + + def test_search_by_token_regex(self): + """ + Test to validate that solr searches by token regex. + + @since 1.0.0 + @jira_ticket PYTHON-660 + @expected_result all names with description containing nice or hospital are returned + + @test_category dse graph + """ + + g = self.fetch_traversal_source() + traversal = g.V().has("person", "description", Search.token_regex("(nice|hospital)")).values("name") + results_list = self.execute_traversal(traversal) + self.assertEqual(len(results_list), 2) + self.assertIn("Paul Thomas Joe", results_list ) + self.assertIn( "Jill Alice", results_list ) + + def _assert_in_distance(self, inside, names): + """ + Helper function that asserts that an exception is arisen if geodetic predicates are used + in cartesian geometry. Also asserts that the expected list is equal to the returned from + the transversal using different search indexes. + """ + def assert_equal_list(L1, L2): + return len(L1) == len(L2) and sorted(L1) == sorted(L2) + + g = self.fetch_traversal_source() + + traversal = g.V().has("person", "pointPropWithBoundsWithSearchIndex", inside).values("name") + # throws an exception because of a SOLR/Search limitation in the indexing process + # may be resolved in the future + self.assertRaises(InvalidRequest, self.execute_traversal, traversal) + + traversal = g.V().has("person", "pointPropWithBounds", inside).values("name") + results_list = self.execute_traversal(traversal) + assert_equal_list(names, results_list) + + traversal = g.V().has("person", "pointPropWithGeoBoundsWithSearchIndex", inside).values("name") + results_list = self.execute_traversal(traversal) + assert_equal_list(names, results_list) + + traversal = g.V().has("person", "pointPropWithGeoBounds", inside).values("name") + results_list = self.execute_traversal(traversal) + assert_equal_list(names, results_list) + + @greaterthanorequaldse51 + def test_search_by_distance(self): + """ + Test to validate that solr searches by distance. + + @since 1.0.0 + @jira_ticket PYTHON-660 + @expected_result all names with a geo location within a 2 degree distance of -92,44 are returned + + @test_category dse graph + """ + self._assert_in_distance( + Geo.inside(Distance(-92, 44, 2)), + ["Paul Thomas Joe", "George Bill Steve"] + ) + + @greaterthanorequaldse51 + def test_search_by_distance_with_meters_units(self): + """ + Test to validate that solr searches by distance. + + @since 2.0.0 + @jira_ticket PYTHON-698 + @expected_result all names with a geo location within a 56k-meter radius of -92,44 are returned + + @test_category dse graph + """ + self._assert_in_distance( + Geo.inside(Distance(-92, 44, 56000), GeoUnit.METERS), + ["Paul Thomas Joe"] + ) + + @greaterthanorequaldse51 + def test_search_by_distance_with_miles_units(self): + """ + Test to validate that solr searches by distance. + + @since 2.0.0 + @jira_ticket PYTHON-698 + @expected_result all names with a geo location within a 70-mile radius of -92,44 are returned + + @test_category dse graph + """ + self._assert_in_distance( + Geo.inside(Distance(-92, 44, 70), GeoUnit.MILES), + ["Paul Thomas Joe", "George Bill Steve"] + ) + + @greaterthanorequaldse51 + def test_search_by_distance_check_limit(self): + """ + Test to validate that solr searches by distance using several units. It will also validate + that and exception is arisen if geodetic predicates are used against cartesian geometry + + @since 2.0.0 + @jira_ticket PYTHON-698 + @expected_result if the search distance is below the real distance only one + name will be in the list, otherwise, two + + @test_category dse graph + """ + # Paul Thomas Joe and George Bill Steve are 64.6923761881464 km apart + self._assert_in_distance( + Geo.inside(Distance(-92.46295, 44.0234, 65), GeoUnit.KILOMETERS), + ["George Bill Steve", "Paul Thomas Joe"] + ) + + self._assert_in_distance( + Geo.inside(Distance(-92.46295, 44.0234, 64), GeoUnit.KILOMETERS), + ["Paul Thomas Joe"] + ) + + # Paul Thomas Joe and George Bill Steve are 40.19797892069464 miles apart + self._assert_in_distance( + Geo.inside(Distance(-92.46295, 44.0234, 41), GeoUnit.MILES), + ["George Bill Steve", "Paul Thomas Joe"] + ) + + self._assert_in_distance( + Geo.inside(Distance(-92.46295, 44.0234, 40), GeoUnit.MILES), + ["Paul Thomas Joe"] + ) + + @greaterthanorequaldse51 + def test_search_by_fuzzy(self): + """ + Test to validate that solr searches by distance. + + @since 1.0.0 + @jira_ticket PYTHON-664 + @expected_result all names with a geo location within a 2 radius distance of -92,44 are returned + + @test_category dse graph + """ + g = self.fetch_traversal_source() + traversal = g.V().has("person", "name", Search.fuzzy("Paul Thamas Joe" ,1)).values("name") + results_list = self.execute_traversal(traversal) + self.assertEqual(len(results_list), 1) + self.assertIn("Paul Thomas Joe", results_list ) + + traversal = g.V().has("person", "name", Search.fuzzy("Paul Thames Joe" ,1)).values("name") + results_list = self.execute_traversal(traversal) + self.assertEqual(len(results_list), 0) + + @greaterthanorequaldse51 + def test_search_by_fuzzy_token(self): + """ + Test to validate that fuzzy searches. + + @since 1.0.0 + @jira_ticket PYTHON-664 + @expected_result all names with that differ from the search criteria by one letter should be returned + + @test_category dse graph + """ + g = self.fetch_traversal_source() + traversal = g.V().has("person", "description", Search.token_fuzzy("lives", 1)).values("name"); + # Should match 'Paul Thomas Joe' since description contains 'Lives' + # Should match 'James Paul Joe' since description contains 'Likes' + results_list = self.execute_traversal(traversal) + self.assertEqual(len(results_list), 2) + self.assertIn("Paul Thomas Joe", results_list ) + self.assertIn("James Paul Smith", results_list ) + + traversal = g.V().has("person", "description", Search.token_fuzzy("loues", 1)).values("name"); + results_list = self.execute_traversal(traversal) + self.assertEqual(len(results_list), 0) + + @greaterthanorequaldse51 + def test_search_by_phrase(self): + """ + Test to validate that phrase searches. + + @since 1.0.0 + @jira_ticket PYTHON-664 + @expected_result all names with that differ from the search phrase criteria by two letter should be returned + + @test_category dse graph + """ + g = self.fetch_traversal_source() + traversal = g.V().has("person", "description", Search.phrase("a cold", 2)).values("name"); + #Should match 'George Bill Steve' since 'A cold dude' is at distance of 0 for 'a cold'. + #Should match 'Jill Alice' since 'Enjoys a very nice cold coca cola' is at distance of 2 for 'a cold'. + results_list = self.execute_traversal(traversal) + self.assertEqual(len(results_list), 2) + self.assertIn('George Bill Steve', results_list ) + self.assertIn('Jill Alice', results_list ) + + traversal = g.V().has("user", "description", Search.phrase("a bald", 2)).values("name"); + results_list = self.execute_traversal(traversal) + self.assertEqual(len(results_list), 0) + + + +@requiredse +class ImplicitSearchTest(AbstractSearchTest, BasicSharedGraphUnitTestCase): + """ + This test class will execute all tests of the AbstractSearchTest using implicit execution + All traversals will be run directly using toList() + """ + @classmethod + def setUpClass(self): + super(ImplicitSearchTest, self).setUpClass() + if DSE_VERSION: + self.ep = DseGraph().create_execution_profile(self.graph_name) + self.cluster.add_execution_profile(self.graph_name, self.ep) + generate_address_book_graph(self.session, 0) + time.sleep(20) + + def fetch_key_from_prop(self, property): + return property.key + + def fetch_traversal_source(self): + return DseGraph().traversal_source(self.session, self.graph_name, execution_profile=self.ep) + + def execute_traversal(self, traversal): + return traversal.toList() + + +@requiredse +class ExplicitSearchTest(AbstractSearchTest, BasicSharedGraphUnitTestCase): + """ + This test class will execute all tests of the AbstractSearchTest using implicit execution + All traversals will be converted to byte code then they will be executed explicitly. + """ + @classmethod + def setUpClass(self): + super(ExplicitSearchTest, self).setUpClass() + if DSE_VERSION: + self.ep = DseGraph().create_execution_profile(self.graph_name) + self.cluster.add_execution_profile(self.graph_name, self.ep) + generate_address_book_graph(self.session, 0) + time.sleep(20) + + def fetch_traversal_source(self): + return DseGraph().traversal_source(self.session, self.graph_name) + + def execute_traversal(self, traversal): + query = DseGraph.query_from_traversal(traversal) + #Use an ep that is configured with the correct row factory, and bytecode-json language flat set + result_set = self.session.execute_graph(query, execution_profile=self.ep) + return list(result_set) From 05dc365d2416d784ed888db9a3ec1b875b1af594 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 24 Sep 2019 15:44:59 -0400 Subject: [PATCH 1649/2005] Add graph extra requires --- setup.py | 8 +++++++- test-requirements.txt | 1 + 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index fa0da4dd5b..c4c9e979b3 100644 --- a/setup.py +++ b/setup.py @@ -409,6 +409,10 @@ def run_setup(extensions): if not PY3: dependencies.append('futures') + _EXTRAS_REQUIRE = { + 'graph': ['gremlinpython>=3.3.4,<3.3.9'] + } + setup( name='cassandra-driver', version=__version__, @@ -423,11 +427,13 @@ def run_setup(extensions): author='DataStax', packages=[ 'cassandra', 'cassandra.io', 'cassandra.cqlengine', 'cassandra.graph', - 'cassandra.datastax', 'cassandra.datastax.insights', 'cassandra.datastax.graph' + 'cassandra.datastax', 'cassandra.datastax.insights', 'cassandra.datastax.graph', + 'cassandra.datastax.graph.fluent' ], keywords='cassandra,cql,orm,dse,graph', include_package_data=True, install_requires=dependencies, + extras_require=_EXTRAS_REQUIRE, tests_require=['nose', 'mock>=2.0.0', 'PyYAML', 'pytz', 'sure'], classifiers=[ 'Development Status :: 5 - Production/Stable', diff --git a/test-requirements.txt b/test-requirements.txt index cccdf895a5..2760e27b47 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -18,3 +18,4 @@ requests backports.ssl_match_hostname; python_version < '2.7.9' futurist; python_version >= '3.7' asynctest; python_version > '3.4' +gremlinpython>=3.3.4,<3.3.9 From 4d7ae9b4c814d89ed98ef44c007d4f9823169104 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 25 Sep 2019 09:32:12 -0400 Subject: [PATCH 1650/2005] Fix generate_large_complex_graph to enable dev and allow_scan --- tests/integration/advanced/__init__.py | 64 ++++++++++++++------------ 1 file changed, 35 insertions(+), 29 deletions(-) diff --git a/tests/integration/advanced/__init__.py b/tests/integration/advanced/__init__.py index 07f390397d..a943beac41 100644 --- a/tests/integration/advanced/__init__.py +++ b/tests/integration/advanced/__init__.py @@ -614,36 +614,42 @@ def generate_address_book_graph(session, size): def generate_large_complex_graph(session, size): - to_run = '''schema.config().option('graph.schema_mode').set('development'); - int size = 2000; - List ids = new ArrayList(); - schema.propertyKey('ts').Int().single().ifNotExists().create(); - schema.propertyKey('sin').Int().single().ifNotExists().create(); - schema.propertyKey('cos').Int().single().ifNotExists().create(); - schema.propertyKey('ii').Int().single().ifNotExists().create(); - schema.vertexLabel('lcg').properties('ts', 'sin', 'cos', 'ii').ifNotExists().create(); - schema.edgeLabel('linked').connection('lcg', 'lcg').ifNotExists().create(); - Vertex v = graph.addVertex(label, 'lcg'); - v.property("ts", 100001); - v.property("sin", 0); - v.property("cos", 1); - v.property("ii", 0); + prof = session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, request_timeout=32) + to_run = ''' + schema.config().option('graph.schema_mode').set('development'); + schema.config().option('graph.allow_scan').set('true'); + ''' + session.execute_graph(to_run, execution_profile=prof) + to_run = ''' + int size = 2000; + List ids = new ArrayList(); + schema.propertyKey('ts').Int().single().ifNotExists().create(); + schema.propertyKey('sin').Int().single().ifNotExists().create(); + schema.propertyKey('cos').Int().single().ifNotExists().create(); + schema.propertyKey('ii').Int().single().ifNotExists().create(); + schema.vertexLabel('lcg').properties('ts', 'sin', 'cos', 'ii').ifNotExists().create(); + schema.edgeLabel('linked').connection('lcg', 'lcg').ifNotExists().create(); + Vertex v = graph.addVertex(label, 'lcg'); + v.property("ts", 100001); + v.property("sin", 0); + v.property("cos", 1); + v.property("ii", 0); + ids.add(v.id()); + Random rand = new Random(); + for (int ii = 1; ii < size; ii++) { + v = graph.addVertex(label, 'lcg'); + v.property("ii", ii); + v.property("ts", 100001 + ii); + v.property("sin", Math.sin(ii/5.0)); + v.property("cos", Math.cos(ii/5.0)); + Vertex u = g.V(ids.get(rand.nextInt(ids.size()))).next(); + v.addEdge("linked", u); + ids.add(u.id()); ids.add(v.id()); - Random rand = new Random(); - for (int ii = 1; ii < size; ii++) { - v = graph.addVertex(label, 'lcg'); - v.property("ii", ii); - v.property("ts", 100001 + ii); - v.property("sin", Math.sin(ii/5.0)); - v.property("cos", Math.cos(ii/5.0)); - Vertex u = g.V(ids.get(rand.nextInt(ids.size()))).next(); - v.addEdge("linked", u); - ids.add(u.id()); - ids.add(v.id()); - } - g.V().count();''' - prof = session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, request_timeout=32) - session.execute_graph(to_run, execution_profile=prof) + } + g.V().count();''' + + session.execute_graph(to_run, execution_profile=prof) def validate_classic_vertex(test, vertex): From 5f765f0a3e3ee52565777ed69392dce100f2601d Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 27 Sep 2019 14:10:53 -0400 Subject: [PATCH 1651/2005] Integrate dse-graph documentation --- docs/api/cassandra/cluster.rst | 8 +- .../cassandra/datastax/graph/fluent/index.rst | 24 ++ .../datastax/graph/fluent/predicates.rst | 11 + .../cassandra/datastax/graph/fluent/query.rst | 8 + docs/api/index.rst | 9 + docs/graph.rst | 19 +- docs/graph_fluent.rst | 339 ++++++++++++++++++ docs/index.rst | 4 + docs/installation.rst | 12 + 9 files changed, 424 insertions(+), 10 deletions(-) create mode 100644 docs/api/cassandra/datastax/graph/fluent/index.rst create mode 100644 docs/api/cassandra/datastax/graph/fluent/predicates.rst create mode 100644 docs/api/cassandra/datastax/graph/fluent/query.rst create mode 100644 docs/graph_fluent.rst diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index fef8f4d188..8d572b3307 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -152,9 +152,13 @@ .. autoattribute:: client_protocol_handler - .. automethod:: execute(statement[, parameters][, timeout][, trace][, custom_payload][, paging_state][, host]) + .. automethod:: execute(statement[, parameters][, timeout][, trace][, custom_payload][, paging_state][, host][, execute_as]) - .. automethod:: execute_async(statement[, parameters][, trace][, custom_payload][, paging_state][, host]) + .. automethod:: execute_async(statement[, parameters][, trace][, custom_payload][, paging_state][, host][, execute_as]) + + .. automethod:: execute_graph(statement[, parameters][, trace][, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT][, execute_as]) + + .. automethod:: execute_graph_async(statement[, parameters][, trace][, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT][, execute_as]) .. automethod:: prepare(statement) diff --git a/docs/api/cassandra/datastax/graph/fluent/index.rst b/docs/api/cassandra/datastax/graph/fluent/index.rst new file mode 100644 index 0000000000..5547e0fdd7 --- /dev/null +++ b/docs/api/cassandra/datastax/graph/fluent/index.rst @@ -0,0 +1,24 @@ +:mod:`cassandra.datastax.graph.fluent` +====================================== + +.. module:: cassandra.datastax.graph.fluent + +.. autoclass:: DseGraph + + .. autoattribute:: DSE_GRAPH_QUERY_LANGUAGE + + .. automethod:: create_execution_profile + + .. automethod:: query_from_traversal + + .. automethod:: traversal_source(session=None, graph_name=None, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT, traversal_class=None) + + .. automethod:: batch(session=None, execution_profile=None) + +.. autoclass:: DSESessionRemoteGraphConnection(session[, graph_name, execution_profile]) + +.. autoclass:: BaseGraphRowFactory + +.. autoclass:: graph_traversal_row_factory + +.. autoclass:: graph_traversal_dse_object_row_factory diff --git a/docs/api/cassandra/datastax/graph/fluent/predicates.rst b/docs/api/cassandra/datastax/graph/fluent/predicates.rst new file mode 100644 index 0000000000..09f9e4c8fa --- /dev/null +++ b/docs/api/cassandra/datastax/graph/fluent/predicates.rst @@ -0,0 +1,11 @@ +:mod:`cassandra.datastax.graph.fluent.predicates` +================================================= + +.. module:: cassandra.datastax.graph.fluent.predicates + + +.. autoclass:: Search + :members: + +.. autoclass:: Geo + :members: diff --git a/docs/api/cassandra/datastax/graph/fluent/query.rst b/docs/api/cassandra/datastax/graph/fluent/query.rst new file mode 100644 index 0000000000..3dd859f96e --- /dev/null +++ b/docs/api/cassandra/datastax/graph/fluent/query.rst @@ -0,0 +1,8 @@ +:mod:`cassandra.datastax.graph.fluent.query` +============================================ + +.. module:: cassandra.datastax.graph.fluent.query + + +.. autoclass:: TraversalBatch + :members: diff --git a/docs/api/index.rst b/docs/api/index.rst index a56d12031a..9695a796f8 100644 --- a/docs/api/index.rst +++ b/docs/api/index.rst @@ -40,3 +40,12 @@ Object Mapper cassandra/cqlengine/connection cassandra/cqlengine/management cassandra/cqlengine/usertype + +DataStax Graph +-------------- +.. toctree:: + :maxdepth: 1 + + cassandra/datastax/graph/fluent/index + cassandra/datastax/graph/fluent/query + cassandra/datastax/graph/fluent/predicates diff --git a/docs/graph.rst b/docs/graph.rst index b76ba76d66..ac057bc23c 100644 --- a/docs/graph.rst +++ b/docs/graph.rst @@ -1,5 +1,5 @@ -DSE Graph Queries -================= +DataStax Graph Queries +====================== The Cassandra driver executes graph queries over the Cassandra native protocol. Use :meth:`.Session.execute_graph` or :meth:`.Session.execute_graph_async` for @@ -7,9 +7,9 @@ executing gremlin queries in DSE Graph. Three Execution Profiles are provided suitable for graph execution: -* :data:`~.cluster.EXEC_PROFILE_GRAPH_DEFAULT` -* :data:`~.cluster.EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT` -* :data:`~.cluster.EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT` +* :data:`.cluster.EXEC_PROFILE_GRAPH_DEFAULT` +* :data:`.cluster.EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT` +* :data:`.cluster.EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT` See :doc:`getting_started` and :doc:`execution_profiles` for more detail on working with profiles. @@ -170,6 +170,8 @@ We are all set. You can now query your graph. Here are some examples:: To see a more graph examples, see `DataStax Graph Examples `_. +.. _graph-types: + Graph Types ~~~~~~~~~~~ @@ -222,11 +224,12 @@ or lists by index:: result.value # [0, 1, 2] (list) result[1] # 1 (list[1]) -You can use a different row factory by setting :attr:`.Session.default_graph_row_factory` or passing it to -:meth:`.Session.execute_graph`. For example, :func:`.datastax.graph.single_object_row_factory` returns the JSON result string`, +You can use a different row factory by setting :attr:`.cluster.ExecutionProfile.row_factory` or passing it to +:meth:`cluster.Session.execute_graph`. For example, :func:`.datastax.graph.single_object_row_factory` returns the JSON result string, unparsed. :func:`.datastax.graph.graph_result_row_factory` returns parsed, but unmodified results (such that all metadata is retained, unlike :func:`.datastax.graph.graph_object_row_factory`, which sheds some as attributes and properties are unpacked). These results -also provide convenience methods for converting to known types (:meth:`~.Result.as_vertex`, :meth:`~.Result.as_edge`, :meth:`~.Result.as_path`). +also provide convenience methods for converting to known types (:meth:`.datastax.graph.Result.as_vertex`, :meth:`.datastax.graph.Result.as_edge`, + :meth:`.datastax.graph.Result.as_path`). Vertex and Edge properties are never unpacked since their types are unknown. If you know your graph schema and want to deserialize properties, use the :class:`.datastax.graph.GraphSON1Deserializer`. It provides convenient methods to deserialize by types (e.g. diff --git a/docs/graph_fluent.rst b/docs/graph_fluent.rst new file mode 100644 index 0000000000..79ca9fa6b6 --- /dev/null +++ b/docs/graph_fluent.rst @@ -0,0 +1,339 @@ +DataStax Graph Fluent API +========================= + +The fluent API adds graph features to the core driver:: + +* A TinkerPop GraphTraversalSource builder to execute traversals on a DSE cluster +* The ability to execution traversal queries explicitly using execute_graph +* GraphSON serializers for all DSE Graph types. +* DSE Search predicates + +The Graph fluent API depends on Apache TinkerPop and is not installed by default. Make sure +you have the Graph requirements are properly :ref:`installed `. + +You might be interested in reading the :doc:`DSE Graph Getting Started documentation ` to +understand the basics of creating a graph and its schema. + +Graph Traversal Queries +~~~~~~~~~~~~~~~~~~~~~~~ + +The driver provides :meth:`.Session.execute_graph`, which allows users to execute traversal +query strings. Here is a simple example:: + + session.execute_graph("g.addV('genre').property('genreId', 1).property('name', 'Action').next();") + +Since graph queries can be very complex, working with strings is not very convenient and is +hard to maintain. This fluent API allows you to build Gremlin traversals and write your graph +queries directly in Python. These native traversal queries can be executed explicitly, with +a `Session` object, or implicitly:: + + g = DseGraph.traversal_source(session=dse_session) + g.addV('genre').property('genreId', 1).property('name', 'Action').next() + + # implicit execution caused by iterating over results + for v in g.V().has('genre', 'name', 'Drama').in('belongsTo').valueMap(): + print(v) + +These :ref:`Python types ` are also supported transparently:: + + g.addV('person').property('name', 'Mike').property('birthday', datetime(1984, 3, 11)). \ + property('house_yard', Polygon(((30, 10), (40, 40), (20, 40), (10, 20), (30, 10))) + +More readings about Gremlin: + +* `DataStax Drivers Fluent API `_ +* `gremlin-python documentation `_ + +Configuring a Traversal Execution Profile +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The fluent api takes advantage of *configuration profiles* to allow +different execution configurations for the various query handlers. Graph traversal +execution requires a custom execution profile to enable Gremlin-bytecode as +query language. Here is how to accomplish this configuration: + +.. code-block:: python + + from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_DEFAULT + from cassandra.datastax.graph.fluent import DseGraph + + ep = DseGraph.create_execution_profile('graph_name') + cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) + session = cluster.connect() + + g = DseGraph.traversal_source(session) # Build the GraphTraversalSource + print g.V().toList() # Traverse the Graph + +Note that the execution profile created with :meth:`DseGraph.create_execution_profile <.datastax.graph.fluent.DseGraph.create_execution_profile>` cannot +be used for any groovy string queries. + +If you want to change execution property defaults, please see the :doc:`Execution Profile documentation ` +for a more generalized discussion of the API. Graph traversal queries use the same execution profile defined for DSE graph. If you +need to change the default properties, please refer to the :doc:`DSE Graph query documentation page ` + + +Explicit Graph Traversal Execution with a DSE Session +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Traversal queries can be executed explicitly using `session.execute_graph` or `session.execute_graph_async`. These functions +return results as DSE graph types. If you are familiar with DSE queries or need async execution, you might prefer that way. +Below is an example of explicit execution. For this example, assume the schema has been generated as above: + +.. code-block:: python + + from cassandra.datastax.graph.fluent import DseGraph + from pprint import pprint + + # create a tinkerpop graphson2 ExecutionProfile + ep = DseGraph.create_execution_profile('graph_name') + cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) + session = cluster.connect() + + g = DseGraph.traversal_source(session=session) + addV_query = DseGraph.query_from_traversal( + g.addV('genre').property('genreId', 1).property('name', 'Action') + ) + v_query = DseGraph.query_from_traversal(g.V()) + + for result in session.execute_graph(addV_query): + pprint(result.value) + for result in session.execute_graph(v_query): + pprint(result.value) + +Implicit Graph Traversal Execution with TinkerPop +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Using the :class:`DseGraph <.datastax.graph.fluent.DseGraph>` class, you can build a GraphTraversalSource +that will execute queries on a DSE session without explicitly passing anything to +that session. We call this *implicit execution* because the `Session` is not +explicitly involved. Everything is managed internally by TinkerPop while +traversing the graph and the results are TinkerPop types as well. + +For example: + +.. code-block:: python + + # Build the GraphTraversalSource + g = DseGraph.traversal_source(session) + # implicitly execute the query by traversing the TraversalSource + g.addV('genre').property('genreId', 1).property('name', 'Action').next() + # view the results of the execution + pprint(g.V().toList()) + +Specify the Execution Profile explicitly +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If you don't want to change the default graph execution profile (`EXEC_PROFILE_GRAPH_DEFAULT`), you can register a new +one as usual and use it explicitly. Here is an example: + + +.. code-block:: python + + from cassandra.cluster import Cluster + from cassandra.datastax.graph.fluent import DseGraph + + cluster = Cluster() + ep = DseGraph.create_execution_profile('graph_name') + cluster.add_execution_profile('graph_traversal', ep) + session = cluster.connect() + + g = DseGraph.traversal_source() + query = DseGraph.query_from_traversal(g.V()) + session.execute_graph(query, execution_profile='graph_traversal') + +You can also create multiple GraphTraversalSources and use them with +the same execution profile (for different graphs): + +.. code-block:: python + + g_movies = DseGraph.traversal_source(session, graph_name='movies', ep) + g_series = DseGraph.traversal_source(session, graph_name='series', ep) + + print g_movies.V().toList() # Traverse the movies Graph + print g_series.V().toList() # Traverse the series Graph + +Batch Queries +~~~~~~~~~~~~~ + +DSE Graph supports batch queries using a :class:`TraversalBatch <.datastax.graph.fluent.query.TraversalBatch>` object +instantiated with :meth:`DseGraph.batch <.datastax.graph.fluent.DseGraph.batch>`. A :class:`TraversalBatch <.datastax.graph.fluent.query.TraversalBatch>` allows +you to execute multiple graph traversals in a single atomic transaction. A +traversal batch is executed with :meth:`.Session.execute_graph` or using +:meth:`TraversalBatch.execute <.datastax.graph.fluent.query.TraversalBatch.execute>` if bounded to a DSE session. + +Either way you choose to execute the traversal batch, you need to configure +the execution profile accordingly. Here is a example:: + + from cassandra.cluster import Cluster + from cassandra.datastax.graph.fluent import DseGraph + + ep = DseGraph.create_execution_profile('graph_name') + cluster = Cluster(execution_profiles={'graphson2': ep}) + session = cluster.connect() + + g = DseGraph.traversal_source() + +To execute the batch using :meth:`.Session.execute_graph`, you need to convert +the batch to a GraphStatement:: + + batch = DseGraph.batch() + + batch.add( + g.addV('genre').property('genreId', 1).property('name', 'Action')) + batch.add( + g.addV('genre').property('genreId', 2).property('name', 'Drama')) # Don't use `.next()` with a batch + + graph_statement = batch.as_graph_statement() + graph_statement.is_idempotent = True # configure any Statement parameters if needed... + session.execute_graph(graph_statement, execution_profile='graphson2') + +To execute the batch using :meth:`TraversalBatch.execute <.datastax.graph.fluent.query.TraversalBatch.execute>`, you need to bound the batch to a DSE session:: + + batch = DseGraph.batch(session, 'graphson2') # bound the session and execution profile + + batch.add( + g.addV('genre').property('genreId', 1).property('name', 'Action')) + batch.add( + g.addV('genre').property('genreId', 2).property('name', 'Drama')) # Don't use `.next()` with a batch + + batch.execute() + +DSL (Domain Specific Languages) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +DSL are very useful to write better domain-specific APIs and avoiding +code duplication. Let's say we have a graph of `People` and we produce +a lot of statistics based on age. All graph traversal queries of our +application would look like:: + + g.V().hasLabel("people").has("age", P.gt(21))... + + +which is not really verbose and quite annoying to repeat in a code base. Let's create a DSL:: + + from gremlin_python.process.graph_traversal import GraphTraversal, GraphTraversalSource + + class MyAppTraversal(GraphTraversal): + + def younger_than(self, age): + return self.has("age", P.lt(age)) + + def older_than(self, age): + return self.has("age", P.gt(age)) + + + class MyAppTraversalSource(GraphTraversalSource): + + def __init__(self, *args, **kwargs): + super(MyAppTraversalSource, self).__init__(*args, **kwargs) + self.graph_traversal = MyAppTraversal + + def people(self): + return self.get_graph_traversal().V().hasLabel("people") + +Now, we can use our DSL that is a lot cleaner:: + + from dse_graph import DseGraph + + # ... + g = DseGraph.traversal_source(session=session, traversal_class=MyAppTraversalsource) + + g.people().younger_than(21)... + g.people().older_than(30)... + +To see a more complete example of DSL, see the `Python killrvideo DSL app `_ + +Search +~~~~~~ + +DSE Graph can use search indexes that take advantage of DSE Search functionality for +efficient traversal queries. Here are the list of additional search predicates: + +Text tokenization: + +* :meth:`token <.datastax.graph.fluent.predicates.Search.token>` +* :meth:`token_prefix <.datastax.graph.fluent.predicates.Search.token_prefix>` +* :meth:`token_regex <.datastax.graph.fluent.predicates.Search.token_regex>` +* :meth:`token_fuzzy <.datastax.graph.fluent.predicates.Search.token_fuzzy>` + +Text match: + +* :meth:`prefix <.datastax.graph.fluent.predicates.Search.prefix>` +* :meth:`regex <.datastax.graph.fluent.predicates.Search.regex>` +* :meth:`fuzzy <.datastax.graph.fluent.predicates.Search.fuzzy>` +* :meth:`phrase <.datastax.graph.fluent.predicates.Search.phrase>` + +Geo: + +* :meth:`inside <.datastax.graph.fluent.predicates.Geo.inside>` + +Create search indexes +--------------------- + +For text tokenization: + +.. code-block:: python + + + s.execute_graph("schema.vertexLabel('my_vertex_label').index('search').search().by('text_field').asText().add()") + +For text match: + +.. code-block:: python + + + s.execute_graph("schema.vertexLabel('my_vertex_label').index('search').search().by('text_field').asString().add()") + + +For geospatial: + +You can create a geospatial index on Point and LineString fields. + +.. code-block:: python + + + s.execute_graph("schema.vertexLabel('my_vertex_label').index('search').search().by('point_field').add()") + + +Using search indexes +-------------------- + +Token: + +.. code-block:: python + + from cassandra.datastax.graph.fluent.predicates import Search + # ... + + g = DseGraph.traversal_source() + query = DseGraph.query_from_traversal( + g.V().has('my_vertex_label','text_field', Search.token_regex('Hello.+World')).values('text_field')) + session.execute_graph(query) + +Text: + +.. code-block:: python + + from cassandra.datastax.graph.fluent.predicates import Search + # ... + + g = DseGraph.traversal_source() + query = DseGraph.query_from_traversal( + g.V().has('my_vertex_label','text_field', Search.prefix('Hello')).values('text_field')) + session.execute_graph(query) + +Geospatial: + +.. code-block:: python + + from cassandra.datastax.graph.fluent.predicates import Geo + from cassandra.util import Distance + # ... + + g = DseGraph.traversal_source() + query = DseGraph.query_from_traversal( + g.V().has('my_vertex_label','point_field', Geo.inside(Distance(46, 71, 100)).values('point_field')) + session.execute_graph(query) + + +For more details, please refer to the official `DSE Search Indexes Documentation `_ diff --git a/docs/index.rst b/docs/index.rst index e67da242d9..c3508fd611 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -59,6 +59,9 @@ Contents :doc:`graph` Graph queries with DSE Graph +:doc:`graph_fluent` + DataStax Graph Fluent API + :doc:`CHANGELOG` Log of changes to the driver, organized by version. @@ -78,6 +81,7 @@ Contents object_mapper geo_types graph + graph_fluent dse_auth dates_and_times faq diff --git a/docs/installation.rst b/docs/installation.rst index 4d16df99ef..007f6e1743 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -88,6 +88,18 @@ To check if the installation was successful, you can run:: It should print something like "2.7.0". +.. _installation-datastax-graph: + +(*Optional*) DataStax Graph +--------------------------- +The driver provides an optional fluent graph API that depends on Apache TinkerPop (gremlinpython). It is +not installed by default. To be able to build Gremlin traversals, you need to install +the `graph` requirements:: + + pip install cassandra-driver[graph] + +See :doc:`graph_fluent` for more details about this API. + (*Optional*) Compression Support -------------------------------- Compression can optionally be used for communication between the driver and From 2e33c9c2e4ab79b9276dde15147cab4f1065ddfb Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 27 Sep 2019 15:34:00 -0400 Subject: [PATCH 1652/2005] Add cassandra.graph api docs --- cassandra/datastax/graph/query.py | 10 +-- docs/api/cassandra/cluster.rst | 9 ++ docs/api/cassandra/datastax/graph/index.rst | 94 +++++++++++++++++++++ docs/api/cassandra/graph.rst | 94 +++++++++++++++++++++ docs/api/index.rst | 2 + docs/graph.rst | 6 +- 6 files changed, 207 insertions(+), 8 deletions(-) create mode 100644 docs/api/cassandra/datastax/graph/index.rst create mode 100644 docs/api/cassandra/graph.rst diff --git a/cassandra/datastax/graph/query.py b/cassandra/datastax/graph/query.py index 0ebeeb28d3..50a03b5561 100644 --- a/cassandra/datastax/graph/query.py +++ b/cassandra/datastax/graph/query.py @@ -35,9 +35,9 @@ ('graph_source', 'choose the graph traversal source, configured on the server side.', 'graph-source'), ('graph_language', 'the language used in the queries (default "gremlin-groovy")', 'graph-language'), ('graph_protocol', 'the graph protocol that the server should use for query results (default "graphson-1-0")', 'graph-results'), - ('graph_read_consistency_level', '''read `dse.ConsistencyLevel `_ for graph queries (if distinct from session default). + ('graph_read_consistency_level', '''read `cassandra.ConsistencyLevel `_ for graph queries (if distinct from session default). Setting this overrides the native `Statement.consistency_level `_ for read operations from Cassandra persistence''', 'graph-read-consistency'), - ('graph_write_consistency_level', '''write `dse.ConsistencyLevel `_ for graph queries (if distinct from session default). + ('graph_write_consistency_level', '''write `cassandra.ConsistencyLevel `_ for graph queries (if distinct from session default). Setting this overrides the native `Statement.consistency_level `_ for write operations to Cassandra persistence.''', 'graph-write-consistency') ) _graph_option_names = tuple(option[0] for option in _graph_options) @@ -121,7 +121,7 @@ def set_source_graph(self): def set_graph_protocol(self, protocol): """ - Sets ``graph_protocol`` as server graph results format (See :class:`dse.graph.GraphProtocol`) + Sets ``graph_protocol`` as server graph results format (See :class:`cassandra.datastax.graph.GraphProtocol`) """ self.graph_protocol = protocol @@ -195,7 +195,7 @@ def single_object_row_factory(column_names, rows): def graph_result_row_factory(column_names, rows): """ - Returns a :class:`cassandra.graph.Result` object that can load graph results and produce specific types. + Returns a :class:`Result ` object that can load graph results and produce specific types. The Result JSON is deserialized and unpacked from the top-level 'result' dict. """ return [Result(json.loads(row[0])['result']) for row in rows] @@ -205,7 +205,7 @@ def graph_object_row_factory(column_names, rows): """ Like :func:`~.graph_result_row_factory`, except known element types (:class:`~.Vertex`, :class:`~.Edge`) are converted to their simplified objects. Some low-level metadata is shed in this conversion. Unknown result types are - still returned as :class:`dse.graph.Result`. + still returned as :class:`Result `. """ return _graph_object_sequence(json.loads(row[0])['result'] for row in rows) diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 8d572b3307..9d701bf8f8 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -128,6 +128,15 @@ .. autodata:: EXEC_PROFILE_DEFAULT :annotation: +.. autodata:: EXEC_PROFILE_GRAPH_DEFAULT + :annotation: + +.. autodata:: EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT + :annotation: + +.. autodata:: EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT + :annotation: + .. autoclass:: Session () .. autoattribute:: default_timeout diff --git a/docs/api/cassandra/datastax/graph/index.rst b/docs/api/cassandra/datastax/graph/index.rst new file mode 100644 index 0000000000..285ca5ffc2 --- /dev/null +++ b/docs/api/cassandra/datastax/graph/index.rst @@ -0,0 +1,94 @@ +``cassandra.datastax.graph`` - Graph Statements, Options, and Row Factories +=========================================================================== + +.. _api-datastax-graph: + +.. module:: cassandra.datastax.graph + +.. autofunction:: single_object_row_factory + +.. autofunction:: graph_result_row_factory + +.. autofunction:: graph_object_row_factory + +.. autoclass:: GraphProtocol + :members: + +.. autoclass:: GraphOptions + + .. autoattribute:: graph_name + + .. autoattribute:: graph_source + + .. autoattribute:: graph_language + + .. autoattribute:: graph_read_consistency_level + + .. autoattribute:: graph_write_consistency_level + + .. autoattribute:: is_default_source + + .. autoattribute:: is_analytics_source + + .. autoattribute:: is_graph_source + + .. automethod:: set_source_default + + .. automethod:: set_source_analytics + + .. automethod:: set_source_graph + + +.. autoclass:: SimpleGraphStatement + :members: + +.. autoclass:: Result + :members: + +.. autoclass:: Vertex + :members: + +.. autoclass:: VertexProperty + :members: + +.. autoclass:: Edge + :members: + +.. autoclass:: Path + :members: + +.. autoclass:: GraphSON1Serializer + :members: + +.. autoclass:: GraphSON1Deserializer + + .. automethod:: deserialize_date + + .. automethod:: deserialize_timestamp + + .. automethod:: deserialize_time + + .. automethod:: deserialize_duration + + .. automethod:: deserialize_int + + .. automethod:: deserialize_bigint + + .. automethod:: deserialize_double + + .. automethod:: deserialize_float + + .. automethod:: deserialize_uuid + + .. automethod:: deserialize_blob + + .. automethod:: deserialize_decimal + + .. automethod:: deserialize_point + + .. automethod:: deserialize_linestring + + .. automethod:: deserialize_polygon + +.. autoclass:: GraphSON2Reader + :members: diff --git a/docs/api/cassandra/graph.rst b/docs/api/cassandra/graph.rst new file mode 100644 index 0000000000..2211a25d96 --- /dev/null +++ b/docs/api/cassandra/graph.rst @@ -0,0 +1,94 @@ +``cassandra.graph`` - Graph Statements, Options, and Row Factories +================================================================== + +.. note:: This module is only for backward compatibility for dse-driver users. Consider using :ref:`cassandra.datastax.graph `. + +.. module:: cassandra.graph + +.. autofunction:: single_object_row_factory + +.. autofunction:: graph_result_row_factory + +.. autofunction:: graph_object_row_factory + +.. autoclass:: GraphProtocol + :members: + +.. autoclass:: GraphOptions + + .. autoattribute:: graph_name + + .. autoattribute:: graph_source + + .. autoattribute:: graph_language + + .. autoattribute:: graph_read_consistency_level + + .. autoattribute:: graph_write_consistency_level + + .. autoattribute:: is_default_source + + .. autoattribute:: is_analytics_source + + .. autoattribute:: is_graph_source + + .. automethod:: set_source_default + + .. automethod:: set_source_analytics + + .. automethod:: set_source_graph + + +.. autoclass:: SimpleGraphStatement + :members: + +.. autoclass:: Result + :members: + +.. autoclass:: Vertex + :members: + +.. autoclass:: VertexProperty + :members: + +.. autoclass:: Edge + :members: + +.. autoclass:: Path + :members: + +.. autoclass:: GraphSON1Serializer + :members: + +.. autoclass:: GraphSON1Deserializer + + .. automethod:: deserialize_date + + .. automethod:: deserialize_timestamp + + .. automethod:: deserialize_time + + .. automethod:: deserialize_duration + + .. automethod:: deserialize_int + + .. automethod:: deserialize_bigint + + .. automethod:: deserialize_double + + .. automethod:: deserialize_float + + .. automethod:: deserialize_uuid + + .. automethod:: deserialize_blob + + .. automethod:: deserialize_decimal + + .. automethod:: deserialize_point + + .. automethod:: deserialize_linestring + + .. automethod:: deserialize_polygon + +.. autoclass:: GraphSON2Reader + :members: diff --git a/docs/api/index.rst b/docs/api/index.rst index 9695a796f8..c816548867 100644 --- a/docs/api/index.rst +++ b/docs/api/index.rst @@ -10,6 +10,7 @@ Core Driver cassandra/cluster cassandra/policies cassandra/auth + cassandra/graph cassandra/metadata cassandra/metrics cassandra/query @@ -46,6 +47,7 @@ DataStax Graph .. toctree:: :maxdepth: 1 + cassandra/datastax/graph/index cassandra/datastax/graph/fluent/index cassandra/datastax/graph/fluent/query cassandra/datastax/graph/fluent/predicates diff --git a/docs/graph.rst b/docs/graph.rst index ac057bc23c..bd81f0cf4f 100644 --- a/docs/graph.rst +++ b/docs/graph.rst @@ -7,9 +7,9 @@ executing gremlin queries in DSE Graph. Three Execution Profiles are provided suitable for graph execution: -* :data:`.cluster.EXEC_PROFILE_GRAPH_DEFAULT` -* :data:`.cluster.EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT` -* :data:`.cluster.EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT` +* :data:`~.cluster.EXEC_PROFILE_GRAPH_DEFAULT` +* :data:`~.cluster.EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT` +* :data:`~.cluster.EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT` See :doc:`getting_started` and :doc:`execution_profiles` for more detail on working with profiles. From 232058ceb426c9dacf863afd3364d7678fa50867 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 30 Sep 2019 12:55:21 -0400 Subject: [PATCH 1653/2005] ConnectionHeartbeat test_empty_connection: make sure the thread is started before starting the test delay --- tests/unit/test_connection.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index fccf854b89..be205c33a8 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -31,6 +31,8 @@ from cassandra.protocol import (write_stringmultimap, write_int, write_string, SupportedMessage, ProtocolHandler) +from tests.integration.util import wait_until + class ConnectionTest(unittest.TestCase): @@ -284,7 +286,9 @@ def make_get_holders(len): def run_heartbeat(self, get_holders_fun, count=2, interval=0.05, timeout=0.05): ch = ConnectionHeartbeat(interval, get_holders_fun, timeout=timeout) - time.sleep(interval * count) + # wait until the thread is started + wait_until(lambda: get_holders_fun.call_count > 0, 0.01, 100) + time.sleep(interval * (count-1)) ch.stop() self.assertTrue(get_holders_fun.call_count) @@ -294,7 +298,7 @@ def test_empty_connections(self, *args): self.run_heartbeat(get_holders, count) - self.assertGreaterEqual(get_holders.call_count, count - 1) # lower bound to account for thread spinup time + self.assertGreaterEqual(get_holders.call_count, count-1) self.assertLessEqual(get_holders.call_count, count) holder = get_holders.return_value[0] holder.get_connections.assert_has_calls([call()] * get_holders.call_count) From 2301f5a20043e63e9c03b51ffec9c0d01e93b492 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 1 Oct 2019 15:00:43 -0400 Subject: [PATCH 1654/2005] fix comment/docs imports --- cassandra/datastax/graph/fluent/__init__.py | 6 +++--- docs/graph_fluent.rst | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cassandra/datastax/graph/fluent/__init__.py b/cassandra/datastax/graph/fluent/__init__.py index 8cc1d1dd98..193272d986 100644 --- a/cassandra/datastax/graph/fluent/__init__.py +++ b/cassandra/datastax/graph/fluent/__init__.py @@ -180,8 +180,8 @@ def traversal_source(session=None, graph_name=None, execution_profile=EXEC_PROFI .. code-block:: python - from dse.cluster import Cluster - from dse_graph import DseGraph + from cassandra.cluster import Cluster + from cassandra.datastax.graph.fluent import DseGraph c = Cluster() session = c.connect() @@ -218,7 +218,7 @@ def create_execution_profile(graph_name): @staticmethod def batch(*args, **kwargs): """ - Returns the :class:`dse_graph.query.TraversalBatch` object allowing to + Returns the :class:`cassandra.datastax.graph.fluent.query.TraversalBatch` object allowing to execute multiple traversals in the same transaction. """ return _DefaultTraversalBatch(*args, **kwargs) diff --git a/docs/graph_fluent.rst b/docs/graph_fluent.rst index 79ca9fa6b6..da5eee8db7 100644 --- a/docs/graph_fluent.rst +++ b/docs/graph_fluent.rst @@ -233,7 +233,7 @@ which is not really verbose and quite annoying to repeat in a code base. Let's c Now, we can use our DSL that is a lot cleaner:: - from dse_graph import DseGraph + from cassandra.datastax.graph.fluent import DseGraph # ... g = DseGraph.traversal_source(session=session, traversal_class=MyAppTraversalsource) From 61f9fbb77823520c50498e258e732b148fef29f9 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 1 Oct 2019 10:08:28 -0500 Subject: [PATCH 1655/2005] Update supported consistencies in our cloud testing --- tests/integration/advanced/cloud/test_cloud.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tests/integration/advanced/cloud/test_cloud.py b/tests/integration/advanced/cloud/test_cloud.py index 17257dadaa..bda9f7f6b0 100644 --- a/tests/integration/advanced/cloud/test_cloud.py +++ b/tests/integration/advanced/cloud/test_cloud.py @@ -36,13 +36,7 @@ DISALLOWED_CONSISTENCIES = [ ConsistencyLevel.ANY, ConsistencyLevel.ONE, - ConsistencyLevel.TWO, - ConsistencyLevel.THREE, - ConsistencyLevel.QUORUM, - ConsistencyLevel.ALL, - ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_ONE, - ConsistencyLevel.SERIAL ] From 27cbfdd482b21fdfda9c1a0e5d7b6c8a17941ae4 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 2 Oct 2019 14:18:25 -0500 Subject: [PATCH 1656/2005] [PYTHON-1143] Ignore test that shouldnt run on windows --- tests/integration/standard/test_cluster.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index e1ccda0bb2..eae7d64b71 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -1103,6 +1103,7 @@ def test_add_profile_timeout(self): else: raise Exception("add_execution_profile didn't timeout after {0} retries".format(max_retry_count)) + @notwindows def test_execute_query_timeout(self): with Cluster() as cluster: session = cluster.connect(wait_for_all_pools=True) From 2b5399d8df910727d3bf1df0c22bea18749cbd5e Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 4 Oct 2019 09:57:46 -0400 Subject: [PATCH 1657/2005] minor docs change about the driver purpose --- README.rst | 7 ++++--- docs/index.rst | 8 ++++---- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/README.rst b/README.rst index c09ba27118..7dbfd08a41 100644 --- a/README.rst +++ b/README.rst @@ -1,10 +1,11 @@ -DataStax Python Driver for Apache Cassandra -=========================================== +DataStax Python Driver for Apache Cassandra and DataStax products +================================================================= .. image:: https://travis-ci.org/datastax/python-driver.png?branch=master :target: https://travis-ci.org/datastax/python-driver -A modern, `feature-rich `_ and highly-tunable Python client library for Apache Cassandra (2.1+) and DataStax Enterprise (4.7+) using exclusively Cassandra's binary protocol and Cassandra Query Language v3. +A modern, `feature-rich `_ and highly-tunable Python client library for Apache Cassandra (2.1+) and +DataStax Enterprise (4.7+) using exclusively Cassandra's binary protocol and Cassandra Query Language v3. The driver supports Python 2.7, 3.4, 3.5, 3.6 and 3.7. diff --git a/docs/index.rst b/docs/index.rst index c3508fd611..8b39dfb183 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,7 +1,7 @@ -Python Cassandra Driver -======================= -A Python client driver for `Apache Cassandra `_. -This driver works exclusively with the Cassandra Query Language v3 (CQL3) +DataStax Python Driver for Apache Cassandra and DataStax products +================================================================= +A Python client driver for `Apache Cassandra `_ and +DataStax products. This driver works exclusively with the Cassandra Query Language v3 (CQL3) and Cassandra's native protocol. Cassandra 2.1+ is supported, including DSE 4.7+. The driver supports Python 2.7, 3.4, 3.5, 3.6 and 3.7. From ea97ce9993e42ba108c6ec756ecc69e23238344c Mon Sep 17 00:00:00 2001 From: nick Date: Mon, 7 Oct 2019 11:50:09 +0200 Subject: [PATCH 1658/2005] Added @cythontest and changelog entry --- CHANGELOG.rst | 1 + tests/integration/simulacron/test_connection.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index adbe0879f9..2e56b13c80 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,6 +5,7 @@ Unreleased Bug Fixes --------- * Connection setup methods prevent using ExecutionProfile in cqlengine (PYTHON-1009) +* Driver deadlock if all connections dropped by heartbeat whilst request in flight and request times out (PYTHON-1044) 3.19.0 ====== diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index 33e68e6a9e..3153683352 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -27,6 +27,7 @@ from cassandra.policies import HostStateListener, RoundRobinPolicy from cassandra.io.asyncorereactor import AsyncoreConnection from tests import connection_class, thread_pool_executor_class +from tests.unit.cython.utils import cythontest from tests.integration import (PROTOCOL_VERSION, requiressimulacron) from tests.integration.util import assert_quiescent_pool_state, late from tests.integration.simulacron import SimulacronBase @@ -178,6 +179,7 @@ def test_callbacks_and_pool_when_oto(self): errback.assert_called_once() callback.assert_not_called() + @cythontest def test_heartbeat_defunct_deadlock(self): """ Ensure that there is no deadlock when request is in-flight and heartbeat defuncts connection From 2c8358f6179e388b3b83b1ef303aa007c936745a Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 7 Oct 2019 13:48:43 -0500 Subject: [PATCH 1659/2005] Remove print statements --- tests/integration/simulacron/test_connection.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index 3153683352..dd7dc261d7 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -210,8 +210,6 @@ def test_heartbeat_defunct_deadlock(self): class PatchedRoundRobinPolicy(RoundRobinPolicy): # Send always to same host def make_query_plan(self, working_keyspace=None, query=None): - print query - print self._live_hosts if query and query.query_string == query_to_prime: return filter(lambda h: h == query_host, self._live_hosts) else: From 6b2b634d5c41551a3b4197a1bcc2f7c00c2037de Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 7 Oct 2019 12:14:11 -0500 Subject: [PATCH 1660/2005] [PYTHON-1121] Fix queryset generation for cqlengine Token instances --- CHANGELOG.rst | 1 + cassandra/cqlengine/columns.py | 2 +- tests/integration/cqlengine/query/test_queryoperators.py | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 2e56b13c80..86f98c80b9 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -6,6 +6,7 @@ Bug Fixes --------- * Connection setup methods prevent using ExecutionProfile in cqlengine (PYTHON-1009) * Driver deadlock if all connections dropped by heartbeat whilst request in flight and request times out (PYTHON-1044) +* Exception when use pk__token__gt filter In python 3.7 (PYTHON-1121) 3.19.0 ====== diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 1656bfb6cf..7c36371ba7 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -1071,7 +1071,7 @@ class _PartitionKeysToken(Column): """ def __init__(self, model): - self.partition_columns = model._partition_keys.values() + self.partition_columns = list(model._partition_keys.values()) super(_PartitionKeysToken, self).__init__(partition_key=True) @property diff --git a/tests/integration/cqlengine/query/test_queryoperators.py b/tests/integration/cqlengine/query/test_queryoperators.py index 7f7c0ac1c5..fd148bafcf 100644 --- a/tests/integration/cqlengine/query/test_queryoperators.py +++ b/tests/integration/cqlengine/query/test_queryoperators.py @@ -94,6 +94,7 @@ def test_token_function(self): # pk__token equality r = TokenTestModel.objects(pk__token=functions.Token(last_token)) self.assertEqual(len(r), 1) + r.all() # Attempt to obtain queryset for results. This has thrown an exception in the past def test_compound_pk_token_function(self): From 623a23d8afa58ff7a2dd964cff0e97eed2698653 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 9 Oct 2019 08:59:54 -0500 Subject: [PATCH 1661/2005] [PYTHON-1121] Fix queryset generation for cqlengine Token instances --- CHANGELOG.rst | 1 + cassandra/cqlengine/columns.py | 2 +- tests/integration/cqlengine/query/test_queryoperators.py | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 3681885ff0..f35f5947e2 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -98,6 +98,7 @@ Bug Fixes * Call ConnectionException with correct kwargs (PYTHON-1117) * Can't connect to clusters built from source because version parsing doesn't handle 'x.y-SNAPSHOT' (PYTHON-1118) * Discovered node doesn´t honor the configured Cluster port on connection (PYTHON-1127) +* Exception when use pk__token__gt filter In python 3.7 (PYTHON-1121) Other ----- diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 1656bfb6cf..7c36371ba7 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -1071,7 +1071,7 @@ class _PartitionKeysToken(Column): """ def __init__(self, model): - self.partition_columns = model._partition_keys.values() + self.partition_columns = list(model._partition_keys.values()) super(_PartitionKeysToken, self).__init__(partition_key=True) @property diff --git a/tests/integration/cqlengine/query/test_queryoperators.py b/tests/integration/cqlengine/query/test_queryoperators.py index 7f7c0ac1c5..fd148bafcf 100644 --- a/tests/integration/cqlengine/query/test_queryoperators.py +++ b/tests/integration/cqlengine/query/test_queryoperators.py @@ -94,6 +94,7 @@ def test_token_function(self): # pk__token equality r = TokenTestModel.objects(pk__token=functions.Token(last_token)) self.assertEqual(len(r), 1) + r.all() # Attempt to obtain queryset for results. This has thrown an exception in the past def test_compound_pk_token_function(self): From 97a91cc79dc62260e57b8fd48386554886ab4d15 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 10 Oct 2019 09:54:02 -0500 Subject: [PATCH 1662/2005] Add a cloud page to documentation (#13) --- docs/cloud.rst | 29 +++++++++++++++++++++++++++++ docs/getting_started.rst | 17 ----------------- docs/index.rst | 4 ++++ 3 files changed, 33 insertions(+), 17 deletions(-) create mode 100644 docs/cloud.rst diff --git a/docs/cloud.rst b/docs/cloud.rst new file mode 100644 index 0000000000..2393a4b107 --- /dev/null +++ b/docs/cloud.rst @@ -0,0 +1,29 @@ +Cloud +----- +Connecting +========== +To connect to a DataStax Apollo cluster: + +1. Download the secure connect bundle from your Apollo account. +2. Connect to your cluster with + +.. code-block:: python + + from cassandra.cluster import Cluster + from cassandra.auth import PlainTextAuthProvider + + cloud_config = { + 'secure_connect_bundle': '/path/to/secure-connect-dbname.zip' + } + auth_provider = PlainTextAuthProvider(username='user', password='pass') + cluster = Cluster(cloud=cloud_config, auth_provider=auth_provider) + session = cluster.connect() + +Apollo Differences +================= +In most circumstances, the client code for interacting with an Apollo cluster will be the same as interacting with any other Cassandra cluster. The exceptions being: + + * A cloud configuration must be passed to a :class:`~.Cluster` instance via the `cloud` attribute (as demonstrated above). + * An SSL connection will be established automatically. Manual SSL configuration is not allowed, and using `ssl_context` or `ssl_options` will result in an exception. + * A :class:`~.Cluster`'s `contact_points` attribute should not be used. The cloud config contains all of the necessary contact information. + * If a consistency level is not specified for an execution profile or query, then :attr:`.ConsistencyLevel.LOCAL_QUORUM` will be used as the default. diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 487d85aaef..2dc32e6499 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -403,20 +403,3 @@ level on that: user3_lookup = user_lookup_stmt.bind([user_id3]) user3_lookup.consistency_level = ConsistencyLevel.ALL user3 = session.execute(user3_lookup) - -Connecting to DataStax Cloud ----------------------------- -1. Download the secure connect bundle from your DataStax Constellation account. -2. Connect to your cloud cluster with - -.. code-block:: python - - from cassandra.cluster import Cluster - from cassandra.auth import PlainTextAuthProvider - - cloud_config = { - 'secure_connect_bundle': '/path/to/secure-connect-dbname.zip' - } - auth_provider = PlainTextAuthProvider(username='user', password='pass') - cluster = Cluster(cloud=cloud_config, auth_provider=auth_provider) - session = cluster.connect() diff --git a/docs/index.rst b/docs/index.rst index 577b443a3d..2c01d0b6d0 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -50,6 +50,9 @@ Contents :doc:`dates_and_times` Some discussion on the driver's approach to working with timestamp, date, time types +:doc:`cloud` + A guide to connecting to Datastax Apollo + :doc:`faq` A collection of Frequently Asked Questions @@ -68,6 +71,7 @@ Contents user_defined_types object_mapper dates_and_times + cloud faq Getting Help From d95acb2bea0f092589728a8a3a378c3d8c7f1e2c Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 10 Oct 2019 14:09:48 -0400 Subject: [PATCH 1663/2005] ngdg --- CHANGELOG.rst | 25 + build.yaml | 14 + cassandra/__init__.py | 2 +- cassandra/cluster.py | 137 +- cassandra/datastax/graph/__init__.py | 3 +- cassandra/datastax/graph/fluent/__init__.py | 137 +- .../datastax/graph/fluent/_predicates.py | 37 +- cassandra/datastax/graph/fluent/_query.py | 112 +- .../datastax/graph/fluent/_serializers.py | 195 ++- cassandra/datastax/graph/graphson.py | 585 +++++++- cassandra/datastax/graph/query.py | 56 +- cassandra/metadata.py | 397 +++++- test-requirements.txt | 3 +- tests/integration/__init__.py | 35 + tests/integration/advanced/__init__.py | 578 +------- tests/integration/advanced/graph/__init__.py | 1192 +++++++++++++++++ .../advanced/graph/fluent/test_graph.py | 656 ++++++--- .../advanced/graph/fluent/test_search.py | 402 ++++-- .../integration/advanced/graph/test_graph.py | 271 ++++ .../advanced/graph/test_graph_cont_paging.py | 78 ++ .../advanced/graph/test_graph_datatype.py | 260 ++++ .../advanced/graph/test_graph_query.py | 575 ++++++++ .../integration/advanced/test_adv_metadata.py | 89 +- tests/integration/advanced/test_auth.py | 9 +- tests/integration/advanced/test_spark.py | 7 +- tests/integration/simulacron/utils.py | 7 +- tests/integration/standard/test_metadata.py | 17 +- tests/unit/advanced/test_graph.py | 20 +- tests/unit/advanced/test_insights.py | 4 +- tests/unit/advanced/test_metadata.py | 141 ++ 30 files changed, 4923 insertions(+), 1121 deletions(-) create mode 100644 tests/integration/advanced/graph/test_graph.py create mode 100644 tests/integration/advanced/graph/test_graph_cont_paging.py create mode 100644 tests/integration/advanced/graph/test_graph_datatype.py create mode 100644 tests/integration/advanced/graph/test_graph_query.py create mode 100644 tests/unit/advanced/test_metadata.py diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 3681885ff0..3fdd3a7893 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,28 @@ +3.22.0 +====== +Unreleased + +Features +-------- + +* Parse new schema metadata in NGDG and generate table edges CQL syntax (PYTHON-996) +* Add GraphSON3 support (PYTHON-788) +* Use GraphSON3 as default for Native graphs (PYTHON-1004) +* Add Tuple and UDT types for native graph (PYTHON-1005) +* Add Duration type for native graph (PYTHON-1000) +* Add gx:ByteBuffer graphson type support for Blob field (PYTHON-1027) +* Enable Paging Through DSE Driver for Gremlin Traversals (PYTHON-1045) +* Provide numerical wrappers to ensure proper graphson schema definition (PYTHON-1051) +* Resolve the row_factory automatically for native graphs (PYTHON-1056) +* Add g:TraversalMetrics/g:Metrics graph deserializers (PYTHON-1057) +* Add g:BulkSet graph deserializers (PYTHON-1060) +* Update Graph Engine names and the way to create a Classic/Native Graph (PYTHON-1090) +* Update Native to Core Graph Engine +* Add graphson3 and native graph support (PYTHON-1039) +* Enable Paging Through DSE Driver for Gremlin Traversals (PYTHON-1045) +* Expose filter predicates for cql collections (PYTHON-1019) +* Add g:TraversalMetrics/Metrics deserializers (PYTHON-1057) + 3.21.0 ====== Unreleased diff --git a/build.yaml b/build.yaml index f94b78e259..1d6f63c274 100644 --- a/build.yaml +++ b/build.yaml @@ -55,6 +55,18 @@ schedules: env_vars: | EVENT_LOOP_MANAGER='libev' +ngdg: + schedule: adhoc + branches: + include: [ngdg_master_ft] + env_vars: | + EVENT_LOOP_MANAGER='libev' + EXCLUDE_LONG=1 + matrix: + exclude: + - python: [2.7, 3.4, 3.6, 3.7] + - cassandra: ['dse-4.8', 'dse-5.0', dse-6.0', 'dse-6.7'] + weekly_master: schedule: 0 10 * * 6 branches: @@ -166,6 +178,7 @@ cassandra: - 'dse-5.1' - 'dse-6.0' - 'dse-6.7' + - 'dse-6.8' env: CYTHON: @@ -177,6 +190,7 @@ build: export JAVA_HOME=$CCM_JAVA_HOME export PATH=$JAVA_HOME/bin:$PATH export PYTHONPATH="" + export CCM_MAX_HEAP_SIZE=1024M # Install latest setuptools pip install --upgrade pip diff --git a/cassandra/__init__.py b/cassandra/__init__.py index b8f4b9af39..2a4bd8f0e6 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 19, 0, 'post0') +__version_info__ = (3, 19, 0, '20190910+labs') __version__ = '.'.join(map(str, __version_info__)) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 1016be72be..b6442fe726 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -83,14 +83,16 @@ from cassandra.marshal import int64_pack from cassandra.timestamps import MonotonicTimestampGenerator from cassandra.compat import Mapping -from cassandra.util import _resolve_contact_points_to_string_map +from cassandra.util import _resolve_contact_points_to_string_map, Version from cassandra.datastax.insights.reporter import MonitorReporter from cassandra.datastax.insights.util import version_supports_insights from cassandra.datastax.graph import (graph_object_row_factory, GraphOptions, GraphSON1Serializer, - GraphProtocol, GraphSON2Serializer, GraphStatement, SimpleGraphStatement) -from cassandra.datastax.graph.query import _request_timeout_key + GraphProtocol, GraphSON2Serializer, GraphStatement, SimpleGraphStatement, + graph_graphson2_row_factory, graph_graphson3_row_factory, + GraphSON3Serializer) +from cassandra.datastax.graph.query import _request_timeout_key, _GraphSONContextRowFactory if six.PY3: long = int @@ -141,6 +143,7 @@ def _is_gevent_monkey_patched(): DEFAULT_MIN_CONNECTIONS_PER_REMOTE_HOST = 1 DEFAULT_MAX_CONNECTIONS_PER_REMOTE_HOST = 2 +_GRAPH_PAGING_MIN_DSE_VERSION = Version('6.8.0') _NOT_SET = object() @@ -395,20 +398,21 @@ class GraphExecutionProfile(ExecutionProfile): def __init__(self, load_balancing_policy=_NOT_SET, retry_policy=None, consistency_level=ConsistencyLevel.LOCAL_ONE, serial_consistency_level=None, - request_timeout=30.0, row_factory=graph_object_row_factory, - graph_options=None): + request_timeout=30.0, row_factory=None, + graph_options=None, continuous_paging_options=_NOT_SET): """ Default execution profile for graph execution. - See :class:`.ExecutionProfile` - for base attributes. + See :class:`.ExecutionProfile` for base attributes. Note that if not explicitly set, + the row_factory and graph_options.graph_protocol are resolved during the query execution. In addition to default parameters shown in the signature, this profile also defaults ``retry_policy`` to :class:`cassandra.policies.NeverRetryPolicy`. """ retry_policy = retry_policy or NeverRetryPolicy() super(GraphExecutionProfile, self).__init__(load_balancing_policy, retry_policy, consistency_level, - serial_consistency_level, request_timeout, row_factory) + serial_consistency_level, request_timeout, row_factory, + continuous_paging_options=continuous_paging_options) self.graph_options = graph_options or GraphOptions(graph_source=b'g', graph_language=b'gremlin-groovy') @@ -417,7 +421,7 @@ class GraphAnalyticsExecutionProfile(GraphExecutionProfile): def __init__(self, load_balancing_policy=None, retry_policy=None, consistency_level=ConsistencyLevel.LOCAL_ONE, serial_consistency_level=None, - request_timeout=3600. * 24. * 7., row_factory=graph_object_row_factory, + request_timeout=3600. * 24. * 7., row_factory=None, graph_options=None): """ Execution profile with timeout and load balancing appropriate for graph analytics queries. @@ -2434,6 +2438,7 @@ def default_serial_consistency_level(self, cl): _profile_manager = None _metrics = None _request_init_callbacks = None + _graph_paging_available = False def __init__(self, cluster, hosts, keyspace=None): self.cluster = cluster @@ -2466,6 +2471,8 @@ def __init__(self, cluster, hosts, keyspace=None): msg += " using keyspace '%s'" % self.keyspace raise NoHostAvailable(msg, [h.address for h in hosts]) + self._graph_paging_available = self._check_graph_paging_available() + cc_host = self.cluster.get_control_connection_host() valid_insights_version = (cc_host and version_supports_insights(cc_host.dse_version)) if self.cluster.monitor_reporting_enabled and valid_insights_version: @@ -2605,18 +2612,31 @@ def execute_graph_async(self, query, parameters=None, trace=False, execution_pro if not isinstance(query, GraphStatement): query = SimpleGraphStatement(query) - execution_profile = self._maybe_get_execution_profile(execution_profile) # look up instance here so we can apply the extended attributes + # Clone and look up instance here so we can resolve and apply the extended attributes + execution_profile = self.execution_profile_clone_update(execution_profile) + + if not hasattr(execution_profile, 'graph_options'): + raise ValueError( + "Execution profile for graph queries must derive from GraphExecutionProfile, and provide graph_options") + + self._resolve_execution_profile_options(execution_profile) + # make sure the graphson context row factory is binded to this cluster try: - options = execution_profile.graph_options.copy() - except AttributeError: - raise ValueError("Execution profile for graph queries must derive from GraphExecutionProfile, and provide graph_options") + if issubclass(execution_profile.row_factory, _GraphSONContextRowFactory): + execution_profile.row_factory = execution_profile.row_factory(self.cluster) + except TypeError: + # issubclass might fail if arg1 is an instance + pass + + # set graph paging if needed + self._maybe_set_graph_paging(execution_profile) graph_parameters = None if parameters: - graph_parameters = self._transform_params(parameters, graph_options=options) + graph_parameters = self._transform_params(parameters, graph_options=execution_profile.graph_options) - custom_payload = options.get_options_map() + custom_payload = execution_profile.graph_options.get_options_map() if execute_as: custom_payload[_proxy_execute_key] = six.b(execute_as) custom_payload[_request_timeout_key] = int64_pack(long(execution_profile.request_timeout * 1000)) @@ -2627,12 +2647,81 @@ def execute_graph_async(self, query, parameters=None, trace=False, execution_pro future.message.query_params = graph_parameters future._protocol_handler = self.client_protocol_handler - if options.is_analytics_source and isinstance(execution_profile.load_balancing_policy, DefaultLoadBalancingPolicy): + if execution_profile.graph_options.is_analytics_source and \ + isinstance(execution_profile.load_balancing_policy, DefaultLoadBalancingPolicy): self._target_analytics_master(future) else: future.send_request() return future + def _maybe_set_graph_paging(self, execution_profile): + graph_paging = execution_profile.continuous_paging_options + if execution_profile.continuous_paging_options is _NOT_SET: + graph_paging = ContinuousPagingOptions() if self._graph_paging_available else None + + execution_profile.continuous_paging_options = graph_paging + + def _check_graph_paging_available(self): + """Verify if we can enable graph paging. This executed only once when the session is created.""" + + if not ProtocolVersion.has_continuous_paging_next_pages(self._protocol_version): + return False + + for host in self.cluster.metadata.all_hosts(): + if host.dse_version is None: + return False + + version = Version(host.dse_version) + if version < _GRAPH_PAGING_MIN_DSE_VERSION: + return False + + return True + + def _resolve_execution_profile_options(self, execution_profile): + """ + Determine the GraphSON protocol and row factory for a graph query. This is useful + to configure automatically the execution profile when executing a query on a + core graph. + If `graph_protocol` is not explicitly specified, the following rules apply: + - Default to GraphProtocol.GRAPHSON_1_0, or GRAPHSON_2_0 if the `graph_language` is not gremlin-groovy. + - If `graph_options.graph_name` is specified and is a Core graph, set GraphSON_3_0. + If `row_factory` is not explicitly specified, the following rules apply: + - Default to graph_object_row_factory. + - If `graph_options.graph_name` is specified and is a Core graph, set graph_graphson3_row_factory. + """ + if execution_profile.graph_options.graph_protocol is not None and \ + execution_profile.row_factory is not None: + return + + graph_options = execution_profile.graph_options + + is_core_graph = False + if graph_options.graph_name: + # graph_options.graph_name is bytes ... + name = graph_options.graph_name.decode('utf-8') + if name in self.cluster.metadata.keyspaces: + ks_metadata = self.cluster.metadata.keyspaces[name] + if ks_metadata.graph_engine == 'Core': + is_core_graph = True + + if is_core_graph: + graph_protocol = GraphProtocol.GRAPHSON_3_0 + row_factory = graph_graphson3_row_factory + else: + if graph_options.graph_language == GraphOptions.DEFAULT_GRAPH_LANGUAGE: + graph_protocol = GraphOptions.DEFAULT_GRAPH_PROTOCOL + row_factory = graph_object_row_factory + else: + # if not gremlin-groovy, GraphSON_2_0 + graph_protocol = GraphProtocol.GRAPHSON_2_0 + row_factory = graph_graphson2_row_factory + + # Only apply if not set explicitly + if graph_options.graph_protocol is None: + graph_options.graph_protocol = graph_protocol + if execution_profile.row_factory is None: + execution_profile.row_factory = row_factory + def _transform_params(self, parameters, graph_options): if not isinstance(parameters, dict): raise ValueError('The parameters must be a dictionary. Unnamed parameters are not allowed.') @@ -2640,12 +2729,16 @@ def _transform_params(self, parameters, graph_options): # Serialize python types to graphson serializer = GraphSON1Serializer if graph_options.graph_protocol == GraphProtocol.GRAPHSON_2_0: - serializer = GraphSON2Serializer - - serialized_parameters = { - p: serializer.serialize(v) - for p, v in six.iteritems(parameters) - } + serializer = GraphSON2Serializer() + elif graph_options.graph_protocol == GraphProtocol.GRAPHSON_3_0: + # only required for core graphs + context = { + 'cluster': self.cluster, + 'graph_name': graph_options.graph_name.decode('utf-8') if graph_options.graph_name else None + } + serializer = GraphSON3Serializer(context) + + serialized_parameters = serializer.serialize(parameters) return [json.dumps(serialized_parameters).encode('utf-8')] def _target_analytics_master(self, future): diff --git a/cassandra/datastax/graph/__init__.py b/cassandra/datastax/graph/__init__.py index 0c03c9249d..d828c7f707 100644 --- a/cassandra/datastax/graph/__init__.py +++ b/cassandra/datastax/graph/__init__.py @@ -17,6 +17,7 @@ from cassandra.datastax.graph.query import ( GraphOptions, GraphProtocol, GraphStatement, SimpleGraphStatement, Result, graph_object_row_factory, single_object_row_factory, - graph_result_row_factory, graph_graphson2_row_factory + graph_result_row_factory, graph_graphson2_row_factory, + graph_graphson3_row_factory ) from cassandra.datastax.graph.graphson import * diff --git a/cassandra/datastax/graph/fluent/__init__.py b/cassandra/datastax/graph/fluent/__init__.py index 193272d986..7d1ba0b60c 100644 --- a/cassandra/datastax/graph/fluent/__init__.py +++ b/cassandra/datastax/graph/fluent/__init__.py @@ -31,29 +31,29 @@ from cassandra.cluster import Session, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT from cassandra.datastax.graph import GraphOptions, GraphProtocol + from cassandra.datastax.graph.query import _GraphSONContextRowFactory from cassandra.datastax.graph.fluent.serializers import ( - GremlinGraphSONReader, - deserializers, - gremlin_deserializers + GremlinGraphSONReaderV2, + GremlinGraphSONReaderV3, + dse_graphson2_deserializers, + gremlin_graphson2_deserializers, + dse_graphson3_deserializers, + gremlin_graphson3_deserializers ) from cassandra.datastax.graph.fluent.query import _DefaultTraversalBatch, _query_from_traversal log = logging.getLogger(__name__) - __all__ = ['BaseGraphRowFactory', 'dse_graphson_reader', 'graphson_reader', 'graph_traversal_row_factory', + __all__ = ['BaseGraphRowFactory', 'graph_traversal_row_factory', 'graph_traversal_dse_object_row_factory', 'DSESessionRemoteGraphConnection', 'DseGraph'] - # Create our custom GraphSONReader/Writer - dse_graphson_reader = GremlinGraphSONReader(deserializer_map=deserializers) - graphson_reader = GremlinGraphSONReader(deserializer_map=gremlin_deserializers) - # Traversal result keys _bulk_key = 'bulk' _result_key = 'result' - class BaseGraphRowFactory(object): + class BaseGraphRowFactory(_GraphSONContextRowFactory): """ Base row factory for graph traversal. This class basically wraps a graphson reader function to handle additional features of Gremlin/DSE @@ -61,37 +61,51 @@ class BaseGraphRowFactory(object): Currently supported: - bulk results + """ + + def __call__(self, column_names, rows): + for row in rows: + parsed_row = self.graphson_reader.readObject(row[0]) + yield parsed_row[_result_key] + bulk = parsed_row.get(_bulk_key, 1) + for _ in range(bulk - 1): + yield copy.deepcopy(parsed_row[_result_key]) - :param graphson_reader: The function used to read the graphson. - Use example:: + class _GremlinGraphSON2RowFactory(BaseGraphRowFactory): + """Row Factory that returns the decoded graphson2.""" + graphson_reader_class = GremlinGraphSONReaderV2 + graphson_reader_kwargs = {'deserializer_map': gremlin_graphson2_deserializers} - my_custom_row_factory = BaseGraphRowFactory(custom_graphson_reader.readObject) - """ - def __init__(self, graphson_reader): - self._graphson_reader = graphson_reader + class _DseGraphSON2RowFactory(BaseGraphRowFactory): + """Row Factory that returns the decoded graphson2 as DSE types.""" + graphson_reader_class = GremlinGraphSONReaderV2 + graphson_reader_kwargs = {'deserializer_map': dse_graphson2_deserializers} - def __call__(self, column_names, rows): - results = [] + gremlin_graphson2_traversal_row_factory = _GremlinGraphSON2RowFactory + # TODO remove in next major + graph_traversal_row_factory = gremlin_graphson2_traversal_row_factory - for row in rows: - parsed_row = self._graphson_reader(row[0]) - bulk = parsed_row.get(_bulk_key, 1) - if bulk > 1: # Avoid deepcopy call if bulk <= 1 - results.extend([copy.deepcopy(parsed_row[_result_key]) - for _ in range(bulk - 1)]) + dse_graphson2_traversal_row_factory = _DseGraphSON2RowFactory + # TODO remove in next major + graph_traversal_dse_object_row_factory = dse_graphson2_traversal_row_factory - results.append(parsed_row[_result_key]) - return results + class _GremlinGraphSON3RowFactory(BaseGraphRowFactory): + """Row Factory that returns the decoded graphson2.""" + graphson_reader_class = GremlinGraphSONReaderV3 + graphson_reader_kwargs = {'deserializer_map': gremlin_graphson3_deserializers} - graph_traversal_row_factory = BaseGraphRowFactory(graphson_reader.readObject) - graph_traversal_row_factory.__doc__ = "Row Factory that returns the decoded graphson." + class _DseGraphSON3RowFactory(BaseGraphRowFactory): + """Row Factory that returns the decoded graphson3 as DSE types.""" + graphson_reader_class = GremlinGraphSONReaderV3 + graphson_reader_kwargs = {'deserializer_map': dse_graphson3_deserializers} - graph_traversal_dse_object_row_factory = BaseGraphRowFactory(dse_graphson_reader.readObject) - graph_traversal_dse_object_row_factory.__doc__ = "Row Factory that returns the decoded graphson as DSE types." + + gremlin_graphson3_traversal_row_factory = _GremlinGraphSON3RowFactory + dse_graphson3_traversal_row_factory = _DseGraphSON3RowFactory class DSESessionRemoteGraphConnection(RemoteConnection): @@ -117,21 +131,35 @@ def __init__(self, session, graph_name=None, execution_profile=EXEC_PROFILE_GRAP self.graph_name = graph_name self.execution_profile = execution_profile - def submit(self, bytecode): + def _traversers_generator(self, traversers): + for t in traversers: + yield Traverser(t) - query = DseGraph.query_from_traversal(bytecode) - ep = self.session.execution_profile_clone_update(self.execution_profile, - row_factory=graph_traversal_row_factory) - graph_options = ep.graph_options.copy() + def submit(self, bytecode): + ep = self.session.execution_profile_clone_update(self.execution_profile) + graph_options = ep.graph_options + graph_options.graph_name = self.graph_name or graph_options.graph_name + # traversal can't use anything else than bytecode graph_options.graph_language = DseGraph.DSE_GRAPH_QUERY_LANGUAGE - if self.graph_name: - graph_options.graph_name = self.graph_name - - ep.graph_options = graph_options - + # We resolve the execution profile options here , to know how what gremlin factory to set + self.session._resolve_execution_profile_options(ep) + + context = None + if graph_options.graph_protocol == GraphProtocol.GRAPHSON_2_0: + row_factory = gremlin_graphson2_traversal_row_factory + elif graph_options.graph_protocol == GraphProtocol.GRAPHSON_3_0: + row_factory = gremlin_graphson3_traversal_row_factory + context = { + 'cluster': self.session.cluster, + 'graph_name': graph_options.graph_name.decode('utf-8') + } + else: + raise ValueError('Unknown graph protocol: {}'.format(graph_options.graph_protocol)) + + ep.row_factory = row_factory + query = DseGraph.query_from_traversal(bytecode, graph_options.graph_protocol, context) traversers = self.session.execute_graph(query, execution_profile=ep) - traversers = [Traverser(t) for t in traversers] - return RemoteTraversal(iter(traversers), TraversalSideEffects()) + return RemoteTraversal(self._traversers_generator(traversers), TraversalSideEffects()) def __str__(self): return "".format(self.graph_name) @@ -149,12 +177,20 @@ class DseGraph(object): Graph query language, Default is 'bytecode-json' (GraphSON). """ + DSE_GRAPH_QUERY_PROTOCOL = GraphProtocol.GRAPHSON_2_0 + """ + Graph query language, Default is GraphProtocol.GRAPHSON_2_0. + """ + @staticmethod - def query_from_traversal(traversal): + def query_from_traversal(traversal, graph_protocol=DSE_GRAPH_QUERY_PROTOCOL, context=None): """ From a GraphTraversal, return a query string based on the language specified in `DseGraph.DSE_GRAPH_QUERY_LANGUAGE`. :param traversal: The GraphTraversal object + :param graph_protocol: The graph protocol. Default is `DseGraph.DSE_GRAPH_QUERY_PROTOCOL`. + :param context: The dict of the serialization context, needed for GraphSON3 (tuple, udt). + e.g: {'cluster': dse_cluster, 'graph_name': name} """ if isinstance(traversal, GraphTraversal): @@ -165,7 +201,7 @@ def query_from_traversal(traversal): log.warning("GraphTraversal session, graph_name and execution_profile are " "only taken into account when executed with TinkerPop.") - return _query_from_traversal(traversal) + return _query_from_traversal(traversal, graph_protocol, context) @staticmethod def traversal_source(session=None, graph_name=None, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT, @@ -201,18 +237,27 @@ def traversal_source(session=None, graph_name=None, execution_profile=EXEC_PROFI return traversal_source @staticmethod - def create_execution_profile(graph_name): + def create_execution_profile(graph_name, graph_protocol=DSE_GRAPH_QUERY_PROTOCOL, **kwargs): """ Creates an ExecutionProfile for GraphTraversal execution. You need to register that execution profile to the cluster by using `cluster.add_execution_profile`. :param graph_name: The graph name + :param graph_protocol: (Optional) The graph protocol, default is `DSE_GRAPH_QUERY_PROTOCOL`. """ - ep = GraphExecutionProfile(row_factory=graph_traversal_dse_object_row_factory, + if graph_protocol == GraphProtocol.GRAPHSON_2_0: + row_factory = dse_graphson2_traversal_row_factory + elif graph_protocol == GraphProtocol.GRAPHSON_3_0: + row_factory = dse_graphson3_traversal_row_factory + else: + raise ValueError('Unknown graph protocol: {}'.format(graph_protocol)) + + ep = GraphExecutionProfile(row_factory=row_factory, graph_options=GraphOptions(graph_name=graph_name, graph_language=DseGraph.DSE_GRAPH_QUERY_LANGUAGE, - graph_protocol=GraphProtocol.GRAPHSON_2_0)) + graph_protocol=graph_protocol), + **kwargs) return ep @staticmethod diff --git a/cassandra/datastax/graph/fluent/_predicates.py b/cassandra/datastax/graph/fluent/_predicates.py index b63dd90043..95bd533d5e 100644 --- a/cassandra/datastax/graph/fluent/_predicates.py +++ b/cassandra/datastax/graph/fluent/_predicates.py @@ -18,7 +18,7 @@ from cassandra.util import Distance -__all__ = ['GeoP', 'TextDistanceP', 'Search', 'GeoUnit', 'Geo'] +__all__ = ['GeoP', 'TextDistanceP', 'Search', 'GeoUnit', 'Geo', 'CqlCollection'] class GeoP(object): @@ -138,6 +138,41 @@ def phrase(value, proximity): return TextDistanceP.phrase(value, proximity) +class CqlCollection(object): + + @staticmethod + def contains(value): + """ + Search for a value inside a cql list/set column. + :param value: the value to look for. + """ + return P('contains', value) + + @staticmethod + def contains_value(value): + """ + Search for a map value. + :param value: the value to look for. + """ + return P('containsValue', value) + + @staticmethod + def contains_key(value): + """ + Search for a map key. + :param value: the value to look for. + """ + return P('containsKey', value) + + @staticmethod + def entry_eq(value): + """ + Search for a map entry. + :param value: the value to look for. + """ + return P('entryEq', value) + + class GeoUnit(object): _EARTH_MEAN_RADIUS_KM = 6371.0087714 _DEGREES_TO_RADIANS = math.pi / 180 diff --git a/cassandra/datastax/graph/fluent/_query.py b/cassandra/datastax/graph/fluent/_query.py index b5d24df05b..bd89046852 100644 --- a/cassandra/datastax/graph/fluent/_query.py +++ b/cassandra/datastax/graph/fluent/_query.py @@ -12,29 +12,104 @@ # See the License for the specific language governing permissions and # limitations under the License. +import six import logging -from cassandra.graph import SimpleGraphStatement +from cassandra.graph import SimpleGraphStatement, GraphProtocol from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT from gremlin_python.process.graph_traversal import GraphTraversal -from gremlin_python.structure.io.graphsonV2d0 import GraphSONWriter +from gremlin_python.structure.io.graphsonV2d0 import GraphSONWriter as GraphSONWriterV2 +from gremlin_python.structure.io.graphsonV3d0 import GraphSONWriter as GraphSONWriterV3 -from cassandra.datastax.graph.fluent.serializers import serializers +from cassandra.datastax.graph.fluent.serializers import GremlinUserTypeIO, \ + dse_graphson2_serializers, dse_graphson3_serializers log = logging.getLogger(__name__) -graphson_writer = GraphSONWriter(serializer_map=serializers) __all__ = ['TraversalBatch', '_query_from_traversal', '_DefaultTraversalBatch'] -def _query_from_traversal(traversal): +class _GremlinGraphSONWriterAdapter(object): + + def __init__(self, context, **kwargs): + super(_GremlinGraphSONWriterAdapter, self).__init__(**kwargs) + self.context = context + self.user_types = None + + def serialize(self, value, _): + return self.toDict(value) + + def get_serializer(self, value): + serializer = None + try: + serializer = self.serializers[type(value)] + except KeyError: + for key, ser in self.serializers.items(): + if isinstance(value, key): + serializer = ser + + if self.context: + # Check if UDT + if self.user_types is None: + try: + user_types = self.context['cluster']._user_types[self.context['graph_name']] + self.user_types = dict(map(reversed, six.iteritems(user_types))) + except KeyError: + self.user_types = {} + + # Custom detection to map a namedtuple to udt + if (tuple in self.serializers and serializer is self.serializers[tuple] and hasattr(value, '_fields') or + (not serializer and type(value) in self.user_types)): + serializer = GremlinUserTypeIO + + if serializer: + try: + # A serializer can have specialized serializers (e.g for Int32 and Int64, so value dependant) + serializer = serializer.get_specialized_serializer(value) + except AttributeError: + pass + + return serializer + + def toDict(self, obj): + serializer = self.get_serializer(obj) + return serializer.dictify(obj, self) if serializer else obj + + def definition(self, value): + serializer = self.get_serializer(value) + return serializer.definition(value, self) + + +class GremlinGraphSON2Writer(_GremlinGraphSONWriterAdapter, GraphSONWriterV2): + pass + + +class GremlinGraphSON3Writer(_GremlinGraphSONWriterAdapter, GraphSONWriterV3): + pass + + +graphson2_writer = GremlinGraphSON2Writer +graphson3_writer = GremlinGraphSON3Writer + + +def _query_from_traversal(traversal, graph_protocol, context=None): """ From a GraphTraversal, return a query string. :param traversal: The GraphTraversal object + :param graphson_protocol: The graph protocol to determine the output format. """ + if graph_protocol == GraphProtocol.GRAPHSON_2_0: + graphson_writer = graphson2_writer(context, serializer_map=dse_graphson2_serializers) + elif graph_protocol == GraphProtocol.GRAPHSON_3_0: + if context is None: + raise ValueError('Missing context for GraphSON3 serialization requires.') + graphson_writer = graphson3_writer(context, serializer_map=dse_graphson3_serializers) + else: + raise ValueError('Unknown graph protocol: {}'.format(graph_protocol)) + try: query = graphson_writer.writeObject(traversal) except Exception: @@ -87,9 +162,11 @@ def execute(self): """ raise NotImplementedError() - def as_graph_statement(self): + def as_graph_statement(self, graph_protocol=GraphProtocol.GRAPHSON_2_0): """ Return the traversal batch as GraphStatement. + + :param graph_protocol: The graph protocol for the GraphSONWriter. Default is GraphProtocol.GRAPHSON_2_0. """ raise NotImplementedError() @@ -115,32 +192,35 @@ def __init__(self, *args, **kwargs): super(_DefaultTraversalBatch, self).__init__(*args, **kwargs) self._traversals = [] - @property - def _query(self): - return u"[{0}]".format(','.join(self._traversals)) - def add(self, traversal): if not isinstance(traversal, GraphTraversal): raise ValueError('traversal should be a gremlin GraphTraversal') - query = _query_from_traversal(traversal) - self._traversals.append(query) - + self._traversals.append(traversal) return self def add_all(self, traversals): for traversal in traversals: self.add(traversal) - def as_graph_statement(self): - return SimpleGraphStatement(self._query) + def as_graph_statement(self, graph_protocol=GraphProtocol.GRAPHSON_2_0, context=None): + statements = [_query_from_traversal(t, graph_protocol, context) for t in self._traversals] + query = u"[{0}]".format(','.join(statements)) + return SimpleGraphStatement(query) def execute(self): if self._session is None: raise ValueError('A DSE Session must be provided to execute the traversal batch.') execution_profile = self._execution_profile if self._execution_profile else EXEC_PROFILE_GRAPH_DEFAULT - return self._session.execute_graph(self._query, execution_profile=execution_profile) + graph_options = self._session.get_execution_profile(execution_profile).graph_options + context = { + 'cluster': self._session.cluster, + 'graph_name': graph_options.graph_name + } + statement = self.as_graph_statement(graph_options.graph_protocol, context=context) \ + if graph_options.graph_protocol else self.as_graph_statement(context=context) + return self._session.execute_graph(statement, execution_profile=execution_profile) def clear(self): del self._traversals[:] diff --git a/cassandra/datastax/graph/fluent/_serializers.py b/cassandra/datastax/graph/fluent/_serializers.py index 56591603af..db8e715ef8 100644 --- a/cassandra/datastax/graph/fluent/_serializers.py +++ b/cassandra/datastax/graph/fluent/_serializers.py @@ -17,33 +17,89 @@ import six from gremlin_python.structure.io.graphsonV2d0 import ( - GraphSONReader, - GraphSONUtil, - VertexDeserializer, - VertexPropertyDeserializer, - PropertyDeserializer, - EdgeDeserializer, - PathDeserializer + GraphSONReader as GraphSONReaderV2, + GraphSONUtil as GraphSONUtil, # no difference between v2 and v3 + VertexDeserializer as VertexDeserializerV2, + VertexPropertyDeserializer as VertexPropertyDeserializerV2, + PropertyDeserializer as PropertyDeserializerV2, + EdgeDeserializer as EdgeDeserializerV2, + PathDeserializer as PathDeserializerV2 ) -from cassandra.datastax.graph.graphson import ( - GraphSON2Serializer, - GraphSON2Deserializer +from gremlin_python.structure.io.graphsonV3d0 import ( + GraphSONReader as GraphSONReaderV3, + VertexDeserializer as VertexDeserializerV3, + VertexPropertyDeserializer as VertexPropertyDeserializerV3, + PropertyDeserializer as PropertyDeserializerV3, + EdgeDeserializer as EdgeDeserializerV3, + PathDeserializer as PathDeserializerV3 ) +try: + from gremlin_python.structure.io.graphsonV2d0 import ( + TraversalMetricsDeserializer as TraversalMetricsDeserializerV2, + MetricsDeserializer as MetricsDeserializerV2 + ) + from gremlin_python.structure.io.graphsonV3d0 import ( + TraversalMetricsDeserializer as TraversalMetricsDeserializerV3, + MetricsDeserializer as MetricsDeserializerV3 + ) +except ImportError: + TraversalMetricsDeserializerV2 = MetricsDeserializerV2 = None + TraversalMetricsDeserializerV3 = MetricsDeserializerV3 = None + +from cassandra.graph import ( + GraphSON2Serializer, + GraphSON2Deserializer, + GraphSON3Serializer, + GraphSON3Deserializer +) +from cassandra.graph.graphson import UserTypeIO, TypeWrapperTypeIO from cassandra.datastax.graph.fluent.predicates import GeoP, TextDistanceP from cassandra.util import Distance __all__ = ['GremlinGraphSONReader', 'GeoPSerializer', 'TextDistancePSerializer', - 'DistanceIO', 'gremlin_deserializers', 'deserializers', 'serializers'] + 'DistanceIO', 'gremlin_deserializers', 'deserializers', 'serializers', + 'GremlinGraphSONReaderV2', 'GremlinGraphSONReaderV3', 'dse_graphson2_serializers', + 'dse_graphson2_deserializers', 'dse_graphson3_serializers', 'dse_graphson3_deserializers', + 'gremlin_graphson2_deserializers', 'gremlin_graphson3_deserializers', 'GremlinUserTypeIO'] class _GremlinGraphSONTypeSerializer(object): + TYPE_KEY = "@type" + VALUE_KEY = "@value" + serializer = None - @classmethod - def dictify(cls, v, _): - return GraphSON2Serializer.serialize(v) + def __init__(self, serializer): + self.serializer = serializer + + def dictify(self, v, writer): + value = self.serializer.serialize(v, writer) + if self.serializer is TypeWrapperTypeIO: + graphson_base_type = v.type_io.graphson_base_type + graphson_type = v.type_io.graphson_type + else: + graphson_base_type = self.serializer.graphson_base_type + graphson_type = self.serializer.graphson_type + + if graphson_base_type is None: + out = value + else: + out = {self.TYPE_KEY: graphson_type} + if value is not None: + out[self.VALUE_KEY] = value + + return out + + def definition(self, value, writer=None): + return self.serializer.definition(value, writer) + + def get_specialized_serializer(self, value): + ser = self.serializer.get_specialized_serializer(value) + if ser is not self.serializer: + return _GremlinGraphSONTypeSerializer(ser) + return self class _GremlinGraphSONTypeDeserializer(object): @@ -54,22 +110,44 @@ def __init__(self, deserializer): self.deserializer = deserializer def objectify(self, v, reader): - return self.deserializer.deserialize(v, reader=reader) + return self.deserializer.deserialize(v, reader) -def _make_gremlin_deserializer(graphson_type): +def _make_gremlin_graphson2_deserializer(graphson_type): return _GremlinGraphSONTypeDeserializer( GraphSON2Deserializer.get_deserializer(graphson_type.graphson_type) ) -class GremlinGraphSONReader(GraphSONReader): +def _make_gremlin_graphson3_deserializer(graphson_type): + return _GremlinGraphSONTypeDeserializer( + GraphSON3Deserializer.get_deserializer(graphson_type.graphson_type) + ) + + +class _GremlinGraphSONReader(object): """Gremlin GraphSONReader Adapter, required to use gremlin types""" + context = None + + def __init__(self, context, deserializer_map=None): + self.context = context + super(_GremlinGraphSONReader, self).__init__(deserializer_map) + def deserialize(self, obj): return self.toObject(obj) +class GremlinGraphSONReaderV2(_GremlinGraphSONReader, GraphSONReaderV2): + pass + +# TODO remove next major +GremlinGraphSONReader = GremlinGraphSONReaderV2 + +class GremlinGraphSONReaderV3(_GremlinGraphSONReader, GraphSONReaderV3): + pass + + class GeoPSerializer(object): @classmethod def dictify(cls, p, writer): @@ -100,32 +178,85 @@ def dictify(cls, v, _): return GraphSONUtil.typedValue('Distance', six.text_type(v), prefix='dse') -serializers = OrderedDict([ - (t, _GremlinGraphSONTypeSerializer) - for t in six.iterkeys(GraphSON2Serializer.get_type_definitions()) +GremlinUserTypeIO = _GremlinGraphSONTypeSerializer(UserTypeIO) + +# GraphSON2 +dse_graphson2_serializers = OrderedDict([ + (t, _GremlinGraphSONTypeSerializer(s)) + for t, s in six.iteritems(GraphSON2Serializer.get_type_definitions()) ]) -# Predicates -serializers.update(OrderedDict([ +dse_graphson2_serializers.update(OrderedDict([ (Distance, DistanceIO), (GeoP, GeoPSerializer), (TextDistanceP, TextDistancePSerializer) ])) -deserializers = { - k: _make_gremlin_deserializer(v) +# TODO remove next major, this is just in case someone was using it +serializers = dse_graphson2_serializers + +dse_graphson2_deserializers = { + k: _make_gremlin_graphson2_deserializer(v) for k, v in six.iteritems(GraphSON2Deserializer.get_type_definitions()) } -deserializers.update({ +dse_graphson2_deserializers.update({ "dse:Distance": DistanceIO, }) -gremlin_deserializers = deserializers.copy() -gremlin_deserializers.update({ - 'g:Vertex': VertexDeserializer, - 'g:VertexProperty': VertexPropertyDeserializer, - 'g:Edge': EdgeDeserializer, - 'g:Property': PropertyDeserializer, - 'g:Path': PathDeserializer +# TODO remove next major, this is just in case someone was using it +deserializers = dse_graphson2_deserializers + +gremlin_graphson2_deserializers = dse_graphson2_deserializers.copy() +gremlin_graphson2_deserializers.update({ + 'g:Vertex': VertexDeserializerV2, + 'g:VertexProperty': VertexPropertyDeserializerV2, + 'g:Edge': EdgeDeserializerV2, + 'g:Property': PropertyDeserializerV2, + 'g:Path': PathDeserializerV2 }) + +if TraversalMetricsDeserializerV2: + gremlin_graphson2_deserializers.update({ + 'g:TraversalMetrics': TraversalMetricsDeserializerV2, + 'g:lMetrics': MetricsDeserializerV2 + }) + +# TODO remove next major, this is just in case someone was using it +gremlin_deserializers = gremlin_graphson2_deserializers + +# GraphSON3 +dse_graphson3_serializers = OrderedDict([ + (t, _GremlinGraphSONTypeSerializer(s)) + for t, s in six.iteritems(GraphSON3Serializer.get_type_definitions()) +]) + +dse_graphson3_serializers.update(OrderedDict([ + (Distance, DistanceIO), + (GeoP, GeoPSerializer), + (TextDistanceP, TextDistancePSerializer) +])) + +dse_graphson3_deserializers = { + k: _make_gremlin_graphson3_deserializer(v) + for k, v in six.iteritems(GraphSON3Deserializer.get_type_definitions()) +} + +dse_graphson3_deserializers.update({ + "dse:Distance": DistanceIO +}) + +gremlin_graphson3_deserializers = dse_graphson3_deserializers.copy() +gremlin_graphson3_deserializers.update({ + 'g:Vertex': VertexDeserializerV3, + 'g:VertexProperty': VertexPropertyDeserializerV3, + 'g:Edge': EdgeDeserializerV3, + 'g:Property': PropertyDeserializerV3, + 'g:Path': PathDeserializerV3 +}) + +if TraversalMetricsDeserializerV3: + gremlin_graphson3_deserializers.update({ + 'g:TraversalMetrics': TraversalMetricsDeserializerV3, + 'g:Metrics': MetricsDeserializerV3 + }) diff --git a/cassandra/datastax/graph/graphson.py b/cassandra/datastax/graph/graphson.py index 620adf045e..8419c7992b 100644 --- a/cassandra/datastax/graph/graphson.py +++ b/cassandra/datastax/graph/graphson.py @@ -19,52 +19,73 @@ import json from decimal import Decimal from collections import OrderedDict +import logging +import itertools +from functools import partial import six -if six.PY3: +try: import ipaddress +except: + ipaddress = None -from cassandra.util import Polygon, Point, LineString + +from cassandra.cqltypes import cql_types_from_string +from cassandra.metadata import UserType +from cassandra.util import Polygon, Point, LineString, Duration from cassandra.datastax.graph.types import Vertex, VertexProperty, Edge, Path __all__ = ['GraphSON1Serializer', 'GraphSON1Deserializer', 'GraphSON1TypeDeserializer', - 'GraphSON2Serializer', 'GraphSON2Deserializer', - 'GraphSON2Reader', 'BooleanTypeIO', 'Int16TypeIO', 'Int32TypeIO', 'DoubleTypeIO', + 'GraphSON2Serializer', 'GraphSON2Deserializer', 'GraphSON2Reader', + 'GraphSON3Serializer', 'GraphSON3Deserializer', 'GraphSON3Reader', + 'to_bigint', 'to_int', 'to_double', 'to_float', 'to_smallint', + 'BooleanTypeIO', 'Int16TypeIO', 'Int32TypeIO', 'DoubleTypeIO', 'FloatTypeIO', 'UUIDTypeIO', 'BigDecimalTypeIO', 'DurationTypeIO', 'InetTypeIO', 'InstantTypeIO', 'LocalDateTypeIO', 'LocalTimeTypeIO', 'Int64TypeIO', 'BigIntegerTypeIO', - 'LocalDateTypeIO', 'PolygonTypeIO', 'PointTypeIO', 'LineStringTypeIO', 'BlobTypeIO'] + 'LocalDateTypeIO', 'PolygonTypeIO', 'PointTypeIO', 'LineStringTypeIO', 'BlobTypeIO', + 'GraphSON3Serializer', 'GraphSON3Deserializer', 'UserTypeIO', 'TypeWrapperTypeIO'] """ Supported types: -DSE Graph GraphSON 2.0 Python Driver ------------- | -------------- | ------------ -text | ------ | str -boolean | g:Boolean | bool -bigint | g:Int64 | long -int | g:Int32 | int -double | g:Double | float -float | g:Float | float -uuid | g:UUID | UUID -bigdecimal | gx:BigDecimal | Decimal -duration | gx:Duration | timedelta -inet | gx:InetAddress | str (unicode), IPV4Address/IPV6Address (PY3) -timestamp | gx:Instant | datetime.datetime -date | gx:LocalDate | datetime.date -time | gx:LocalTime | datetime.time -smallint | gx:Int16 | int -varint | gx:BigInteger | long -date | gx:LocalDate | Date -polygon | dse:Polygon | Polygon -point | dse:Point | Point -linestring | dse:LineString | LineString -blob | dse:Blob | bytearray, buffer (PY2), memoryview (PY3), bytes (PY3) +DSE Graph GraphSON 2.0 GraphSON 3.0 | Python Driver +------------ | -------------- | -------------- | ------------ +text | string | string | str +boolean | g:Boolean | g:Boolean | bool +bigint | g:Int64 | g:Int64 | long +int | g:Int32 | g:Int32 | int +double | g:Double | g:Double | float +float | g:Float | g:Float | float +uuid | g:UUID | g:UUID | UUID +bigdecimal | gx:BigDecimal | gx:BigDecimal | Decimal +duration | gx:Duration | N/A | timedelta (Classic graph only) +DSE Duration | N/A | dse:Duration | Duration (Core graph only) +inet | gx:InetAddress | gx:InetAddress | str (unicode), IPV4Address/IPV6Address (PY3) +timestamp | gx:Instant | gx:Instant | datetime.datetime +date | gx:LocalDate | gx:LocalDate | datetime.date +time | gx:LocalTime | gx:LocalTime | datetime.time +smallint | gx:Int16 | gx:Int16 | int +varint | gx:BigInteger | gx:BigInteger | long +date | gx:LocalDate | gx:LocalDate | Date +polygon | dse:Polygon | dse:Polygon | Polygon +point | dse:Point | dse:Point | Point +linestring | dse:Linestring | dse:LineString | LineString +blob | dse:Blob | dse:Blob | bytearray, buffer (PY2), memoryview (PY3), bytes (PY3) +blob | gx:ByteBuffer | gx:ByteBuffer | bytearray, buffer (PY2), memoryview (PY3), bytes (PY3) +list | N/A | g:List | list (Core graph only) +map | N/A | g:Map | dict (Core graph only) +set | N/A | g:Set | set or list (Core graph only) + Can return a list due to numerical values returned by Java +tuple | N/A | dse:Tuple | tuple (Core graph only) +udt | N/A | dse:UDT | class or namedtuple (Core graph only) """ MAX_INT32 = 2 ** 32 - 1 MIN_INT32 = -2 ** 31 +log = logging.getLogger(__name__) + class _GraphSONTypeType(type): """GraphSONType metaclass, required to create a class property.""" @@ -80,9 +101,14 @@ class GraphSONTypeIO(object): prefix = 'g' graphson_base_type = None + cql_type = None + + @classmethod + def definition(cls, value, writer=None): + return {'cqlType': cls.cql_type} @classmethod - def serialize(cls, value): + def serialize(cls, value, writer=None): return six.text_type(value) @classmethod @@ -94,18 +120,23 @@ def get_specialized_serializer(cls, value): return cls +class TextTypeIO(GraphSONTypeIO): + cql_type = 'text' + + class BooleanTypeIO(GraphSONTypeIO): graphson_base_type = 'Boolean' + cql_type = 'boolean' @classmethod - def serialize(cls, value): + def serialize(cls, value, writer=None): return bool(value) class IntegerTypeIO(GraphSONTypeIO): @classmethod - def serialize(cls, value): + def serialize(cls, value, writer=None): return value @classmethod @@ -119,14 +150,17 @@ def get_specialized_serializer(cls, value): class Int16TypeIO(IntegerTypeIO): prefix = 'gx' graphson_base_type = 'Int16' + cql_type = 'smallint' class Int32TypeIO(IntegerTypeIO): graphson_base_type = 'Int32' + cql_type = 'int' class Int64TypeIO(IntegerTypeIO): graphson_base_type = 'Int64' + cql_type = 'bigint' @classmethod def deserialize(cls, value, reader=None): @@ -137,6 +171,11 @@ def deserialize(cls, value, reader=None): class FloatTypeIO(GraphSONTypeIO): graphson_base_type = 'Float' + cql_type = 'float' + + @classmethod + def serialize(cls, value, writer=None): + return value @classmethod def deserialize(cls, value, reader=None): @@ -145,6 +184,7 @@ def deserialize(cls, value, reader=None): class DoubleTypeIO(FloatTypeIO): graphson_base_type = 'Double' + cql_type = 'double' class BigIntegerTypeIO(IntegerTypeIO): @@ -157,9 +197,10 @@ class LocalDateTypeIO(GraphSONTypeIO): prefix = 'gx' graphson_base_type = 'LocalDate' + cql_type = 'date' @classmethod - def serialize(cls, value): + def serialize(cls, value, writer=None): return value.isoformat() @classmethod @@ -170,20 +211,14 @@ def deserialize(cls, value, reader=None): # negative date return value - @classmethod - def get_specialized_serializer(cls, value): - if isinstance(value, datetime.datetime): - return InstantTypeIO - - return cls - class InstantTypeIO(GraphSONTypeIO): prefix = 'gx' graphson_base_type = 'Instant' + cql_type = 'timestamp' @classmethod - def serialize(cls, value): + def serialize(cls, value, writer=None): if isinstance(value, datetime.datetime): value = datetime.datetime(*value.utctimetuple()[:6]).replace(microsecond=value.microsecond) else: @@ -209,9 +244,10 @@ class LocalTimeTypeIO(GraphSONTypeIO): prefix = 'gx' graphson_base_type = 'LocalTime' + cql_type = 'time' @classmethod - def serialize(cls, value): + def serialize(cls, value, writer=None): return value.strftime(cls.FORMATS[2]) @classmethod @@ -233,9 +269,10 @@ def deserialize(cls, value, reader=None): class BlobTypeIO(GraphSONTypeIO): prefix = 'dse' graphson_base_type = 'Blob' + cql_type = 'blob' @classmethod - def serialize(cls, value): + def serialize(cls, value, writer=None): value = base64.b64encode(value) if six.PY3: value = value.decode('utf-8') @@ -246,8 +283,14 @@ def deserialize(cls, value, reader=None): return bytearray(base64.b64decode(value)) +class ByteBufferTypeIO(BlobTypeIO): + prefix = 'gx' + graphson_base_type = 'ByteBuffer' + + class UUIDTypeIO(GraphSONTypeIO): graphson_base_type = 'UUID' + cql_type = 'uuid' @classmethod def deserialize(cls, value, reader=None): @@ -257,6 +300,7 @@ def deserialize(cls, value, reader=None): class BigDecimalTypeIO(GraphSONTypeIO): prefix = 'gx' graphson_base_type = 'BigDecimal' + cql_type = 'bigdecimal' @classmethod def deserialize(cls, value, reader=None): @@ -266,6 +310,7 @@ def deserialize(cls, value, reader=None): class DurationTypeIO(GraphSONTypeIO): prefix = 'gx' graphson_base_type = 'Duration' + cql_type = 'duration' _duration_regex = re.compile(r""" ^P((?P\d+)D)? @@ -280,7 +325,7 @@ class DurationTypeIO(GraphSONTypeIO): _seconds_in_day = 24 * _seconds_in_hour @classmethod - def serialize(cls, value): + def serialize(cls, value, writer=None): total_seconds = int(value.total_seconds()) days, total_seconds = divmod(total_seconds, cls._seconds_in_day) hours, total_seconds = divmod(total_seconds, cls._seconds_in_hour) @@ -303,9 +348,47 @@ def deserialize(cls, value, reader=None): minutes=duration['minutes'], seconds=duration['seconds']) +class DseDurationTypeIO(GraphSONTypeIO): + prefix = 'dse' + graphson_base_type = 'Duration' + cql_type = 'duration' + + @classmethod + def serialize(cls, value, writer=None): + return { + 'months': value.months, + 'days': value.days, + 'nanos': value.nanoseconds + } + + @classmethod + def deserialize(cls, value, reader=None): + return Duration( + reader.deserialize(value['months']), + reader.deserialize(value['days']), + reader.deserialize(value['nanos']) + ) + + +class TypeWrapperTypeIO(GraphSONTypeIO): + + @classmethod + def definition(cls, value, writer=None): + return {'cqlType': value.type_io.cql_type} + + @classmethod + def serialize(cls, value, writer=None): + return value.type_io.serialize(value.value) + + @classmethod + def deserialize(cls, value, reader=None): + return value.type_io.deserialize(value.value) + + class PointTypeIO(GraphSONTypeIO): prefix = 'dse' graphson_base_type = 'Point' + cql_type = "org.apache.cassandra.db.marshal.PointType" @classmethod def deserialize(cls, value, reader=None): @@ -315,6 +398,7 @@ def deserialize(cls, value, reader=None): class LineStringTypeIO(GraphSONTypeIO): prefix = 'dse' graphson_base_type = 'LineString' + cql_type = "org.apache.cassandra.db.marshal.LineStringType" @classmethod def deserialize(cls, value, reader=None): @@ -324,6 +408,7 @@ def deserialize(cls, value, reader=None): class PolygonTypeIO(GraphSONTypeIO): prefix = 'dse' graphson_base_type = 'Polygon' + cql_type = "org.apache.cassandra.db.marshal.PolygonType" @classmethod def deserialize(cls, value, reader=None): @@ -333,6 +418,7 @@ def deserialize(cls, value, reader=None): class InetTypeIO(GraphSONTypeIO): prefix = 'gx' graphson_base_type = 'InetAddress' + cql_type = 'inet' class VertexTypeIO(GraphSONTypeIO): @@ -397,13 +483,268 @@ class PathTypeIO(GraphSONTypeIO): @classmethod def deserialize(cls, value, reader=None): - labels = [set(label) for label in value['labels']] - objects = [reader.deserialize(obj) for obj in value['objects']] + labels = [set(label) for label in reader.deserialize(value['labels'])] + objects = [obj for obj in reader.deserialize(value['objects'])] p = Path(labels, []) p.objects = objects # avoid the object processing in Path.__init__ return p +class TraversalMetricsTypeIO(GraphSONTypeIO): + graphson_base_type = 'TraversalMetrics' + + @classmethod + def deserialize(cls, value, reader=None): + return reader.deserialize(value) + + +class MetricsTypeIO(GraphSONTypeIO): + graphson_base_type = 'Metrics' + + @classmethod + def deserialize(cls, value, reader=None): + return reader.deserialize(value) + + +class JsonMapTypeIO(GraphSONTypeIO): + """In GraphSON2, dict are simply serialized as json map""" + + @classmethod + def serialize(cls, value, writer=None): + out = {} + for k, v in six.iteritems(value): + out[k] = writer.serialize(v, writer) + + return out + + +class MapTypeIO(GraphSONTypeIO): + """In GraphSON3, dict has its own type""" + + graphson_base_type = 'Map' + cql_type = 'map' + + @classmethod + def definition(cls, value, writer=None): + out = OrderedDict([('cqlType', cls.cql_type)]) + out['definition'] = [] + for k, v in six.iteritems(value): + # we just need the first pair to write the def + out['definition'].append(writer.definition(k)) + out['definition'].append(writer.definition(v)) + break + return out + + @classmethod + def serialize(cls, value, writer=None): + out = [] + for k, v in six.iteritems(value): + out.append(writer.serialize(k, writer)) + out.append(writer.serialize(v, writer)) + + return out + + @classmethod + def deserialize(cls, value, reader=None): + out = {} + a, b = itertools.tee(value) + for key, val in zip( + itertools.islice(a, 0, None, 2), + itertools.islice(b, 1, None, 2) + ): + out[reader.deserialize(key)] = reader.deserialize(val) + return out + + +class ListTypeIO(GraphSONTypeIO): + """In GraphSON3, list has its own type""" + + graphson_base_type = 'List' + cql_type = 'list' + + @classmethod + def definition(cls, value, writer=None): + out = OrderedDict([('cqlType', cls.cql_type)]) + out['definition'] = [] + if value: + out['definition'].append(writer.definition(value[0])) + return out + + @classmethod + def serialize(cls, value, writer=None): + return [writer.serialize(v, writer) for v in value] + + @classmethod + def deserialize(cls, value, reader=None): + return [reader.deserialize(obj) for obj in value] + + +class SetTypeIO(GraphSONTypeIO): + """In GraphSON3, set has its own type""" + + graphson_base_type = 'Set' + cql_type = 'set' + + @classmethod + def definition(cls, value, writer=None): + out = OrderedDict([('cqlType', cls.cql_type)]) + out['definition'] = [] + for v in value: + # we only take into account the first value for the definition + out['definition'].append(writer.definition(v)) + break + return out + + @classmethod + def serialize(cls, value, writer=None): + return [writer.serialize(v, writer) for v in value] + + @classmethod + def deserialize(cls, value, reader=None): + lst = [reader.deserialize(obj) for obj in value] + + s = set(lst) + if len(s) != len(lst): + log.warning("Coercing g:Set to list due to numerical values returned by Java. " + "See TINKERPOP-1844 for details.") + return lst + + return s + + +class BulkSetTypeIO(GraphSONTypeIO): + graphson_base_type = "BulkSet" + + @classmethod + def deserialize(cls, value, reader=None): + out = [] + + a, b = itertools.tee(value) + for val, bulk in zip( + itertools.islice(a, 0, None, 2), + itertools.islice(b, 1, None, 2) + ): + val = reader.deserialize(val) + bulk = reader.deserialize(bulk) + for n in range(bulk): + out.append(val) + + return out + + +class TupleTypeIO(GraphSONTypeIO): + prefix = 'dse' + graphson_base_type = 'Tuple' + cql_type = 'tuple' + + @classmethod + def definition(cls, value, writer=None): + out = OrderedDict() + out['cqlType'] = cls.cql_type + serializers = [writer.get_serializer(s) for s in value] + out['definition'] = [s.definition(v, writer) for v, s in zip(value, serializers)] + return out + + @classmethod + def serialize(cls, value, writer=None): + out = cls.definition(value, writer) + out['value'] = [writer.serialize(v, writer) for v in value] + return out + + @classmethod + def deserialize(cls, value, reader=None): + return tuple(reader.deserialize(obj) for obj in value['value']) + + +class UserTypeIO(GraphSONTypeIO): + prefix = 'dse' + graphson_base_type = 'UDT' + cql_type = 'udt' + + FROZEN_REMOVAL_REGEX = re.compile(r'frozen<"*([^"]+)"*>') + + @classmethod + def cql_types_from_string(cls, typ): + # sanitizing: remove frozen references and double quotes... + return cql_types_from_string( + re.sub(cls.FROZEN_REMOVAL_REGEX, r'\1', typ) + ) + + @classmethod + def get_udt_definition(cls, value, writer): + user_type_name = writer.user_types[type(value)] + keyspace = writer.context['graph_name'] + return writer.context['cluster'].metadata.keyspaces[keyspace].user_types[user_type_name] + + @classmethod + def is_collection(cls, typ): + return typ in ['list', 'tuple', 'map', 'set'] + + @classmethod + def is_udt(cls, typ, writer): + keyspace = writer.context['graph_name'] + if keyspace in writer.context['cluster'].metadata.keyspaces: + return typ in writer.context['cluster'].metadata.keyspaces[keyspace].user_types + return False + + @classmethod + def field_definition(cls, types, writer, name=None): + """ + Build the udt field definition. This is required when we have a complex udt type. + """ + index = -1 + out = [OrderedDict() if name is None else OrderedDict([('fieldName', name)])] + + while types: + index += 1 + typ = types.pop(0) + if index > 0: + out.append(OrderedDict()) + + if cls.is_udt(typ, writer): + keyspace = writer.context['graph_name'] + udt = writer.context['cluster'].metadata.keyspaces[keyspace].user_types[typ] + out[index].update(cls.definition(udt, writer)) + elif cls.is_collection(typ): + out[index]['cqlType'] = typ + definition = cls.field_definition(types, writer) + out[index]['definition'] = definition if isinstance(definition, list) else [definition] + else: + out[index]['cqlType'] = typ + + return out if len(out) > 1 else out[0] + + @classmethod + def definition(cls, value, writer=None): + udt = value if isinstance(value, UserType) else cls.get_udt_definition(value, writer) + return OrderedDict([ + ('cqlType', cls.cql_type), + ('keyspace', udt.keyspace), + ('name', udt.name), + ('definition', [ + cls.field_definition(cls.cql_types_from_string(typ), writer, name=name) + for name, typ in zip(udt.field_names, udt.field_types)]) + ]) + + @classmethod + def serialize(cls, value, writer=None): + udt = cls.get_udt_definition(value, writer) + out = cls.definition(value, writer) + out['value'] = [] + for name, typ in zip(udt.field_names, udt.field_types): + out['value'].append(writer.serialize(getattr(value, name), writer)) + return out + + @classmethod + def deserialize(cls, value, reader=None): + udt_class = reader.context['cluster']._user_types[value['keyspace']][value['name']] + kwargs = zip( + list(map(lambda v: v['fieldName'], value['definition'])), + [reader.deserialize(v) for v in value['value']] + ) + return udt_class(**dict(kwargs)) + + class _BaseGraphSONSerializer(object): _serializers = OrderedDict() @@ -448,15 +789,19 @@ def get_serializer(cls, value): return serializer @classmethod - def serialize(cls, value): + def serialize(cls, value, writer=None): """ - Serialize a python object to graphson. + Serialize a python object to GraphSON. + + e.g 'P42DT10H5M37S' + e.g. {'key': value} :param value: The python object to serialize. + :param writer: A graphson serializer for recursive types (Optional) """ serializer = cls.get_serializer(value) if serializer: - return serializer.serialize(value) + return serializer.serialize(value, writer or cls) return value @@ -470,27 +815,34 @@ class GraphSON1Serializer(_BaseGraphSONSerializer): # We want that iteration order to be consistent, so we use an OrderedDict, # not a dict. _serializers = OrderedDict([ + (str, TextTypeIO), (bool, BooleanTypeIO), - (bytearray, BlobTypeIO), + (bytearray, ByteBufferTypeIO), (Decimal, BigDecimalTypeIO), (datetime.date, LocalDateTypeIO), (datetime.time, LocalTimeTypeIO), (datetime.timedelta, DurationTypeIO), + (datetime.datetime, InstantTypeIO), (uuid.UUID, UUIDTypeIO), (Polygon, PolygonTypeIO), (Point, PointTypeIO), - (LineString, LineStringTypeIO) + (LineString, LineStringTypeIO), + (dict, JsonMapTypeIO), + (float, FloatTypeIO) ]) -if six.PY2: - GraphSON1Serializer.register(buffer, BlobTypeIO) -else: - GraphSON1Serializer.register(memoryview, BlobTypeIO) - GraphSON1Serializer.register(bytes, BlobTypeIO) +if ipaddress: GraphSON1Serializer.register(ipaddress.IPv4Address, InetTypeIO) GraphSON1Serializer.register(ipaddress.IPv6Address, InetTypeIO) +if six.PY2: + GraphSON1Serializer.register(buffer, ByteBufferTypeIO) + GraphSON1Serializer.register(unicode, TextTypeIO) +else: + GraphSON1Serializer.register(memoryview, ByteBufferTypeIO) + GraphSON1Serializer.register(bytes, ByteBufferTypeIO) + class _BaseGraphSONDeserializer(object): @@ -526,7 +878,7 @@ class GraphSON1Deserializer(_BaseGraphSONDeserializer): """ Deserialize graphson1 types to python objects. """ - _TYPES = [UUIDTypeIO, BigDecimalTypeIO, InstantTypeIO, BlobTypeIO, + _TYPES = [UUIDTypeIO, BigDecimalTypeIO, InstantTypeIO, BlobTypeIO, ByteBufferTypeIO, PointTypeIO, LineStringTypeIO, PolygonTypeIO, LocalDateTypeIO, LocalTimeTypeIO, DurationTypeIO, InetTypeIO] @@ -581,7 +933,7 @@ def deserialize_decimal(cls, value): @classmethod def deserialize_blob(cls, value): - return cls._deserializers[BlobTypeIO.graphson_type].deserialize(value) + return cls._deserializers[ByteBufferTypeIO.graphson_type].deserialize(value) @classmethod def deserialize_point(cls, value): @@ -604,7 +956,7 @@ def deserialize_boolean(cls, value): return value -# Remove in the next major +# TODO Remove in the next major GraphSON1TypeDeserializer = GraphSON1Deserializer GraphSON1TypeSerializer = GraphSON1Serializer @@ -615,8 +967,7 @@ class GraphSON2Serializer(_BaseGraphSONSerializer): _serializers = GraphSON1Serializer.get_type_definitions() - @classmethod - def serialize(cls, value): + def serialize(self, value, writer=None): """ Serialize a type to GraphSON2. @@ -624,15 +975,24 @@ def serialize(cls, value): :param value: The python object to serialize. """ - serializer = cls.get_serializer(value) + serializer = self.get_serializer(value) if not serializer: - # if no serializer found, we can't type it. `value` will be jsonized as string. - return value + raise ValueError("Unable to find a serializer for value of type: ".format(type(value))) + + val = serializer.serialize(value, writer or self) + if serializer is TypeWrapperTypeIO: + graphson_base_type = value.type_io.graphson_base_type + graphson_type = value.type_io.graphson_type + else: + graphson_base_type = serializer.graphson_base_type + graphson_type = serializer.graphson_type - value = serializer.serialize(value) - out = {cls.TYPE_KEY: serializer.graphson_type} - if value is not None: - out[cls.VALUE_KEY] = value + if graphson_base_type is None: + out = val + else: + out = {self.TYPE_KEY: graphson_type} + if val is not None: + out[self.VALUE_KEY] = val return out @@ -647,7 +1007,7 @@ class GraphSON2Deserializer(_BaseGraphSONDeserializer): _TYPES = GraphSON1Deserializer._TYPES + [ Int16TypeIO, Int32TypeIO, Int64TypeIO, DoubleTypeIO, FloatTypeIO, BigIntegerTypeIO, VertexTypeIO, VertexPropertyTypeIO, EdgeTypeIO, - PathTypeIO, PropertyTypeIO] + PathTypeIO, PropertyTypeIO, TraversalMetricsTypeIO, MetricsTypeIO] _deserializers = { t.graphson_type: t @@ -660,10 +1020,11 @@ class GraphSON2Reader(object): GraphSON2 Reader that parse json and deserialize to python objects. """ - def __init__(self, extra_deserializer_map=None): + def __init__(self, context, extra_deserializer_map=None): """ :param extra_deserializer_map: map from GraphSON type tag to deserializer instance implementing `deserialize` """ + self.context = context self.deserializers = GraphSON2Deserializer.get_type_definitions() if extra_deserializer_map: self.deserializers.update(extra_deserializer_map) @@ -690,3 +1051,91 @@ def deserialize(self, obj): return [self.deserialize(o) for o in obj] else: return obj + + +class TypeIOWrapper(object): + """Used to force a graphson type during serialization""" + + type_io = None + value = None + + def __init__(self, type_io, value): + self.type_io = type_io + self.value = value + + +def _wrap_value(type_io, value): + return TypeIOWrapper(type_io, value) + + +to_bigint = partial(_wrap_value, Int64TypeIO) +to_int = partial(_wrap_value, Int32TypeIO) +to_smallint = partial(_wrap_value, Int16TypeIO) +to_double = partial(_wrap_value, DoubleTypeIO) +to_float = partial(_wrap_value, FloatTypeIO) + + +class GraphSON3Serializer(GraphSON2Serializer): + + _serializers = GraphSON2Serializer.get_type_definitions() + + context = None + """A dict of the serialization context""" + + def __init__(self, context): + self.context = context + self.user_types = None + + def definition(self, value): + serializer = self.get_serializer(value) + return serializer.definition(value, self) + + def get_serializer(self, value): + """Custom get_serializer to support UDT/Tuple""" + + serializer = super(GraphSON3Serializer, self).get_serializer(value) + is_namedtuple_udt = serializer is TupleTypeIO and hasattr(value, '_fields') + if not serializer or is_namedtuple_udt: + # Check if UDT + if self.user_types is None: + try: + user_types = self.context['cluster']._user_types[self.context['graph_name']] + self.user_types = dict(map(reversed, six.iteritems(user_types))) + except KeyError: + self.user_types = {} + + serializer = UserTypeIO if (is_namedtuple_udt or (type(value) in self.user_types)) else serializer + + return serializer + + +GraphSON3Serializer.register(dict, MapTypeIO) +GraphSON3Serializer.register(list, ListTypeIO) +GraphSON3Serializer.register(set, SetTypeIO) +GraphSON3Serializer.register(tuple, TupleTypeIO) +GraphSON3Serializer.register(Duration, DseDurationTypeIO) +GraphSON3Serializer.register(TypeIOWrapper, TypeWrapperTypeIO) + + +class GraphSON3Deserializer(GraphSON2Deserializer): + _TYPES = GraphSON2Deserializer._TYPES + [MapTypeIO, ListTypeIO, + SetTypeIO, TupleTypeIO, + UserTypeIO, DseDurationTypeIO, BulkSetTypeIO] + + _deserializers = {t.graphson_type: t for t in _TYPES} + + +class GraphSON3Reader(GraphSON2Reader): + """ + GraphSON3 Reader that parse json and deserialize to python objects. + """ + + def __init__(self, context, extra_deserializer_map=None): + """ + :param context: A dict of the context, mostly used as context for udt deserialization. + :param extra_deserializer_map: map from GraphSON type tag to deserializer instance implementing `deserialize` + """ + self.context = context + self.deserializers = GraphSON3Deserializer.get_type_definitions() + if extra_deserializer_map: + self.deserializers.update(extra_deserializer_map) diff --git a/cassandra/datastax/graph/query.py b/cassandra/datastax/graph/query.py index 50a03b5561..7c0e265dbf 100644 --- a/cassandra/datastax/graph/query.py +++ b/cassandra/datastax/graph/query.py @@ -19,14 +19,14 @@ from cassandra import ConsistencyLevel from cassandra.query import Statement, SimpleStatement -from cassandra.datastax.graph.types import Vertex, Edge, Path -from cassandra.datastax.graph.graphson import GraphSON2Reader +from cassandra.datastax.graph.types import Vertex, Edge, Path, VertexProperty +from cassandra.datastax.graph.graphson import GraphSON2Reader, GraphSON3Reader __all__ = [ 'GraphProtocol', 'GraphOptions', 'GraphStatement', 'SimpleGraphStatement', 'single_object_row_factory', 'graph_result_row_factory', 'graph_object_row_factory', - 'graph_graphson2_row_factory', 'Result' + 'graph_graphson2_row_factory', 'Result', 'graph_graphson3_row_factory' ] # (attr, description, server option) @@ -45,21 +45,24 @@ # this is defined by the execution profile attribute, not in graph options _request_timeout_key = 'request-timeout' -_graphson2_reader = GraphSON2Reader() - class GraphProtocol(object): - GRAPHSON_1_0 = 'graphson-1.0' + GRAPHSON_1_0 = b'graphson-1.0' """ GraphSON1 """ - GRAPHSON_2_0 = 'graphson-2.0' + GRAPHSON_2_0 = b'graphson-2.0' """ GraphSON2 """ + GRAPHSON_3_0 = b'graphson-3.0' + """ + GraphSON3 + """ + class GraphOptions(object): """ @@ -67,11 +70,13 @@ class GraphOptions(object): """ # See _graph_options map above for notes on valid options + DEFAULT_GRAPH_PROTOCOL = GraphProtocol.GRAPHSON_1_0 + DEFAULT_GRAPH_LANGUAGE = b'gremlin-groovy' + def __init__(self, **kwargs): self._graph_options = {} kwargs.setdefault('graph_source', 'g') - kwargs.setdefault('graph_language', 'gremlin-groovy') - kwargs.setdefault('graph_protocol', GraphProtocol.GRAPHSON_1_0) + kwargs.setdefault('graph_language', GraphOptions.DEFAULT_GRAPH_LANGUAGE) for attr, value in six.iteritems(kwargs): if attr not in _graph_option_names: warn("Unknown keyword argument received for GraphOptions: {0}".format(attr)) @@ -222,11 +227,31 @@ def _graph_object_sequence(objects): yield res -def graph_graphson2_row_factory(column_names, rows): - """ - Row Factory that returns the decoded graphson as DSE types. - """ - return [_graphson2_reader.read(row[0])['result'] for row in rows] +class _GraphSONContextRowFactory(object): + graphson_reader_class = None + graphson_reader_kwargs = None + + def __init__(self, cluster): + context = {'cluster': cluster} + kwargs = self.graphson_reader_kwargs or {} + self.graphson_reader = self.graphson_reader_class(context, **kwargs) + + def __call__(self, column_names, rows): + return [self.graphson_reader.read(row[0])['result'] for row in rows] + + +class _GraphSON2RowFactory(_GraphSONContextRowFactory): + """Row factory to deserialize GraphSON2 results.""" + graphson_reader_class = GraphSON2Reader + + +class _GraphSON3RowFactory(_GraphSONContextRowFactory): + """Row factory to deserialize GraphSON3 results.""" + graphson_reader_class = GraphSON3Reader + + +graph_graphson2_row_factory = _GraphSON2RowFactory +graph_graphson3_row_factory = _GraphSON3RowFactory class Result(object): @@ -302,3 +327,6 @@ def as_path(self): return Path(self.labels, self.objects) except (AttributeError, ValueError, TypeError): raise TypeError("Could not create Path from %r" % (self,)) + + def as_vertex_property(self): + return VertexProperty(self.value.get('label'), self.value.get('value'), self.value.get('properties', {})) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index e9c03f583b..1caeec0542 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -666,10 +666,15 @@ class KeyspaceMetadata(object): .. versionadded:: 3.15 """ + graph_engine = None + """ + A string indicating whether a graph engine is enabled for this keyspace (Core/Classic). + """ + _exc_info = None """ set if metadata parsing failed """ - def __init__(self, name, durable_writes, strategy_class, strategy_options): + def __init__(self, name, durable_writes, strategy_class, strategy_options, graph_engine=None): self.name = name self.durable_writes = durable_writes self.replication_strategy = ReplicationStrategy.create(strategy_class, strategy_options) @@ -679,17 +684,28 @@ def __init__(self, name, durable_writes, strategy_class, strategy_options): self.functions = {} self.aggregates = {} self.views = {} + self.graph_engine = graph_engine + + @property + def is_graph_enabled(self): + return self.graph_engine is not None def export_as_string(self): """ Returns a CQL query string that can be used to recreate the entire keyspace, including user-defined types and tables. """ - cql = "\n\n".join([self.as_cql_query() + ';'] + - self.user_type_strings() + - [f.export_as_string() for f in self.functions.values()] + - [a.export_as_string() for a in self.aggregates.values()] + - [t.export_as_string() for t in self.tables.values()]) + # Make sure tables with vertex are exported before tables with edges + tables_with_vertex = [t for t in self.tables.values() if hasattr(t, 'vertex') and t.vertex] + other_tables = [t for t in self.tables.values() if t not in tables_with_vertex] + + cql = "\n\n".join( + [self.as_cql_query() + ';'] + + self.user_type_strings() + + [f.export_as_string() for f in self.functions.values()] + + [a.export_as_string() for a in self.aggregates.values()] + + [t.export_as_string() for t in tables_with_vertex + other_tables]) + if self._exc_info: import traceback ret = "/*\nWarning: Keyspace %s is incomplete because of an error processing metadata.\n" % \ @@ -715,7 +731,10 @@ def as_cql_query(self): ret = "CREATE KEYSPACE %s WITH replication = %s " % ( protect_name(self.name), self.replication_strategy.export_for_schema()) - return ret + (' AND durable_writes = %s' % ("true" if self.durable_writes else "false")) + ret = ret + (' AND durable_writes = %s' % ("true" if self.durable_writes else "false")) + if self.graph_engine is not None: + ret = ret + (" AND graph_engine = '%s'" % self.graph_engine) + return ret def user_type_strings(self): user_type_strings = [] @@ -1340,6 +1359,90 @@ def _make_option_strings(cls, options_map): return list(sorted(ret)) +class TableMetadataV3(TableMetadata): + """ + For C* 3.0+. `option_maps` take a superset of map names, so if nothing + changes structurally, new option maps can just be appended to the list. + """ + compaction_options = {} + + option_maps = [ + 'compaction', 'compression', 'caching', + 'nodesync' # added DSE 6.0 + ] + + @property + def is_cql_compatible(self): + return True + + @classmethod + def _make_option_strings(cls, options_map): + ret = [] + options_copy = dict(options_map.items()) + + for option in cls.option_maps: + value = options_copy.get(option) + if isinstance(value, Mapping): + del options_copy[option] + params = ("'%s': '%s'" % (k, v) for k, v in value.items()) + ret.append("%s = {%s}" % (option, ', '.join(params))) + + for name, value in options_copy.items(): + if value is not None: + if name == "comment": + value = value or "" + ret.append("%s = %s" % (name, protect_value(value))) + + return list(sorted(ret)) + + +# TODO This should inherit V4 later? +class TableMetadataDSE68(TableMetadataV3): + + vertex = None + """A :class:`.VertexMetadata` instance, if graph enabled""" + + edge = None + """A :class:`.EdgeMetadata` instance, if graph enabled""" + + def as_cql_query(self, formatted=False): + ret = super(TableMetadataDSE68, self).as_cql_query(formatted) + + if self.vertex: + ret += " AND VERTEX LABEL %s" % protect_name(self.vertex.label_name) + + if self.edge: + ret += " AND EDGE LABEL %s" % protect_name(self.edge.label_name) + + ret += self._export_edge_as_cql( + self.edge.from_label, + self.edge.from_partition_key_columns, + self.edge.from_clustering_columns, "FROM") + + ret += self._export_edge_as_cql( + self.edge.to_label, + self.edge.to_partition_key_columns, + self.edge.to_clustering_columns, "TO") + + return ret + + @staticmethod + def _export_edge_as_cql(label_name, partition_keys, + clustering_columns, keyword): + ret = " %s %s(" % (keyword, protect_name(label_name)) + + if len(partition_keys) == 1: + ret += protect_name(partition_keys[0]) + else: + ret += "(%s)" % ", ".join([protect_name(k) for k in partition_keys]) + + if clustering_columns: + ret += ", %s" % ", ".join([protect_name(k) for k in clustering_columns]) + ret += ")" + + return ret + + class TableExtensionInterface(object): """ Defines CQL/DDL for Cassandra table extensions. @@ -2301,6 +2404,8 @@ class SchemaParserV3(SchemaParserV22): _function_agg_arument_type_col = 'argument_types' + _table_metadata_class = TableMetadataV3 + recognized_table_options = ( 'bloom_filter_fp_chance', 'caching', @@ -2384,7 +2489,7 @@ def _build_table_metadata(self, row, col_rows=None, trigger_rows=None, index_row trigger_rows = trigger_rows or self.keyspace_table_trigger_rows[keyspace_name][table_name] index_rows = index_rows or self.keyspace_table_index_rows[keyspace_name][table_name] - table_meta = TableMetadataV3(keyspace_name, table_name, virtual=virtual) + table_meta = self._table_metadata_class(keyspace_name, table_name, virtual=virtual) try: table_meta.options = self._build_table_options(row) flags = row.get('flags', set()) @@ -2640,15 +2745,15 @@ def _query_all(self): # ignore them if we got an error self.virtual_keyspaces_result = self._handle_results( virtual_ks_success, virtual_ks_result, - expected_failures=InvalidRequest + expected_failures=(InvalidRequest,) ) self.virtual_tables_result = self._handle_results( virtual_table_success, virtual_table_result, - expected_failures=InvalidRequest + expected_failures=(InvalidRequest,) ) self.virtual_columns_result = self._handle_results( virtual_column_success, virtual_column_result, - expected_failures=InvalidRequest + expected_failures=(InvalidRequest,) ) self._aggregate_results() @@ -2703,41 +2808,174 @@ class SchemaParserDSE67(SchemaParserV4): ("nodesync",)) -class TableMetadataV3(TableMetadata): +class SchemaParserDSE68(SchemaParserDSE67): """ - For C* 3.0+. `option_maps` take a superset of map names, so if nothing - changes structurally, new option maps can just be appended to the list. + For DSE 6.8+ """ - compaction_options = {} - option_maps = [ - 'compaction', 'compression', 'caching', - 'nodesync' # added DSE 6.0 - ] + _SELECT_VERTICES = "SELECT * FROM system_schema.vertices" + _SELECT_EDGES = "SELECT * FROM system_schema.edges" - @property - def is_cql_compatible(self): - return True + _table_metadata_class = TableMetadataDSE68 - @classmethod - def _make_option_strings(cls, options_map): - ret = [] - options_copy = dict(options_map.items()) + def __init__(self, connection, timeout): + super(SchemaParserDSE68, self).__init__(connection, timeout) + self.keyspace_table_vertex_rows = defaultdict(lambda: defaultdict(list)) + self.keyspace_table_edge_rows = defaultdict(lambda: defaultdict(list)) - for option in cls.option_maps: - value = options_copy.get(option) - if isinstance(value, Mapping): - del options_copy[option] - params = ("'%s': '%s'" % (k, v) for k, v in value.items()) - ret.append("%s = {%s}" % (option, ', '.join(params))) + def get_all_keyspaces(self): + for keyspace_meta in super(SchemaParserDSE68, self).get_all_keyspaces(): - for name, value in options_copy.items(): - if value is not None: - if name == "comment": - value = value or "" - ret.append("%s = %s" % (name, protect_value(value))) + def _build_table_graph_metadata(table_meta): + for row in self.keyspace_table_vertex_rows[keyspace_meta.name][table_meta.name]: + vertex_meta = self._build_table_vertex_metadata(row) + table_meta.vertex = vertex_meta - return list(sorted(ret)) + for row in self.keyspace_table_edge_rows[keyspace_meta.name][table_meta.name]: + edge_meta = self._build_table_edge_metadata(keyspace_meta, row) + table_meta.edge = edge_meta + + # Make sure we process vertices before edges + for t in [t for t in six.itervalues(keyspace_meta.tables) + if t.name in self.keyspace_table_vertex_rows[keyspace_meta.name]]: + _build_table_graph_metadata(t) + + # all other tables... + for t in [t for t in six.itervalues(keyspace_meta.tables) + if t.name not in self.keyspace_table_vertex_rows[keyspace_meta.name]]: + _build_table_graph_metadata(t) + + yield keyspace_meta + + def get_table(self, keyspaces, keyspace, table): + table_meta = super(SchemaParserDSE68, self).get_table(keyspaces, keyspace, table) + cl = ConsistencyLevel.ONE + where_clause = bind_params(" WHERE keyspace_name = %%s AND %s = %%s" % (self._table_name_col), (keyspace, table), _encoder) + vertices_query = QueryMessage(query=self._SELECT_VERTICES + where_clause, consistency_level=cl) + edges_query = QueryMessage(query=self._SELECT_EDGES + where_clause, consistency_level=cl) + + (vertices_success, vertices_result), (edges_success, edges_result) \ + = self.connection.wait_for_responses(vertices_query, edges_query, timeout=self.timeout, fail_on_error=False) + vertices_result = self._handle_results(vertices_success, vertices_result) + edges_result = self._handle_results(edges_success, edges_result) + + if vertices_result: + table_meta.vertex = self._build_table_vertex_metadata(vertices_result[0]) + elif edges_result: + table_meta.edge = self._build_table_edge_metadata(keyspaces[keyspace], edges_result[0]) + + return table_meta + + @staticmethod + def _build_keyspace_metadata_internal(row): + name = row["keyspace_name"] + durable_writes = row.get("durable_writes", None) + replication = dict(row.get("replication")) if 'replication' in row else {} + replication_class = replication.pop("class") if 'class' in replication else None + graph_engine = row.get("graph_engine", None) + return KeyspaceMetadata(name, durable_writes, replication_class, replication, graph_engine) + + @staticmethod + def _build_table_vertex_metadata(row): + return VertexMetadata(row.get("keyspace_name"), row.get("table_name"), + row.get("label_name")) + + @staticmethod + def _build_table_edge_metadata(keyspace_meta, row): + from_table = row.get("from_table") + from_table_meta = keyspace_meta.tables.get(from_table) + from_label = from_table_meta.vertex.label_name + to_table = row.get("to_table") + to_table_meta = keyspace_meta.tables.get(to_table) + to_label = to_table_meta.vertex.label_name + + return EdgeMetadata( + row.get("keyspace_name"), row.get("table_name"), + row.get("label_name"), from_table, from_label, + row.get("from_partition_key_columns"), + row.get("from_clustering_columns"), to_table, to_label, + row.get("to_partition_key_columns"), + row.get("to_clustering_columns")) + + def _query_all(self): + cl = ConsistencyLevel.ONE + queries = [ + # copied from v4 + QueryMessage(query=self._SELECT_KEYSPACES, consistency_level=cl), + QueryMessage(query=self._SELECT_TABLES, consistency_level=cl), + QueryMessage(query=self._SELECT_COLUMNS, consistency_level=cl), + QueryMessage(query=self._SELECT_TYPES, consistency_level=cl), + QueryMessage(query=self._SELECT_FUNCTIONS, consistency_level=cl), + QueryMessage(query=self._SELECT_AGGREGATES, consistency_level=cl), + QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl), + QueryMessage(query=self._SELECT_INDEXES, consistency_level=cl), + QueryMessage(query=self._SELECT_VIEWS, consistency_level=cl), + QueryMessage(query=self._SELECT_VIRTUAL_KEYSPACES, consistency_level=cl), + QueryMessage(query=self._SELECT_VIRTUAL_TABLES, consistency_level=cl), + QueryMessage(query=self._SELECT_VIRTUAL_COLUMNS, consistency_level=cl), + # dse7.0 only + QueryMessage(query=self._SELECT_VERTICES, consistency_level=cl), + QueryMessage(query=self._SELECT_EDGES, consistency_level=cl) + ] + + responses = self.connection.wait_for_responses( + *queries, timeout=self.timeout, fail_on_error=False) + ( + # copied from V4 + (ks_success, ks_result), + (table_success, table_result), + (col_success, col_result), + (types_success, types_result), + (functions_success, functions_result), + (aggregates_success, aggregates_result), + (triggers_success, triggers_result), + (indexes_success, indexes_result), + (views_success, views_result), + (virtual_ks_success, virtual_ks_result), + (virtual_table_success, virtual_table_result), + (virtual_column_success, virtual_column_result), + # dse6.8 responses + (vertices_success, vertices_result), + (edges_success, edges_result) + ) = responses + + # copied from V4 + self.keyspaces_result = self._handle_results(ks_success, ks_result) + self.tables_result = self._handle_results(table_success, table_result) + self.columns_result = self._handle_results(col_success, col_result) + self.triggers_result = self._handle_results(triggers_success, triggers_result) + self.types_result = self._handle_results(types_success, types_result) + self.functions_result = self._handle_results(functions_success, functions_result) + self.aggregates_result = self._handle_results(aggregates_success, aggregates_result) + self.indexes_result = self._handle_results(indexes_success, indexes_result) + self.views_result = self._handle_results(views_success, views_result) + + self.virtual_keyspaces_result = self._handle_results(virtual_ks_success, + virtual_ks_result) + self.virtual_tables_result = self._handle_results(virtual_table_success, + virtual_table_result) + self.virtual_columns_result = self._handle_results(virtual_column_success, + virtual_column_result) + # dse6.8-only results + self.vertices_result = self._handle_results(vertices_success, vertices_result) + self.edges_result = self._handle_results(edges_success, edges_result) + + self._aggregate_results() + + def _aggregate_results(self): + super(SchemaParserDSE68, self)._aggregate_results() + + m = self.keyspace_table_vertex_rows + for row in self.vertices_result: + ksname = row["keyspace_name"] + cfname = row['table_name'] + m[ksname][cfname].append(row) + + m = self.keyspace_table_edge_rows + for row in self.edges_result: + ksname = row["keyspace_name"] + cfname = row['table_name'] + m[ksname][cfname].append(row) class MaterializedViewMetadata(object): @@ -2746,8 +2984,7 @@ class MaterializedViewMetadata(object): """ keyspace_name = None - - """ A string name of the view.""" + """ A string name of the keyspace of this view.""" name = None """ A string name of the view.""" @@ -2849,11 +3086,89 @@ def export_as_string(self): return self.as_cql_query(formatted=True) + ";" +class VertexMetadata(object): + """ + A representation of a vertex on a table + """ + + keyspace_name = None + """ A string name of the keyspace. """ + + table_name = None + """ A string name of the table this vertex is on. """ + + label_name = None + """ A string name of the label of this vertex.""" + + def __init__(self, keyspace_name, table_name, label_name): + self.keyspace_name = keyspace_name + self.table_name = table_name + self.label_name = label_name + + +class EdgeMetadata(object): + """ + A representation of an edge on a table + """ + + keyspace_name = None + """A string name of the keyspace """ + + table_name = None + """A string name of the table this edge is on""" + + label_name = None + """A string name of the label of this edge""" + + from_table = None + """A string name of the from table of this edge (incoming vertex)""" + + from_label = None + """A string name of the from table label of this edge (incoming vertex)""" + + from_partition_key_columns = None + """The columns that match the partition key of the incoming vertex table.""" + + from_clustering_columns = None + """The columns that match the clustering columns of the incoming vertex table.""" + + to_table = None + """A string name of the to table of this edge (outgoing vertex)""" + + to_label = None + """A string name of the to table label of this edge (outgoing vertex)""" + + to_partition_key_columns = None + """The columns that match the partition key of the outgoing vertex table.""" + + to_clustering_columns = None + """The columns that match the clustering columns of the outgoing vertex table.""" + + def __init__( + self, keyspace_name, table_name, label_name, from_table, + from_label, from_partition_key_columns, from_clustering_columns, + to_table, to_label, to_partition_key_columns, + to_clustering_columns): + self.keyspace_name = keyspace_name + self.table_name = table_name + self.label_name = label_name + self.from_table = from_table + self.from_label = from_label + self.from_partition_key_columns = from_partition_key_columns + self.from_clustering_columns = from_clustering_columns + self.to_table = to_table + self.to_label = to_label + self.to_partition_key_columns = to_partition_key_columns + self.to_clustering_columns = to_clustering_columns + + def get_schema_parser(connection, server_version, dse_version, timeout): version = Version(server_version) if dse_version: v = Version(dse_version) - if v >= Version('6.7.0'): + if v >= Version('6.8.0'): + return SchemaParserDSE68(connection, timeout) + elif v >= Version('6.7.0'): return SchemaParserDSE67(connection, timeout) elif v >= Version('6.0.0'): return SchemaParserDSE60(connection, timeout) diff --git a/test-requirements.txt b/test-requirements.txt index 2760e27b47..0cc44c3c97 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -18,4 +18,5 @@ requests backports.ssl_match_hostname; python_version < '2.7.9' futurist; python_version >= '3.7' asynctest; python_version > '3.4' -gremlinpython>=3.3.4,<3.3.9 +gremlinpython>=3.4.0,<3.5.0 +ipaddress; python_version < '3.3.0' diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 5870191ea8..c8e3163094 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -22,6 +22,7 @@ import unittest2 as unittest except ImportError: import unittest # noqa + from packaging.version import Version import logging import socket @@ -130,6 +131,11 @@ def _get_cass_version_from_dse(dse_version): cass_ver = "4.0.0.67" else: cass_ver = '4.0.0.' + ''.join(dse_version.split('.')) + elif dse_version.startswith('6.8'): + if dse_version == '6.8.0': + cass_ver = "4.0.0.68" + else: + cass_ver = '4.0.0.' + ''.join(dse_version.split('.')) else: log.error("Unknown dse version found {0}, defaulting to 2.1".format(dse_version)) cass_ver = "2.1" @@ -338,6 +344,7 @@ def _id_and_mark(f): lessthanorequalcass40 = unittest.skipUnless(CASSANDRA_VERSION <= Version('4.0'), 'Cassandra version less or equal to 4.0 required') lessthancass40 = unittest.skipUnless(CASSANDRA_VERSION < Version('4.0'), 'Cassandra version less than 4.0 required') lessthancass30 = unittest.skipUnless(CASSANDRA_VERSION < Version('3.0'), 'Cassandra version less then 3.0 required') +greaterthanorequaldse68 = unittest.skipUnless(DSE_VERSION and DSE_VERSION >= Version('6.8'), "DSE 6.8 or greater required for this test") greaterthanorequaldse67 = unittest.skipUnless(DSE_VERSION and DSE_VERSION >= Version('6.7'), "DSE 6.7 or greater required for this test") greaterthanorequaldse60 = unittest.skipUnless(DSE_VERSION and DSE_VERSION >= Version('6.0'), "DSE 6.0 or greater required for this test") greaterthanorequaldse51 = unittest.skipUnless(DSE_VERSION and DSE_VERSION >= Version('5.1'), "DSE 5.1 or greater required for this test") @@ -398,12 +405,23 @@ def use_single_node(start=True, workloads=[], configuration_options={}, dse_opti configuration_options=configuration_options, dse_options=dse_options) +def check_log_error(): + global CCM_CLUSTER + log.debug("Checking log error of cluster {0}".format(CCM_CLUSTER.name)) + for node in CCM_CLUSTER.nodelist(): + errors = node.grep_log_for_errors() + for error in errors: + for line in error: + print(line) + + def remove_cluster(): if USE_CASS_EXTERNAL or KEEP_TEST_CLUSTER: return global CCM_CLUSTER if CCM_CLUSTER: + check_log_error() log.debug("Removing cluster {0}".format(CCM_CLUSTER.name)) tries = 0 while tries < 100: @@ -506,6 +524,23 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, if dse_version >= Version('5.0'): CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True}) CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True}) + if dse_version >= Version('5.1'): + # For Inet4Address + CCM_CLUSTER.set_dse_configuration_options({ + 'graph': { + 'gremlin_server': { + 'scriptEngines': { + 'gremlin-groovy': { + 'config': { + 'sandbox_rules': { + 'whitelist_packages': ['java.net'] + } + } + } + } + } + } + }) if 'spark' in workloads: config_options = {"initial_spark_worker_resources": 0.1} if dse_version >= Version('6.7'): diff --git a/tests/integration/advanced/__init__.py b/tests/integration/advanced/__init__.py index a943beac41..c5da6c0154 100644 --- a/tests/integration/advanced/__init__.py +++ b/tests/integration/advanced/__init__.py @@ -18,147 +18,28 @@ import unittest # noqa from six.moves.urllib.request import build_opener, Request, HTTPHandler -import sys import re import os import time from os.path import expanduser -from uuid import UUID -from decimal import Decimal + from ccmlib import common -import datetime -import six -from packaging.version import Version - -from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_DEFAULT, EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT - -from tests.integration import PROTOCOL_VERSION, DSE_VERSION, get_server_versions, BasicKeyspaceUnitTestCase, \ - drop_keyspace_shutdown_cluster, get_node, USE_CASS_EXTERNAL, CASSANDRA_IP -from tests.integration import use_singledc, use_single_node, wait_for_node_socket -from cassandra.protocol import ServerError -from cassandra.util import Point, LineString, Polygon -from cassandra.graph import Edge, Vertex, Path -from cassandra.graph import GraphSON1Deserializer -from cassandra.graph.graphson import InetTypeIO -from cassandra.datastax.graph.query import _graphson2_reader -from cassandra.cluster import (GraphAnalyticsExecutionProfile, GraphExecutionProfile, - EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT, default_lbp_factory) -from cassandra.policies import DSELoadBalancingPolicy -from cassandra.graph.query import GraphOptions, GraphProtocol, graph_graphson2_row_factory -home = expanduser('~') +from cassandra.cluster import Cluster + +from tests.integration import PROTOCOL_VERSION, get_server_versions, BasicKeyspaceUnitTestCase, \ + drop_keyspace_shutdown_cluster, get_node, USE_CASS_EXTERNAL, set_default_cass_ip +from tests.integration import use_singledc, use_single_node, wait_for_node_socket, CASSANDRA_IP +home = expanduser('~') # Home directory of the Embedded Apache Directory Server to use ADS_HOME = os.getenv('ADS_HOME', home) -MAKE_STRICT = "schema.config().option('graph.schema_mode').set('production')" -MAKE_NON_STRICT = "schema.config().option('graph.schema_mode').set('development')" -ALLOW_SCANS = "schema.config().option('graph.allow_scan').set('true')" - -# A map of common types and their corresponding groovy declaration for use in schema creation and insertion -MAX_LONG = 9223372036854775807 -MIN_LONG = -9223372036854775808 -ZERO_LONG = 0 - -if sys.version_info < (3, 0): - MAX_LONG = long(MAX_LONG) - MIN_LONG = long(MIN_LONG) - ZERO_LONG = long(ZERO_LONG) - -deserializers = GraphSON1Deserializer()._deserializers - -TYPE_MAP = {"point1": ["Point()", Point(.5, .13), GraphSON1Deserializer.deserialize_point], - "point2": ["Point()", Point(-5, .0), GraphSON1Deserializer.deserialize_point], - - "linestring1": ["Linestring()", LineString(((1.0, 2.0), (3.0, 4.0), (-89.0, 90.0))), - GraphSON1Deserializer.deserialize_linestring], - "polygon1": ["Polygon()", Polygon([(10.0, 10.0), (80.0, 10.0), (80., 88.0), (10., 89.0), (10., 10.0)], - [[(20., 20.0), (20., 30.0), (30., 30.0), (30., 20.0), (20., 20.0)], - [(40., 20.0), (40., 30.0), (50., 30.0), (50., 20.0), (40., 20.0)]]), - GraphSON1Deserializer.deserialize_polygon], - "smallint1": ["Smallint()", 1, GraphSON1Deserializer.deserialize_smallint], - "varint1": ["Varint()", 2147483647, GraphSON1Deserializer.deserialize_varint], - - "bigint1": ["Bigint()", MAX_LONG, GraphSON1Deserializer.deserialize_bigint], - "bigint2": ["Bigint()", MIN_LONG, GraphSON1Deserializer.deserialize_bigint], - "bigint3": ["Bigint()", ZERO_LONG, GraphSON1Deserializer.deserialize_bigint], - - "int1": ["Int()", 100, GraphSON1Deserializer.deserialize_int], - "float1": ["Float()", .5, GraphSON1Deserializer.deserialize_float], - "double1": ["Double()", .3415681, GraphSON1Deserializer.deserialize_double], - "uuid1": ["Uuid()", UUID('12345678123456781234567812345678'), GraphSON1Deserializer.deserialize_uuid], - "decimal1": ["Decimal()", Decimal(10), GraphSON1Deserializer.deserialize_decimal], - "blob1": ["Blob()", bytearray(b"Hello World"), GraphSON1Deserializer.deserialize_blob], - - "timestamp1": ["Timestamp()", datetime.datetime.now().replace(microsecond=0), - GraphSON1Deserializer.deserialize_timestamp], - "timestamp2": ["Timestamp()", datetime.datetime.max.replace(microsecond=0), - GraphSON1Deserializer.deserialize_timestamp], - # These are valid values but are pending for DSP-14093 to be fixed - #"timestamp3": ["Timestamp()", datetime.datetime(159, 1, 1, 23, 59, 59), - # GraphSON1TypeDeserializer.deserialize_timestamp], - #"timestamp4": ["Timestamp()", datetime.datetime.min, - # GraphSON1TypeDeserializer.deserialize_timestamp], - - "duration1": ["Duration()", datetime.timedelta(1, 16, 0), - GraphSON1Deserializer.deserialize_duration], - "duration2": ["Duration()", datetime.timedelta(days=1, seconds=16, milliseconds=15), - GraphSON1Deserializer.deserialize_duration], - } - - -if six.PY2: - TYPE_MAP["blob3"] = ["Blob()", buffer(b"Hello World"), GraphSON1Deserializer.deserialize_blob] - - TYPE_MAP["inet1"] = ["Inet()", "127.0.0.1", GraphSON1Deserializer.deserialize_inet] - TYPE_MAP["inet2"] = ["Inet()", "2001:db8:85a3:8d3:1319:8a2e:370:7348", GraphSON1Deserializer.deserialize_inet] - -else: - TYPE_MAP["blob4"] = ["Blob()", bytes(b"Hello World Again"), GraphSON1Deserializer.deserialize_blob] - TYPE_MAP["blob5"] = ["Blob()", memoryview(b"And Again Hello World"), GraphSON1Deserializer.deserialize_blob] - - import ipaddress - deserializer_plus_to_ipaddressv4 = lambda x: ipaddress.IPv4Address(GraphSON1Deserializer.deserialize_inet(x)) - deserializer_plus_to_ipaddressv6 = lambda x: ipaddress.IPv6Address(GraphSON1Deserializer.deserialize_inet(x)) - - def generic_ip_deserializer(string_ip_adress): - if ":" in string_ip_adress: - return deserializer_plus_to_ipaddressv6(string_ip_adress) - return deserializer_plus_to_ipaddressv4(string_ip_adress) - - class GenericIpAddressIO(InetTypeIO): - @classmethod - def deserialize(cls, value, reader=None): - return generic_ip_deserializer(value) - - _graphson2_reader.deserializers[GenericIpAddressIO.graphson_type] = GenericIpAddressIO - - TYPE_MAP["inet1"] = ["Inet()", ipaddress.IPv4Address("127.0.0.1"), deserializer_plus_to_ipaddressv4] - TYPE_MAP["inet2"] = ["Inet()", ipaddress.IPv6Address("2001:db8:85a3:8d3:1319:8a2e:370:7348"), - deserializer_plus_to_ipaddressv6] - -if DSE_VERSION and DSE_VERSION >= Version("5.1"): - TYPE_MAP["datetime1"]= ["Date()", datetime.date.today(), GraphSON1Deserializer.deserialize_date] - TYPE_MAP["time1"] = ["Time()", datetime.time(12, 6, 12, 444), GraphSON1Deserializer.deserialize_time] - TYPE_MAP["time2"] = ["Time()", datetime.time(12, 6, 12), GraphSON1Deserializer.deserialize_time] - TYPE_MAP["time3"] = ["Time()", datetime.time(12, 6), GraphSON1Deserializer.deserialize_time] - TYPE_MAP["time4"] = ["Time()", datetime.time.min, GraphSON1Deserializer.deserialize_time] - TYPE_MAP["time5"] = ["Time()", datetime.time.max, GraphSON1Deserializer.deserialize_time] - TYPE_MAP["blob2"] = ["Blob()", bytearray(b"AKDLIElksadlaswqA" * 100000), GraphSON1Deserializer.deserialize_blob] - TYPE_MAP["datetime1"]= ["Date()", datetime.date.today(), GraphSON1Deserializer.deserialize_date] - TYPE_MAP["datetime2"]= ["Date()", datetime.date(159, 1, 3), GraphSON1Deserializer.deserialize_date] - TYPE_MAP["datetime3"]= ["Date()", datetime.date.min, GraphSON1Deserializer.deserialize_date] - TYPE_MAP["datetime4"]= ["Date()", datetime.date.max, GraphSON1Deserializer.deserialize_date] - TYPE_MAP["time1"] = ["Time()", datetime.time(12, 6, 12, 444), GraphSON1Deserializer.deserialize_time] - TYPE_MAP["time2"] = ["Time()", datetime.time(12, 6, 12), GraphSON1Deserializer.deserialize_time] - TYPE_MAP["time3"] = ["Time()", datetime.time(12, 6), GraphSON1Deserializer.deserialize_time] - TYPE_MAP["time4"] = ["Time()", datetime.time.min, GraphSON1Deserializer.deserialize_time] - TYPE_MAP["time5"] = ["Time()", datetime.time.max, GraphSON1Deserializer.deserialize_time] - TYPE_MAP["blob2"] = ["Blob()", bytearray(b"AKDLIElksadlaswqA" * 100000), GraphSON1Deserializer.deserialize_blob] + def find_spark_master(session): - # Itterate over the nodes the one with port 7080 open is the spark master + # Iterate over the nodes the one with port 7080 open is the spark master for host in session.hosts: ip = host.address port = 7077 @@ -216,7 +97,7 @@ def use_cluster_with_graph(num_nodes): when started all at once. """ if USE_CASS_EXTERNAL: - set_default_dse_ip() + set_default_cass_ip() return # Create the cluster but don't start it. @@ -248,178 +129,12 @@ def use_cluster_with_graph(num_nodes): wait_for_spark_workers(3, 120) -def reset_graph(session, graph_name): - session.execute_graph('system.graph(name).ifNotExists().create()', {'name': graph_name}, - execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) - wait_for_graph_inserted(session, graph_name) - - -def wait_for_graph_inserted(session, graph_name): - count = 0 - exists = session.execute_graph('system.graph(name).exists()', {'name': graph_name}, - execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)[0].value - while not exists and count < 50: - time.sleep(1) - exists = session.execute_graph('system.graph(name).exists()', {'name': graph_name}, - execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)[0].value - return exists - - -class BasicGraphUnitTestCase(BasicKeyspaceUnitTestCase): - """ - This is basic graph unit test case that provides various utility methods that can be leveraged for testcase setup and tear - down - """ - @property - def graph_name(self): - return self._testMethodName.lower() - - def session_setup(self): - lbp = DSELoadBalancingPolicy(default_lbp_factory()) - - ep_graphson2 = GraphExecutionProfile( - request_timeout=60, - load_balancing_policy=lbp, - graph_options=GraphOptions( - graph_name=self.graph_name, - graph_protocol=GraphProtocol.GRAPHSON_2_0 - ), - row_factory=graph_graphson2_row_factory) - - ep_graphson1 = GraphExecutionProfile( - request_timeout=60, - load_balancing_policy=lbp, - graph_options=GraphOptions( - graph_name=self.graph_name - ) - ) - - ep_analytics = GraphAnalyticsExecutionProfile( - request_timeout=60, - load_balancing_policy=lbp, - graph_options = GraphOptions( - graph_language=b'gremlin-groovy', - graph_name=self.graph_name - ) - ) - - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={ - EXEC_PROFILE_GRAPH_DEFAULT: ep_graphson1, - EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT: ep_analytics, - "graphson2": ep_graphson2 - }) - self.session = self.cluster.connect() - self.ks_name = self._testMethodName.lower() - self.cass_version, self.cql_version = get_server_versions() - - def setUp(self): - if DSE_VERSION: - self.session_setup() - self.reset_graph() - - self.clear_schema() - - def tearDown(self): - if DSE_VERSION: - self.cluster.shutdown() - - def clear_schema(self): - self.session.execute_graph('schema.clear()') - - def reset_graph(self): - reset_graph(self.session, self.graph_name) - - - def wait_for_graph_inserted(self): - wait_for_graph_inserted(self.session, self.graph_name) - - -class BasicSharedGraphUnitTestCase(BasicKeyspaceUnitTestCase): - """ - This is basic graph unit test case that provides various utility methods that can be leveraged for testcase setup and tear - down - """ - - @classmethod - def session_setup(cls): - cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) - cls.session = cls.cluster.connect() - cls.ks_name = cls.__name__.lower() - cls.cass_version, cls.cql_version = get_server_versions() - cls.graph_name = cls.__name__.lower() - - @classmethod - def setUpClass(cls): - if DSE_VERSION: - cls.session_setup() - cls.reset_graph() - profiles = cls.cluster.profile_manager.profiles - profiles[EXEC_PROFILE_GRAPH_DEFAULT].request_timeout = 60 - profiles[EXEC_PROFILE_GRAPH_DEFAULT].graph_options.graph_name = cls.graph_name - profiles[EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT].request_timeout = 60 - profiles[EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT].graph_options.graph_name = cls.graph_name - cls.clear_schema() - - @classmethod - def tearDownClass(cls): - if DSE_VERSION: - cls.cluster.shutdown() - - @classmethod - def clear_schema(self): - self.session.execute_graph('schema.clear()') - - @classmethod - def reset_graph(self): - reset_graph(self.session, self.graph_name) - - def wait_for_graph_inserted(self): - wait_for_graph_inserted(self.session, self.graph_name) - - -def fetchCustomGeoType(type): - if type.lower().startswith("point"): - return getPointType() - elif type.lower().startswith("line"): - return getLineType() - elif type.lower().startswith("poly"): - return getPolygonType() - else: - return None - - -geo_condition = DSE_VERSION and DSE_VERSION < Version('5.1') -def getPointType(): - if geo_condition: - return "Point()" - - return "Point().withGeoBounds()" - -def getPointTypeWithBounds(lowerX, lowerY, upperX, upperY): - if geo_condition: - return "Point()" - - return "Point().withBounds({0}, {1}, {2}, {3})".format(lowerX, lowerY, upperX, upperY) - -def getLineType(): - if geo_condition: - return "Linestring()" - - return "Linestring().withGeoBounds()" - -def getPolygonType(): - if geo_condition: - return "Polygon()" - - return "Polygon().withGeoBounds()" - - class BasicGeometricUnitTestCase(BasicKeyspaceUnitTestCase): """ This base test class is used by all the geomteric tests. It contains class level teardown and setup methods. It also contains the test fixtures used by those tests """ + @classmethod def common_dse_setup(cls, rf, keyspace_creation=True): cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) @@ -432,277 +147,22 @@ def common_dse_setup(cls, rf, keyspace_creation=True): @classmethod def setUpClass(cls): - if DSE_VERSION: - cls.common_dse_setup(1) - cls.initalizeTables() + cls.common_dse_setup(1) + cls.initalizeTables() @classmethod def tearDownClass(cls): - if DSE_VERSION: - drop_keyspace_shutdown_cluster(cls.ks_name, cls.session, cls.cluster) + drop_keyspace_shutdown_cluster(cls.ks_name, cls.session, cls.cluster) @classmethod def initalizeTables(cls): udt_type = "CREATE TYPE udt1 (g {0})".format(cls.cql_type_name) - large_table = "CREATE TABLE tbl (k uuid PRIMARY KEY, g {0}, l list<{0}>, s set<{0}>, m0 map<{0},int>, m1 map, t tuple<{0},{0},{0}>, u frozen)".format(cls.cql_type_name) - simple_table = "CREATE TABLE tblpk (k {0} primary key, v int)".format( cls.cql_type_name) - cluster_table = "CREATE TABLE tblclustering (k0 int, k1 {0}, v int, primary key (k0, k1))".format(cls.cql_type_name) + large_table = "CREATE TABLE tbl (k uuid PRIMARY KEY, g {0}, l list<{0}>, s set<{0}>, m0 map<{0},int>, m1 map, t tuple<{0},{0},{0}>, u frozen)".format( + cls.cql_type_name) + simple_table = "CREATE TABLE tblpk (k {0} primary key, v int)".format(cls.cql_type_name) + cluster_table = "CREATE TABLE tblclustering (k0 int, k1 {0}, v int, primary key (k0, k1))".format( + cls.cql_type_name) cls.session.execute(udt_type) cls.session.execute(large_table) cls.session.execute(simple_table) cls.session.execute(cluster_table) - - -def generate_line_graph(length): - query_parts = [] - query_parts.append(ALLOW_SCANS+';') - query_parts.append("schema.propertyKey('index').Int().ifNotExists().create();") - query_parts.append("schema.propertyKey('distance').Int().ifNotExists().create();") - query_parts.append("schema.vertexLabel('lp').properties('index').ifNotExists().create();") - query_parts.append("schema.edgeLabel('goesTo').properties('distance').connection('lp', 'lp').ifNotExists().create();") - for index in range(0, length): - query_parts.append('''Vertex vertex{0} = graph.addVertex(label, 'lp', 'index', {0}); '''.format(index)) - if index is not 0: - query_parts.append('''vertex{0}.addEdge('goesTo', vertex{1}, 'distance', 5); '''.format(index-1, index)) - final_graph_generation_statement = "".join(query_parts) - return final_graph_generation_statement - - -def generate_classic(session): - to_run = [MAKE_STRICT, ALLOW_SCANS, '''schema.propertyKey('name').Text().ifNotExists().create(); - schema.propertyKey('age').Int().ifNotExists().create(); - schema.propertyKey('lang').Text().ifNotExists().create(); - schema.propertyKey('weight').Float().ifNotExists().create(); - schema.vertexLabel('person').properties('name', 'age').ifNotExists().create(); - schema.vertexLabel('software').properties('name', 'lang').ifNotExists().create(); - schema.edgeLabel('created').properties('weight').connection('person', 'software').ifNotExists().create(); - schema.edgeLabel('created').connection('software', 'software').add(); - schema.edgeLabel('knows').properties('weight').connection('person', 'person').ifNotExists().create();''', - '''Vertex marko = graph.addVertex(label, 'person', 'name', 'marko', 'age', 29); - Vertex vadas = graph.addVertex(label, 'person', 'name', 'vadas', 'age', 27); - Vertex lop = graph.addVertex(label, 'software', 'name', 'lop', 'lang', 'java'); - Vertex josh = graph.addVertex(label, 'person', 'name', 'josh', 'age', 32); - Vertex ripple = graph.addVertex(label, 'software', 'name', 'ripple', 'lang', 'java'); - Vertex peter = graph.addVertex(label, 'person', 'name', 'peter', 'age', 35); - marko.addEdge('knows', vadas, 'weight', 0.5f); - marko.addEdge('knows', josh, 'weight', 1.0f); - marko.addEdge('created', lop, 'weight', 0.4f); - josh.addEdge('created', ripple, 'weight', 1.0f); - josh.addEdge('created', lop, 'weight', 0.4f); - peter.addEdge('created', lop, 'weight', 0.2f);'''] - - for run in to_run: - succeed = False - count = 0 - # Retry up to 10 times this is an issue for - # Graph Mult-NodeClusters - while count < 10 and not succeed: - try: - session.execute_graph(run) - succeed = True - except (ServerError): - print("error creating classic graph retrying") - time.sleep(.5) - count += 1 - - -def generate_multi_field_graph(session): - to_run = [ALLOW_SCANS, - '''schema.propertyKey('shortvalue').Smallint().ifNotExists().create(); - schema.vertexLabel('shortvertex').properties('shortvalue').ifNotExists().create(); - short s1 = 5000; graph.addVertex(label, "shortvertex", "shortvalue", s1);''', - '''schema.propertyKey('intvalue').Int().ifNotExists().create(); - schema.vertexLabel('intvertex').properties('intvalue').ifNotExists().create(); - int i1 = 1000000000; graph.addVertex(label, "intvertex", "intvalue", i1);''', - '''schema.propertyKey('intvalue2').Int().ifNotExists().create(); - schema.vertexLabel('intvertex2').properties('intvalue2').ifNotExists().create(); - Integer i2 = 100000000; graph.addVertex(label, "intvertex2", "intvalue2", i2);''', - '''schema.propertyKey('longvalue').Bigint().ifNotExists().create(); - schema.vertexLabel('longvertex').properties('longvalue').ifNotExists().create(); - long l1 = 9223372036854775807; graph.addVertex(label, "longvertex", "longvalue", l1);''', - '''schema.propertyKey('longvalue2').Bigint().ifNotExists().create(); - schema.vertexLabel('longvertex2').properties('longvalue2').ifNotExists().create(); - Long l2 = 100000000000000000L; graph.addVertex(label, "longvertex2", "longvalue2", l2);''', - '''schema.propertyKey('floatvalue').Float().ifNotExists().create(); - schema.vertexLabel('floatvertex').properties('floatvalue').ifNotExists().create(); - float f1 = 3.5f; graph.addVertex(label, "floatvertex", "floatvalue", f1);''', - '''schema.propertyKey('doublevalue').Double().ifNotExists().create(); - schema.vertexLabel('doublevertex').properties('doublevalue').ifNotExists().create(); - double d1 = 3.5e40; graph.addVertex(label, "doublevertex", "doublevalue", d1);''', - '''schema.propertyKey('doublevalue2').Double().ifNotExists().create(); - schema.vertexLabel('doublevertex2').properties('doublevalue2').ifNotExists().create(); - Double d2 = 3.5e40d; graph.addVertex(label, "doublevertex2", "doublevalue2", d2);'''] - - - for run in to_run: - session.execute_graph(run) - - if DSE_VERSION >= Version('5.1'): - to_run_51=['''schema.propertyKey('datevalue1').Date().ifNotExists().create(); - schema.vertexLabel('datevertex1').properties('datevalue1').ifNotExists().create();''', - '''schema.propertyKey('negdatevalue2').Date().ifNotExists().create(); - schema.vertexLabel('negdatevertex2').properties('negdatevalue2').ifNotExists().create();'''] - for i in range(1,4): - to_run_51.append('''schema.propertyKey('timevalue{0}').Time().ifNotExists().create(); - schema.vertexLabel('timevertex{0}').properties('timevalue{0}').ifNotExists().create();'''.format(i)) - - for run in to_run_51: - session.execute_graph(run) - - session.execute_graph('''graph.addVertex(label, "datevertex1", "datevalue1", date1);''', - {'date1': '1999-07-29' }) - session.execute_graph('''graph.addVertex(label, "negdatevertex2", "negdatevalue2", date2);''', - {'date2': '-1999-07-28' }) - - session.execute_graph('''graph.addVertex(label, "timevertex1", "timevalue1", time1);''', - {'time1': '14:02'}) - session.execute_graph('''graph.addVertex(label, "timevertex2", "timevalue2", time2);''', - {'time2': '14:02:20'}) - session.execute_graph('''graph.addVertex(label, "timevertex3", "timevalue3", time3);''', - {'time3': '14:02:20.222'}) - - -def generate_type_graph_schema(session, prime_schema=True): - """ - This method will prime the schema for all types in the TYPE_MAP - """ - session.execute_graph(ALLOW_SCANS) - if(prime_schema): - create_vertex= "schema.vertexLabel('{0}').ifNotExists().create();".\ - format(generate_type_graph_schema.single_vertex) - session.execute_graph(create_vertex) - for key in TYPE_MAP.keys(): - prop_type = fetchCustomGeoType(key) - if prop_type is None: - prop_type=TYPE_MAP[key][0] - vertex_label = key - prop_name = key+"value" - insert_string = "" - insert_string += "schema.propertyKey('{0}').{1}.ifNotExists().create();".format(prop_name, prop_type) - insert_string += "schema.vertexLabel('{}').properties('{}').add();".\ - format(generate_type_graph_schema.single_vertex, prop_name) - session.execute_graph(insert_string) - else: - session.execute_graph(MAKE_NON_STRICT) -generate_type_graph_schema.single_vertex = "single_vertex_label" - -def generate_address_book_graph(session, size): - to_run = [ALLOW_SCANS, - "schema.propertyKey('name').Text().create()\n" + - "schema.propertyKey('pointPropWithBoundsWithSearchIndex')." + getPointTypeWithBounds(-100, -100, 100, 100) + ".create()\n" + - "schema.propertyKey('pointPropWithBounds')." + getPointTypeWithBounds(-100, -100, 100, 100) + ".create()\n" + - "schema.propertyKey('pointPropWithGeoBoundsWithSearchIndex')." + getPointType() + ".create()\n" + - "schema.propertyKey('pointPropWithGeoBounds')." + getPointType() + ".create()\n" + - "schema.propertyKey('city').Text().create()\n" + - "schema.propertyKey('state').Text().create()\n" + - "schema.propertyKey('description').Text().create()\n" + - "schema.vertexLabel('person').properties('name', 'city', 'state', 'description', 'pointPropWithBoundsWithSearchIndex', 'pointPropWithBounds', 'pointPropWithGeoBoundsWithSearchIndex', 'pointPropWithGeoBounds').create()", - "schema.vertexLabel('person').index('searchPointWithBounds').secondary().by('pointPropWithBounds').add()", - "schema.vertexLabel('person').index('searchPointWithGeoBounds').secondary().by('pointPropWithGeoBounds').add()", - - "g.addV('person').property('name', 'Paul Thomas Joe').property('city', 'Rochester').property('state', 'MN').property('pointPropWithBoundsWithSearchIndex', Geo.point(-92.46295, 44.0234)).property('pointPropWithBounds', Geo.point(-92.46295, 44.0234)).property('pointPropWithGeoBoundsWithSearchIndex', Geo.point(-92.46295, 44.0234)).property('pointPropWithGeoBounds', Geo.point(-92.46295, 44.0234)).property('description', 'Lives by the hospital')", - "g.addV('person').property('name', 'George Bill Steve').property('city', 'Minneapolis').property('state', 'MN').property('pointPropWithBoundsWithSearchIndex', Geo.point(-93.266667, 44.093333)).property('pointPropWithBounds', Geo.point(-93.266667, 44.093333)).property('pointPropWithGeoBoundsWithSearchIndex', Geo.point(-93.266667, 44.093333)).property('pointPropWithGeoBounds', Geo.point(-93.266667, 44.093333)).property('description', 'A cold dude')", - "g.addV('person').property('name', 'James Paul Smith').property('city', 'Chicago').property('state', 'IL').property('pointPropWithBoundsWithSearchIndex', Geo.point(-87.684722, 41.836944)).property('description', 'Likes to hang out')", - "g.addV('person').property('name', 'Jill Alice').property('city', 'Atlanta').property('state', 'GA').property('pointPropWithBoundsWithSearchIndex', Geo.point(-84.39, 33.755)).property('description', 'Enjoys a nice cold coca cola')", - ] - - if not Version('5.0') <= DSE_VERSION < Version('5.1'): - to_run.append("schema.vertexLabel('person').index('search').search().by('pointPropWithBoundsWithSearchIndex').withError(0.00001, 0.0).by('pointPropWithGeoBoundsWithSearchIndex').withError(0.00001, 0.0).add()") - - for run in to_run: - session.execute_graph(run) - - -def generate_large_complex_graph(session, size): - prof = session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, request_timeout=32) - to_run = ''' - schema.config().option('graph.schema_mode').set('development'); - schema.config().option('graph.allow_scan').set('true'); - ''' - session.execute_graph(to_run, execution_profile=prof) - to_run = ''' - int size = 2000; - List ids = new ArrayList(); - schema.propertyKey('ts').Int().single().ifNotExists().create(); - schema.propertyKey('sin').Int().single().ifNotExists().create(); - schema.propertyKey('cos').Int().single().ifNotExists().create(); - schema.propertyKey('ii').Int().single().ifNotExists().create(); - schema.vertexLabel('lcg').properties('ts', 'sin', 'cos', 'ii').ifNotExists().create(); - schema.edgeLabel('linked').connection('lcg', 'lcg').ifNotExists().create(); - Vertex v = graph.addVertex(label, 'lcg'); - v.property("ts", 100001); - v.property("sin", 0); - v.property("cos", 1); - v.property("ii", 0); - ids.add(v.id()); - Random rand = new Random(); - for (int ii = 1; ii < size; ii++) { - v = graph.addVertex(label, 'lcg'); - v.property("ii", ii); - v.property("ts", 100001 + ii); - v.property("sin", Math.sin(ii/5.0)); - v.property("cos", Math.cos(ii/5.0)); - Vertex u = g.V(ids.get(rand.nextInt(ids.size()))).next(); - v.addEdge("linked", u); - ids.add(u.id()); - ids.add(v.id()); - } - g.V().count();''' - - session.execute_graph(to_run, execution_profile=prof) - - -def validate_classic_vertex(test, vertex): - vertex_props = vertex.properties.keys() - test.assertEqual(len(vertex_props), 2) - test.assertIn('name', vertex_props) - test.assertTrue('lang' in vertex_props or 'age' in vertex_props) - - -def validate_classic_vertex_return_type(test, vertex): - validate_generic_vertex_result_type(vertex) - vertex_props = vertex.properties - test.assertIn('name', vertex_props) - test.assertTrue('lang' in vertex_props or 'age' in vertex_props) - - -def validate_generic_vertex_result_type(test, vertex): - test.assertIsInstance(vertex, Vertex) - for attr in ('id', 'type', 'label', 'properties'): - test.assertIsNotNone(getattr(vertex, attr)) - - -def validate_classic_edge_properties(test, edge_properties): - test.assertEqual(len(edge_properties.keys()), 1) - test.assertIn('weight', edge_properties) - test.assertIsInstance(edge_properties, dict) - - -def validate_classic_edge(test, edge): - validate_generic_edge_result_type(test, edge) - validate_classic_edge_properties(test, edge.properties) - - -def validate_line_edge(test, edge): - validate_generic_edge_result_type(test, edge) - edge_props = edge.properties - test.assertEqual(len(edge_props.keys()), 1) - test.assertIn('distance', edge_props) - - -def validate_generic_edge_result_type(test, edge): - test.assertIsInstance(edge, Edge) - for attr in ('properties', 'outV', 'outVLabel', 'inV', 'inVLabel', 'label', 'type', 'id'): - test.assertIsNotNone(getattr(edge, attr)) - - -def validate_path_result_type(test, path): - test.assertIsInstance(path, Path) - test.assertIsNotNone(path.labels) - for obj in path.objects: - if isinstance(obj, Edge): - validate_classic_edge(test, obj) - elif isinstance(obj, Vertex): - validate_classic_vertex(test, obj) - else: - test.fail("Invalid object found in path " + str(object.type)) diff --git a/tests/integration/advanced/graph/__init__.py b/tests/integration/advanced/graph/__init__.py index 2c9ca172f8..6002d57f78 100644 --- a/tests/integration/advanced/graph/__init__.py +++ b/tests/integration/advanced/graph/__init__.py @@ -11,3 +11,1195 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +import sys +import logging +import inspect +from packaging.version import Version +import ipaddress +from uuid import UUID +from decimal import Decimal +import datetime + +from cassandra.util import Point, LineString, Polygon, Duration +import six + +from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT, EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT +from cassandra.cluster import GraphAnalyticsExecutionProfile, GraphExecutionProfile, EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT, \ + default_lbp_factory +from cassandra.policies import DSELoadBalancingPolicy + +from cassandra.graph import GraphSON1Deserializer +from cassandra.graph.graphson import InetTypeIO, GraphSON2Deserializer, GraphSON3Deserializer +from cassandra.graph import Edge, Vertex, Path +from cassandra.graph.query import GraphOptions, GraphProtocol, graph_graphson2_row_factory, \ + graph_graphson3_row_factory + +from tests.integration import DSE_VERSION +from tests.integration.advanced import * + + +def setup_module(): + if DSE_VERSION: + dse_options = {'graph': {'realtime_evaluation_timeout_in_seconds': 60}} + use_single_node_with_graph(dse_options=dse_options) + + +log = logging.getLogger(__name__) + +MAX_LONG = 9223372036854775807 +MIN_LONG = -9223372036854775808 +ZERO_LONG = 0 + +if sys.version_info < (3, 0): + MAX_LONG = long(MAX_LONG) + MIN_LONG = long(MIN_LONG) + ZERO_LONG = long(ZERO_LONG) + +MAKE_STRICT = "schema.config().option('graph.schema_mode').set('production')" +MAKE_NON_STRICT = "schema.config().option('graph.schema_mode').set('development')" +ALLOW_SCANS = "schema.config().option('graph.allow_scan').set('true')" + +deserializer_plus_to_ipaddressv4 = lambda x: ipaddress.IPv4Address(GraphSON1Deserializer.deserialize_inet(x)) +deserializer_plus_to_ipaddressv6 = lambda x: ipaddress.IPv6Address(GraphSON1Deserializer.deserialize_inet(x)) + + +def generic_ip_deserializer(string_ip_address): + if ":" in string_ip_address: + return deserializer_plus_to_ipaddressv6(string_ip_address) + return deserializer_plus_to_ipaddressv4(string_ip_address) + + +class GenericIpAddressIO(InetTypeIO): + @classmethod + def deserialize(cls, value, reader=None): + return generic_ip_deserializer(value) + +GraphSON2Deserializer._deserializers[GenericIpAddressIO.graphson_type] = GenericIpAddressIO +GraphSON3Deserializer._deserializers[GenericIpAddressIO.graphson_type] = GenericIpAddressIO + +if DSE_VERSION: + if DSE_VERSION >= Version('6.8.0'): + CREATE_CLASSIC_GRAPH = "system.graph(name).engine(Classic).create()" + else: + CREATE_CLASSIC_GRAPH = "system.graph(name).create()" + + +def reset_graph(session, graph_name): + ks = list(session.execute( + "SELECT * FROM system_schema.keyspaces WHERE keyspace_name = '{}';".format(graph_name))) + if ks: + try: + session.execute_graph('system.graph(name).drop()', {'name': graph_name}, + execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) + except: + pass + + session.execute_graph(CREATE_CLASSIC_GRAPH, {'name': graph_name}, + execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) + wait_for_graph_inserted(session, graph_name) + + +def wait_for_graph_inserted(session, graph_name): + count = 0 + exists = session.execute_graph('system.graph(name).exists()', {'name': graph_name}, + execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)[0].value + while not exists and count < 50: + time.sleep(1) + exists = session.execute_graph('system.graph(name).exists()', {'name': graph_name}, + execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)[0].value + return exists + + +class BasicGraphUnitTestCase(BasicKeyspaceUnitTestCase): + """ + This is basic graph unit test case that provides various utility methods that can be leveraged for testcase setup and tear + down + """ + + @property + def graph_name(self): + return self._testMethodName.lower() + + def session_setup(self): + lbp = DSELoadBalancingPolicy(default_lbp_factory()) + + ep_graphson2 = GraphExecutionProfile( + request_timeout=60, + load_balancing_policy=lbp, + graph_options=GraphOptions( + graph_name=self.graph_name, + graph_protocol=GraphProtocol.GRAPHSON_2_0 + ), + row_factory=graph_graphson2_row_factory) + + ep_graphson3 = GraphExecutionProfile( + request_timeout=60, + load_balancing_policy=lbp, + graph_options=GraphOptions( + graph_name=self.graph_name, + graph_protocol=GraphProtocol.GRAPHSON_3_0 + ), + row_factory=graph_graphson3_row_factory) + + ep_graphson1 = GraphExecutionProfile( + request_timeout=60, + load_balancing_policy=lbp, + graph_options=GraphOptions( + graph_name=self.graph_name + ) + ) + + ep_analytics = GraphAnalyticsExecutionProfile( + request_timeout=60, + load_balancing_policy=lbp, + graph_options=GraphOptions( + graph_source=b'a', + graph_language=b'gremlin-groovy', + graph_name=self.graph_name + ) + ) + + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles={ + EXEC_PROFILE_GRAPH_DEFAULT: ep_graphson1, + EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT: ep_analytics, + "graphson1": ep_graphson1, + "graphson2": ep_graphson2, + "graphson3": ep_graphson3 + }) + + self.session = self.cluster.connect() + self.ks_name = self._testMethodName.lower() + self.cass_version, self.cql_version = get_server_versions() + + def setUp(self): + self.session_setup() + self.reset_graph() + self.clear_schema() + # enable dev and scan modes + self.session.execute_graph(MAKE_NON_STRICT) + self.session.execute_graph(ALLOW_SCANS) + + def tearDown(self): + self.cluster.shutdown() + + def clear_schema(self): + self.session.execute_graph(""" + schema.clear(); + """) + + def reset_graph(self): + reset_graph(self.session, self.graph_name) + + def wait_for_graph_inserted(self): + wait_for_graph_inserted(self.session, self.graph_name) + + def _execute(self, query, graphson, params=None, execution_profile_options=None, **kwargs): + queries = query if isinstance(query, list) else [query] + ep = self.get_execution_profile(graphson) + if execution_profile_options: + ep = self.session.execution_profile_clone_update(ep, **execution_profile_options) + + results = [] + for query in queries: + log.debug(query) + rf = self.session.execute_graph_async(query, parameters=params, execution_profile=ep, **kwargs) + results.append(rf.result()) + self.assertEqual(rf.message.custom_payload['graph-results'], graphson) + + return results[0] if len(results) == 1 else results + + def get_execution_profile(self, graphson, traversal=False): + ep = 'graphson1' + if graphson == GraphProtocol.GRAPHSON_2_0: + ep = 'graphson2' + elif graphson == GraphProtocol.GRAPHSON_3_0: + ep = 'graphson3' + + return ep if traversal is False else 'traversal_' + ep + + def resultset_to_list(self, rs): + results_list = [] + for result in rs: + try: + results_list.append(result.value) + except: + results_list.append(result) + + return results_list + + +class GraphUnitTestCase(BasicKeyspaceUnitTestCase): + + @property + def graph_name(self): + return self._testMethodName.lower() + + def session_setup(self): + lbp = DSELoadBalancingPolicy(default_lbp_factory()) + + ep_graphson2 = GraphExecutionProfile( + request_timeout=60, + load_balancing_policy=lbp, + graph_options=GraphOptions( + graph_name=self.graph_name, + graph_protocol=GraphProtocol.GRAPHSON_2_0 + ), + row_factory=graph_graphson2_row_factory) + + ep_graphson3 = GraphExecutionProfile( + request_timeout=60, + load_balancing_policy=lbp, + graph_options=GraphOptions( + graph_name=self.graph_name, + graph_protocol=GraphProtocol.GRAPHSON_3_0 + ), + row_factory=graph_graphson3_row_factory) + + ep_graphson1 = GraphExecutionProfile( + request_timeout=60, + load_balancing_policy=lbp, + graph_options=GraphOptions( + graph_name=self.graph_name, + graph_language='gremlin-groovy' + ) + ) + + ep_analytics = GraphAnalyticsExecutionProfile( + request_timeout=60, + load_balancing_policy=lbp, + graph_options=GraphOptions( + graph_source=b'a', + graph_language=b'gremlin-groovy', + graph_name=self.graph_name + ) + ) + + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles={ + EXEC_PROFILE_GRAPH_DEFAULT: ep_graphson1, + EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT: ep_analytics, + "graphson1": ep_graphson1, + "graphson2": ep_graphson2, + "graphson3": ep_graphson3 + }) + + self.session = self.cluster.connect() + self.ks_name = self._testMethodName.lower() + self.cass_version, self.cql_version = get_server_versions() + + def setUp(self): + """basic setup only""" + self.session_setup() + + def setup_graph(self, schema): + """Config dependant setup""" + schema.drop_graph(self.session, self.graph_name) + schema.create_graph(self.session, self.graph_name) + schema.clear(self.session) + if schema is ClassicGraphSchema: + # enable dev and scan modes + self.session.execute_graph(MAKE_NON_STRICT) + self.session.execute_graph(ALLOW_SCANS) + + def teardown_graph(self, schema): + schema.drop_graph(self.session, self.graph_name) + + def tearDown(self): + self.cluster.shutdown() + + def execute_graph_queries(self, queries, params=None, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT, + verify_graphson=False, **kwargs): + results = [] + for query in queries: + log.debug(query) + rf = self.session.execute_graph_async(query, parameters=params, + execution_profile=execution_profile, **kwargs) + if verify_graphson: + self.assertEqual(rf.message.custom_payload['graph-results'], verify_graphson) + results.append(rf.result()) + + return results + + def execute_graph(self, query, graphson, params=None, execution_profile_options=None, traversal=False, **kwargs): + queries = query if isinstance(query, list) else [query] + ep = self.get_execution_profile(graphson) + if traversal: + ep = 'traversal_' + ep + if execution_profile_options: + ep = self.session.execution_profile_clone_update(ep, **execution_profile_options) + + results = self.execute_graph_queries(queries, params, ep, verify_graphson=graphson, **kwargs) + + return results[0] if len(results) == 1 else results + + def get_execution_profile(self, graphson, traversal=False): + ep = 'graphson1' + if graphson == GraphProtocol.GRAPHSON_2_0: + ep = 'graphson2' + elif graphson == GraphProtocol.GRAPHSON_3_0: + ep = 'graphson3' + + return ep if traversal is False else 'traversal_' + ep + + def resultset_to_list(self, rs): + results_list = [] + for result in rs: + try: + results_list.append(result.value) + except: + results_list.append(result) + + return results_list + + +class BasicSharedGraphUnitTestCase(BasicKeyspaceUnitTestCase): + """ + This is basic graph unit test case that provides various utility methods that can be leveraged for testcase setup and tear + down + """ + + @classmethod + def session_setup(cls): + cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.session = cls.cluster.connect() + cls.ks_name = cls.__name__.lower() + cls.cass_version, cls.cql_version = get_server_versions() + cls.graph_name = cls.__name__.lower() + + @classmethod + def setUpClass(cls): + if DSE_VERSION: + cls.session_setup() + cls.reset_graph() + profiles = cls.cluster.profile_manager.profiles + profiles[EXEC_PROFILE_GRAPH_DEFAULT].request_timeout = 60 + profiles[EXEC_PROFILE_GRAPH_DEFAULT].graph_options.graph_name = cls.graph_name + profiles[EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT].request_timeout = 60 + profiles[EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT].graph_options.graph_name = cls.graph_name + + @classmethod + def tearDownClass(cls): + if DSE_VERSION: + cls.cluster.shutdown() + + @classmethod + def clear_schema(self): + self.session.execute_graph('schema.clear()') + + @classmethod + def reset_graph(self): + reset_graph(self.session, self.graph_name) + + def wait_for_graph_inserted(self): + wait_for_graph_inserted(self.session, self.graph_name) + + +class GraphFixtures(object): + + @staticmethod + def line(length, single_script=True): + raise NotImplementedError() + + @staticmethod + def classic(): + raise NotImplementedError() + + @staticmethod + def multiple_fields(): + raise NotImplementedError() + + @staticmethod + def large(): + raise NotImplementedError() + + +class ClassicGraphFixtures(GraphFixtures): + + @staticmethod + def datatypes(): + data = { + "point1": ["Point()", Point(.5, .13), GraphSON1Deserializer.deserialize_point], + "point2": ["Point()", Point(-5, .0), GraphSON1Deserializer.deserialize_point], + + "linestring1": ["Linestring()", LineString(((1.0, 2.0), (3.0, 4.0), (-89.0, 90.0))), + GraphSON1Deserializer.deserialize_linestring], + "polygon1": ["Polygon()", Polygon([(10.0, 10.0), (80.0, 10.0), (80., 88.0), (10., 89.0), (10., 10.0)], + [[(20., 20.0), (20., 30.0), (30., 30.0), (30., 20.0), (20., 20.0)], + [(40., 20.0), (40., 30.0), (50., 30.0), (50., 20.0), (40., 20.0)]]), + GraphSON1Deserializer.deserialize_polygon], + "int1": ["Int()", 2, GraphSON1Deserializer.deserialize_int], + "smallint1": ["Smallint()", 1, GraphSON1Deserializer.deserialize_smallint], + "bigint1": ["Bigint()", MAX_LONG, GraphSON1Deserializer.deserialize_bigint], + "bigint2": ["Bigint()", MIN_LONG, GraphSON1Deserializer.deserialize_bigint], + "bigint3": ["Bigint()", ZERO_LONG, GraphSON1Deserializer.deserialize_bigint], + "varint1": ["Varint()", 2147483647, GraphSON1Deserializer.deserialize_varint], + "int1": ["Int()", 100, GraphSON1Deserializer.deserialize_int], + "float1": ["Float()", 0.3415681, GraphSON1Deserializer.deserialize_float], + "double1": ["Double()", 0.34156811237335205, GraphSON1Deserializer.deserialize_double], + "uuid1": ["Uuid()", UUID('12345678123456781234567812345678'), GraphSON1Deserializer.deserialize_uuid], + "decimal1": ["Decimal()", Decimal(10), GraphSON1Deserializer.deserialize_decimal], + "blob1": ["Blob()", bytearray(b"Hello World"), GraphSON1Deserializer.deserialize_blob], + + "timestamp1": ["Timestamp()", datetime.datetime.utcnow().replace(microsecond=0), + GraphSON1Deserializer.deserialize_timestamp], + "timestamp2": ["Timestamp()", datetime.datetime.max.replace(microsecond=0), + GraphSON1Deserializer.deserialize_timestamp], + # These are valid values but are pending for DSP-14093 to be fixed + #"timestamp3": ["Timestamp()", datetime.datetime(159, 1, 1, 23, 59, 59), + # GraphSON1TypeDeserializer.deserialize_timestamp], + #"timestamp4": ["Timestamp()", datetime.datetime.min, + # GraphSON1TypeDeserializer.deserialize_timestamp], + "inet1": ["Inet()", ipaddress.IPv4Address(u"127.0.0.1"), deserializer_plus_to_ipaddressv4], + "inet2": ["Inet()", ipaddress.IPv6Address(u"2001:db8:85a3:8d3:1319:8a2e:370:7348"), + deserializer_plus_to_ipaddressv6], + "duration1": ["Duration()", datetime.timedelta(1, 16, 0), + GraphSON1Deserializer.deserialize_duration], + "duration2": ["Duration()", datetime.timedelta(days=1, seconds=16, milliseconds=15), + GraphSON1Deserializer.deserialize_duration] + } + + if six.PY2: + data["blob2"] = ["Blob()", buffer(b"Hello World"), GraphSON1Deserializer.deserialize_blob] + else: + data["blob3"] = ["Blob()", bytes(b"Hello World Again"), GraphSON1Deserializer.deserialize_blob] + data["blob4"] = ["Blob()", memoryview(b"And Again Hello World"), GraphSON1Deserializer.deserialize_blob] + + if DSE_VERSION >= Version("5.1"): + data["time1"] = ["Time()", datetime.time(12, 6, 12, 444), GraphSON1Deserializer.deserialize_time] + data["time2"] = ["Time()", datetime.time(12, 6, 12), GraphSON1Deserializer.deserialize_time] + data["time3"] = ["Time()", datetime.time(12, 6), GraphSON1Deserializer.deserialize_time] + data["time4"] = ["Time()", datetime.time.min, GraphSON1Deserializer.deserialize_time] + data["time5"] = ["Time()", datetime.time.max, GraphSON1Deserializer.deserialize_time] + data["blob5"] = ["Blob()", bytearray(b"AKDLIElksadlaswqA" * 10000), GraphSON1Deserializer.deserialize_blob] + data["datetime1"] = ["Date()", datetime.date.today(), GraphSON1Deserializer.deserialize_date] + data["datetime2"] = ["Date()", datetime.date(159, 1, 3), GraphSON1Deserializer.deserialize_date] + data["datetime3"] = ["Date()", datetime.date.min, GraphSON1Deserializer.deserialize_date] + data["datetime4"] = ["Date()", datetime.date.max, GraphSON1Deserializer.deserialize_date] + data["time1"] = ["Time()", datetime.time(12, 6, 12, 444), GraphSON1Deserializer.deserialize_time] + data["time2"] = ["Time()", datetime.time(12, 6, 12), GraphSON1Deserializer.deserialize_time] + data["time3"] = ["Time()", datetime.time(12, 6), GraphSON1Deserializer.deserialize_time] + data["time4"] = ["Time()", datetime.time.min, GraphSON1Deserializer.deserialize_time] + data["time5"] = ["Time()", datetime.time.max, GraphSON1Deserializer.deserialize_time] + + return data + + @staticmethod + def line(length, single_script=False): + queries = [ALLOW_SCANS + ';', + """schema.propertyKey('index').Int().ifNotExists().create(); + schema.propertyKey('distance').Int().ifNotExists().create(); + schema.vertexLabel('lp').properties('index').ifNotExists().create(); + schema.edgeLabel('goesTo').properties('distance').connection('lp', 'lp').ifNotExists().create();"""] + + vertex_script = ["Vertex vertex0 = graph.addVertex(label, 'lp', 'index', 0);"] + for index in range(1, length): + if not single_script and len(vertex_script) > 25: + queries.append("\n".join(vertex_script)) + vertex_script = [ + "Vertex vertex{pindex} = g.V().hasLabel('lp').has('index', {pindex}).next()".format( + pindex=index-1)] + + vertex_script.append(''' + Vertex vertex{vindex} = graph.addVertex(label, 'lp', 'index', {vindex}); + vertex{pindex}.addEdge('goesTo', vertex{vindex}, 'distance', 5); '''.format( + vindex=index, pindex=index - 1)) + + queries.append("\n".join(vertex_script)) + return queries + + @staticmethod + def classic(): + queries = [ALLOW_SCANS, + '''schema.propertyKey('name').Text().ifNotExists().create(); + schema.propertyKey('age').Int().ifNotExists().create(); + schema.propertyKey('lang').Text().ifNotExists().create(); + schema.propertyKey('weight').Float().ifNotExists().create(); + schema.vertexLabel('person').properties('name', 'age').ifNotExists().create(); + schema.vertexLabel('software').properties('name', 'lang').ifNotExists().create(); + schema.edgeLabel('created').properties('weight').connection('person', 'software').ifNotExists().create(); + schema.edgeLabel('created').connection('software', 'software').add(); + schema.edgeLabel('knows').properties('weight').connection('person', 'person').ifNotExists().create();''', + + '''Vertex marko = graph.addVertex(label, 'person', 'name', 'marko', 'age', 29); + Vertex vadas = graph.addVertex(label, 'person', 'name', 'vadas', 'age', 27); + Vertex lop = graph.addVertex(label, 'software', 'name', 'lop', 'lang', 'java'); + Vertex josh = graph.addVertex(label, 'person', 'name', 'josh', 'age', 32); + Vertex ripple = graph.addVertex(label, 'software', 'name', 'ripple', 'lang', 'java'); + Vertex peter = graph.addVertex(label, 'person', 'name', 'peter', 'age', 35); + Vertex carl = graph.addVertex(label, 'person', 'name', 'carl', 'age', 35); + marko.addEdge('knows', vadas, 'weight', 0.5f); + marko.addEdge('knows', josh, 'weight', 1.0f); + marko.addEdge('created', lop, 'weight', 0.4f); + josh.addEdge('created', ripple, 'weight', 1.0f); + josh.addEdge('created', lop, 'weight', 0.4f); + peter.addEdge('created', lop, 'weight', 0.2f);'''] + + return "\n".join(queries) + + @staticmethod + def multiple_fields(): + query_params = {} + queries= [ALLOW_SCANS, + '''schema.propertyKey('shortvalue').Smallint().ifNotExists().create(); + schema.vertexLabel('shortvertex').properties('shortvalue').ifNotExists().create(); + short s1 = 5000; graph.addVertex(label, "shortvertex", "shortvalue", s1); + schema.propertyKey('intvalue').Int().ifNotExists().create(); + schema.vertexLabel('intvertex').properties('intvalue').ifNotExists().create(); + int i1 = 1000000000; graph.addVertex(label, "intvertex", "intvalue", i1); + schema.propertyKey('intvalue2').Int().ifNotExists().create(); + schema.vertexLabel('intvertex2').properties('intvalue2').ifNotExists().create(); + Integer i2 = 100000000; graph.addVertex(label, "intvertex2", "intvalue2", i2); + schema.propertyKey('longvalue').Bigint().ifNotExists().create(); + schema.vertexLabel('longvertex').properties('longvalue').ifNotExists().create(); + long l1 = 9223372036854775807; graph.addVertex(label, "longvertex", "longvalue", l1); + schema.propertyKey('longvalue2').Bigint().ifNotExists().create(); + schema.vertexLabel('longvertex2').properties('longvalue2').ifNotExists().create(); + Long l2 = 100000000000000000L; graph.addVertex(label, "longvertex2", "longvalue2", l2); + schema.propertyKey('floatvalue').Float().ifNotExists().create(); + schema.vertexLabel('floatvertex').properties('floatvalue').ifNotExists().create(); + float f1 = 3.5f; graph.addVertex(label, "floatvertex", "floatvalue", f1); + schema.propertyKey('doublevalue').Double().ifNotExists().create(); + schema.vertexLabel('doublevertex').properties('doublevalue').ifNotExists().create(); + double d1 = 3.5e40; graph.addVertex(label, "doublevertex", "doublevalue", d1); + schema.propertyKey('doublevalue2').Double().ifNotExists().create(); + schema.vertexLabel('doublevertex2').properties('doublevalue2').ifNotExists().create(); + Double d2 = 3.5e40d; graph.addVertex(label, "doublevertex2", "doublevalue2", d2);'''] + + if DSE_VERSION >= Version('5.1'): + queries.append('''schema.propertyKey('datevalue1').Date().ifNotExists().create(); + schema.vertexLabel('datevertex1').properties('datevalue1').ifNotExists().create(); + schema.propertyKey('negdatevalue2').Date().ifNotExists().create(); + schema.vertexLabel('negdatevertex2').properties('negdatevalue2').ifNotExists().create();''') + + for i in range(1, 4): + queries.append('''schema.propertyKey('timevalue{0}').Time().ifNotExists().create(); + schema.vertexLabel('timevertex{0}').properties('timevalue{0}').ifNotExists().create();'''.format( + i)) + + queries.append('graph.addVertex(label, "datevertex1", "datevalue1", date1);') + query_params['date1'] = '1999-07-29' + + queries.append('graph.addVertex(label, "negdatevertex2", "negdatevalue2", date2);') + query_params['date2'] = '-1999-07-28' + + queries.append('graph.addVertex(label, "timevertex1", "timevalue1", time1);') + query_params['time1'] = '14:02' + queries.append('graph.addVertex(label, "timevertex2", "timevalue2", time2);') + query_params['time2'] = '14:02:20' + queries.append('graph.addVertex(label, "timevertex3", "timevalue3", time3);') + query_params['time3'] = '14:02:20.222' + + return queries, query_params + + @staticmethod + def large(): + query_parts = [''' + int size = 2000; + List ids = new ArrayList(); + schema.propertyKey('ts').Int().single().ifNotExists().create(); + schema.propertyKey('sin').Int().single().ifNotExists().create(); + schema.propertyKey('cos').Int().single().ifNotExists().create(); + schema.propertyKey('ii').Int().single().ifNotExists().create(); + schema.vertexLabel('lcg').properties('ts', 'sin', 'cos', 'ii').ifNotExists().create(); + schema.edgeLabel('linked').connection('lcg', 'lcg').ifNotExists().create(); + Vertex v = graph.addVertex(label, 'lcg'); + v.property("ts", 100001); + v.property("sin", 0); + v.property("cos", 1); + v.property("ii", 0); + ids.add(v.id()); + Random rand = new Random(); + for (int ii = 1; ii < size; ii++) { + v = graph.addVertex(label, 'lcg'); + v.property("ii", ii); + v.property("ts", 100001 + ii); + v.property("sin", Math.sin(ii/5.0)); + v.property("cos", Math.cos(ii/5.0)); + Vertex u = g.V(ids.get(rand.nextInt(ids.size()))).next(); + v.addEdge("linked", u); + ids.add(v.id()); + } + g.V().count();'''] + + return "\n".join(query_parts) + + @staticmethod + def address_book(): + p1 = "Point()" + p2 = "Point()" + if DSE_VERSION >= Version('5.1'): + p1 = "Point().withBounds(-100, -100, 100, 100)" + p2 = "Point().withGeoBounds()" + + queries = [ + ALLOW_SCANS, + "schema.propertyKey('name').Text().ifNotExists().create()", + "schema.propertyKey('pointPropWithBoundsWithSearchIndex').{}.ifNotExists().create()".format(p1), + "schema.propertyKey('pointPropWithBounds').{}.ifNotExists().create()".format(p1), + "schema.propertyKey('pointPropWithGeoBoundsWithSearchIndex').{}.ifNotExists().create()".format(p2), + "schema.propertyKey('pointPropWithGeoBounds').{}.ifNotExists().create()".format(p2), + "schema.propertyKey('city').Text().ifNotExists().create()", + "schema.propertyKey('state').Text().ifNotExists().create()", + "schema.propertyKey('description').Text().ifNotExists().create()", + "schema.vertexLabel('person').properties('name', 'city', 'state', 'description', 'pointPropWithBoundsWithSearchIndex', 'pointPropWithBounds', 'pointPropWithGeoBoundsWithSearchIndex', 'pointPropWithGeoBounds').ifNotExists().create()", + "schema.vertexLabel('person').index('searchPointWithBounds').secondary().by('pointPropWithBounds').ifNotExists().add()", + "schema.vertexLabel('person').index('searchPointWithGeoBounds').secondary().by('pointPropWithGeoBounds').ifNotExists().add()", + + "g.addV('person').property('name', 'Paul Thomas Joe').property('city', 'Rochester').property('state', 'MN').property('pointPropWithBoundsWithSearchIndex', Geo.point(-92.46295, 44.0234)).property('pointPropWithBounds', Geo.point(-92.46295, 44.0234)).property('pointPropWithGeoBoundsWithSearchIndex', Geo.point(-92.46295, 44.0234)).property('pointPropWithGeoBounds', Geo.point(-92.46295, 44.0234)).property('description', 'Lives by the hospital').next()", + "g.addV('person').property('name', 'George Bill Steve').property('city', 'Minneapolis').property('state', 'MN').property('pointPropWithBoundsWithSearchIndex', Geo.point(-93.266667, 44.093333)).property('pointPropWithBounds', Geo.point(-93.266667, 44.093333)).property('pointPropWithGeoBoundsWithSearchIndex', Geo.point(-93.266667, 44.093333)).property('pointPropWithGeoBounds', Geo.point(-93.266667, 44.093333)).property('description', 'A cold dude').next()", + "g.addV('person').property('name', 'James Paul Smith').property('city', 'Chicago').property('state', 'IL').property('pointPropWithBoundsWithSearchIndex', Geo.point(-87.684722, 41.836944)).property('description', 'Likes to hang out').next()", + "g.addV('person').property('name', 'Jill Alice').property('city', 'Atlanta').property('state', 'GA').property('pointPropWithBoundsWithSearchIndex', Geo.point(-84.39, 33.755)).property('description', 'Enjoys a nice cold coca cola').next()" + ] + + if not Version('5.0') <= DSE_VERSION < Version('5.1'): + queries.append("schema.vertexLabel('person').index('search').search().by('pointPropWithBoundsWithSearchIndex').withError(0.00001, 0.0).by('pointPropWithGeoBoundsWithSearchIndex').withError(0.00001, 0.0).ifNotExists().add()") + + return "\n".join(queries) + + +class CoreGraphFixtures(GraphFixtures): + + @staticmethod + def datatypes(): + data = ClassicGraphFixtures.datatypes() + del data['duration1'] + del data['duration2'] + + # Core Graphs only types + data["map1"] = ["mapOf(Text, Text)", {'test': 'test'}, None] + data["map2"] = ["mapOf(Text, Point)", {'test': Point(.5, .13)}, None] + data["map3"] = ["frozen(mapOf(Int, Varchar))", {42: 'test'}, None] + + data["list1"] = ["listOf(Text)", ['test', 'hello', 'world'], None] + data["list2"] = ["listOf(Int)", [42, 632, 32], None] + data["list3"] = ["listOf(Point)", [Point(.5, .13), Point(42.5, .13)], None] + data["list4"] = ["frozen(listOf(Int))", [42, 55, 33], None] + + data["set1"] = ["setOf(Text)", {'test', 'hello', 'world'}, None] + data["set2"] = ["setOf(Int)", {42, 632, 32}, None] + data["set3"] = ["setOf(Point)", {Point(.5, .13), Point(42.5, .13)}, None] + data["set4"] = ["frozen(setOf(Int))", {42, 55, 33}, None] + + data["tuple1"] = ["tupleOf(Int, Text)", (42, "world"), None] + data["tuple2"] = ["tupleOf(Int, tupleOf(Text, tupleOf(Text, Point)))", (42, ("world", ('this', Point(.5, .13)))), None] + data["tuple3"] = ["tupleOf(Int, tupleOf(Text, frozen(mapOf(Text, Text))))", (42, ("world", {'test': 'test'})), None] + data["tuple4"] = ["tupleOf(Int, tupleOf(Text, frozen(listOf(Int))))", (42, ("world", [65, 89])), None] + data["tuple5"] = ["tupleOf(Int, tupleOf(Text, frozen(setOf(Int))))", (42, ("world", {65, 55})), None] + data["tuple6"] = ["tupleOf(Int, tupleOf(Text, tupleOf(Text, LineString)))", + (42, ("world", ('this', LineString(((1.0, 2.0), (3.0, 4.0), (-89.0, 90.0)))))), None] + + data["tuple7"] = ["tupleOf(Int, tupleOf(Text, tupleOf(Text, Polygon)))", + (42, ("world", ('this', Polygon([(10.0, 10.0), (80.0, 10.0), (80., 88.0), (10., 89.0), (10., 10.0)], + [[(20., 20.0), (20., 30.0), (30., 30.0), (30., 20.0), (20., 20.0)], + [(40., 20.0), (40., 30.0), (50., 30.0), (50., 20.0), (40., 20.0)]])))), None] + data["dse_duration1"] = ["Duration()", Duration(42, 12, 10303312), None] + data["dse_duration2"] = ["Duration()", Duration(50, 32, 11), None] + + return data + + @staticmethod + def line(length, single_script=False): + queries = [""" + schema.vertexLabel('lp').ifNotExists().partitionBy('index', Int).create(); + schema.edgeLabel('goesTo').ifNotExists().from('lp').to('lp').property('distance', Int).create(); + """] + + vertex_script = ["g.addV('lp').property('index', 0).next();"] + for index in range(1, length): + if not single_script and len(vertex_script) > 25: + queries.append("\n".join(vertex_script)) + vertex_script = [] + + vertex_script.append(''' + g.addV('lp').property('index', {index}).next(); + g.V().hasLabel('lp').has('index', {pindex}).as('pp').V().hasLabel('lp').has('index', {index}).as('p'). + addE('goesTo').from('pp').to('p').property('distance', 5).next(); + '''.format( + index=index, pindex=index - 1)) + + queries.append("\n".join(vertex_script)) + return queries + + @staticmethod + def classic(): + queries = [ + ''' + schema.vertexLabel('person').ifNotExists().partitionBy('name', Text).property('age', Int).create(); + schema.vertexLabel('software')ifNotExists().partitionBy('name', Text).property('lang', Text).create(); + schema.edgeLabel('created').ifNotExists().from('person').to('software').property('weight', Double).create(); + schema.edgeLabel('knows').ifNotExists().from('person').to('person').property('weight', Double).create(); + ''', + + ''' + Vertex marko = g.addV('person').property('name', 'marko').property('age', 29).next(); + Vertex vadas = g.addV('person').property('name', 'vadas').property('age', 27).next(); + Vertex lop = g.addV('software').property('name', 'lop').property('lang', 'java').next(); + Vertex josh = g.addV('person').property('name', 'josh').property('age', 32).next(); + Vertex peter = g.addV('person').property('name', 'peter').property('age', 35).next(); + Vertex carl = g.addV('person').property('name', 'carl').property('age', 35).next(); + Vertex ripple = g.addV('software').property('name', 'ripple').property('lang', 'java').next(); + + // TODO, switch to VertexReference and use v.id() + g.V().hasLabel('person').has('name', 'vadas').as('v').V().hasLabel('person').has('name', 'marko').as('m').addE('knows').from('m').to('v').property('weight', 0.5d).next(); + g.V().hasLabel('person').has('name', 'josh').as('j').V().hasLabel('person').has('name', 'marko').as('m').addE('knows').from('m').to('j').property('weight', 1.0d).next(); + g.V().hasLabel('software').has('name', 'lop').as('l').V().hasLabel('person').has('name', 'marko').as('m').addE('created').from('m').to('l').property('weight', 0.4d).next(); + g.V().hasLabel('software').has('name', 'ripple').as('r').V().hasLabel('person').has('name', 'josh').as('j').addE('created').from('j').to('r').property('weight', 1.0d).next(); + g.V().hasLabel('software').has('name', 'lop').as('l').V().hasLabel('person').has('name', 'josh').as('j').addE('created').from('j').to('l').property('weight', 0.4d).next(); + g.V().hasLabel('software').has('name', 'lop').as('l').V().hasLabel('person').has('name', 'peter').as('p').addE('created').from('p').to('l').property('weight', 0.2d).next(); + + '''] + + return queries + + @staticmethod + def multiple_fields(): + ## no generic test currently needs this + raise NotImplementedError() + + @staticmethod + def large(): + query_parts = [ + ''' + schema.vertexLabel('lcg').ifNotExists().partitionBy('ts', Int).property('sin', Double). + property('cos', Double).property('ii', Int).create(); + schema.edgeLabel('linked').ifNotExists().from('lcg').to('lcg').create(); + ''', + + ''' + int size = 2000; + List ids = new ArrayList(); + v = g.addV('lcg').property('ts', 100001).property('sin', 0d).property('cos', 1d).property('ii', 0).next(); + ids.add(v.id()); + Random rand = new Random(); + for (int ii = 1; ii < size; ii++) { + v = g.addV('lcg').property('ts', 100001 + ii).property('sin', Math.sin(ii/5.0)).property('cos', Math.cos(ii/5.0)).property('ii', ii).next(); + + uid = ids.get(rand.nextInt(ids.size())) + g.V(v.id()).as('v').V(uid).as('u').addE('linked').from('v').to('u').next(); + ids.add(v.id()); + } + g.V().count();''' + ] + + return query_parts + + @staticmethod + def address_book(): + queries = [ + "schema.vertexLabel('person').ifNotExists().partitionBy('name', Text)." + "property('pointPropWithBoundsWithSearchIndex', Point)." + "property('pointPropWithBounds', Point)." + "property('pointPropWithGeoBoundsWithSearchIndex', Point)." + "property('pointPropWithGeoBounds', Point)." + "property('city', Text)." + "property('state', Text)." + "property('description', Text).create()", + "schema.vertexLabel('person').searchIndex().by('name').by('pointPropWithBounds').by('pointPropWithGeoBounds').by('description').asText().create()", + "g.addV('person').property('name', 'Paul Thomas Joe').property('city', 'Rochester').property('state', 'MN').property('pointPropWithBoundsWithSearchIndex', Geo.point(-92.46295, 44.0234)).property('pointPropWithBounds', Geo.point(-92.46295, 44.0234)).property('pointPropWithGeoBoundsWithSearchIndex', Geo.point(-92.46295, 44.0234)).property('pointPropWithGeoBounds', Geo.point(-92.46295, 44.0234)).property('description', 'Lives by the hospital').next()", + "g.addV('person').property('name', 'George Bill Steve').property('city', 'Minneapolis').property('state', 'MN').property('pointPropWithBoundsWithSearchIndex', Geo.point(-93.266667, 44.093333)).property('pointPropWithBounds', Geo.point(-93.266667, 44.093333)).property('pointPropWithGeoBoundsWithSearchIndex', Geo.point(-93.266667, 44.093333)).property('pointPropWithGeoBounds', Geo.point(-93.266667, 44.093333)).property('description', 'A cold dude').next()", + "g.addV('person').property('name', 'James Paul Smith').property('city', 'Chicago').property('state', 'IL').property('pointPropWithBoundsWithSearchIndex', Geo.point(-87.684722, 41.836944)).property('description', 'Likes to hang out').next()", + "g.addV('person').property('name', 'Jill Alice').property('city', 'Atlanta').property('state', 'GA').property('pointPropWithBoundsWithSearchIndex', Geo.point(-84.39, 33.755)).property('description', 'Enjoys a nice cold coca cola').next()" + ] + + if not Version('5.0') <= DSE_VERSION < Version('5.1'): + queries.append("schema.vertexLabel('person').searchIndex().by('pointPropWithBoundsWithSearchIndex').by('pointPropWithGeoBounds')" + ".by('pointPropWithGeoBoundsWithSearchIndex').create()") + + return queries + + +def validate_classic_vertex(test, vertex): + vertex_props = vertex.properties.keys() + test.assertEqual(len(vertex_props), 2) + test.assertIn('name', vertex_props) + test.assertTrue('lang' in vertex_props or 'age' in vertex_props) + + +def validate_classic_vertex_return_type(test, vertex): + validate_generic_vertex_result_type(vertex) + vertex_props = vertex.properties + test.assertIn('name', vertex_props) + test.assertTrue('lang' in vertex_props or 'age' in vertex_props) + + +def validate_generic_vertex_result_type(test, vertex): + test.assertIsInstance(vertex, Vertex) + for attr in ('id', 'type', 'label', 'properties'): + test.assertIsNotNone(getattr(vertex, attr)) + + +def validate_classic_edge_properties(test, edge_properties): + test.assertEqual(len(edge_properties.keys()), 1) + test.assertIn('weight', edge_properties) + test.assertIsInstance(edge_properties, dict) + + +def validate_classic_edge(test, edge): + validate_generic_edge_result_type(test, edge) + validate_classic_edge_properties(test, edge.properties) + + +def validate_line_edge(test, edge): + validate_generic_edge_result_type(test, edge) + edge_props = edge.properties + test.assertEqual(len(edge_props.keys()), 1) + test.assertIn('distance', edge_props) + + +def validate_generic_edge_result_type(test, edge): + test.assertIsInstance(edge, Edge) + for attr in ('properties', 'outV', 'outVLabel', 'inV', 'inVLabel', 'label', 'type', 'id'): + test.assertIsNotNone(getattr(edge, attr)) + + +def validate_path_result_type(test, path): + test.assertIsInstance(path, Path) + test.assertIsNotNone(path.labels) + for obj in path.objects: + if isinstance(obj, Edge): + validate_classic_edge(test, obj) + elif isinstance(obj, Vertex): + validate_classic_vertex(test, obj) + else: + test.fail("Invalid object found in path " + str(object.type)) + + +class GraphTestConfiguration(object): + """Possible Configurations: + ClassicGraphSchema: + graphson1 + graphson2 + graphson3 + + CoreGraphSchema + graphson3 + """ + + @classmethod + def schemas(cls): + schemas = [ClassicGraphSchema] + if DSE_VERSION >= Version("6.8"): + schemas.append(CoreGraphSchema) + return schemas + + @classmethod + def graphson_versions(cls): + graphson_versions = [GraphProtocol.GRAPHSON_1_0] + if DSE_VERSION >= Version("6.0"): + graphson_versions.append(GraphProtocol.GRAPHSON_2_0) + if DSE_VERSION >= Version("6.8"): + graphson_versions.append(GraphProtocol.GRAPHSON_3_0) + return graphson_versions + + @classmethod + def schema_configurations(cls, schema=None): + schemas = cls.schemas() if schema is None else [schema] + configurations = [] + for s in schemas: + configurations.append(s) + + return configurations + + @classmethod + def configurations(cls, schema=None, graphson=None): + schemas = cls.schemas() if schema is None else [schema] + graphson_versions = cls.graphson_versions() if graphson is None else [graphson] + + configurations = [] + for s in schemas: + for g in graphson_versions: + if s is CoreGraphSchema and g != GraphProtocol.GRAPHSON_3_0: + continue + configurations.append((s, g)) + + return configurations + + @staticmethod + def _make_graph_schema_test_method(func, schema): + def test_input(self): + self.setup_graph(schema) + try: + func(self, schema) + except: + raise + finally: + self.teardown_graph(schema) + + schema_name = 'classic' if schema is ClassicGraphSchema else 'core' + test_input.__name__ = '{func}_{schema}'.format( + func=func.__name__.lstrip('_'), schema=schema_name) + return test_input + + @staticmethod + def _make_graph_test_method(func, schema, graphson): + def test_input(self): + self.setup_graph(schema) + try: + func(self, schema, graphson) + except: + raise + finally: + self.teardown_graph(schema) + + graphson_name = 'graphson1' + if graphson == GraphProtocol.GRAPHSON_2_0: + graphson_name = 'graphson2' + elif graphson == GraphProtocol.GRAPHSON_3_0: + graphson_name = 'graphson3' + + schema_name = 'classic' if schema is ClassicGraphSchema else 'core' + + # avoid keyspace name too long issue + if DSE_VERSION < Version('6.7'): + schema_name = schema_name[0] + graphson_name = 'g' + graphson_name[-1] + + test_input.__name__ = '{func}_{schema}_{graphson}'.format( + func=func.__name__.lstrip('_'), schema=schema_name, graphson=graphson_name) + return test_input + + @classmethod + def generate_tests(cls, schema=None, graphson=None, traversal=False): + """Generate tests for a graph configuration""" + def decorator(klass): + if DSE_VERSION: + predicate = inspect.ismethod if six.PY2 else inspect.isfunction + for name, func in inspect.getmembers(klass, predicate=predicate): + if not name.startswith('_test'): + continue + for _schema, _graphson in cls.configurations(schema, graphson): + if traversal and _graphson == GraphProtocol.GRAPHSON_1_0: + continue + test_input = cls._make_graph_test_method(func, _schema, _graphson) + log.debug("Generated test '{}.{}'".format(klass.__name__, test_input.__name__)) + setattr(klass, test_input.__name__, test_input) + return klass + + return decorator + + @classmethod + def generate_schema_tests(cls, schema=None): + """Generate schema tests for a graph configuration""" + def decorator(klass): + if DSE_VERSION: + predicate = inspect.ismethod if six.PY2 else inspect.isfunction + for name, func in inspect.getmembers(klass, predicate=predicate): + if not name.startswith('_test'): + continue + for _schema in cls.schema_configurations(schema): + test_input = cls._make_graph_schema_test_method(func, _schema) + log.debug("Generated test '{}.{}'".format(klass.__name__, test_input.__name__)) + setattr(klass, test_input.__name__, test_input) + return klass + + return decorator + + +class VertexLabel(object): + """ + Helper that represents a new VertexLabel: + + VertexLabel(['Int()', 'Float()']) # a vertex with 2 properties named property1 and property2 + VertexLabel([('int1', 'Int()'), 'Float()']) # a vertex with 2 properties named int1 and property1 + """ + + id = 0 + label = None + properties = None + + def __init__(self, properties): + VertexLabel.id += 1 + self.id = VertexLabel.id + self.label = "vertex{}".format(self.id) + self.properties = {'pkid': self.id} + property_count = 0 + for p in properties: + if isinstance(p, tuple): + name, typ = p + else: + property_count += 1 + name = "property-v{}-{}".format(self.id, property_count) + typ = p + self.properties[name] = typ + + @property + def non_pk_properties(self): + return {p: v for p, v in six.iteritems(self.properties) if p != 'pkid'} + + +class GraphSchema(object): + + has_geo_bounds = DSE_VERSION and DSE_VERSION >= Version('5.1') + fixtures = GraphFixtures + + @classmethod + def sanitize_type(cls, typ): + if typ.lower().startswith("point"): + return cls.sanitize_point_type() + elif typ.lower().startswith("line"): + return cls.sanitize_line_type() + elif typ.lower().startswith("poly"): + return cls.sanitize_polygon_type() + else: + return typ + + @classmethod + def sanitize_point_type(cls): + return "Point().withGeoBounds()" if cls.has_geo_bounds else "Point()" + + @classmethod + def sanitize_line_type(cls): + return "Linestring().withGeoBounds()" if cls.has_geo_bounds else "Linestring()" + + @classmethod + def sanitize_polygon_type(cls): + return "Polygon().withGeoBounds()" if cls.has_geo_bounds else "Polygon()" + + @staticmethod + def drop_graph(session, graph_name): + ks = list(session.execute( + "SELECT * FROM system_schema.keyspaces WHERE keyspace_name = '{}';".format(graph_name))) + if not ks: + return + + try: + session.execute_graph('system.graph(name).drop()', {'name': graph_name}, + execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) + except: + pass + + @staticmethod + def create_graph(session, graph_name): + raise NotImplementedError() + + @staticmethod + def clear(session): + pass + + @staticmethod + def create_vertex_label(session, vertex_label, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT): + raise NotImplementedError() + + @staticmethod + def add_vertex(session, vertex_label, name, value, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT): + raise NotImplementedError() + + @classmethod + def ensure_properties(cls, session, obj, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT): + if not isinstance(obj, (Vertex, Edge)): + return + + # This pre-processing is due to a change in TinkerPop + # properties are not returned automatically anymore + # with some queries. + if not obj.properties: + if isinstance(obj, Edge): + obj.properties = {} + for p in cls.get_edge_properties(session, obj, execution_profile=execution_profile): + obj.properties.update(p) + elif isinstance(obj, Vertex): + obj.properties = { + p.label: p + for p in cls.get_vertex_properties(session, obj, execution_profile=execution_profile) + } + + @staticmethod + def get_vertex_properties(session, vertex, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT): + return session.execute_graph("g.V(vertex_id).properties().toList()", {'vertex_id': vertex.id}, + execution_profile=execution_profile) + + @staticmethod + def get_edge_properties(session, edge, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT): + v = session.execute_graph("g.E(edge_id).properties().toList()", {'edge_id': edge.id}, + execution_profile=execution_profile) + return v + + +class ClassicGraphSchema(GraphSchema): + + fixtures = ClassicGraphFixtures + + @staticmethod + def create_graph(session, graph_name): + session.execute_graph(CREATE_CLASSIC_GRAPH, {'name': graph_name}, + execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) + wait_for_graph_inserted(session, graph_name) + + @staticmethod + def clear(session): + session.execute_graph('schema.clear()') + + @classmethod + def create_vertex_label(cls, session, vertex_label, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT): + statements = ["schema.propertyKey('pkid').Int().ifNotExists().create();"] + for k, v in six.iteritems(vertex_label.non_pk_properties): + typ = cls.sanitize_type(v) + statements.append("schema.propertyKey('{name}').{type}.create();".format( + name=k, type=typ + )) + + statements.append("schema.vertexLabel('{label}').partitionKey('pkid').properties(".format( + label=vertex_label.label)) + property_names = [name for name in six.iterkeys(vertex_label.non_pk_properties)] + statements.append(", ".join(["'{}'".format(p) for p in property_names])) + statements.append(").create();") + + to_run = "\n".join(statements) + session.execute_graph(to_run, execution_profile=execution_profile) + + @staticmethod + def add_vertex(session, vertex_label, name, value, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT): + statement = "g.addV('{label}').property('pkid', {pkid}).property('{property_name}', val);".format( + pkid=vertex_label.id, label=vertex_label.label, property_name=name) + parameters = {'val': value} + return session.execute_graph(statement, parameters, execution_profile=execution_profile) + + +class CoreGraphSchema(GraphSchema): + + fixtures = CoreGraphFixtures + + @classmethod + def sanitize_type(cls, typ): + typ = super(CoreGraphSchema, cls).sanitize_type(typ) + return typ.replace('()', '') + + @classmethod + def sanitize_point_type(cls): + return "Point" + + @classmethod + def sanitize_line_type(cls): + return "LineString" + + @classmethod + def sanitize_polygon_type(cls): + return "Polygon" + + @staticmethod + def create_graph(session, graph_name): + session.execute_graph('system.graph(name).create()', {'name': graph_name}, + execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) + wait_for_graph_inserted(session, graph_name) + + @classmethod + def create_vertex_label(cls, session, vertex_label, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT): + statements = ["schema.vertexLabel('{label}').partitionBy('pkid', Int)".format( + label=vertex_label.label)] + + for name, typ in six.iteritems(vertex_label.non_pk_properties): + typ = cls.sanitize_type(typ) + statements.append(".property('{name}', {type})".format(name=name, type=typ)) + statements.append(".create();") + + to_run = "\n".join(statements) + session.execute_graph(to_run, execution_profile=execution_profile) + + @staticmethod + def add_vertex(session, vertex_label, name, value, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT): + statement = "g.addV('{label}').property('pkid', {pkid}).property('{property_name}', val);".format( + pkid=vertex_label.id, label=vertex_label.label, property_name=name) + parameters = {'val': value} + return session.execute_graph(statement, parameters, execution_profile=execution_profile) diff --git a/tests/integration/advanced/graph/fluent/test_graph.py b/tests/integration/advanced/graph/fluent/test_graph.py index 623e275352..174c15277b 100644 --- a/tests/integration/advanced/graph/fluent/test_graph.py +++ b/tests/integration/advanced/graph/fluent/test_graph.py @@ -12,49 +12,65 @@ # See the License for the specific language governing permissions and # limitations under the License. - import sys +import datetime +import six +import time +from collections import namedtuple +from packaging.version import Version + +from cassandra import cluster +from cassandra.cluster import ContinuousPagingOptions from cassandra.datastax.graph.fluent import DseGraph +from cassandra.graph import Vertex, Edge, VertexProperty, GraphProtocol +from cassandra.util import Point, Polygon, LineString + from gremlin_python.process.graph_traversal import GraphTraversal, GraphTraversalSource from gremlin_python.process.traversal import P -from tests.integration import DSE_VERSION, requiredse, greaterthanorequaldse60 -from tests.integration.advanced import BasicGraphUnitTestCase, use_single_node_with_graph_and_solr, \ - use_single_node_with_graph, generate_classic, generate_line_graph, generate_multi_field_graph, \ - generate_large_complex_graph, generate_type_graph_schema, validate_classic_vertex, validate_classic_edge, \ - validate_generic_vertex_result_type, validate_classic_edge_properties, validate_line_edge, \ - validate_generic_edge_result_type, validate_path_result_type, TYPE_MAP +from gremlin_python.structure.graph import Edge as TravEdge +from gremlin_python.structure.graph import Vertex as TravVertex, VertexProperty as TravVertexProperty +from tests.integration import DSE_VERSION, greaterthanorequaldse68 +from tests.integration.advanced.graph import GraphUnitTestCase, \ + ClassicGraphSchema, CoreGraphSchema, \ + validate_classic_vertex, validate_classic_edge, validate_generic_vertex_result_type,\ + validate_classic_edge_properties, validate_line_edge, \ + validate_generic_edge_result_type, validate_path_result_type, VertexLabel, \ + GraphTestConfiguration +from tests.integration import greaterthanorequaldse60, requiredse -from gremlin_python.structure.graph import Edge as TravEdge -from gremlin_python.structure.graph import Vertex as TravVertex -from cassandra.graph import Vertex, Edge -from cassandra.util import Point, Polygon, LineString -import datetime -from six import string_types -import six -if six.PY3: - import ipaddress +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa -def setup_module(): - if DSE_VERSION: - dse_options = {'graph': {'realtime_evaluation_timeout_in_seconds': 60}} - use_single_node_with_graph(dse_options=dse_options) +import ipaddress def check_equality_base(testcase, original, read_value): if isinstance(original, float): testcase.assertAlmostEqual(original, read_value, delta=.01) - elif six.PY3 and isinstance(original, ipaddress.IPv4Address): + elif isinstance(original, ipaddress.IPv4Address): testcase.assertAlmostEqual(original, ipaddress.IPv4Address(read_value)) - elif six.PY3 and isinstance(original, ipaddress.IPv6Address): + elif isinstance(original, ipaddress.IPv6Address): testcase.assertAlmostEqual(original, ipaddress.IPv6Address(read_value)) else: testcase.assertEqual(original, read_value) -class AbstractTraversalTest(): +class _AbstractTraversalTest(GraphUnitTestCase): + + def setUp(self): + super(_AbstractTraversalTest, self).setUp() + self.ep_graphson2 = DseGraph().create_execution_profile(self.graph_name, + graph_protocol=GraphProtocol.GRAPHSON_2_0) + self.ep_graphson3 = DseGraph().create_execution_profile(self.graph_name, + graph_protocol=GraphProtocol.GRAPHSON_3_0) - def test_basic_query(self): + self.cluster.add_execution_profile('traversal_graphson2', self.ep_graphson2) + self.cluster.add_execution_profile('traversal_graphson3', self.ep_graphson3) + + def _test_basic_query(self, schema, graphson): """ Test to validate that basic graph queries works @@ -70,16 +86,15 @@ def test_basic_query(self): @test_category dse graph """ - - g = self.fetch_traversal_source() - generate_classic(self.session) - traversal =g.V().has('name', 'marko').out('knows').values('name') - results_list = self.execute_traversal(traversal) + g = self.fetch_traversal_source(graphson) + self.execute_graph(schema.fixtures.classic(), graphson) + traversal = g.V().has('name', 'marko').out('knows').values('name') + results_list = self.execute_traversal(traversal, graphson) self.assertEqual(len(results_list), 2) self.assertIn('vadas', results_list) self.assertIn('josh', results_list) - def test_classic_graph(self): + def _test_classic_graph(self, schema, graphson): """ Test to validate that basic graph generation, and vertex and edges are surfaced correctly @@ -95,38 +110,40 @@ def test_classic_graph(self): @test_category dse graph """ - generate_classic(self.session) - g = self.fetch_traversal_source() - traversal = g.V() - vert_list = self.execute_traversal(traversal) + self.execute_graph(schema.fixtures.classic(), graphson) + ep = self.get_execution_profile(graphson) + g = self.fetch_traversal_source(graphson) + traversal = g.V() + vert_list = self.execute_traversal(traversal, graphson) for vertex in vert_list: + schema.ensure_properties(self.session, vertex, execution_profile=ep) self._validate_classic_vertex(g, vertex) - traversal = g.E() - edge_list = self.execute_traversal(traversal) + traversal = g.E() + edge_list = self.execute_traversal(traversal, graphson) for edge in edge_list: + schema.ensure_properties(self.session, edge, execution_profile=ep) self._validate_classic_edge(g, edge) - def test_graph_classic_path(self): + def _test_graph_classic_path(self, schema, graphson): """ Test to validate that the path version of the result type is generated correctly. It also tests basic path results as that is not covered elsewhere @since 1.0.0 @jira_ticket PYTHON-641 - @expected_result path object should be unpacked correctly including all nested edges and verticies + @expected_result path object should be unpacked correctly including all nested edges and vertices @test_category dse graph """ - generate_classic(self.session) - g = self.fetch_traversal_source() + self.execute_graph(schema.fixtures.classic(), graphson) + g = self.fetch_traversal_source(graphson) traversal = g.V().hasLabel('person').has('name', 'marko').as_('a').outE('knows').inV().as_('c', 'd').outE('created').as_('e', 'f', 'g').inV().path() - path_list = self.execute_traversal(traversal) + path_list = self.execute_traversal(traversal, graphson) self.assertEqual(len(path_list), 2) for path in path_list: self._validate_path_result_type(g, path) - - def test_range_query(self): + def _test_range_query(self, schema, graphson): """ Test to validate range queries are handled correctly. @@ -141,18 +158,18 @@ def test_range_query(self): @test_category dse graph """ + self.execute_graph(schema.fixtures.line(150), graphson) + ep = self.get_execution_profile(graphson) + g = self.fetch_traversal_source(graphson) - query_to_run = generate_line_graph(150) - self.session.execute_graph(query_to_run) - g = self.fetch_traversal_source() - - traversal = g.E().range(0,10) - edges = self.execute_traversal(traversal) + traversal = g.E().range(0, 10) + edges = self.execute_traversal(traversal, graphson) self.assertEqual(len(edges), 10) for edge in edges: + schema.ensure_properties(self.session, edge, execution_profile=ep) self._validate_line_edge(g, edge) - def test_result_types(self): + def _test_result_types(self, schema, graphson): """ Test to validate that the edge and vertex version of results are constructed correctly. @@ -161,14 +178,14 @@ def test_result_types(self): @expected_result edge/vertex result types should be unpacked correctly. @test_category dse graph """ - generate_multi_field_graph(self.session) # TODO: we could just make a single vertex with properties of all types, or even a simple query that just uses a sequence of groovy expressions - g = self.fetch_traversal_source() + self.execute_graph(schema.fixtures.line(150), graphson) + g = self.fetch_traversal_source(graphson) traversal = g.V() - vertices = self.execute_traversal(traversal) + vertices = self.execute_traversal(traversal, graphson) for vertex in vertices: self._validate_type(g, vertex) - def test_large_result_set(self): + def _test_large_result_set(self, schema, graphson): """ Test to validate that large result sets return correctly. @@ -180,14 +197,14 @@ def test_large_result_set(self): @test_category dse graph """ - generate_large_complex_graph(self.session, 5000) - g = self.fetch_traversal_source() + self.execute_graph(schema.fixtures.large(), graphson) + g = self.fetch_traversal_source(graphson) traversal = g.V() - vertices = self.execute_traversal(traversal) + vertices = self.execute_traversal(traversal, graphson) for vertex in vertices: - self._validate_generic_vertex_result_type(g,vertex) + self._validate_generic_vertex_result_type(g, vertex) - def test_vertex_meta_properties(self): + def _test_vertex_meta_properties(self, schema, graphson): """ Test verifying vertex property properties @@ -196,6 +213,9 @@ def test_vertex_meta_properties(self): @test_category dse graph """ + if schema is not ClassicGraphSchema: + raise unittest.SkipTest('skipped because multiple properties are only supported with classic graphs') + s = self.session s.execute_graph("schema.propertyKey('k0').Text().ifNotExists().create();") s.execute_graph("schema.propertyKey('k1').Text().ifNotExists().create();") @@ -206,15 +226,15 @@ def test_vertex_meta_properties(self): v.property('key', 'meta_prop', 'k0', 'v0', 'k1', 'v1') v''')[0] - g = self.fetch_traversal_source() + g = self.fetch_traversal_source(graphson) traversal = g.V() # This should contain key, and value where value is a property # This should be a vertex property and should contain sub properties - results = self.execute_traversal(traversal) + results = self.execute_traversal(traversal, graphson) self._validate_meta_property(g, results[0]) - def test_vertex_multiple_properties(self): + def _test_vertex_multiple_properties(self, schema, graphson): """ Test verifying vertex property form for various Cardinality @@ -229,6 +249,9 @@ def test_vertex_multiple_properties(self): @test_category dse graph """ + if schema is not ClassicGraphSchema: + raise unittest.SkipTest('skipped because multiple properties are only supported with classic graphs') + s = self.session s.execute_graph('''Schema schema = graph.schema(); schema.propertyKey('mult_key').Text().multiple().ifNotExists().create(); @@ -243,10 +266,10 @@ def test_vertex_multiple_properties(self): mpw2v = s.execute_graph('''g.addV('MPW2').property('mult_key', 'value0').property('mult_key', 'value1')''')[0] - g = self.fetch_traversal_source() + g = self.fetch_traversal_source(graphson) traversal = g.V(mpw1v.id).properties() - vertex_props = self.execute_traversal(traversal) + vertex_props = self.execute_traversal(traversal, graphson) self.assertEqual(len(vertex_props), 1) @@ -254,10 +277,10 @@ def test_vertex_multiple_properties(self): self.assertEqual(vertex_props[0].value, "value") # multiple_with_two_values - #v = s.execute_graph('''g.addV(label, 'MPW2', 'mult_key', 'value0', 'mult_key', 'value1')''')[0] + #v = s.execute_graph('''g.addV(label, 'MPW2', 'mult_key', 'value0', 'mult_key', 'value1')''')[0] traversal = g.V(mpw2v.id).properties() - vertex_props = self.execute_traversal(traversal) + vertex_props = self.execute_traversal(traversal, graphson) self.assertEqual(len(vertex_props), 2) self.assertEqual(self.fetch_key_from_prop(vertex_props[0]), 'mult_key') @@ -270,18 +293,16 @@ def test_vertex_multiple_properties(self): v.property('single_key', 'value') v''')[0] traversal = g.V(v.id).properties() - vertex_props = self.execute_traversal(traversal) + vertex_props = self.execute_traversal(traversal, graphson) self.assertEqual(len(vertex_props), 1) self.assertEqual(self.fetch_key_from_prop(vertex_props[0]), "single_key") self.assertEqual(vertex_props[0].value, "value") - def should_parse_meta_properties(self): g = self.fetch_traversal_source() g.addV("meta_v").property("meta_prop", "hello", "sub_prop", "hi", "sub_prop2", "hi2") - - def test_all_graph_types_with_schema(self): + def _test_all_graph_types_with_schema(self, schema, graphson): """ Exhaustively goes through each type that is supported by dse_graph. creates a vertex for each type using a dse-tinkerpop traversal, @@ -294,31 +315,24 @@ def test_all_graph_types_with_schema(self): @test_category dse graph """ - generate_type_graph_schema(self.session) - # if result set is not parsed correctly this will throw an exception + self._write_and_read_data_types(schema, graphson) - self._write_and_read_data_types() - - - def test_all_graph_types_without_schema(self): + def _test_all_graph_types_without_schema(self, schema, graphson): """ Exhaustively goes through each type that is supported by dse_graph. creates a vertex for each type using a dse-tinkerpop traversal, It then attempts to fetch it from the server and compares it to what was inserted Do not prime the graph with the correct schema first - @since 1.0.0 @jira_ticket PYTHON-641 @expected_result inserted objects are equivalent to those retrieved - @test_category dse graph """ + if schema is not ClassicGraphSchema: + raise unittest.SkipTest('schema-less is only for classic graphs') + self._write_and_read_data_types(schema, graphson, use_schema=False) - # Prime graph using common utilites - generate_type_graph_schema(self.session, prime_schema=False) - self._write_and_read_data_types() - - def test_dsl(self): + def _test_dsl(self, schema, graphson): """ The test creates a SocialTraversal and a SocialTraversalSource as part of a DSL. Then calls it's method and checks the results to verify @@ -342,17 +356,24 @@ def __init__(self, *args, **kwargs): def people(self, *names): return self.get_graph_traversal().V().has("name", P.within(*names)) - generate_classic(self.session) - g = self.fetch_traversal_source(traversal_class=SocialTraversalSource) + self.execute_graph(schema.fixtures.classic(), graphson) + if schema is CoreGraphSchema: + self.execute_graph(""" + schema.edgeLabel('knows').from('person').to('person').materializedView('person__knows__person_by_in_name'). + ifNotExists().partitionBy('in_name').clusterBy('out_name', Asc).create() + """, graphson) + time.sleep(1) # give some time to the MV to be populated + g = self.fetch_traversal_source(graphson, traversal_class=SocialTraversalSource) traversal = g.people("marko", "albert").knows("vadas") - results = self.execute_traversal(traversal) - + results = self.execute_traversal(traversal, graphson) self.assertEqual(len(results), 1) only_vertex = results[0] + schema.ensure_properties(self.session, only_vertex, + execution_profile=self.get_execution_profile(graphson)) self._validate_classic_vertex(g, only_vertex) - def test_bulked_results(self): + def _test_bulked_results(self, schema, graphson): """ Send a query expecting a bulked result and the driver "undoes" the bulk and returns the expected list @@ -363,32 +384,182 @@ def test_bulked_results(self): @test_category dse graph """ - generate_classic(self.session) - g = self.fetch_traversal_source() + self.execute_graph(schema.fixtures.classic(), graphson) + g = self.fetch_traversal_source(graphson) barrier_traversal = g.E().label().barrier() - results = self.execute_traversal(barrier_traversal) - self.assertEqual(["created", "created", "created", "created", "knows", "knows"], results) - - def _write_and_read_data_types(self): - g = self.fetch_traversal_source() - for key in TYPE_MAP.keys(): - vertex_label = generate_type_graph_schema.single_vertex - property_name = key + "value" - data_value = TYPE_MAP[key][1] - - write_traversal = g.addV(vertex_label).property(property_name, data_value) - self.execute_traversal(write_traversal) - - read_traversal = g.V().hasLabel(vertex_label).has(property_name).values() - results = self.execute_traversal(read_traversal) - - self._check_equality(g, data_value, results[0]) - - def fetch_edge_props(self, g, edge): + results = self.execute_traversal(barrier_traversal, graphson) + self.assertEqual(sorted(["created", "created", "created", "created", "knows", "knows"]), sorted(results)) + + def _test_udt_with_classes(self, schema, graphson): + class Address(object): + + def __init__(self, address, city, state): + self.address = address + self.city = city + self.state = state + + def __eq__(self, other): + return self.address == other.address and self.city == other.city and self.state == other.state + + class AddressWithTags(object): + + def __init__(self, address, city, state, tags): + self.address = address + self.city = city + self.state = state + self.tags = tags + + def __eq__(self, other): + return (self.address == other.address and self.city == other.city + and self.state == other.state and self.tags == other.tags) + + class ComplexAddress(object): + + def __init__(self, address, address_tags, city, state, props): + self.address = address + self.address_tags = address_tags + self.city = city + self.state = state + self.props = props + + def __eq__(self, other): + return (self.address == other.address and self.address_tags == other.address_tags + and self.city == other.city and self.state == other.state + and self.props == other.props) + + class ComplexAddressWithOwners(object): + + def __init__(self, address, address_tags, city, state, props, owners): + self.address = address + self.address_tags = address_tags + self.city = city + self.state = state + self.props = props + self.owners = owners + + def __eq__(self, other): + return (self.address == other.address and self.address_tags == other.address_tags + and self.city == other.city and self.state == other.state + and self.props == other.props and self.owners == other.owners) + + self.__test_udt(schema, graphson, Address, AddressWithTags, ComplexAddress, ComplexAddressWithOwners) + + def _test_udt_with_namedtuples(self, schema, graphson): + AddressTuple = namedtuple('Address', ('address', 'city', 'state')) + AddressWithTagsTuple = namedtuple('AddressWithTags', ('address', 'city', 'state', 'tags')) + ComplexAddressTuple = namedtuple('ComplexAddress', ('address', 'address_tags', 'city', 'state', 'props')) + ComplexAddressWithOwnersTuple = namedtuple('ComplexAddressWithOwners', ('address', 'address_tags', 'city', + 'state', 'props', 'owners')) + + self.__test_udt(schema, graphson, AddressTuple, AddressWithTagsTuple, + ComplexAddressTuple, ComplexAddressWithOwnersTuple) + + def _write_and_read_data_types(self, schema, graphson, use_schema=True): + g = self.fetch_traversal_source(graphson) + ep = self.get_execution_profile(graphson) + for data in six.itervalues(schema.fixtures.datatypes()): + typ, value, deserializer = data + vertex_label = VertexLabel([typ]) + property_name = next(six.iterkeys(vertex_label.non_pk_properties)) + if use_schema or schema is CoreGraphSchema: + schema.create_vertex_label(self.session, vertex_label, execution_profile=ep) + + write_traversal = g.addV(str(vertex_label.label)).property('pkid', vertex_label.id).\ + property(property_name, value) + self.execute_traversal(write_traversal, graphson) + + read_traversal = g.V().hasLabel(str(vertex_label.label)).has(property_name).properties() + results = self.execute_traversal(read_traversal, graphson) + + for result in results: + if result.label == 'pkid': + continue + self._check_equality(g, value, result.value) + + def __test_udt(self, schema, graphson, address_class, address_with_tags_class, + complex_address_class, complex_address_with_owners_class): + if schema is not CoreGraphSchema or DSE_VERSION < Version('6.8'): + raise unittest.SkipTest("Graph UDT is only supported with DSE 6.8+ and Core graphs.") + + ep = self.get_execution_profile(graphson) + + Address = address_class + AddressWithTags = address_with_tags_class + ComplexAddress = complex_address_class + ComplexAddressWithOwners = complex_address_with_owners_class + + # setup udt + self.session.execute_graph(""" + schema.type('address').property('address', Text).property('city', Text).property('state', Text).create(); + schema.type('addressTags').property('address', Text).property('city', Text).property('state', Text). + property('tags', setOf(Text)).create(); + schema.type('complexAddress').property('address', Text).property('address_tags', frozen(typeOf('addressTags'))). + property('city', Text).property('state', Text).property('props', mapOf(Text, Int)).create(); + schema.type('complexAddressWithOwners').property('address', Text). + property('address_tags', frozen(typeOf('addressTags'))). + property('city', Text).property('state', Text).property('props', mapOf(Text, Int)). + property('owners', frozen(listOf(tupleOf(Text, Int)))).create(); + """, execution_profile=ep) + + time.sleep(2) # wait the UDT to be discovered + self.session.cluster.register_user_type(self.graph_name, 'address', Address) + self.session.cluster.register_user_type(self.graph_name, 'addressTags', AddressWithTags) + self.session.cluster.register_user_type(self.graph_name, 'complexAddress', ComplexAddress) + self.session.cluster.register_user_type(self.graph_name, 'complexAddressWithOwners', ComplexAddressWithOwners) + + data = { + "udt1": ["typeOf('address')", Address('1440 Rd Smith', 'Quebec', 'QC')], + "udt2": ["tupleOf(typeOf('address'), Text)", (Address('1440 Rd Smith', 'Quebec', 'QC'), 'hello')], + "udt3": ["tupleOf(frozen(typeOf('address')), Text)", (Address('1440 Rd Smith', 'Quebec', 'QC'), 'hello')], + "udt4": ["tupleOf(tupleOf(Int, typeOf('address')), Text)", + ((42, Address('1440 Rd Smith', 'Quebec', 'QC')), 'hello')], + "udt5": ["tupleOf(tupleOf(Int, typeOf('addressTags')), Text)", + ((42, AddressWithTags('1440 Rd Smith', 'Quebec', 'QC', {'t1', 't2'})), 'hello')], + "udt6": ["tupleOf(tupleOf(Int, typeOf('complexAddress')), Text)", + ((42, ComplexAddress('1440 Rd Smith', + AddressWithTags('1440 Rd Smith', 'Quebec', 'QC', {'t1', 't2'}), + 'Quebec', 'QC', {'p1': 42, 'p2': 33})), 'hello')], + "udt7": ["tupleOf(tupleOf(Int, frozen(typeOf('complexAddressWithOwners'))), Text)", + ((42, ComplexAddressWithOwners( + '1440 Rd Smith', + AddressWithTags('1440 CRd Smith', 'Quebec', 'QC', {'t1', 't2'}), + 'Quebec', 'QC', {'p1': 42, 'p2': 33}, [('Mike', 43), ('Gina', 39)]) + ), 'hello')] + } + + g = self.fetch_traversal_source(graphson) + for typ, value in six.itervalues(data): + vertex_label = VertexLabel([typ]) + property_name = next(six.iterkeys(vertex_label.non_pk_properties)) + schema.create_vertex_label(self.session, vertex_label, execution_profile=ep) + + write_traversal = g.addV(str(vertex_label.label)).property('pkid', vertex_label.id). \ + property(property_name, value) + self.execute_traversal(write_traversal, graphson) + + #vertex = list(schema.add_vertex(self.session, vertex_label, property_name, value, execution_profile=ep))[0] + #vertex_properties = list(schema.get_vertex_properties( + # self.session, vertex, execution_profile=ep)) + + read_traversal = g.V().hasLabel(str(vertex_label.label)).has(property_name).properties() + vertex_properties = self.execute_traversal(read_traversal, graphson) + + self.assertEqual(len(vertex_properties), 2) # include pkid + for vp in vertex_properties: + if vp.label == 'pkid': + continue + + self.assertIsInstance(vp, (VertexProperty, TravVertexProperty)) + self.assertEqual(vp.label, property_name) + self.assertEqual(vp.value, value) + + @staticmethod + def fetch_edge_props(g, edge): edge_props = g.E(edge.id).properties().toList() return edge_props - def fetch_vertex_props(self, g, vertex): + @staticmethod + def fetch_vertex_props(g, vertex): vertex_props = g.V(vertex.id).properties().toList() return vertex_props @@ -398,24 +569,21 @@ def _check_equality(self, g, original, read_value): @requiredse -class ImplicitExecutionTest(AbstractTraversalTest, BasicGraphUnitTestCase): +@GraphTestConfiguration.generate_tests(traversal=True) +class ImplicitExecutionTest(_AbstractTraversalTest): """ This test class will execute all tests of the AbstractTraversalTestClass using implicit execution This all traversal will be run directly using toList() """ - def setUp(self): - super(ImplicitExecutionTest, self).setUp() - if DSE_VERSION: - self.ep = DseGraph().create_execution_profile(self.graph_name) - self.cluster.add_execution_profile(self.graph_name, self.ep) - - def fetch_key_from_prop(self, property): + @staticmethod + def fetch_key_from_prop(property): return property.key - def fetch_traversal_source(self, **kwargs): - return DseGraph().traversal_source(self.session, self.graph_name, execution_profile=self.ep, **kwargs) + def fetch_traversal_source(self, graphson, **kwargs): + ep = self.get_execution_profile(graphson, traversal=True) + return DseGraph().traversal_source(self.session, self.graph_name, execution_profile=ep, **kwargs) - def execute_traversal(self, traversal): + def execute_traversal(self, traversal, graphson=None): return traversal.toList() def _validate_classic_vertex(self, g, vertex): @@ -426,12 +594,12 @@ def _validate_classic_vertex(self, g, vertex): self.assertIn('name', vertex_prop_keys) self.assertTrue('lang' in vertex_prop_keys or 'age' in vertex_prop_keys) - def _validate_generic_vertex_result_type(self,g, vertex): + def _validate_generic_vertex_result_type(self, g, vertex): # Checks a vertex object for it's generic properties properties = self.fetch_vertex_props(g, vertex) for attr in ('id', 'label'): self.assertIsNotNone(getattr(vertex, attr)) - self.assertTrue( len(properties)>2) + self.assertTrue(len(properties) > 2) def _validate_classic_edge_properties(self, g, edge): # Checks the properties on a classic edge for correctness @@ -467,13 +635,13 @@ def _validate_path_result_type(self, g, objects_path): self.fail("Invalid object found in path " + str(object.type)) def _validate_meta_property(self, g, vertex): - meta_props = g.V(vertex.id).properties().toList() + meta_props = g.V(vertex.id).properties().toList() self.assertEqual(len(meta_props), 1) meta_prop = meta_props[0] - self.assertEqual(meta_prop.value,"meta_prop") - self.assertEqual(meta_prop.key,"key") + self.assertEqual(meta_prop.value, "meta_prop") + self.assertEqual(meta_prop.key, "key") - nested_props = vertex_props = g.V(vertex.id).properties().properties().toList() + nested_props = g.V(vertex.id).properties().properties().toList() self.assertEqual(len(nested_props), 2) for nested_prop in nested_props: self.assertTrue(nested_prop.key in ['k0', 'k1']) @@ -487,37 +655,42 @@ def _validate_type(self, g, vertex): _validate_prop(key, value, self) -@requiredse -class ExplicitExecutionBase(BasicGraphUnitTestCase): - def setUp(self): - super(ExplicitExecutionBase, self).setUp() - if DSE_VERSION: - self.ep = DseGraph().create_execution_profile(self.graph_name) - self.cluster.add_execution_profile(self.graph_name, self.ep) - - def fetch_traversal_source(self, **kwargs): - return DseGraph().traversal_source(self.session, self.graph_name, **kwargs) - - def execute_traversal(self, traversal): - query = DseGraph.query_from_traversal(traversal) - #Use an ep that is configured with the correct row factory, and bytecode-json language flat set - result_set = self.session.execute_graph(query, execution_profile=self.ep) +class ExplicitExecutionBase(GraphUnitTestCase): + + def fetch_traversal_source(self, graphson, **kwargs): + ep = self.get_execution_profile(graphson, traversal=True) + return DseGraph().traversal_source(self.session, self.graph_name, execution_profile=ep, **kwargs) + + def execute_traversal(self, traversal, graphson): + ep = self.get_execution_profile(graphson, traversal=True) + ep = self.session.get_execution_profile(ep) + context = None + if graphson == GraphProtocol.GRAPHSON_3_0: + context = { + 'cluster': self.cluster, + 'graph_name': ep.graph_options.graph_name.decode('utf-8') if ep.graph_options.graph_name else None + } + query = DseGraph.query_from_traversal(traversal, graphson, context=context) + # Use an ep that is configured with the correct row factory, and bytecode-json language flat set + result_set = self.execute_graph(query, graphson, traversal=True) return list(result_set) @requiredse -class ExplicitExecutionTest(ExplicitExecutionBase, AbstractTraversalTest): +@GraphTestConfiguration.generate_tests(traversal=True) +class ExplicitExecutionTest(ExplicitExecutionBase, _AbstractTraversalTest): """ This test class will execute all tests of the AbstractTraversalTestClass using Explicit execution All queries will be run by converting them to byte code, and calling execute graph explicitly with a generated ep. """ - def fetch_key_from_prop(self, property): + @staticmethod + def fetch_key_from_prop(property): return property.label def _validate_classic_vertex(self, g, vertex): validate_classic_vertex(self, vertex) - def _validate_generic_vertex_result_type(self,g, vertex): + def _validate_generic_vertex_result_type(self, g, vertex): validate_generic_vertex_result_type(self, vertex) def _validate_classic_edge_properties(self, g, edge): @@ -534,7 +707,7 @@ def _validate_generic_edge_result_type(self, edge): def _validate_type(self, g, vertex): for key in vertex.properties: - value = vertex.properties[key][0].value + value = vertex.properties[key][0].value _validate_prop(key, value, self) def _validate_path_result_type(self, g, path_obj): @@ -546,12 +719,12 @@ def _validate_path_result_type(self, g, path_obj): props = [] if isinstance(obj, Edge): obj.properties = { - p['key']: p['value'] + p.key: p.value for p in self.fetch_edge_props(g, obj) } elif isinstance(obj, Vertex): obj.properties = { - p['label']: p['value'] + p.label: p.value for p in self.fetch_vertex_props(g, obj) } @@ -568,6 +741,9 @@ def _validate_meta_property(self, g, vertex): def _validate_prop(key, value, unittest): + if key == 'index': + return + if any(key.startswith(t) for t in ('int', 'short')): typ = int @@ -585,7 +761,7 @@ def _validate_prop(key, value, unittest): elif any(key.startswith(t) for t in ('Linestring',)): typ = LineString elif any(key.startswith(t) for t in ('neg',)): - typ = string_types + typ = six.string_types elif any(key.startswith(t) for t in ('date',)): typ = datetime.date elif any(key.startswith(t) for t in ('time',)): @@ -595,20 +771,21 @@ def _validate_prop(key, value, unittest): unittest.assertIsInstance(value, typ) -@requiredse @greaterthanorequaldse60 +@GraphTestConfiguration.generate_tests(traversal=True) class BatchStatementTests(ExplicitExecutionBase): def setUp(self): super(BatchStatementTests, self).setUp() - self.g = self.fetch_traversal_source() + self.ep_graphson2 = DseGraph().create_execution_profile(self.graph_name, + graph_protocol=GraphProtocol.GRAPHSON_2_0) + self.ep_graphson3 = DseGraph().create_execution_profile(self.graph_name, + graph_protocol=GraphProtocol.GRAPHSON_3_0) - if hasattr(self, "batch"): - self.batch.clear() - else: - self.batch = DseGraph.batch(session=self.session, execution_profile=self.ep) + self.cluster.add_execution_profile('traversal_graphson2', self.ep_graphson2) + self.cluster.add_execution_profile('traversal_graphson3', self.ep_graphson3) - def test_batch_with_schema(self): + def _test_batch_with_schema(self, schema, graphson): """ Sends a Batch statement and verifies it has succeeded with a schema created @@ -618,10 +795,9 @@ def test_batch_with_schema(self): @test_category dse graph """ - generate_type_graph_schema(self.session) - self._send_batch_and_read_results() + self._send_batch_and_read_results(schema, graphson) - def test_batch_without_schema(self): + def _test_batch_without_schema(self, schema, graphson): """ Sends a Batch statement and verifies it has succeeded without a schema created @@ -631,10 +807,11 @@ def test_batch_without_schema(self): @test_category dse graph """ - generate_type_graph_schema(self.session) - self._send_batch_and_read_results() + if schema is not ClassicGraphSchema: + raise unittest.SkipTest('schema-less is only for classic graphs') + self._send_batch_and_read_results(schema, graphson, use_schema=False) - def test_batch_with_schema_add_all(self): + def _test_batch_with_schema_add_all(self, schema, graphson): """ Sends a Batch statement and verifies it has succeeded with a schema created. Uses :method:`dse_graph.query._BatchGraphStatement.add_all` to add the statements @@ -646,10 +823,9 @@ def test_batch_with_schema_add_all(self): @test_category dse graph """ - generate_type_graph_schema(self.session) - self._send_batch_and_read_results(add_all=True) + self._send_batch_and_read_results(schema, graphson, add_all=True) - def test_batch_without_schema_add_all(self): + def _test_batch_without_schema_add_all(self, schema, graphson): """ Sends a Batch statement and verifies it has succeeded without a schema created Uses :method:`dse_graph.query._BatchGraphStatement.add_all` to add the statements @@ -661,8 +837,9 @@ def test_batch_without_schema_add_all(self): @test_category dse graph """ - generate_type_graph_schema(self.session, prime_schema=False) - self._send_batch_and_read_results(add_all=True) + if schema is not ClassicGraphSchema: + raise unittest.SkipTest('schema-less is only for classic graphs') + self._send_batch_and_read_results(schema, graphson, add_all=True, use_schema=False) def test_only_graph_traversals_are_accepted(self): """ @@ -674,42 +851,141 @@ def test_only_graph_traversals_are_accepted(self): @test_category dse graph """ - self.assertRaises(ValueError, self.batch.add, '{"@value":{"step":[["addV","poc_int"],' - '["property","bigint1value",{"@value":12,"@type":"g:Int32"}]]},' - '"@type":"g:Bytecode"}') + batch = DseGraph.batch() + self.assertRaises(ValueError, batch.add, '{"@value":{"step":[["addV","poc_int"],' + '["property","bigint1value",{"@value":12,"@type":"g:Int32"}]]},' + '"@type":"g:Bytecode"}') another_batch = DseGraph.batch() - self.assertRaises(ValueError, self.batch.add, another_batch) + self.assertRaises(ValueError, batch.add, another_batch) - def _send_batch_and_read_results(self, add_all=False): - # For each supported type fetch create a vetex containing that type - vertex_label = generate_type_graph_schema.single_vertex + def _send_batch_and_read_results(self, schema, graphson, add_all=False, use_schema=True): traversals = [] - for key in TYPE_MAP.keys(): - property_name = key + "value" - traversal = self.g.addV(vertex_label).property(property_name, TYPE_MAP[key][1]) + datatypes = schema.fixtures.datatypes() + values = {} + g = self.fetch_traversal_source(graphson) + ep = self.get_execution_profile(graphson) + batch = DseGraph.batch(session=self.session, + execution_profile=self.get_execution_profile(graphson, traversal=True)) + for data in six.itervalues(datatypes): + typ, value, deserializer = data + vertex_label = VertexLabel([typ]) + property_name = next(six.iterkeys(vertex_label.non_pk_properties)) + values[property_name] = value + if use_schema or schema is CoreGraphSchema: + schema.create_vertex_label(self.session, vertex_label, execution_profile=ep) + + traversal = g.addV(str(vertex_label.label)).property('pkid', vertex_label.id).property(property_name, value) if not add_all: - self.batch.add(traversal) + batch.add(traversal) traversals.append(traversal) if add_all: - self.batch.add_all(traversals) - - self.assertEqual(len(TYPE_MAP), len(self.batch)) + batch.add_all(traversals) - self.batch.execute() + self.assertEqual(len(datatypes), len(batch)) - traversal = self.g.V() - vertices = self.execute_traversal(traversal) + batch.execute() - self.assertEqual(len(vertices), len(TYPE_MAP), "g.V() returned {}".format(vertices)) + vertices = self.execute_traversal(g.V(), graphson) + self.assertEqual(len(vertices), len(datatypes), "g.V() returned {}".format(vertices)) # Iterate over all the vertices and check that they match the original input for vertex in vertices: - key = list(vertex.properties.keys())[0].replace("value", "") - original = TYPE_MAP[key][1] - self._check_equality(self.g, original, vertex) + schema.ensure_properties(self.session, vertex, execution_profile=ep) + key = [k for k in list(vertex.properties.keys()) if k != 'pkid'][0].replace("value", "") + original = values[key] + self._check_equality(original, vertex) - def _check_equality(self,g, original, vertex): + def _check_equality(self, original, vertex): for key in vertex.properties: - value = vertex.properties[key][0].value + if key == 'pkid': + continue + value = vertex.properties[key].value \ + if isinstance(vertex.properties[key], VertexProperty) else vertex.properties[key][0].value check_equality_base(self, original, value) + + +class ContinuousPagingOptionsForTests(ContinuousPagingOptions): + def __init__(self, + page_unit=ContinuousPagingOptions.PagingUnit.ROWS, max_pages=1, # max_pages=1 + max_pages_per_second=0, max_queue_size=4): + super(ContinuousPagingOptionsForTests, self).__init__(page_unit, max_pages, max_pages_per_second, + max_queue_size) + + +def reset_paging_options(): + cluster.ContinuousPagingOptions = ContinuousPagingOptions + + +@greaterthanorequaldse68 +@GraphTestConfiguration.generate_tests(schema=CoreGraphSchema) +class GraphPagingTest(GraphUnitTestCase): + + def setUp(self): + super(GraphPagingTest, self).setUp() + self.addCleanup(reset_paging_options) + self.ep_graphson3 = DseGraph().create_execution_profile(self.graph_name, + graph_protocol=GraphProtocol.GRAPHSON_3_0) + self.cluster.add_execution_profile('traversal_graphson3', self.ep_graphson3) + + def _setup_data(self, schema, graphson): + self.execute_graph( + "schema.vertexLabel('person').ifNotExists().partitionBy('name', Text).property('age', Int).create();", + graphson) + for i in range(100): + self.execute_graph("g.addV('person').property('name', 'batman-{}')".format(i), graphson) + + def _test_cont_paging_is_enabled_by_default(self, schema, graphson): + """ + Test that graph paging is automatically enabled with a >=6.8 cluster. + + @jira_ticket PYTHON-1045 + @expected_result the default continuous paging options are used + + @test_category dse graph + """ + # with traversals... I don't have access to the response future... so this is a hack to ensure paging is on + cluster.ContinuousPagingOptions = ContinuousPagingOptionsForTests + ep = self.get_execution_profile(graphson, traversal=True) + self._setup_data(schema, graphson) + self.session.default_fetch_size = 10 + g = DseGraph.traversal_source(self.session, execution_profile=ep) + results = g.V().toList() + self.assertEqual(len(results), 10) # only 10 results due to our hack + + def _test_cont_paging_can_be_disabled(self, schema, graphson): + """ + Test that graph paging can be disabled. + + @jira_ticket PYTHON-1045 + @expected_result the default continuous paging options are not used + + @test_category dse graph + """ + # with traversals... I don't have access to the response future... so this is a hack to ensure paging is on + cluster.ContinuousPagingOptions = ContinuousPagingOptionsForTests + ep = self.get_execution_profile(graphson, traversal=True) + ep = self.session.execution_profile_clone_update(ep, continuous_paging_options=None) + self._setup_data(schema, graphson) + self.session.default_fetch_size = 10 + g = DseGraph.traversal_source(self.session, execution_profile=ep) + results = g.V().toList() + self.assertEqual(len(results), 100) # 100 results since paging is disabled + + def _test_cont_paging_with_custom_options(self, schema, graphson): + """ + Test that we can specify custom paging options. + + @jira_ticket PYTHON-1045 + @expected_result we get only the desired number of results + + @test_category dse graph + """ + ep = self.get_execution_profile(graphson, traversal=True) + ep = self.session.execution_profile_clone_update(ep, + continuous_paging_options=ContinuousPagingOptions(max_pages=1)) + self._setup_data(schema, graphson) + self.session.default_fetch_size = 10 + g = DseGraph.traversal_source(self.session, execution_profile=ep) + results = g.V().toList() + self.assertEqual(len(results), 10) # only 10 results since paging is disabled diff --git a/tests/integration/advanced/graph/fluent/test_search.py b/tests/integration/advanced/graph/fluent/test_search.py index dde4e73ab4..d50016d576 100644 --- a/tests/integration/advanced/graph/fluent/test_search.py +++ b/tests/integration/advanced/graph/fluent/test_search.py @@ -12,13 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -from cassandra.datastax.graph.fluent import DseGraph -import time -from cassandra.datastax.graph.fluent.predicates import Search, Geo, GeoUnit -from tests.integration.advanced import BasicSharedGraphUnitTestCase, generate_address_book_graph, use_single_node_with_graph_and_solr -from tests.integration import greaterthanorequaldse51, requiredse, DSE_VERSION from cassandra.util import Distance from cassandra import InvalidRequest +from cassandra.graph import GraphProtocol +from cassandra.datastax.graph.fluent import DseGraph +from cassandra.datastax.graph.fluent.predicates import Search, Geo, GeoUnit, CqlCollection + +from tests.integration.advanced import use_single_node_with_graph_and_solr +from tests.integration.advanced.graph import GraphUnitTestCase, CoreGraphSchema, ClassicGraphSchema, GraphTestConfiguration +from tests.integration import greaterthanorequaldse51, DSE_VERSION, requiredse def setup_module(): @@ -26,9 +28,23 @@ def setup_module(): use_single_node_with_graph_and_solr() -class AbstractSearchTest(): +class AbstractSearchTest(GraphUnitTestCase): + + def setUp(self): + super(AbstractSearchTest, self).setUp() + self.ep_graphson2 = DseGraph().create_execution_profile(self.graph_name, + graph_protocol=GraphProtocol.GRAPHSON_2_0) + self.ep_graphson3 = DseGraph().create_execution_profile(self.graph_name, + graph_protocol=GraphProtocol.GRAPHSON_3_0) + + self.cluster.add_execution_profile('traversal_graphson2', self.ep_graphson2) + self.cluster.add_execution_profile('traversal_graphson3', self.ep_graphson3) - def test_search_by_prefix(self): + def fetch_traversal_source(self, graphson): + ep = self.get_execution_profile(graphson, traversal=True) + return DseGraph().traversal_source(self.session, self.graph_name, execution_profile=ep) + + def _test_search_by_prefix(self, schema, graphson): """ Test to validate that solr searches by prefix function. @@ -38,13 +54,14 @@ def test_search_by_prefix(self): @test_category dse graph """ - g = self.fetch_traversal_source() + self.execute_graph(schema.fixtures.address_book(), graphson) + g = self.fetch_traversal_source(graphson) traversal = g.V().has("person", "name", Search.prefix("Paul")).values("name") - results_list = self.execute_traversal(traversal) + results_list = self.execute_traversal(traversal, graphson) self.assertEqual(len(results_list), 1) self.assertEqual(results_list[0], "Paul Thomas Joe") - def test_search_by_regex(self): + def _test_search_by_regex(self, schema, graphson): """ Test to validate that solr searches by regex function. @@ -54,14 +71,15 @@ def test_search_by_regex(self): @test_category dse graph """ - g = self.fetch_traversal_source() - traversal = g.V().has("person", "name", Search.regex(".*Paul.*")).values("name") - results_list = self.execute_traversal(traversal) + self.execute_graph(schema.fixtures.address_book(), graphson) + g = self.fetch_traversal_source(graphson) + traversal = g.V().has("person", "name", Search.regex(".*Paul.*")).values("name") + results_list = self.execute_traversal(traversal, graphson) self.assertEqual(len(results_list), 2) - self.assertIn("Paul Thomas Joe", results_list ) - self.assertIn("James Paul Smith", results_list ) + self.assertIn("Paul Thomas Joe", results_list) + self.assertIn("James Paul Smith", results_list) - def test_search_by_token(self): + def _test_search_by_token(self, schema, graphson): """ Test to validate that solr searches by token. @@ -71,15 +89,15 @@ def test_search_by_token(self): @test_category dse graph """ - g = self.fetch_traversal_source() - traversal = g.V().has("person", "description", Search.token("cold")).values("name") - results_list = self.execute_traversal(traversal) + self.execute_graph(schema.fixtures.address_book(), graphson) + g = self.fetch_traversal_source(graphson) + traversal = g.V().has("person", "description", Search.token("cold")).values("name") + results_list = self.execute_traversal(traversal, graphson) self.assertEqual(len(results_list), 2) - self.assertIn("Jill Alice", results_list ) + self.assertIn("Jill Alice", results_list) self.assertIn("George Bill Steve", results_list) - - def test_search_by_token_prefix(self): + def _test_search_by_token_prefix(self, schema, graphson): """ Test to validate that solr searches by token prefix. @@ -89,15 +107,15 @@ def test_search_by_token_prefix(self): @test_category dse graph """ - g = self.fetch_traversal_source() + self.execute_graph(schema.fixtures.address_book(), graphson) + g = self.fetch_traversal_source(graphson) traversal = g.V().has("person", "description", Search.token_prefix("h")).values("name") - results_list = self.execute_traversal(traversal) + results_list = self.execute_traversal(traversal, graphson) self.assertEqual(len(results_list), 2) - self.assertIn("Paul Thomas Joe", results_list ) - self.assertIn( "James Paul Smith", results_list ) + self.assertIn("Paul Thomas Joe", results_list) + self.assertIn( "James Paul Smith", results_list) - - def test_search_by_token_regex(self): + def _test_search_by_token_regex(self, schema, graphson): """ Test to validate that solr searches by token regex. @@ -107,15 +125,15 @@ def test_search_by_token_regex(self): @test_category dse graph """ - - g = self.fetch_traversal_source() - traversal = g.V().has("person", "description", Search.token_regex("(nice|hospital)")).values("name") - results_list = self.execute_traversal(traversal) + self.execute_graph(schema.fixtures.address_book(), graphson) + g = self.fetch_traversal_source(graphson) + traversal = g.V().has("person", "description", Search.token_regex("(nice|hospital)")).values("name") + results_list = self.execute_traversal(traversal, graphson) self.assertEqual(len(results_list), 2) self.assertIn("Paul Thomas Joe", results_list ) self.assertIn( "Jill Alice", results_list ) - def _assert_in_distance(self, inside, names): + def _assert_in_distance(self, schema, graphson, inside, names): """ Helper function that asserts that an exception is arisen if geodetic predicates are used in cartesian geometry. Also asserts that the expected list is equal to the returned from @@ -124,27 +142,33 @@ def _assert_in_distance(self, inside, names): def assert_equal_list(L1, L2): return len(L1) == len(L2) and sorted(L1) == sorted(L2) - g = self.fetch_traversal_source() + self.execute_graph(schema.fixtures.address_book(), graphson) + g = self.fetch_traversal_source(graphson) traversal = g.V().has("person", "pointPropWithBoundsWithSearchIndex", inside).values("name") - # throws an exception because of a SOLR/Search limitation in the indexing process - # may be resolved in the future - self.assertRaises(InvalidRequest, self.execute_traversal, traversal) + if schema is ClassicGraphSchema: + # throws an exception because of a SOLR/Search limitation in the indexing process + # may be resolved in the future + self.assertRaises(InvalidRequest, self.execute_traversal, traversal, graphson) + else: + traversal = g.V().has("person", "pointPropWithBoundsWithSearchIndex", inside).values("name") + results_list = self.execute_traversal(traversal, graphson) + assert_equal_list(names, results_list) traversal = g.V().has("person", "pointPropWithBounds", inside).values("name") - results_list = self.execute_traversal(traversal) + results_list = self.execute_traversal(traversal, graphson) assert_equal_list(names, results_list) traversal = g.V().has("person", "pointPropWithGeoBoundsWithSearchIndex", inside).values("name") - results_list = self.execute_traversal(traversal) + results_list = self.execute_traversal(traversal, graphson) assert_equal_list(names, results_list) traversal = g.V().has("person", "pointPropWithGeoBounds", inside).values("name") - results_list = self.execute_traversal(traversal) + results_list = self.execute_traversal(traversal, graphson) assert_equal_list(names, results_list) @greaterthanorequaldse51 - def test_search_by_distance(self): + def _test_search_by_distance(self, schema, graphson): """ Test to validate that solr searches by distance. @@ -154,13 +178,13 @@ def test_search_by_distance(self): @test_category dse graph """ - self._assert_in_distance( + self._assert_in_distance(schema, graphson, Geo.inside(Distance(-92, 44, 2)), ["Paul Thomas Joe", "George Bill Steve"] ) @greaterthanorequaldse51 - def test_search_by_distance_with_meters_units(self): + def _test_search_by_distance_meters_units(self, schema, graphson): """ Test to validate that solr searches by distance. @@ -170,13 +194,13 @@ def test_search_by_distance_with_meters_units(self): @test_category dse graph """ - self._assert_in_distance( + self._assert_in_distance(schema, graphson, Geo.inside(Distance(-92, 44, 56000), GeoUnit.METERS), ["Paul Thomas Joe"] ) @greaterthanorequaldse51 - def test_search_by_distance_with_miles_units(self): + def _test_search_by_distance_miles_units(self, schema, graphson): """ Test to validate that solr searches by distance. @@ -186,13 +210,13 @@ def test_search_by_distance_with_miles_units(self): @test_category dse graph """ - self._assert_in_distance( + self._assert_in_distance(schema, graphson, Geo.inside(Distance(-92, 44, 70), GeoUnit.MILES), ["Paul Thomas Joe", "George Bill Steve"] ) @greaterthanorequaldse51 - def test_search_by_distance_check_limit(self): + def _test_search_by_distance_check_limit(self, schema, graphson): """ Test to validate that solr searches by distance using several units. It will also validate that and exception is arisen if geodetic predicates are used against cartesian geometry @@ -205,29 +229,29 @@ def test_search_by_distance_check_limit(self): @test_category dse graph """ # Paul Thomas Joe and George Bill Steve are 64.6923761881464 km apart - self._assert_in_distance( + self._assert_in_distance(schema, graphson, Geo.inside(Distance(-92.46295, 44.0234, 65), GeoUnit.KILOMETERS), ["George Bill Steve", "Paul Thomas Joe"] ) - self._assert_in_distance( + self._assert_in_distance(schema, graphson, Geo.inside(Distance(-92.46295, 44.0234, 64), GeoUnit.KILOMETERS), ["Paul Thomas Joe"] ) # Paul Thomas Joe and George Bill Steve are 40.19797892069464 miles apart - self._assert_in_distance( + self._assert_in_distance(schema, graphson, Geo.inside(Distance(-92.46295, 44.0234, 41), GeoUnit.MILES), ["George Bill Steve", "Paul Thomas Joe"] ) - self._assert_in_distance( + self._assert_in_distance(schema, graphson, Geo.inside(Distance(-92.46295, 44.0234, 40), GeoUnit.MILES), ["Paul Thomas Joe"] ) @greaterthanorequaldse51 - def test_search_by_fuzzy(self): + def _test_search_by_fuzzy(self, schema, graphson): """ Test to validate that solr searches by distance. @@ -237,18 +261,19 @@ def test_search_by_fuzzy(self): @test_category dse graph """ - g = self.fetch_traversal_source() - traversal = g.V().has("person", "name", Search.fuzzy("Paul Thamas Joe" ,1)).values("name") - results_list = self.execute_traversal(traversal) + self.execute_graph(schema.fixtures.address_book(), graphson) + g = self.fetch_traversal_source(graphson) + traversal = g.V().has("person", "name", Search.fuzzy("Paul Thamas Joe", 1)).values("name") + results_list = self.execute_traversal(traversal, graphson) self.assertEqual(len(results_list), 1) - self.assertIn("Paul Thomas Joe", results_list ) + self.assertIn("Paul Thomas Joe", results_list) - traversal = g.V().has("person", "name", Search.fuzzy("Paul Thames Joe" ,1)).values("name") - results_list = self.execute_traversal(traversal) + traversal = g.V().has("person", "name", Search.fuzzy("Paul Thames Joe", 1)).values("name") + results_list = self.execute_traversal(traversal, graphson) self.assertEqual(len(results_list), 0) @greaterthanorequaldse51 - def test_search_by_fuzzy_token(self): + def _test_search_by_fuzzy_token(self, schema, graphson): """ Test to validate that fuzzy searches. @@ -258,21 +283,22 @@ def test_search_by_fuzzy_token(self): @test_category dse graph """ - g = self.fetch_traversal_source() - traversal = g.V().has("person", "description", Search.token_fuzzy("lives", 1)).values("name"); + self.execute_graph(schema.fixtures.address_book(), graphson) + g = self.fetch_traversal_source(graphson) + traversal = g.V().has("person", "description", Search.token_fuzzy("lives", 1)).values("name") # Should match 'Paul Thomas Joe' since description contains 'Lives' # Should match 'James Paul Joe' since description contains 'Likes' - results_list = self.execute_traversal(traversal) + results_list = self.execute_traversal(traversal, graphson) self.assertEqual(len(results_list), 2) - self.assertIn("Paul Thomas Joe", results_list ) - self.assertIn("James Paul Smith", results_list ) + self.assertIn("Paul Thomas Joe", results_list) + self.assertIn("James Paul Smith", results_list) - traversal = g.V().has("person", "description", Search.token_fuzzy("loues", 1)).values("name"); - results_list = self.execute_traversal(traversal) + traversal = g.V().has("person", "description", Search.token_fuzzy("loues", 1)).values("name") + results_list = self.execute_traversal(traversal, graphson) self.assertEqual(len(results_list), 0) @greaterthanorequaldse51 - def test_search_by_phrase(self): + def _test_search_by_phrase(self, schema, graphson): """ Test to validate that phrase searches. @@ -282,66 +308,232 @@ def test_search_by_phrase(self): @test_category dse graph """ - g = self.fetch_traversal_source() - traversal = g.V().has("person", "description", Search.phrase("a cold", 2)).values("name"); + self.execute_graph(schema.fixtures.address_book(), graphson) + g = self.fetch_traversal_source(graphson) + traversal = g.V().has("person", "description", Search.phrase("a cold", 2)).values("name") #Should match 'George Bill Steve' since 'A cold dude' is at distance of 0 for 'a cold'. #Should match 'Jill Alice' since 'Enjoys a very nice cold coca cola' is at distance of 2 for 'a cold'. - results_list = self.execute_traversal(traversal) + results_list = self.execute_traversal(traversal, graphson) self.assertEqual(len(results_list), 2) - self.assertIn('George Bill Steve', results_list ) - self.assertIn('Jill Alice', results_list ) + self.assertIn('George Bill Steve', results_list) + self.assertIn('Jill Alice', results_list) - traversal = g.V().has("user", "description", Search.phrase("a bald", 2)).values("name"); - results_list = self.execute_traversal(traversal) + traversal = g.V().has("person", "description", Search.phrase("a bald", 2)).values("name") + results_list = self.execute_traversal(traversal, graphson) self.assertEqual(len(results_list), 0) - @requiredse -class ImplicitSearchTest(AbstractSearchTest, BasicSharedGraphUnitTestCase): +@GraphTestConfiguration.generate_tests(traversal=True) +class ImplicitSearchTest(AbstractSearchTest): """ This test class will execute all tests of the AbstractSearchTest using implicit execution All traversals will be run directly using toList() """ - @classmethod - def setUpClass(self): - super(ImplicitSearchTest, self).setUpClass() - if DSE_VERSION: - self.ep = DseGraph().create_execution_profile(self.graph_name) - self.cluster.add_execution_profile(self.graph_name, self.ep) - generate_address_book_graph(self.session, 0) - time.sleep(20) - def fetch_key_from_prop(self, property): return property.key - def fetch_traversal_source(self): - return DseGraph().traversal_source(self.session, self.graph_name, execution_profile=self.ep) - - def execute_traversal(self, traversal): + def execute_traversal(self, traversal, graphson=None): return traversal.toList() @requiredse -class ExplicitSearchTest(AbstractSearchTest, BasicSharedGraphUnitTestCase): +@GraphTestConfiguration.generate_tests(traversal=True) +class ExplicitSearchTest(AbstractSearchTest): """ This test class will execute all tests of the AbstractSearchTest using implicit execution All traversals will be converted to byte code then they will be executed explicitly. """ - @classmethod - def setUpClass(self): - super(ExplicitSearchTest, self).setUpClass() - if DSE_VERSION: - self.ep = DseGraph().create_execution_profile(self.graph_name) - self.cluster.add_execution_profile(self.graph_name, self.ep) - generate_address_book_graph(self.session, 0) - time.sleep(20) - - def fetch_traversal_source(self): - return DseGraph().traversal_source(self.session, self.graph_name) - - def execute_traversal(self, traversal): - query = DseGraph.query_from_traversal(traversal) + + def execute_traversal(self, traversal, graphson): + ep = self.get_execution_profile(graphson, traversal=True) + ep = self.session.get_execution_profile(ep) + context = None + if graphson == GraphProtocol.GRAPHSON_3_0: + context = { + 'cluster': self.cluster, + 'graph_name': ep.graph_options.graph_name.decode('utf-8') if ep.graph_options.graph_name else None + } + query = DseGraph.query_from_traversal(traversal, graphson, context=context) #Use an ep that is configured with the correct row factory, and bytecode-json language flat set - result_set = self.session.execute_graph(query, execution_profile=self.ep) + result_set = self.execute_graph(query, graphson, traversal=True) + return list(result_set) + + +@requiredse +class BaseCqlCollectionPredicatesTest(GraphUnitTestCase): + + def setUp(self): + super(BaseCqlCollectionPredicatesTest, self).setUp() + self.ep_graphson3 = DseGraph().create_execution_profile(self.graph_name, + graph_protocol=GraphProtocol.GRAPHSON_3_0) + self.cluster.add_execution_profile('traversal_graphson3', self.ep_graphson3) + + def fetch_traversal_source(self, graphson): + ep = self.get_execution_profile(graphson, traversal=True) + return DseGraph().traversal_source(self.session, self.graph_name, execution_profile=ep) + + def setup_vertex_label(self, graphson): + ep = self.get_execution_profile(graphson) + self.session.execute_graph(""" + schema.vertexLabel('cqlcollections').ifNotExists().partitionBy('name', Varchar) + .property('list', listOf(Text)) + .property('frozen_list', frozen(listOf(Text))) + .property('set', setOf(Text)) + .property('frozen_set', frozen(setOf(Text))) + .property('map_keys', mapOf(Int, Text)) + .property('map_values', mapOf(Int, Text)) + .property('map_entries', mapOf(Int, Text)) + .property('frozen_map', frozen(mapOf(Int, Text))) + .create() + """, execution_profile=ep) + + self.session.execute_graph(""" + schema.vertexLabel('cqlcollections').secondaryIndex('list').by('list').create(); + schema.vertexLabel('cqlcollections').secondaryIndex('frozen_list').by('frozen_list').indexFull().create(); + schema.vertexLabel('cqlcollections').secondaryIndex('set').by('set').create(); + schema.vertexLabel('cqlcollections').secondaryIndex('frozen_set').by('frozen_set').indexFull().create(); + schema.vertexLabel('cqlcollections').secondaryIndex('map_keys').by('map_keys').indexKeys().create(); + schema.vertexLabel('cqlcollections').secondaryIndex('map_values').by('map_values').indexValues().create(); + schema.vertexLabel('cqlcollections').secondaryIndex('map_entries').by('map_entries').indexEntries().create(); + schema.vertexLabel('cqlcollections').secondaryIndex('frozen_map').by('frozen_map').indexFull().create(); + """, execution_profile=ep) + + def _test_contains_list(self, schema, graphson): + """ + Test to validate that the cql predicate contains works with list + + @since TODO dse 6.8 + @jira_ticket PYTHON-1039 + @expected_result contains predicate work on a list + + @test_category dse graph + """ + self.setup_vertex_label(graphson) + g = self.fetch_traversal_source(graphson) + traversal = g.addV("cqlcollections").property("name", "list1").property("list", ['item1', 'item2']) + self.execute_traversal(traversal, graphson) + traversal = g.addV("cqlcollections").property("name", "list2").property("list", ['item3', 'item4']) + self.execute_traversal(traversal, graphson) + traversal = g.V().has("cqlcollections", "list", CqlCollection.contains("item1")).values("name") + results_list = self.execute_traversal(traversal, graphson) + self.assertEqual(len(results_list), 1) + self.assertIn("list1", results_list) + + def _test_contains_set(self, schema, graphson): + """ + Test to validate that the cql predicate contains works with set + + @since TODO dse 6.8 + @jira_ticket PYTHON-1039 + @expected_result contains predicate work on a set + + @test_category dse graph + """ + self.setup_vertex_label(graphson) + g = self.fetch_traversal_source(graphson) + traversal = g.addV("cqlcollections").property("name", "set1").property("set", {'item1', 'item2'}) + self.execute_traversal(traversal, graphson) + traversal = g.addV("cqlcollections").property("name", "set2").property("set", {'item3', 'item4'}) + self.execute_traversal(traversal, graphson) + traversal = g.V().has("cqlcollections", "set", CqlCollection.contains("item1")).values("name") + results_list = self.execute_traversal(traversal, graphson) + self.assertEqual(len(results_list), 1) + self.assertIn("set1", results_list) + + def _test_contains_key_map(self, schema, graphson): + """ + Test to validate that the cql predicate contains_key works with map + + @since TODO dse 6.8 + @jira_ticket PYTHON-1039 + @expected_result contains_key predicate work on a map + + @test_category dse graph + """ + self.setup_vertex_label(graphson) + g = self.fetch_traversal_source(graphson) + traversal = g.addV("cqlcollections").property("name", "map1").property("map_keys", {0: 'item1', 1: 'item2'}) + self.execute_traversal(traversal, graphson) + traversal = g.addV("cqlcollections").property("name", "map2").property("map_keys", {2: 'item3', 3: 'item4'}) + self.execute_traversal(traversal, graphson) + traversal = g.V().has("cqlcollections", "map_keys", CqlCollection.contains_key(0)).values("name") + results_list = self.execute_traversal(traversal, graphson) + self.assertEqual(len(results_list), 1) + self.assertIn("map1", results_list) + + def _test_contains_value_map(self, schema, graphson): + """ + Test to validate that the cql predicate contains_value works with map + + @since TODO dse 6.8 + @jira_ticket PYTHON-1039 + @expected_result contains_value predicate work on a map + + @test_category dse graph + """ + self.setup_vertex_label(graphson) + g = self.fetch_traversal_source(graphson) + traversal = g.addV("cqlcollections").property("name", "map1").property("map_values", {0: 'item1', 1: 'item2'}) + self.execute_traversal(traversal, graphson) + traversal = g.addV("cqlcollections").property("name", "map2").property("map_values", {2: 'item3', 3: 'item4'}) + self.execute_traversal(traversal, graphson) + traversal = g.V().has("cqlcollections", "map_values", CqlCollection.contains_value('item3')).values("name") + results_list = self.execute_traversal(traversal, graphson) + self.assertEqual(len(results_list), 1) + self.assertIn("map2", results_list) + + def _test_entry_eq_map(self, schema, graphson): + """ + Test to validate that the cql predicate entry_eq works with map + + @since TODO dse 6.8 + @jira_ticket PYTHON-1039 + @expected_result entry_eq predicate work on a map + + @test_category dse graph + """ + self.setup_vertex_label(graphson) + g = self.fetch_traversal_source(graphson) + traversal = g.addV("cqlcollections").property("name", "map1").property("map_entries", {0: 'item1', 1: 'item2'}) + self.execute_traversal(traversal, graphson) + traversal = g.addV("cqlcollections").property("name", "map2").property("map_entries", {2: 'item3', 3: 'item4'}) + self.execute_traversal(traversal, graphson) + traversal = g.V().has("cqlcollections", "map_entries", CqlCollection.entry_eq([2, 'item3'])).values("name") + results_list = self.execute_traversal(traversal, graphson) + self.assertEqual(len(results_list), 1) + self.assertIn("map2", results_list) + + +@requiredse +@GraphTestConfiguration.generate_tests(traversal=True, schema=CoreGraphSchema) +class ImplicitCqlCollectionPredicatesTest(BaseCqlCollectionPredicatesTest): + """ + This test class will execute all tests of the BaseCqlCollectionTest using implicit execution + All traversals will be run directly using toList() + """ + + def execute_traversal(self, traversal, graphson=None): + return traversal.toList() + + +@requiredse +@GraphTestConfiguration.generate_tests(traversal=True, schema=CoreGraphSchema) +class ExplicitCqlCollectionPredicatesTest(BaseCqlCollectionPredicatesTest): + """ + This test class will execute all tests of the AbstractSearchTest using implicit execution + All traversals will be converted to byte code then they will be executed explicitly. + """ + + def execute_traversal(self, traversal, graphson): + ep = self.get_execution_profile(graphson, traversal=True) + ep = self.session.get_execution_profile(ep) + context = None + if graphson == GraphProtocol.GRAPHSON_3_0: + context = { + 'cluster': self.cluster, + 'graph_name': ep.graph_options.graph_name.decode('utf-8') if ep.graph_options.graph_name else None + } + query = DseGraph.query_from_traversal(traversal, graphson, context=context) + result_set = self.execute_graph(query, graphson, traversal=True) return list(result_set) diff --git a/tests/integration/advanced/graph/test_graph.py b/tests/integration/advanced/graph/test_graph.py new file mode 100644 index 0000000000..898779f789 --- /dev/null +++ b/tests/integration/advanced/graph/test_graph.py @@ -0,0 +1,271 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six +import re + +from cassandra import OperationTimedOut, InvalidRequest +from cassandra.protocol import SyntaxException +from cassandra.policies import WhiteListRoundRobinPolicy +from cassandra.cluster import NoHostAvailable +from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT, GraphExecutionProfile, Cluster +from cassandra.graph import single_object_row_factory, Vertex, graph_object_row_factory, \ + graph_graphson2_row_factory, graph_graphson3_row_factory +from cassandra.util import SortedSet + +from tests.integration import PROTOCOL_VERSION, DSE_VERSION, greaterthanorequaldse51, greaterthanorequaldse68, requiredse +from tests.integration.advanced.graph import BasicGraphUnitTestCase, GraphUnitTestCase, \ + GraphProtocol, ClassicGraphSchema, CoreGraphSchema, use_single_node_with_graph + + +def setup_module(): + if DSE_VERSION: + dse_options = {'graph': {'realtime_evaluation_timeout_in_seconds': 60}} + use_single_node_with_graph(dse_options=dse_options) + + +@requiredse +class GraphTimeoutTests(BasicGraphUnitTestCase): + + def test_should_wait_indefinitely_by_default(self): + """ + Tests that by default the client should wait indefinitely for server timeouts + + @since 1.0.0 + @jira_ticket PYTHON-589 + + @test_category dse graph + """ + desired_timeout = 1000 + + graph_source = "test_timeout_1" + ep_name = graph_source + ep = self.session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT) + ep.graph_options = ep.graph_options.copy() + ep.graph_options.graph_source = graph_source + self.cluster.add_execution_profile(ep_name, ep) + + to_run = '''graph.schema().config().option("graph.traversal_sources.{0}.evaluation_timeout").set('{1} ms')'''.format( + graph_source, desired_timeout) + self.session.execute_graph(to_run, execution_profile=ep_name) + with self.assertRaises(InvalidRequest) as ir: + self.session.execute_graph("java.util.concurrent.TimeUnit.MILLISECONDS.sleep(35000L);1+1", + execution_profile=ep_name) + self.assertTrue("evaluation exceeded the configured threshold of 1000" in str(ir.exception) or + "evaluation exceeded the configured threshold of evaluation_timeout at 1000" in str( + ir.exception)) + + def test_request_timeout_less_then_server(self): + """ + Tests that with explicit request_timeouts set, that a server timeout is honored if it's relieved prior to the + client timeout + + @since 1.0.0 + @jira_ticket PYTHON-589 + + @test_category dse graph + """ + desired_timeout = 1000 + graph_source = "test_timeout_2" + ep_name = graph_source + ep = self.session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, request_timeout=32) + ep.graph_options = ep.graph_options.copy() + ep.graph_options.graph_source = graph_source + self.cluster.add_execution_profile(ep_name, ep) + + to_run = '''graph.schema().config().option("graph.traversal_sources.{0}.evaluation_timeout").set('{1} ms')'''.format( + graph_source, desired_timeout) + self.session.execute_graph(to_run, execution_profile=ep_name) + with self.assertRaises(InvalidRequest) as ir: + self.session.execute_graph("java.util.concurrent.TimeUnit.MILLISECONDS.sleep(35000L);1+1", + execution_profile=ep_name) + self.assertTrue("evaluation exceeded the configured threshold of 1000" in str(ir.exception) or + "evaluation exceeded the configured threshold of evaluation_timeout at 1000" in str( + ir.exception)) + + def test_server_timeout_less_then_request(self): + """ + Tests that with explicit request_timeouts set, that a client timeout is honored if it's triggered prior to the + server sending a timeout. + + @since 1.0.0 + @jira_ticket PYTHON-589 + + @test_category dse graph + """ + graph_source = "test_timeout_3" + ep_name = graph_source + ep = self.session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, request_timeout=1) + ep.graph_options = ep.graph_options.copy() + ep.graph_options.graph_source = graph_source + self.cluster.add_execution_profile(ep_name, ep) + server_timeout = 10000 + to_run = '''graph.schema().config().option("graph.traversal_sources.{0}.evaluation_timeout").set('{1} ms')'''.format( + graph_source, server_timeout) + self.session.execute_graph(to_run, execution_profile=ep_name) + + with self.assertRaises(Exception) as e: + self.session.execute_graph("java.util.concurrent.TimeUnit.MILLISECONDS.sleep(35000L);1+1", + execution_profile=ep_name) + self.assertTrue(isinstance(e, InvalidRequest) or isinstance(e, OperationTimedOut)) + + +@requiredse +class GraphProfileTests(BasicGraphUnitTestCase): + def test_graph_profile(self): + """ + Test verifying various aspects of graph config properties. + + @since 1.0.0 + @jira_ticket PYTHON-570 + + @test_category dse graph + """ + hosts = self.cluster.metadata.all_hosts() + first_host = hosts[0].address + second_hosts = "1.2.3.4" + + self._execute(ClassicGraphSchema.fixtures.classic(), graphson=GraphProtocol.GRAPHSON_1_0) + # Create various execution policies + exec_dif_factory = GraphExecutionProfile(row_factory=single_object_row_factory) + exec_dif_factory.graph_options.graph_name = self.graph_name + exec_dif_lbp = GraphExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy([first_host])) + exec_dif_lbp.graph_options.graph_name = self.graph_name + exec_bad_lbp = GraphExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy([second_hosts])) + exec_dif_lbp.graph_options.graph_name = self.graph_name + exec_short_timeout = GraphExecutionProfile(request_timeout=1, + load_balancing_policy=WhiteListRoundRobinPolicy([first_host])) + exec_short_timeout.graph_options.graph_name = self.graph_name + + # Add a single execution policy on cluster creation + local_cluster = Cluster(protocol_version=PROTOCOL_VERSION, + execution_profiles={"exec_dif_factory": exec_dif_factory}) + local_session = local_cluster.connect() + self.addCleanup(local_cluster.shutdown) + + rs1 = self.session.execute_graph('g.V()') + rs2 = local_session.execute_graph('g.V()', execution_profile='exec_dif_factory') + + # Verify default and non default policy works + self.assertFalse(isinstance(rs2[0], Vertex)) + self.assertTrue(isinstance(rs1[0], Vertex)) + # Add other policies validate that lbp are honored + local_cluster.add_execution_profile("exec_dif_ldp", exec_dif_lbp) + local_session.execute_graph('g.V()', execution_profile="exec_dif_ldp") + local_cluster.add_execution_profile("exec_bad_lbp", exec_bad_lbp) + with self.assertRaises(NoHostAvailable): + local_session.execute_graph('g.V()', execution_profile="exec_bad_lbp") + + # Try with missing EP + with self.assertRaises(ValueError): + local_session.execute_graph('g.V()', execution_profile='bad_exec_profile') + + # Validate that timeout is honored + local_cluster.add_execution_profile("exec_short_timeout", exec_short_timeout) + with self.assertRaises(Exception) as e: + self.assertTrue(isinstance(e, InvalidRequest) or isinstance(e, OperationTimedOut)) + local_session.execute_graph('java.util.concurrent.TimeUnit.MILLISECONDS.sleep(2000L);', + execution_profile='exec_short_timeout') + + +@requiredse +class GraphMetadataTest(BasicGraphUnitTestCase): + + @greaterthanorequaldse51 + def test_dse_workloads(self): + """ + Test to ensure dse_workloads is populated appropriately. + Field added in DSE 5.1 + + @since DSE 2.0 + @jira_ticket PYTHON-667 + @expected_result dse_workloads set is set on host model + + @test_category metadata + """ + for host in self.cluster.metadata.all_hosts(): + self.assertIsInstance(host.dse_workloads, SortedSet) + self.assertIn("Cassandra", host.dse_workloads) + self.assertIn("Graph", host.dse_workloads) + + +@requiredse +class GraphExecutionProfileOptionsResolveTest(GraphUnitTestCase): + """ + Test that the execution profile options are properly resolved for graph queries. + + @since DSE 6.8 + @jira_ticket PYTHON-1004 PYTHON-1056 + @expected_result execution profile options are properly determined following the rules. + """ + + def test_default_options(self): + ep = self.session.get_execution_profile(EXEC_PROFILE_GRAPH_DEFAULT) + self.assertEqual(ep.graph_options.graph_protocol, None) + self.assertEqual(ep.row_factory, None) + self.session._resolve_execution_profile_options(ep) + self.assertEqual(ep.graph_options.graph_protocol, GraphProtocol.GRAPHSON_1_0) + self.assertEqual(ep.row_factory, graph_object_row_factory) + + def test_default_options_when_not_groovy(self): + ep = self.session.get_execution_profile(EXEC_PROFILE_GRAPH_DEFAULT) + self.assertEqual(ep.graph_options.graph_protocol, None) + self.assertEqual(ep.row_factory, None) + ep.graph_options.graph_language = 'whatever' + self.session._resolve_execution_profile_options(ep) + self.assertEqual(ep.graph_options.graph_protocol, GraphProtocol.GRAPHSON_2_0) + self.assertEqual(ep.row_factory, graph_graphson2_row_factory) + + def test_default_options_when_explicitly_specified(self): + ep = self.session.get_execution_profile(EXEC_PROFILE_GRAPH_DEFAULT) + self.assertEqual(ep.graph_options.graph_protocol, None) + self.assertEqual(ep.row_factory, None) + obj = object() + ep.graph_options.graph_protocol = obj + ep.row_factory = obj + self.session._resolve_execution_profile_options(ep) + self.assertEqual(ep.graph_options.graph_protocol, obj) + self.assertEqual(ep.row_factory, obj) + + @greaterthanorequaldse68 + def test_graph_protocol_default_for_core_is_graphson3(self): + """Test that graphson3 is automatically resolved for a core graph query""" + self.setup_graph(CoreGraphSchema) + ep = self.session.get_execution_profile(EXEC_PROFILE_GRAPH_DEFAULT) + self.assertEqual(ep.graph_options.graph_protocol, None) + self.assertEqual(ep.row_factory, None) + # Ensure we have the graph metadata + self.session.cluster.refresh_schema_metadata() + self.session._resolve_execution_profile_options(ep) + self.assertEqual(ep.graph_options.graph_protocol, GraphProtocol.GRAPHSON_3_0) + self.assertEqual(ep.row_factory, graph_graphson3_row_factory) + + self.execute_graph_queries(CoreGraphSchema.fixtures.classic(), verify_graphson=GraphProtocol.GRAPHSON_3_0) + + @greaterthanorequaldse68 + def test_graph_protocol_default_for_core_fallback_to_graphson1_if_no_graph_name(self): + """Test that graphson1 is set when we cannot detect if it's a core graph""" + self.setup_graph(CoreGraphSchema) + default_ep = self.session.get_execution_profile(EXEC_PROFILE_GRAPH_DEFAULT) + graph_options = default_ep.graph_options.copy() + graph_options.graph_name = None + ep = self.session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, graph_options=graph_options) + self.session._resolve_execution_profile_options(ep) + self.assertEqual(ep.graph_options.graph_protocol, GraphProtocol.GRAPHSON_1_0) + self.assertEqual(ep.row_factory, graph_object_row_factory) + + regex = re.compile(".*Variable.*is unknown.*", re.S) + with six.assertRaisesRegex(self, SyntaxException, regex): + self.execute_graph_queries(CoreGraphSchema.fixtures.classic(), + execution_profile=ep, verify_graphson=GraphProtocol.GRAPHSON_1_0) diff --git a/tests/integration/advanced/graph/test_graph_cont_paging.py b/tests/integration/advanced/graph/test_graph_cont_paging.py new file mode 100644 index 0000000000..065d01d939 --- /dev/null +++ b/tests/integration/advanced/graph/test_graph_cont_paging.py @@ -0,0 +1,78 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from cassandra.cluster import ContinuousPagingOptions + +from tests.integration import greaterthanorequaldse68 +from tests.integration.advanced.graph import GraphUnitTestCase, CoreGraphSchema, GraphTestConfiguration + + +@greaterthanorequaldse68 +@GraphTestConfiguration.generate_tests(schema=CoreGraphSchema) +class GraphPagingTest(GraphUnitTestCase): + + def _setup_data(self, schema, graphson): + self.execute_graph("schema.vertexLabel('person').ifNotExists().partitionBy('name', Text).property('age', Int).create();", graphson) + for i in range(100): + self.execute_graph("g.addV('person').property('name', 'batman-{}')".format(i), graphson) + + def _test_cont_paging_is_enabled_by_default(self, schema, graphson): + """ + Test that graph paging is automatically enabled with a >=6.8 cluster. + + @jira_ticket PYTHON-1045 + @expected_result the response future has a continuous_paging_session since graph paging is enabled + + @test_category dse graph + """ + ep = self.get_execution_profile(graphson) + self._setup_data(schema, graphson) + rf = self.session.execute_graph_async("g.V()", execution_profile=ep) + results = list(rf.result()) + self.assertIsNotNone(rf._continuous_paging_session) + self.assertEqual(len(results), 100) + + def _test_cont_paging_can_be_disabled(self, schema, graphson): + """ + Test that graph paging can be disabled. + + @jira_ticket PYTHON-1045 + @expected_result the response future doesn't have a continuous_paging_session since graph paging is disabled + + @test_category dse graph + """ + ep = self.get_execution_profile(graphson) + new_ep = self.session.execution_profile_clone_update(ep, continuous_paging_options=None) + self._setup_data(schema, graphson) + rf = self.session.execute_graph_async("g.V()", execution_profile=new_ep) + results = list(rf.result()) + self.assertIsNone(rf._continuous_paging_session) + self.assertEqual(len(results), 100) + + def _test_cont_paging_with_custom_options(self, schema, graphson): + """ + Test that we can specify custom paging options. + + @jira_ticket PYTHON-1045 + @expected_result we get only the desired number of results + + @test_category dse graph + """ + ep = self.get_execution_profile(graphson) + new_ep = self.session.execution_profile_clone_update( + ep, continuous_paging_options=ContinuousPagingOptions(max_pages=1)) + self._setup_data(schema, graphson) + self.session.default_fetch_size = 10 + results = list(self.session.execute_graph("g.V()", execution_profile=new_ep)) + self.assertEqual(len(results), 10) diff --git a/tests/integration/advanced/graph/test_graph_datatype.py b/tests/integration/advanced/graph/test_graph_datatype.py new file mode 100644 index 0000000000..d4d28b80df --- /dev/null +++ b/tests/integration/advanced/graph/test_graph_datatype.py @@ -0,0 +1,260 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +import time +import six +import logging +from packaging.version import Version +from collections import namedtuple + +from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT +from cassandra.graph import graph_result_row_factory +from cassandra.graph.query import GraphProtocol +from cassandra.graph.types import VertexProperty + +from tests.integration.advanced.graph import BasicGraphUnitTestCase, ClassicGraphFixtures, \ + ClassicGraphSchema, CoreGraphSchema +from tests.integration.advanced.graph import VertexLabel, GraphTestConfiguration, GraphUnitTestCase +from tests.integration import DSE_VERSION, requiredse + +log = logging.getLogger(__name__) + + +@requiredse +class GraphBasicDataTypesTests(BasicGraphUnitTestCase): + + def test_result_types(self): + """ + Test to validate that the edge and vertex version of results are constructed correctly. + + @since 1.0.0 + @jira_ticket PYTHON-479 + @expected_result edge/vertex result types should be unpacked correctly. + @test_category dse graph + """ + queries, params = ClassicGraphFixtures.multiple_fields() + for query in queries: + self.session.execute_graph(query, params) + + prof = self.session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, row_factory=graph_result_row_factory) # requires simplified row factory to avoid shedding id/~type information used for validation below + rs = self.session.execute_graph("g.V()", execution_profile=prof) + + for result in rs: + self._validate_type(result) + + def _validate_type(self, vertex): + for properties in vertex.properties.values(): + prop = properties[0] + + if DSE_VERSION >= Version("5.1"): + type_indicator = prop['id']['~label'] + else: + type_indicator = prop['id']['~type'] + + if any(type_indicator.startswith(t) for t in + ('int', 'short', 'long', 'bigint', 'decimal', 'smallint', 'varint')): + typ = six.integer_types + elif any(type_indicator.startswith(t) for t in ('float', 'double')): + typ = float + elif any(type_indicator.startswith(t) for t in ('duration', 'date', 'negdate', 'time', + 'blob', 'timestamp', 'point', 'linestring', 'polygon', + 'inet', 'uuid')): + typ = six.text_type + else: + pass + self.fail("Received unexpected type: %s" % type_indicator) + self.assertIsInstance(prop['value'], typ) + + +class GenericGraphDataTypeTest(GraphUnitTestCase): + + def _test_all_datatypes(self, schema, graphson): + ep = self.get_execution_profile(graphson) + + for data in six.itervalues(schema.fixtures.datatypes()): + typ, value, deserializer = data + vertex_label = VertexLabel([typ]) + property_name = next(six.iterkeys(vertex_label.non_pk_properties)) + schema.create_vertex_label(self.session, vertex_label, execution_profile=ep) + vertex = list(schema.add_vertex(self.session, vertex_label, property_name, value, execution_profile=ep))[0] + + vertex_properties = list(schema.get_vertex_properties( + self.session, vertex, execution_profile=ep)) + + if graphson == GraphProtocol.GRAPHSON_1_0: + vertex_properties = [vp.as_vertex_property() for vp in vertex_properties] + + prop_returned = 1 if DSE_VERSION < Version('5.1') else 2 # include pkid >=5.1 + self.assertEqual(len(vertex_properties), prop_returned) + for vp in vertex_properties: + if vp.label == 'pkid': + continue + + self.assertIsInstance(vp, VertexProperty) + self.assertEqual(vp.label, property_name) + if graphson == GraphProtocol.GRAPHSON_1_0: + deserialized_value = deserializer(vp.value) + self.assertEqual(deserialized_value, value) + else: + self.assertEqual(vp.value, value) + + def __test_udt(self, schema, graphson, address_class, address_with_tags_class, + complex_address_class, complex_address_with_owners_class): + if schema is not CoreGraphSchema or DSE_VERSION < Version('6.8'): + raise unittest.SkipTest("Graph UDT is only supported with DSE 6.8+ and Core graphs.") + + ep = self.get_execution_profile(graphson) + + Address = address_class + AddressWithTags = address_with_tags_class + ComplexAddress = complex_address_class + ComplexAddressWithOwners = complex_address_with_owners_class + + # setup udt + self.session.execute_graph(""" + schema.type('address').property('address', Text).property('city', Text).property('state', Text).create(); + schema.type('addressTags').property('address', Text).property('city', Text).property('state', Text). + property('tags', setOf(Text)).create(); + schema.type('complexAddress').property('address', Text).property('address_tags', frozen(typeOf('addressTags'))). + property('city', Text).property('state', Text).property('props', mapOf(Text, Int)).create(); + schema.type('complexAddressWithOwners').property('address', Text). + property('address_tags', frozen(typeOf('addressTags'))). + property('city', Text).property('state', Text).property('props', mapOf(Text, Int)). + property('owners', frozen(listOf(tupleOf(Text, Int)))).create(); + """, execution_profile=ep) + + time.sleep(2) # wait the UDT to be discovered + self.session.cluster.register_user_type(self.graph_name, 'address', Address) + self.session.cluster.register_user_type(self.graph_name, 'addressTags', AddressWithTags) + self.session.cluster.register_user_type(self.graph_name, 'complexAddress', ComplexAddress) + self.session.cluster.register_user_type(self.graph_name, 'complexAddressWithOwners', ComplexAddressWithOwners) + + data = { + "udt1": ["typeOf('address')", Address('1440 Rd Smith', 'Quebec', 'QC')], + "udt2": ["tupleOf(typeOf('address'), Text)", (Address('1440 Rd Smith', 'Quebec', 'QC'), 'hello')], + "udt3": ["tupleOf(frozen(typeOf('address')), Text)", (Address('1440 Rd Smith', 'Quebec', 'QC'), 'hello')], + "udt4": ["tupleOf(tupleOf(Int, typeOf('address')), Text)", + ((42, Address('1440 Rd Smith', 'Quebec', 'QC')), 'hello')], + "udt5": ["tupleOf(tupleOf(Int, typeOf('addressTags')), Text)", + ((42, AddressWithTags('1440 Rd Smith', 'Quebec', 'QC', {'t1', 't2'})), 'hello')], + "udt6": ["tupleOf(tupleOf(Int, typeOf('complexAddress')), Text)", + ((42, ComplexAddress('1440 Rd Smith', + AddressWithTags('1440 Rd Smith', 'Quebec', 'QC', {'t1', 't2'}), + 'Quebec', 'QC', {'p1': 42, 'p2': 33})), 'hello')], + "udt7": ["tupleOf(tupleOf(Int, frozen(typeOf('complexAddressWithOwners'))), Text)", + ((42, ComplexAddressWithOwners( + '1440 Rd Smith', + AddressWithTags('1440 CRd Smith', 'Quebec', 'QC', {'t1', 't2'}), + 'Quebec', 'QC', {'p1': 42, 'p2': 33}, [('Mike', 43), ('Gina', 39)]) + ), 'hello')] + } + + for typ, value in six.itervalues(data): + vertex_label = VertexLabel([typ]) + property_name = next(six.iterkeys(vertex_label.non_pk_properties)) + schema.create_vertex_label(self.session, vertex_label, execution_profile=ep) + + vertex = list(schema.add_vertex(self.session, vertex_label, property_name, value, execution_profile=ep))[0] + vertex_properties = list(schema.get_vertex_properties( + self.session, vertex, execution_profile=ep)) + + self.assertEqual(len(vertex_properties), 2) # include pkid + for vp in vertex_properties: + if vp.label == 'pkid': + continue + + self.assertIsInstance(vp, VertexProperty) + self.assertEqual(vp.label, property_name) + self.assertEqual(vp.value, value) + + def _test_udt_with_classes(self, schema, graphson): + class Address(object): + + def __init__(self, address, city, state): + self.address = address + self.city = city + self.state = state + + def __eq__(self, other): + return self.address == other.address and self.city == other.city and self.state == other.state + + class AddressWithTags(object): + + def __init__(self, address, city, state, tags): + self.address = address + self.city = city + self.state = state + self.tags = tags + + def __eq__(self, other): + return (self.address == other.address and self.city == other.city + and self.state == other.state and self.tags == other.tags) + + class ComplexAddress(object): + + def __init__(self, address, address_tags, city, state, props): + self.address = address + self.address_tags = address_tags + self.city = city + self.state = state + self.props = props + + def __eq__(self, other): + return (self.address == other.address and self.address_tags == other.address_tags + and self.city == other.city and self.state == other.state + and self.props == other.props) + + class ComplexAddressWithOwners(object): + + def __init__(self, address, address_tags, city, state, props, owners): + self.address = address + self.address_tags = address_tags + self.city = city + self.state = state + self.props = props + self.owners = owners + + def __eq__(self, other): + return (self.address == other.address and self.address_tags == other.address_tags + and self.city == other.city and self.state == other.state + and self.props == other.props and self.owners == other.owners) + + self.__test_udt(schema, graphson, Address, AddressWithTags, ComplexAddress, ComplexAddressWithOwners) + + def _test_udt_with_namedtuples(self, schema, graphson): + AddressTuple = namedtuple('Address', ('address', 'city', 'state')) + AddressWithTagsTuple = namedtuple('AddressWithTags', ('address', 'city', 'state', 'tags')) + ComplexAddressTuple = namedtuple('ComplexAddress', ('address', 'address_tags', 'city', 'state', 'props')) + ComplexAddressWithOwnersTuple = namedtuple('ComplexAddressWithOwners', ('address', 'address_tags', 'city', + 'state', 'props', 'owners')) + + self.__test_udt(schema, graphson, AddressTuple, AddressWithTagsTuple, + ComplexAddressTuple, ComplexAddressWithOwnersTuple) + + +@requiredse +@GraphTestConfiguration.generate_tests(schema=ClassicGraphSchema) +class ClassicGraphDataTypeTest(GenericGraphDataTypeTest): + pass + + +@requiredse +@GraphTestConfiguration.generate_tests(schema=CoreGraphSchema) +class CoreGraphDataTypeTest(GenericGraphDataTypeTest): + pass diff --git a/tests/integration/advanced/graph/test_graph_query.py b/tests/integration/advanced/graph/test_graph_query.py new file mode 100644 index 0000000000..5ef4e2c749 --- /dev/null +++ b/tests/integration/advanced/graph/test_graph_query.py @@ -0,0 +1,575 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import sys +import six +from packaging.version import Version + +from copy import copy +from itertools import chain +import json +import time + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +from cassandra import OperationTimedOut, ConsistencyLevel, InvalidRequest +from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT, NoHostAvailable +from cassandra.protocol import ServerError, SyntaxException +from cassandra.query import QueryTrace +from cassandra.util import Point +from cassandra.graph import (SimpleGraphStatement, single_object_row_factory, + Result, GraphOptions, GraphProtocol, to_bigint) +from cassandra.datastax.graph.query import _graph_options + +from tests.integration import DSE_VERSION, requiredse +from tests.integration.advanced.graph import BasicGraphUnitTestCase, GraphTestConfiguration, \ + validate_classic_vertex, GraphUnitTestCase, validate_classic_edge, validate_path_result_type, \ + validate_line_edge, validate_generic_vertex_result_type, \ + ClassicGraphSchema, CoreGraphSchema, VertexLabel + + +@requiredse +class BasicGraphQueryTest(BasicGraphUnitTestCase): + + def test_consistency_passing(self): + """ + Test to validated that graph consistency levels are properly surfaced to the base driver + + @since 1.0.0 + @jira_ticket PYTHON-509 + @expected_result graph consistency levels are surfaced correctly + @test_category dse graph + """ + cl_attrs = ('graph_read_consistency_level', 'graph_write_consistency_level') + + # Iterates over the graph options and constructs an array containing + # The graph_options that correlate to graoh read and write consistency levels + graph_params = [a[2] for a in _graph_options if a[0] in cl_attrs] + + s = self.session + default_profile = s.cluster.profile_manager.profiles[EXEC_PROFILE_GRAPH_DEFAULT] + default_graph_opts = default_profile.graph_options + try: + # Checks the default graph attributes and ensures that both graph_read_consistency_level and graph_write_consistency_level + # Are None by default + for attr in cl_attrs: + self.assertIsNone(getattr(default_graph_opts, attr)) + + res = s.execute_graph("null") + for param in graph_params: + self.assertNotIn(param, res.response_future.message.custom_payload) + + # session defaults are passed + opts = GraphOptions() + opts.update(default_graph_opts) + cl = {0: ConsistencyLevel.ONE, 1: ConsistencyLevel.LOCAL_QUORUM} + for k, v in cl.items(): + setattr(opts, cl_attrs[k], v) + default_profile.graph_options = opts + + res = s.execute_graph("null") + + for k, v in cl.items(): + self.assertEqual(res.response_future.message.custom_payload[graph_params[k]], six.b(ConsistencyLevel.value_to_name[v])) + + # passed profile values override session defaults + cl = {0: ConsistencyLevel.ALL, 1: ConsistencyLevel.QUORUM} + opts = GraphOptions() + opts.update(default_graph_opts) + for k, v in cl.items(): + attr_name = cl_attrs[k] + setattr(opts, attr_name, v) + self.assertNotEqual(getattr(default_profile.graph_options, attr_name), getattr(opts, attr_name)) + tmp_profile = s.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, graph_options=opts) + res = s.execute_graph("null", execution_profile=tmp_profile) + + for k, v in cl.items(): + self.assertEqual(res.response_future.message.custom_payload[graph_params[k]], six.b(ConsistencyLevel.value_to_name[v])) + finally: + default_profile.graph_options = default_graph_opts + + def test_execute_graph_row_factory(self): + s = self.session + + # default Results + default_profile = s.cluster.profile_manager.profiles[EXEC_PROFILE_GRAPH_DEFAULT] + self.assertEqual(default_profile.row_factory, None) # will be resolved to graph_object_row_factory + result = s.execute_graph("123")[0] + self.assertIsInstance(result, Result) + self.assertEqual(result.value, 123) + + # other via parameter + prof = s.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, row_factory=single_object_row_factory) + rs = s.execute_graph("123", execution_profile=prof) + self.assertEqual(rs.response_future.row_factory, single_object_row_factory) + self.assertEqual(json.loads(rs[0]), {'result': 123}) + + def test_execute_graph_timeout(self): + s = self.session + + value = [1, 2, 3] + query = "[%r]" % (value,) + + # default is passed down + default_graph_profile = s.cluster.profile_manager.profiles[EXEC_PROFILE_GRAPH_DEFAULT] + rs = self.session.execute_graph(query) + self.assertEqual(rs[0].value, value) + self.assertEqual(rs.response_future.timeout, default_graph_profile.request_timeout) + + # tiny timeout times out as expected + tmp_profile = copy(default_graph_profile) + tmp_profile.request_timeout = sys.float_info.min + + max_retry_count = 10 + for _ in range(max_retry_count): + start = time.time() + try: + with self.assertRaises(OperationTimedOut): + s.execute_graph(query, execution_profile=tmp_profile) + break + except: + end = time.time() + self.assertAlmostEqual(start, end, 1) + else: + raise Exception("session.execute_graph didn't time out in {0} tries".format(max_retry_count)) + + def test_profile_graph_options(self): + s = self.session + statement = SimpleGraphStatement("true") + ep = self.session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT) + self.assertTrue(s.execute_graph(statement, execution_profile=ep)[0].value) + + # bad graph name to verify it's passed + ep.graph_options = ep.graph_options.copy() + ep.graph_options.graph_name = "definitely_not_correct" + try: + s.execute_graph(statement, execution_profile=ep) + except NoHostAvailable: + self.assertTrue(DSE_VERSION >= Version("6.0")) + else: + if DSE_VERSION < Version("6.8"): # >6.8 returns true + self.fail("Should have risen ServerError or InvalidRequest") + + def test_additional_custom_payload(self): + s = self.session + custom_payload = {'some': 'example'.encode('utf-8'), 'items': 'here'.encode('utf-8')} + sgs = SimpleGraphStatement("null", custom_payload=custom_payload) + future = s.execute_graph_async(sgs) + + default_profile = s.cluster.profile_manager.profiles[EXEC_PROFILE_GRAPH_DEFAULT] + default_graph_opts = default_profile.graph_options + for k, v in chain(custom_payload.items(), default_graph_opts.get_options_map().items()): + self.assertEqual(future.message.custom_payload[k], v) + + +class GenericGraphQueryTest(GraphUnitTestCase): + + def _test_basic_query(self, schema, graphson): + """ + Test to validate that basic graph query results can be executed with a sane result set. + + Creates a simple classic tinkerpot graph, and attempts to find all vertices + related the vertex marco, that have a label of knows. + See reference graph here + http://www.tinkerpop.com/docs/3.0.0.M1/ + + @since 1.0.0 + @jira_ticket PYTHON-457 + @expected_result graph should find two vertices related to marco via 'knows' edges. + + @test_category dse graph + """ + self.execute_graph(schema.fixtures.classic(), graphson) + rs = self.execute_graph('''g.V().has('name','marko').out('knows').values('name')''', graphson) + self.assertFalse(rs.has_more_pages) + results_list = self.resultset_to_list(rs) + self.assertEqual(len(results_list), 2) + self.assertIn('vadas', results_list) + self.assertIn('josh', results_list) + + def _test_geometric_graph_types(self, schema, graphson): + """ + Test to validate that geometric types function correctly + + Creates a very simple graph, and tries to insert a simple point type + + @since 1.0.0 + @jira_ticket DSP-8087 + @expected_result json types associated with insert is parsed correctly + + @test_category dse graph + """ + vertex_label = VertexLabel([('pointP', "Point()")]) + ep = self.get_execution_profile(graphson) + schema.create_vertex_label(self.session, vertex_label, ep) + # import org.apache.cassandra.db.marshal.geometry.Point; + rs = schema.add_vertex(self.session, vertex_label, 'pointP', Point(0, 1), ep) + + # if result set is not parsed correctly this will throw an exception + self.assertIsNotNone(rs) + + def _test_execute_graph_trace(self, schema, graphson): + value = [1, 2, 3] + query = "[%r]" % (value,) + + # default is no trace + rs = self.execute_graph(query, graphson) + results = self.resultset_to_list(rs) + self.assertEqual(results[0], value) + self.assertIsNone(rs.get_query_trace()) + + # request trace + rs = self.execute_graph(query, graphson, trace=True) + results = self.resultset_to_list(rs) + self.assertEqual(results[0], value) + qt = rs.get_query_trace(max_wait_sec=10) + self.assertIsInstance(qt, QueryTrace) + self.assertIsNotNone(qt.duration) + + def _test_range_query(self, schema, graphson): + """ + Test to validate range queries are handled correctly. + + Creates a very large line graph script and executes it. Then proceeds to to a range + limited query against it, and ensure that the results are formatted correctly and that + the result set is properly sized. + + @since 1.0.0 + @jira_ticket PYTHON-457 + @expected_result result set should be properly formatted and properly sized + + @test_category dse graph + """ + self.execute_graph(schema.fixtures.line(150), graphson) + rs = self.execute_graph("g.E().range(0,10)", graphson) + self.assertFalse(rs.has_more_pages) + results = self.resultset_to_list(rs) + self.assertEqual(len(results), 10) + ep = self.get_execution_profile(graphson) + for result in results: + schema.ensure_properties(self.session, result, execution_profile=ep) + validate_line_edge(self, result) + + def _test_classic_graph(self, schema, graphson): + """ + Test to validate that basic graph generation, and vertex and edges are surfaced correctly + + Creates a simple classic tinkerpot graph, and iterates over the the vertices and edges + ensureing that each one is correct. See reference graph here + http://www.tinkerpop.com/docs/3.0.0.M1/ + + @since 1.0.0 + @jira_ticket PYTHON-457 + @expected_result graph should generate and all vertices and edge results should be + + @test_category dse graph + """ + self.execute_graph(schema.fixtures.classic(), graphson) + rs = self.execute_graph('g.V()', graphson) + ep = self.get_execution_profile(graphson) + for vertex in rs: + schema.ensure_properties(self.session, vertex, execution_profile=ep) + validate_classic_vertex(self, vertex) + rs = self.execute_graph('g.E()', graphson) + for edge in rs: + schema.ensure_properties(self.session, edge, execution_profile=ep) + validate_classic_edge(self, edge) + + def _test_graph_classic_path(self, schema, graphson): + """ + Test to validate that the path version of the result type is generated correctly. It also + tests basic path results as that is not covered elsewhere + + @since 1.0.0 + @jira_ticket PYTHON-479 + @expected_result path object should be unpacked correctly including all nested edges and verticies + @test_category dse graph + """ + self.execute_graph(schema.fixtures.classic(), graphson) + rs = self.execute_graph("g.V().hasLabel('person').has('name', 'marko').as('a').outE('knows').inV().as('c', 'd')." + " outE('created').as('e', 'f', 'g').inV().path()", + graphson) + rs_list = list(rs) + self.assertEqual(len(rs_list), 2) + for result in rs_list: + try: + path = result.as_path() + except: + path = result + + ep = self.get_execution_profile(graphson) + for obj in path.objects: + schema.ensure_properties(self.session, obj, ep) + + validate_path_result_type(self, path) + + def _test_large_create_script(self, schema, graphson): + """ + Test to validate that server errors due to large groovy scripts are properly surfaced + + Creates a very large line graph script and executes it. Then proceeds to create a line graph script + that is to large for the server to handle expects a server error to be returned + + @since 1.0.0 + @jira_ticket PYTHON-457 + @expected_result graph should generate and all vertices and edge results should be + + @test_category dse graph + """ + self.execute_graph(schema.fixtures.line(150), graphson) + self.execute_graph(schema.fixtures.line(300), graphson) # This should passed since the queries are splitted + self.assertRaises(SyntaxException, self.execute_graph, schema.fixtures.line(300, single_script=True), graphson) # this is not and too big + + def _test_large_result_set(self, schema, graphson): + """ + Test to validate that large result sets return correctly. + + Creates a very large graph. Ensures that large result sets are handled appropriately. + + @since 1.0.0 + @jira_ticket PYTHON-457 + @expected_result when limits of result sets are hit errors should be surfaced appropriately + + @test_category dse graph + """ + self.execute_graph(schema.fixtures.large(), graphson, execution_profile_options={'request_timeout': 32}) + rs = self.execute_graph("g.V()", graphson) + for result in rs: + validate_generic_vertex_result_type(self, result) + + def _test_param_passing(self, schema, graphson): + """ + Test to validate that parameter passing works as expected + + @since 1.0.0 + @jira_ticket PYTHON-457 + @expected_result parameters work as expected + + @test_category dse graph + """ + + # unused parameters are passed, but ignored + self.execute_graph("null", graphson, params={"doesn't": "matter", "what's": "passed"}) + + # multiple params + rs = self.execute_graph("[a, b]", graphson, params={'a': 0, 'b': 1}) + results = self.resultset_to_list(rs) + self.assertEqual(results[0], 0) + self.assertEqual(results[1], 1) + + if graphson == GraphProtocol.GRAPHSON_1_0: + # different value types + for param in (None, "string", 1234, 5.678, True, False): + result = self.resultset_to_list(self.execute_graph('x', graphson, params={'x': param}))[0] + self.assertEqual(result, param) + + def _test_vertex_property_properties(self, schema, graphson): + """ + Test verifying vertex property properties + + @since 1.0.0 + @jira_ticket PYTHON-487 + + @test_category dse graph + """ + if schema is not ClassicGraphSchema: + raise unittest.SkipTest('skipped because rich properties are only supported with classic graphs') + + self.execute_graph("schema.propertyKey('k0').Text().ifNotExists().create();", graphson) + self.execute_graph("schema.propertyKey('k1').Text().ifNotExists().create();", graphson) + self.execute_graph("schema.propertyKey('key').Text().properties('k0', 'k1').ifNotExists().create();", graphson) + self.execute_graph("schema.vertexLabel('MLP').properties('key').ifNotExists().create();", graphson) + v = self.execute_graph('''v = graph.addVertex('MLP') + v.property('key', 'value', 'k0', 'v0', 'k1', 'v1') + v''', graphson)[0] + self.assertEqual(len(v.properties), 1) + self.assertEqual(len(v.properties['key']), 1) + p = v.properties['key'][0] + self.assertEqual(p.label, 'key') + self.assertEqual(p.value, 'value') + self.assertEqual(p.properties, {'k0': 'v0', 'k1': 'v1'}) + + def _test_vertex_multiple_properties(self, schema, graphson): + """ + Test verifying vertex property form for various Cardinality + + All key types are encoded as a list, regardless of cardinality + + Single cardinality properties have only one value -- the last one added + + Default is single (this is config dependent) + + @since 1.0.0 + @jira_ticket PYTHON-487 + + @test_category dse graph + """ + if schema is not ClassicGraphSchema: + raise unittest.SkipTest('skipped because multiple properties are only supported with classic graphs') + + self.execute_graph('''Schema schema = graph.schema(); + schema.propertyKey('mult_key').Text().multiple().ifNotExists().create(); + schema.propertyKey('single_key').Text().single().ifNotExists().create(); + schema.vertexLabel('MPW1').properties('mult_key').ifNotExists().create(); + schema.vertexLabel('SW1').properties('single_key').ifNotExists().create();''', graphson) + + v = self.execute_graph('''v = graph.addVertex('MPW1') + v.property('mult_key', 'value') + v''', graphson)[0] + self.assertEqual(len(v.properties), 1) + self.assertEqual(len(v.properties['mult_key']), 1) + self.assertEqual(v.properties['mult_key'][0].label, 'mult_key') + self.assertEqual(v.properties['mult_key'][0].value, 'value') + + # multiple_with_two_values + v = self.execute_graph('''g.addV('MPW1').property('mult_key', 'value0').property('mult_key', 'value1')''', graphson)[0] + self.assertEqual(len(v.properties), 1) + self.assertEqual(len(v.properties['mult_key']), 2) + self.assertEqual(v.properties['mult_key'][0].label, 'mult_key') + self.assertEqual(v.properties['mult_key'][1].label, 'mult_key') + self.assertEqual(v.properties['mult_key'][0].value, 'value0') + self.assertEqual(v.properties['mult_key'][1].value, 'value1') + + # single_with_one_value + v = self.execute_graph('''v = graph.addVertex('SW1') + v.property('single_key', 'value') + v''', graphson)[0] + self.assertEqual(len(v.properties), 1) + self.assertEqual(len(v.properties['single_key']), 1) + self.assertEqual(v.properties['single_key'][0].label, 'single_key') + self.assertEqual(v.properties['single_key'][0].value, 'value') + + if DSE_VERSION < Version('6.8'): + # single_with_two_values + with self.assertRaises(InvalidRequest): + v = self.execute_graph(''' + v = graph.addVertex('SW1') + v.property('single_key', 'value0').property('single_key', 'value1').next() + v + ''', graphson)[0] + else: + # >=6.8 single_with_two_values, first one wins + v = self.execute_graph('''v = graph.addVertex('SW1') + v.property('single_key', 'value0').property('single_key', 'value1') + v''', graphson)[0] + self.assertEqual(v.properties['single_key'][0].value, 'value0') + + def _test_result_forms(self, schema, graphson): + """ + Test to validate that geometric types function correctly + + Creates a very simple graph, and tries to insert a simple point type + + @since 1.0.0 + @jira_ticket DSP-8087 + @expected_result json types associated with insert is parsed correctly + + @test_category dse graph + """ + self.execute_graph(schema.fixtures.classic(), graphson) + ep = self.get_execution_profile(graphson) + + results = self.resultset_to_list(self.session.execute_graph('g.V()', execution_profile=ep)) + self.assertGreater(len(results), 0, "Result set was empty this was not expected") + for v in results: + schema.ensure_properties(self.session, v, ep) + validate_classic_vertex(self, v) + + results = self.resultset_to_list(self.session.execute_graph('g.E()', execution_profile=ep)) + self.assertGreater(len(results), 0, "Result set was empty this was not expected") + for e in results: + schema.ensure_properties(self.session, e, ep) + validate_classic_edge(self, e) + + def _test_query_profile(self, schema, graphson): + """ + Test to validate profiling results are deserialized properly. + + @since 1.6.0 + @jira_ticket PYTHON-1057 + @expected_result TraversalMetrics and Metrics are deserialized properly + + @test_category dse graph + """ + if graphson == GraphProtocol.GRAPHSON_1_0: + raise unittest.SkipTest('skipped because there is no metrics deserializer with graphson1') + + ep = self.get_execution_profile(graphson) + results = list(self.session.execute_graph("g.V().profile()", execution_profile=ep)) + self.assertEqual(len(results), 1) + self.assertIn('metrics', results[0]) + self.assertIn('dur', results[0]) + self.assertEqual(len(results[0]['metrics']), 2) + self.assertIn('dur', results[0]['metrics'][0]) + + def _test_query_bulkset(self, schema, graphson): + """ + Test to validate bulkset results are deserialized properly. + + @since 1.6.0 + @jira_ticket PYTHON-1060 + @expected_result BulkSet is deserialized properly to a list + + @test_category dse graph + """ + self.execute_graph(schema.fixtures.classic(), graphson) + ep = self.get_execution_profile(graphson) + results = list(self.session.execute_graph( + 'g.V().hasLabel("person").aggregate("x").by("age").cap("x")', + execution_profile=ep)) + self.assertEqual(len(results), 1) + results = results[0] + if type(results) is Result: + results = results.value + else: + self.assertEqual(len(results), 5) + self.assertEqual(results.count(35), 2) + + +@GraphTestConfiguration.generate_tests(schema=ClassicGraphSchema) +class ClassicGraphQueryTest(GenericGraphQueryTest): + pass + + +@GraphTestConfiguration.generate_tests(schema=CoreGraphSchema) +class CoreGraphQueryTest(GenericGraphQueryTest): + pass + + +@GraphTestConfiguration.generate_tests(schema=CoreGraphSchema) +class CoreGraphQueryWithTypeWrapperTest(GraphUnitTestCase): + + def _test_basic_query_with_type_wrapper(self, schema, graphson): + """ + Test to validate that a query using a type wrapper works. + + @since 2.8.0 + @jira_ticket PYTHON-1051 + @expected_result graph query works and doesn't raise an exception + + @test_category dse graph + """ + ep = self.get_execution_profile(graphson) + vl = VertexLabel(['tupleOf(Int, Bigint)']) + schema.create_vertex_label(self.session, vl, execution_profile=ep) + + prop_name = next(six.iterkeys(vl.non_pk_properties)) + with self.assertRaises(InvalidRequest): + schema.add_vertex(self.session, vl, prop_name, (1, 42), execution_profile=ep) + + schema.add_vertex(self.session, vl, prop_name, (1, to_bigint(42)), execution_profile=ep) diff --git a/tests/integration/advanced/test_adv_metadata.py b/tests/integration/advanced/test_adv_metadata.py index 25df0323a1..2c69a769a3 100644 --- a/tests/integration/advanced/test_adv_metadata.py +++ b/tests/integration/advanced/test_adv_metadata.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -from tests.integration import (BasicSharedKeyspaceUnitTestCase, +from packaging.version import Version + +from tests.integration import (BasicExistingKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCaseRF1, - greaterthanorequaldse51, - greaterthanorequaldse60, use_single_node, + greaterthanorequaldse51, greaterthanorequaldse60, + greaterthanorequaldse68, use_single_node, DSE_VERSION, requiredse) try: @@ -211,7 +213,7 @@ def test_rlac_on_table(self): table_meta = self.cluster.metadata.keyspaces[self.keyspace_name].tables['reports'] self.assertTrue(restrict_cql in table_meta.export_as_string()) - @unittest.skip("Dse 5.1 doesn't current MV and RLAC remove after update") + @unittest.skip("Dse 5.1 doesn't support MV and RLAC remove after update") @greaterthanorequaldse51 def test_rlac_on_mv(self): """ @@ -284,3 +286,82 @@ def test_nodesync_on_table(self): table_3rf = self.cluster.metadata.keyspaces["test3rf"].tables['test'] self.assertNotIn('nodesync =', table_3rf.export_as_string()) self.assertIsNone(table_3rf.options['nodesync']) + + +@greaterthanorequaldse68 +class GraphMetadataTests(BasicExistingKeyspaceUnitTestCase): + """ + Various tests to ensure that graph metadata are visible through driver metadata + @since DSE6.8 + @jira_ticket PYTHON-996 + @expected_result graph metadata are fetched + @test_category metadata + """ + + @classmethod + def setUpClass(cls): + if DSE_VERSION and DSE_VERSION >= Version('6.8'): + super(GraphMetadataTests, cls).setUpClass() + cls.session.execute(""" + CREATE KEYSPACE ks_no_graph_engine WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}; + """) + cls.session.execute(""" + CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1} and graph_engine = 'Core'; + """ % (cls.ks_name,)) + + cls.session.execute(""" + CREATE TABLE %s.person (name text PRIMARY KEY) WITH VERTEX LABEL; + """ % (cls.ks_name,)) + + cls.session.execute(""" + CREATE TABLE %s.software(company text, name text, version int, PRIMARY KEY((company, name), version)) WITH VERTEX LABEL rocksolidsoftware; + """ % (cls.ks_name,)) + + cls.session.execute(""" + CREATE TABLE %s.contributors (contributor text, company_name text, software_name text, software_version int, + PRIMARY KEY (contributor, company_name, software_name, software_version) ) + WITH CLUSTERING ORDER BY (company_name ASC, software_name ASC, software_version ASC) + AND EDGE LABEL contrib FROM person(contributor) TO rocksolidsoftware((company_name, software_name), software_version); + """ % (cls.ks_name,)) + + @classmethod + def tearDownClass(cls): + if DSE_VERSION and DSE_VERSION >= Version('6.8'): + cls.session.execute('DROP KEYSPACE {0}'.format('ks_no_graph_engine')) + cls.session.execute('DROP KEYSPACE {0}'.format(cls.ks_name)) + cls.cluster.shutdown() + + def test_keyspace_metadata(self): + self.assertIsNone(self.cluster.metadata.keyspaces['ks_no_graph_engine'].graph_engine, None) + self.assertEqual(self.cluster.metadata.keyspaces[self.ks_name].graph_engine, 'Core') + + def test_keyspace_metadata_alter_graph_engine(self): + self.session.execute("ALTER KEYSPACE %s WITH graph_engine = 'Tinker'" % (self.ks_name,)) + self.assertEqual(self.cluster.metadata.keyspaces[self.ks_name].graph_engine, 'Tinker') + self.session.execute("ALTER KEYSPACE %s WITH graph_engine = 'Core'" % (self.ks_name,)) + self.assertEqual(self.cluster.metadata.keyspaces[self.ks_name].graph_engine, 'Core') + + def test_vertex_metadata(self): + vertex_meta = self.cluster.metadata.keyspaces[self.ks_name].tables['person'].vertex + self.assertEqual(vertex_meta.keyspace_name, self.ks_name) + self.assertEqual(vertex_meta.table_name, 'person') + self.assertEqual(vertex_meta.label_name, 'person') + + vertex_meta = self.cluster.metadata.keyspaces[self.ks_name].tables['software'].vertex + self.assertEqual(vertex_meta.keyspace_name, self.ks_name) + self.assertEqual(vertex_meta.table_name, 'software') + self.assertEqual(vertex_meta.label_name, 'rocksolidsoftware') + + def test_edge_metadata(self): + edge_meta = self.cluster.metadata.keyspaces[self.ks_name].tables['contributors'].edge + self.assertEqual(edge_meta.keyspace_name, self.ks_name) + self.assertEqual(edge_meta.table_name, 'contributors') + self.assertEqual(edge_meta.label_name, 'contrib') + self.assertEqual(edge_meta.from_table, 'person') + self.assertEqual(edge_meta.from_label, 'person') + self.assertEqual(edge_meta.from_partition_key_columns, ['contributor']) + self.assertEqual(edge_meta.from_clustering_columns, []) + self.assertEqual(edge_meta.to_table, 'software') + self.assertEqual(edge_meta.to_label, 'rocksolidsoftware') + self.assertEqual(edge_meta.to_partition_key_columns, ['company_name', 'software_name']) + self.assertEqual(edge_meta.to_clustering_columns, ['software_version']) diff --git a/tests/integration/advanced/test_auth.py b/tests/integration/advanced/test_auth.py index 533a9e70ed..59bd3dec5c 100644 --- a/tests/integration/advanced/test_auth.py +++ b/tests/integration/advanced/test_auth.py @@ -30,10 +30,9 @@ from cassandra.protocol import Unauthorized from cassandra.query import SimpleStatement from tests.integration import (get_cluster, greaterthanorequaldse51, - remove_cluster, requiredse) -from tests.integration.advanced import (ADS_HOME, DSE_VERSION, - generate_classic, reset_graph, - use_single_node_with_graph) + remove_cluster, requiredse, DSE_VERSION) +from tests.integration.advanced import ADS_HOME, use_single_node_with_graph +from tests.integration.advanced.graph import reset_graph, ClassicGraphFixtures log = logging.getLogger(__name__) @@ -225,7 +224,7 @@ def test_connect_with_kerberos_and_graph(self): reset_graph(self.session, self._testMethodName.lower()) profiles = self.cluster.profile_manager.profiles profiles[EXEC_PROFILE_GRAPH_DEFAULT].graph_options.graph_name = self._testMethodName.lower() - generate_classic(self.session) + self.session.execute_graph(ClassicGraphFixtures.classic()) rs = self.session.execute_graph('g.V()') self.assertIsNotNone(rs) diff --git a/tests/integration/advanced/test_spark.py b/tests/integration/advanced/test_spark.py index 8864e9a947..a307913abb 100644 --- a/tests/integration/advanced/test_spark.py +++ b/tests/integration/advanced/test_spark.py @@ -17,7 +17,8 @@ from cassandra.cluster import EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT from cassandra.graph import SimpleGraphStatement from tests.integration import DSE_VERSION, requiredse -from tests.integration.advanced import BasicGraphUnitTestCase, use_singledc_wth_graph_and_spark, generate_classic, find_spark_master +from tests.integration.advanced import use_singledc_wth_graph_and_spark, find_spark_master +from tests.integration.advanced.graph import BasicGraphUnitTestCase, ClassicGraphFixtures log = logging.getLogger(__name__) @@ -38,12 +39,12 @@ class SparkLBTests(BasicGraphUnitTestCase): @test_category dse graph """ def test_spark_analytic_query(self): - generate_classic(self.session) + self.session.execute_graph(ClassicGraphFixtures.classic()) spark_master = find_spark_master(self.session) # Run multipltle times to ensure we don't round robin for i in range(3): to_run = SimpleGraphStatement("g.V().count()") rs = self.session.execute_graph(to_run, execution_profile=EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT) - self.assertEqual(rs[0].value, 6) + self.assertEqual(rs[0].value, 7) self.assertEqual(rs.response_future._current_host.address, spark_master) diff --git a/tests/integration/simulacron/utils.py b/tests/integration/simulacron/utils.py index 0d474651c2..5cee5ac3f1 100644 --- a/tests/integration/simulacron/utils.py +++ b/tests/integration/simulacron/utils.py @@ -17,7 +17,7 @@ import time from six.moves.urllib.request import build_opener, Request, HTTPHandler -from cassandra.metadata import SchemaParserV4 +from cassandra.metadata import SchemaParserV4, SchemaParserDSE68 from tests.integration import CASSANDRA_VERSION, SIMULACRON_JAR, DSE_VERSION @@ -355,6 +355,11 @@ def prime_driver_defaults(): "message": "Invalid Query!"}) ) + # prepare empty rows for NGDG + for query in [SchemaParserDSE68._SELECT_VERTICES, + SchemaParserDSE68._SELECT_EDGES]: + PrimeQuery(query, result='success', then={'rows': [], 'column_types': {'row1': 'int'}}) + def prime_cluster(data_centers="3", version=None, cluster_name=DEFAULT_CLUSTER, dse_version=None): """ diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index a5038672d4..858be75283 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -2007,14 +2007,21 @@ def test_dct_alias(self): dct_table = self.cluster.metadata.keyspaces.get(self.ks_name).tables.get(self.function_table_name) # Format can very slightly between versions, strip out whitespace for consistency sake - self.assertTrue("c1'org.apache.cassandra.db.marshal.DynamicCompositeType(" - "s=>org.apache.cassandra.db.marshal.UTF8Type," - "i=>org.apache.cassandra.db.marshal.Int32Type)'" - in dct_table.as_cql_query().replace(" ", "")) + try: + self.assertTrue("c1'org.apache.cassandra.db.marshal.DynamicCompositeType(" + "s=>org.apache.cassandra.db.marshal.UTF8Type," + "i=>org.apache.cassandra.db.marshal.Int32Type)'" + in dct_table.as_cql_query().replace(" ", "")) + except: + # C* 4.0 + self.assertTrue("c1'org.apache.cassandra.db.marshal.DynamicCompositeType(" + "i=>org.apache.cassandra.db.marshal.Int32Type," + "s=>org.apache.cassandra.db.marshal.UTF8Type)'" + in dct_table.as_cql_query().replace(" ", "")) @greaterthanorequalcass30 -class Materia3lizedViewMetadataTestSimple(BasicSharedKeyspaceUnitTestCase): +class MaterializedViewMetadataTestSimple(BasicSharedKeyspaceUnitTestCase): def setUp(self): self.session.execute("CREATE TABLE {0}.{1} (pk int PRIMARY KEY, c int)".format(self.keyspace_name, self.function_table_name)) diff --git a/tests/unit/advanced/test_graph.py b/tests/unit/advanced/test_graph.py index 702b3c376b..f25a229f42 100644 --- a/tests/unit/advanced/test_graph.py +++ b/tests/unit/advanced/test_graph.py @@ -236,7 +236,9 @@ def test_path_str_repr(self): class GraphOptionTests(unittest.TestCase): - opt_mapping = dict((t[0], t[2]) for t in _graph_options if not t[0].endswith('consistency_level')) # cl excluded from general tests because it requires mapping to names + opt_mapping = dict((t[0], t[2]) for t in _graph_options if not + (t[0].endswith('consistency_level') or # cl excluded from general tests because it requires mapping to names + t[0] == 'graph_protocol')) # default is None api_params = dict((p, str(i)) for i, p in enumerate(opt_mapping)) @@ -245,8 +247,15 @@ def test_init(self): self._verify_api_params(opts, self.api_params) self._verify_api_params(GraphOptions(), { 'graph_source': 'g', - 'graph_language': 'gremlin-groovy', - 'graph_protocol': GraphProtocol.GRAPHSON_1_0 + 'graph_language': 'gremlin-groovy' + }) + + def test_with_graph_protocol(self): + opts = GraphOptions(graph_protocol='graphson-2-0') + self.assertEqual(opts._graph_options, { + 'graph-source': b'g', + 'graph-language': b'gremlin-groovy', + 'graph-results': b'graphson-2-0' }) def test_init_unknown_kwargs(self): @@ -311,7 +320,10 @@ def test_del_attr(self): def _verify_api_params(self, opts, api_params): self.assertEqual(len(opts._graph_options), len(api_params)) for name, value in api_params.items(): - value = six.b(value) + try: + value = six.b(value) + except: + pass # already bytes self.assertEqual(getattr(opts, name), value) self.assertEqual(opts._graph_options[self.opt_mapping[name]], value) diff --git a/tests/unit/advanced/test_insights.py b/tests/unit/advanced/test_insights.py index b63e48ba6a..3db8883542 100644 --- a/tests/unit/advanced/test_insights.py +++ b/tests/unit/advanced/test_insights.py @@ -169,7 +169,7 @@ def test_graph_execution_profile(self): 'serialConsistency': None, 'speculativeExecution': {'namespace': 'cassandra.policies', 'options': {}, 'type': 'NoSpeculativeExecutionPolicy'}, - 'graphOptions': {'graphProtocol': 'graphson-1.0', + 'graphOptions': {'graphProtocol': None, 'language': 'gremlin-groovy', 'source': 'g'}, } @@ -195,7 +195,7 @@ def test_graph_analytics_execution_profile(self): 'serialConsistency': None, 'speculativeExecution': {'namespace': 'cassandra.policies', 'options': {}, 'type': 'NoSpeculativeExecutionPolicy'}, - 'graphOptions': {'graphProtocol': 'graphson-1.0', + 'graphOptions': {'graphProtocol': None, 'language': 'gremlin-groovy', 'source': 'a'}, } diff --git a/tests/unit/advanced/test_metadata.py b/tests/unit/advanced/test_metadata.py new file mode 100644 index 0000000000..addd514169 --- /dev/null +++ b/tests/unit/advanced/test_metadata.py @@ -0,0 +1,141 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +from cassandra.metadata import ( + KeyspaceMetadata, TableMetadataDSE68, + VertexMetadata, EdgeMetadata +) + + +class GraphMetadataToCQLTests(unittest.TestCase): + + def _create_edge_metadata(self, partition_keys=['pk1'], clustering_keys=['c1']): + return EdgeMetadata( + 'keyspace', 'table', 'label', 'from_table', 'from_label', + partition_keys, clustering_keys, 'to_table', 'to_label', + partition_keys, clustering_keys) + + def _create_vertex_metadata(self, label_name='label'): + return VertexMetadata('keyspace', 'table', label_name) + + def _create_keyspace_metadata(self, graph_engine): + return KeyspaceMetadata( + 'keyspace', True, 'org.apache.cassandra.locator.SimpleStrategy', + {'replication_factor': 1}, graph_engine=graph_engine) + + def _create_table_metadata(self, with_vertex=False, with_edge=False): + tm = TableMetadataDSE68('keyspace', 'table') + if with_vertex: + tm.vertex = self._create_vertex_metadata() if with_vertex is True else with_vertex + elif with_edge: + tm.edge = self._create_edge_metadata() if with_edge is True else with_edge + + return tm + + def test_keyspace_no_graph_engine(self): + km = self._create_keyspace_metadata(None) + self.assertEqual(km.graph_engine, None) + self.assertNotIn( + "graph_engine", + km.as_cql_query() + ) + + def test_keyspace_with_graph_engine(self): + graph_engine = 'Core' + km = self._create_keyspace_metadata(graph_engine) + self.assertEqual(km.graph_engine, graph_engine) + cql = km.as_cql_query() + self.assertIn( + "graph_engine", + cql + ) + self.assertIn( + "Core", + cql + ) + + def test_table_no_vertex_or_edge(self): + tm = self._create_table_metadata() + self.assertIsNone(tm.vertex) + self.assertIsNone(tm.edge) + cql = tm.as_cql_query() + self.assertNotIn("VERTEX LABEL", cql) + self.assertNotIn("EDGE LABEL", cql) + + def test_table_with_vertex(self): + tm = self._create_table_metadata(with_vertex=True) + self.assertIsInstance(tm.vertex, VertexMetadata) + self.assertIsNone(tm.edge) + cql = tm.as_cql_query() + self.assertIn("VERTEX LABEL", cql) + self.assertNotIn("EDGE LABEL", cql) + + def test_table_with_edge(self): + tm = self._create_table_metadata(with_edge=True) + self.assertIsNone(tm.vertex) + self.assertIsInstance(tm.edge, EdgeMetadata) + cql = tm.as_cql_query() + self.assertNotIn("VERTEX LABEL", cql) + self.assertIn("EDGE LABEL", cql) + self.assertIn("FROM from_label", cql) + self.assertIn("TO to_label", cql) + + def test_vertex_with_label(self): + tm = self. _create_table_metadata(with_vertex=True) + self.assertTrue(tm.as_cql_query().endswith('VERTEX LABEL label')) + + def test_edge_single_partition_key_and_clustering_key(self): + tm = self._create_table_metadata(with_edge=True) + self.assertIn( + 'FROM from_label(pk1, c1)', + tm.as_cql_query() + ) + + def test_edge_multiple_partition_keys(self): + edge = self._create_edge_metadata(partition_keys=['pk1', 'pk2']) + tm = self. _create_table_metadata(with_edge=edge) + self.assertIn( + 'FROM from_label((pk1, pk2), ', + tm.as_cql_query() + ) + + def test_edge_no_clustering_keys(self): + edge = self._create_edge_metadata(clustering_keys=[]) + tm = self. _create_table_metadata(with_edge=edge) + self.assertIn( + 'FROM from_label(pk1) ', + tm.as_cql_query() + ) + + def test_edge_multiple_clustering_keys(self): + edge = self._create_edge_metadata(clustering_keys=['c1', 'c2']) + tm = self. _create_table_metadata(with_edge=edge) + self.assertIn( + 'FROM from_label(pk1, c1, c2) ', + tm.as_cql_query() + ) + + def test_edge_multiple_partition_and_clustering_keys(self): + edge = self._create_edge_metadata(partition_keys=['pk1', 'pk2'], + clustering_keys=['c1', 'c2']) + tm = self. _create_table_metadata(with_edge=edge) + self.assertIn( + 'FROM from_label((pk1, pk2), c1, c2) ', + tm.as_cql_query() + ) From 6384d6fc3da86d05f07dd08205fac134d8334979 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 11 Oct 2019 16:22:21 -0500 Subject: [PATCH 1664/2005] Add changelog entry for PYTHON-1158 --- CHANGELOG.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 86f98c80b9..b3bfdbd237 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,10 @@ ====== Unreleased +Features +-------- +* Use 4.0 schema parser in 4 alpha and snapshot builds (PYTHON-1158) + Bug Fixes --------- * Connection setup methods prevent using ExecutionProfile in cqlengine (PYTHON-1009) From 8366eb45775743435e32455f42af8a946edd86cd Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 15 Oct 2019 15:34:03 -0500 Subject: [PATCH 1665/2005] Disallow manual ssl context with cloud configuration --- cassandra/cluster.py | 9 +++------ tests/integration/advanced/cloud/test_cloud.py | 2 +- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index cc8be5ba63..7366fc9ca7 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -910,12 +910,9 @@ def __init__(self, """ if cloud is not None: - if contact_points is not _NOT_SET or endpoint_factory: - raise ValueError(("contact_points and endpoint_factory" - "cannot be specified with a cloud configuration")) - - if ssl_context: - cloud['ssl_context'] = ssl_context + if contact_points is not _NOT_SET or endpoint_factory or ssl_context: + raise ValueError("contact_points, endpoint_factory and ssl_context " + "cannot be specified with a cloud configuration") cloud_config = dscloud.get_cloud_config(cloud) diff --git a/tests/integration/advanced/cloud/test_cloud.py b/tests/integration/advanced/cloud/test_cloud.py index bda9f7f6b0..c6025d3316 100644 --- a/tests/integration/advanced/cloud/test_cloud.py +++ b/tests/integration/advanced/cloud/test_cloud.py @@ -86,7 +86,7 @@ def test_support_overriding_auth_provider(self): self.assertEqual(self.cluster.auth_provider.password, 'invalid') def test_support_overriding_ssl_context(self): - with self.assertRaises(DriverException): + with self.assertRaises(ValueError): # will fail since the ssl_context is self.connect(self.creds, ssl_context=SSLContext(PROTOCOL_TLSv1)) From 31e3a9e6403f948d2dc1e0b249208110ab9eed88 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 16 Oct 2019 09:49:02 -0400 Subject: [PATCH 1666/2005] Add event loops cloud limitation --- docs/cloud.rst | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/docs/cloud.rst b/docs/cloud.rst index 2393a4b107..e9fcbbef19 100644 --- a/docs/cloud.rst +++ b/docs/cloud.rst @@ -20,10 +20,19 @@ To connect to a DataStax Apollo cluster: session = cluster.connect() Apollo Differences -================= +================== In most circumstances, the client code for interacting with an Apollo cluster will be the same as interacting with any other Cassandra cluster. The exceptions being: * A cloud configuration must be passed to a :class:`~.Cluster` instance via the `cloud` attribute (as demonstrated above). * An SSL connection will be established automatically. Manual SSL configuration is not allowed, and using `ssl_context` or `ssl_options` will result in an exception. * A :class:`~.Cluster`'s `contact_points` attribute should not be used. The cloud config contains all of the necessary contact information. * If a consistency level is not specified for an execution profile or query, then :attr:`.ConsistencyLevel.LOCAL_QUORUM` will be used as the default. + +Limitations +=========== + +Event loops +----------- + +Twisted and Evenlet aren't supported yet. These event loops are still using the old way to configure +SSL (ssl_options), which is not compatible with the secure connect bundle provided by Apollo. From b6b55120fe4f694c28791f008568d68c9aa8af13 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 16 Oct 2019 10:42:15 -0400 Subject: [PATCH 1667/2005] Move cassandra.cloud to cassandra.datastax.cloud --- cassandra/cluster.py | 3 +-- cassandra/datastax/__init__.py | 13 +++++++++++++ cassandra/{ => datastax}/cloud/__init__.py | 0 setup.py | 3 ++- tests/integration/advanced/cloud/__init__.py | 1 + tests/integration/advanced/cloud/test_cloud.py | 9 +++++---- tests/unit/advanced/cloud/test_cloud.py | 4 ++-- 7 files changed, 24 insertions(+), 9 deletions(-) create mode 100644 cassandra/datastax/__init__.py rename cassandra/{ => datastax}/cloud/__init__.py (100%) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 7366fc9ca7..af186fda1c 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -18,7 +18,6 @@ """ from __future__ import absolute_import -import os import atexit from collections import defaultdict from concurrent.futures import ThreadPoolExecutor, FIRST_COMPLETED, wait as wait_futures @@ -78,7 +77,7 @@ named_tuple_factory, dict_factory, tuple_factory, FETCH_SIZE_UNSET) from cassandra.timestamps import MonotonicTimestampGenerator from cassandra.compat import Mapping -from cassandra import cloud as dscloud +from cassandra.datastax import cloud as dscloud def _is_eventlet_monkey_patched(): diff --git a/cassandra/datastax/__init__.py b/cassandra/datastax/__init__.py new file mode 100644 index 0000000000..2c9ca172f8 --- /dev/null +++ b/cassandra/datastax/__init__.py @@ -0,0 +1,13 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/cassandra/cloud/__init__.py b/cassandra/datastax/cloud/__init__.py similarity index 100% rename from cassandra/cloud/__init__.py rename to cassandra/datastax/cloud/__init__.py diff --git a/setup.py b/setup.py index 2712897112..1259092849 100644 --- a/setup.py +++ b/setup.py @@ -416,7 +416,8 @@ def run_setup(extensions): url='http://github.com/datastax/python-driver', author='Tyler Hobbs', author_email='tyler@datastax.com', - packages=['cassandra', 'cassandra.io', 'cassandra.cqlengine', 'cassandra.cloud'], + packages=['cassandra', 'cassandra.io', 'cassandra.cqlengine', 'cassandra.datastax', + 'cassandra.datastax.cloud'], keywords='cassandra,cql,orm', include_package_data=True, install_requires=dependencies, diff --git a/tests/integration/advanced/cloud/__init__.py b/tests/integration/advanced/cloud/__init__.py index bd568b69ef..83f5e21ce3 100644 --- a/tests/integration/advanced/cloud/__init__.py +++ b/tests/integration/advanced/cloud/__init__.py @@ -41,6 +41,7 @@ class CloudProxyCluster(unittest.TestCase): creds = os.path.join(creds_dir, 'creds-v1.zip') creds_no_auth = os.path.join(creds_dir, 'creds-v1-wo-creds.zip') creds_unreachable = os.path.join(creds_dir, 'creds-v1-unreachable.zip') + creds_invalid_ca = os.path.join(creds_dir, 'creds-v1-invalid-ca.zip') cluster, connect = None, False session = None diff --git a/tests/integration/advanced/cloud/test_cloud.py b/tests/integration/advanced/cloud/test_cloud.py index d2f67f1590..d096799e0f 100644 --- a/tests/integration/advanced/cloud/test_cloud.py +++ b/tests/integration/advanced/cloud/test_cloud.py @@ -85,11 +85,12 @@ def test_support_overriding_auth_provider(self): self.assertEqual(self.cluster.auth_provider.username, 'invalid') self.assertEqual(self.cluster.auth_provider.password, 'invalid') - def test_support_overriding_ssl_context(self): - with self.assertRaises(ValueError): - # will fail since the ssl_context is + def test_error_overriding_ssl_context(self): + with self.assertRaises(ValueError) as cm: self.connect(self.creds, ssl_context=SSLContext(PROTOCOL_TLSv1)) + self.assertIn('cannot be specified with a cloud configuration', str(cm.exception)) + def test_error_when_bundle_doesnt_exist(self): try: self.connect('/invalid/path/file.zip') @@ -134,7 +135,7 @@ def test_metadata_unreachable(self): def test_metadata_ssl_error(self): with self.assertRaises(DriverException) as cm: - self.connect(self.creds, ssl_context=SSLContext(PROTOCOL_TLSv1)) + self.connect(self.creds_invalid_ca) self.assertIn('Unable to connect to the metadata', str(cm.exception)) diff --git a/tests/unit/advanced/cloud/test_cloud.py b/tests/unit/advanced/cloud/test_cloud.py index 6855beced2..e6001fb474 100644 --- a/tests/unit/advanced/cloud/test_cloud.py +++ b/tests/unit/advanced/cloud/test_cloud.py @@ -13,7 +13,7 @@ import os -from cassandra import cloud +from cassandra.datastax import cloud from mock import patch @@ -56,7 +56,7 @@ def _check_config(self, config): def test_read_cloud_config_from_zip(self): - with patch('cassandra.cloud.read_metadata_info', side_effect=self._read_metadata_info_side_effect): + with patch('cassandra.datastax.cloud.read_metadata_info', side_effect=self._read_metadata_info_side_effect): config = cloud.get_cloud_config(self.config_zip) self._check_config(config) From 6a1b7e6ff43506a2d93378b7a86ff4dc4e18158b Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 16 Oct 2019 10:52:50 -0400 Subject: [PATCH 1668/2005] move PRODUCT_APOLLO var in the cloud module --- cassandra/cluster.py | 4 +--- cassandra/connection.py | 4 ++-- cassandra/datastax/cloud/__init__.py | 2 ++ 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index af186fda1c..a8ee3519ee 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -128,8 +128,6 @@ def _is_gevent_monkey_patched(): _NOT_SET = object() -PRODUCT_APOLLO = "DATASTAX_APOLLO" - class NoHostAvailable(Exception): """ @@ -2995,7 +2993,7 @@ def connect(self): self._protocol_version = self._cluster.protocol_version self._set_new_connection(self._reconnect_internal()) - self._cluster.metadata.dbaas = self._connection.product_type == PRODUCT_APOLLO + self._cluster.metadata.dbaas = self._connection._product_type == dscloud.PRODUCT_APOLLO def _set_new_connection(self, conn): """ diff --git a/cassandra/connection.py b/cassandra/connection.py index 276a462f33..ba08ae2960 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -512,7 +512,7 @@ class Connection(object): _ssl_impl = ssl _check_hostname = False - product_type = None + _product_type = None def __init__(self, host='127.0.0.1', port=9042, authenticator=None, ssl_options=None, sockopts=None, compression=True, @@ -942,7 +942,7 @@ def _handle_options_response(self, options_response): id(self), self.endpoint) supported_cql_versions = options_response.cql_versions remote_supported_compressions = options_response.options['COMPRESSION'] - self.product_type = options_response.options.get('PRODUCT_TYPE', [None])[0] + self._product_type = options_response.options.get('PRODUCT_TYPE', [None])[0] if self.cql_version: if self.cql_version not in supported_cql_versions: diff --git a/cassandra/datastax/cloud/__init__.py b/cassandra/datastax/cloud/__init__.py index 4110296f5b..d2b65f7cfb 100644 --- a/cassandra/datastax/cloud/__init__.py +++ b/cassandra/datastax/cloud/__init__.py @@ -33,6 +33,8 @@ __all__ = ['get_cloud_config'] +PRODUCT_APOLLO = "DATASTAX_APOLLO" + class CloudConfig(object): From b053e4941399aeaf5e1bab8fa8754698ab8617fb Mon Sep 17 00:00:00 2001 From: Oz Tiram Date: Thu, 17 Oct 2019 13:48:05 +0200 Subject: [PATCH 1669/2005] Fix broken link to map type documentation This fixes the previous link which is broken. --- cassandra/cqlengine/columns.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 7c36371ba7..49116129fc 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -933,7 +933,7 @@ class Map(BaseContainerColumn): """ Stores a key -> value map (dictionary) - http://www.datastax.com/documentation/cql/3.1/cql/cql_using/use_map_t.html + https://docs.datastax.com/en/dse/6.7/cql/cql/cql_using/useMap.html """ _python_type_hashable = False From d06784f2b48edc4a275e968c3d3c91d6bfe59b91 Mon Sep 17 00:00:00 2001 From: Henning Schroeder Date: Sun, 20 Oct 2019 22:48:05 +0200 Subject: [PATCH 1670/2005] re-raising the CQLEngineException will fail on Python 3 - instead a NameError will be thrown because unicode is unknown --- cassandra/cqlengine/management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 42ded8a6c2..536bde6349 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -232,7 +232,7 @@ def _sync_table(model, connection=None): except CQLEngineException as ex: # 1.2 doesn't return cf names, so we have to examine the exception # and ignore if it says the column family already exists - if "Cannot add already existing column family" not in unicode(ex): + if "Cannot add already existing column family" not in six.text_type(ex): raise else: log.debug(format_log_context("sync_table checking existing table %s", keyspace=ks_name, connection=connection), cf_name) From 7519deed8bb6b5335c4d7979fb01dfcd6428caad Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 21 Oct 2019 08:03:45 -0500 Subject: [PATCH 1671/2005] Disable pull requests on weekly jobs --- build.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/build.yaml b/build.yaml index b389c2abc3..e8b1147092 100644 --- a/build.yaml +++ b/build.yaml @@ -50,6 +50,7 @@ schedules: release_test: schedule: per_commit + disable_pull_requests: true branches: include: [/release-.+/] env_vars: | @@ -57,6 +58,7 @@ schedules: weekly_master: schedule: 0 10 * * 6 + disable_pull_requests: true branches: include: [master] env_vars: | @@ -68,6 +70,7 @@ schedules: weekly_gevent: schedule: 0 14 * * 6 + disable_pull_requests: true branches: include: [master] env_vars: | @@ -79,6 +82,7 @@ schedules: weekly_eventlet: schedule: 0 18 * * 6 + disable_pull_requests: true branches: include: [master] env_vars: | @@ -90,6 +94,7 @@ schedules: weekly_asyncio: schedule: 0 22 * * 6 + disable_pull_requests: true branches: include: [master] env_vars: | @@ -101,6 +106,7 @@ schedules: weekly_async: schedule: 0 10 * * 7 + disable_pull_requests: true branches: include: [master] env_vars: | @@ -112,6 +118,7 @@ schedules: weekly_twister: schedule: 0 14 * * 7 + disable_pull_requests: true branches: include: [master] env_vars: | From 6b02aa1d12a336c1dadcd8bc5d6dc29b9ad5f31c Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 21 Oct 2019 08:27:24 -0500 Subject: [PATCH 1672/2005] Disable pull requests on weekly jobs --- build.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/build.yaml b/build.yaml index 6428b25c1f..335de1ec5a 100644 --- a/build.yaml +++ b/build.yaml @@ -50,6 +50,7 @@ schedules: release_test: schedule: per_commit + disable_pull_requests: true branches: include: [/release-.+/] env_vars: | @@ -57,6 +58,7 @@ schedules: weekly_master: schedule: 0 10 * * 6 + disable_pull_requests: true branches: include: [master] env_vars: | @@ -68,6 +70,7 @@ schedules: weekly_gevent: schedule: 0 14 * * 6 + disable_pull_requests: true branches: include: [master] env_vars: | @@ -79,6 +82,7 @@ schedules: weekly_eventlet: schedule: 0 18 * * 6 + disable_pull_requests: true branches: include: [master] env_vars: | @@ -90,6 +94,7 @@ schedules: weekly_asyncio: schedule: 0 22 * * 6 + disable_pull_requests: true branches: include: [master] env_vars: | @@ -101,6 +106,7 @@ schedules: weekly_async: schedule: 0 10 * * 7 + disable_pull_requests: true branches: include: [master] env_vars: | @@ -112,6 +118,7 @@ schedules: weekly_twister: schedule: 0 14 * * 7 + disable_pull_requests: true branches: include: [master] env_vars: | From c0504742da9c6ae7a31055bdbee92eac99636ee8 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 22 Oct 2019 15:14:32 -0400 Subject: [PATCH 1673/2005] ConnectionTests.test_heart_beat_timeout shouldn't send OptionMessage when the CC reconnect --- tests/integration/simulacron/test_connection.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index dd7dc261d7..c19b61635c 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -19,13 +19,14 @@ import logging import time -from mock import Mock +from mock import Mock, patch from cassandra import OperationTimedOut from cassandra.cluster import (EXEC_PROFILE_DEFAULT, Cluster, ExecutionProfile, _Scheduler, NoHostAvailable) from cassandra.policies import HostStateListener, RoundRobinPolicy from cassandra.io.asyncorereactor import AsyncoreConnection +from cassandra.connection import DEFAULT_CQL_VERSION from tests import connection_class, thread_pool_executor_class from tests.unit.cython.utils import cythontest from tests.integration import (PROTOCOL_VERSION, requiressimulacron) @@ -75,9 +76,20 @@ def make_query_plan(self, working_keyspace=None, query=None): return hosts +def _send_options_message(self): + """ + Mock that doesn't the OptionMessage. It is required for the heart_beat_timeout + test to avoid a condition where the CC tries to reconnect in the executor but can't + since we prime that message.""" + self._compressor = None + self.cql_version = DEFAULT_CQL_VERSION + self._send_startup_message(no_compact=self.no_compact) + + @requiressimulacron class ConnectionTests(SimulacronBase): + @patch('cassandra.connection.Connection._send_options_message', _send_options_message) def test_heart_beat_timeout(self): """ Test to ensure the hosts are marked as down after a OTO is received. From f2c1211c0f171c2495890a2766746bb098788bb7 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 22 Oct 2019 16:45:21 -0500 Subject: [PATCH 1674/2005] Check hostname for the proxy is valid when connecting to cloud node (#17) Check hostname for the proxy is valid when connecting to node --- cassandra/cluster.py | 5 +++-- tests/integration/advanced/cloud/test_cloud.py | 18 ++++++++++++++++++ 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 83c6503f98..646b714c95 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -907,13 +907,14 @@ def __init__(self, """ if cloud is not None: - if contact_points is not _NOT_SET or endpoint_factory or ssl_context: - raise ValueError("contact_points, endpoint_factory and ssl_context " + if contact_points is not _NOT_SET or endpoint_factory or ssl_context or ssl_options: + raise ValueError("contact_points, endpoint_factory, ssl_context, and ssl_options " "cannot be specified with a cloud configuration") cloud_config = dscloud.get_cloud_config(cloud) ssl_context = cloud_config.ssl_context + ssl_options = {'check_hostname': True} if (auth_provider is None and cloud_config.username and cloud_config.password): auth_provider = PlainTextAuthProvider(cloud_config.username, cloud_config.password) diff --git a/tests/integration/advanced/cloud/test_cloud.py b/tests/integration/advanced/cloud/test_cloud.py index d096799e0f..0ad8ea7f13 100644 --- a/tests/integration/advanced/cloud/test_cloud.py +++ b/tests/integration/advanced/cloud/test_cloud.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License +from cassandra.datastax.cloud import parse_metadata_info from cassandra.query import SimpleStatement try: @@ -91,6 +92,23 @@ def test_error_overriding_ssl_context(self): self.assertIn('cannot be specified with a cloud configuration', str(cm.exception)) + def test_error_overriding_ssl_options(self): + with self.assertRaises(ValueError) as cm: + self.connect(self.creds, ssl_options={'check_hostname': True}) + + self.assertIn('cannot be specified with a cloud configuration', str(cm.exception)) + + def _bad_hostname_metadata(self, config, http_data): + config = parse_metadata_info(config, http_data) + config.sni_host = "127.0.0.1" + return config + + def test_verify_hostname(self): + with patch('cassandra.datastax.cloud.parse_metadata_info', wraps=self._bad_hostname_metadata): + with self.assertRaises(NoHostAvailable) as e: + self.connect(self.creds) + self.assertIn("hostname", str(e.exception)) + def test_error_when_bundle_doesnt_exist(self): try: self.connect('/invalid/path/file.zip') From becfbe5996e4f760e49dfe2154b2f1c64b9dbe9e Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 23 Oct 2019 08:23:44 -0500 Subject: [PATCH 1675/2005] Docs changes for 3.20 --- cassandra/cluster.py | 1 + docs.yaml | 66 +++--- docs/.nav | 1 + docs/api/index.rst | 1 + docs/cloud.rst | 12 +- docs/cqlengine/queryset.rst | 402 ++++++++++++++++++------------------ docs/index.rst | 28 +-- 7 files changed, 259 insertions(+), 252 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 646b714c95..8fcbe336d6 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -793,6 +793,7 @@ def default_retry_policy(self, policy): cloud = None """ A dict of the cloud configuration. Example:: + { # path to the secure connect bundle 'secure_connect_bundle': '/path/to/secure-connect-dbname.zip' diff --git a/docs.yaml b/docs.yaml index a5ecceb6ec..1de32844a9 100644 --- a/docs.yaml +++ b/docs.yaml @@ -22,38 +22,44 @@ sections: # build extensions like libev CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - - name: '3.19' - ref: ac2471f9 - - name: '3.18' - ref: ec36b957 - - name: '3.17' - ref: 38e359e1 - - name: '3.16' - ref: '3.16.0' - - name: '3.15' - ref: '2ce0bd97' - - name: '3.14' - ref: '9af8bd19' - - name: '3.13' - ref: '3.13.0' - - name: '3.12' - ref: '43b9c995' - - name: '3.11' - ref: '3.11.0' - - name: '3.10' - ref: 64572368 - - name: 3.9 - ref: 3.9-doc - - name: 3.8 - ref: 3.8-doc - - name: 3.7 - ref: 3.7-doc - - name: 3.6 - ref: 3.6-doc - - name: 3.5 - ref: 3.5-doc + - name: '3.20' + ref: '38613962' +# - name: '3.19' +# ref: ac2471f9 +# - name: '3.18' +# ref: ec36b957 +# - name: '3.17' +# ref: 38e359e1 +# - name: '3.16' +# ref: '3.16.0' +# - name: '3.15' +# ref: '2ce0bd97' +# - name: '3.14' +# ref: '9af8bd19' +# - name: '3.13' +# ref: '3.13.0' +# - name: '3.12' +# ref: '43b9c995' +# - name: '3.11' +# ref: '3.11.0' +# - name: '3.10' +# ref: 64572368 +# - name: 3.9 +# ref: 3.9-doc +# - name: 3.8 +# ref: 3.8-doc +# - name: 3.7 +# ref: 3.7-doc +# - name: 3.6 +# ref: 3.6-doc +# - name: 3.5 +# ref: 3.5-doc redirects: - \A\/(.*)/\Z: /\1.html rewrites: - search: cassandra.apache.org/doc/cql3/CQL.html replace: cassandra.apache.org/doc/cql3/CQL-3.0.html + - search: http://www.datastax.com/documentation/cql/3.1/ + replace: https://docs.datastax.com/en/archived/cql/3.1/ + - search: http://www.datastax.com/docs/1.2/cql_cli/cql/BATCH + replace: https://docs.datastax.com/en/dse/6.7/cql/cql/cql_reference/cql_commands/cqlBatch.html diff --git a/docs/.nav b/docs/.nav index f4b3f2236c..7b39d9001d 100644 --- a/docs/.nav +++ b/docs/.nav @@ -9,5 +9,6 @@ security upgrading user_defined_types dates_and_times +cloud faq api diff --git a/docs/api/index.rst b/docs/api/index.rst index a56d12031a..cf792283d0 100644 --- a/docs/api/index.rst +++ b/docs/api/index.rst @@ -20,6 +20,7 @@ Core Driver cassandra/concurrent cassandra/connection cassandra/util + cassandra/timestamps cassandra/io/asyncioreactor cassandra/io/asyncorereactor cassandra/io/eventletreactor diff --git a/docs/cloud.rst b/docs/cloud.rst index e9fcbbef19..a7e2fb93c1 100644 --- a/docs/cloud.rst +++ b/docs/cloud.rst @@ -23,16 +23,16 @@ Apollo Differences ================== In most circumstances, the client code for interacting with an Apollo cluster will be the same as interacting with any other Cassandra cluster. The exceptions being: - * A cloud configuration must be passed to a :class:`~.Cluster` instance via the `cloud` attribute (as demonstrated above). - * An SSL connection will be established automatically. Manual SSL configuration is not allowed, and using `ssl_context` or `ssl_options` will result in an exception. - * A :class:`~.Cluster`'s `contact_points` attribute should not be used. The cloud config contains all of the necessary contact information. - * If a consistency level is not specified for an execution profile or query, then :attr:`.ConsistencyLevel.LOCAL_QUORUM` will be used as the default. +* A cloud configuration must be passed to a :class:`~.Cluster` instance via the `cloud` attribute (as demonstrated above). +* An SSL connection will be established automatically. Manual SSL configuration is not allowed, and using `ssl_context` or `ssl_options` will result in an exception. +* A :class:`~.Cluster`'s `contact_points` attribute should not be used. The cloud config contains all of the necessary contact information. +* If a consistency level is not specified for an execution profile or query, then :attr:`.ConsistencyLevel.LOCAL_QUORUM` will be used as the default. + Limitations =========== Event loops ------------ - +^^^^^^^^^^^ Twisted and Evenlet aren't supported yet. These event loops are still using the old way to configure SSL (ssl_options), which is not compatible with the secure connect bundle provided by Apollo. diff --git a/docs/cqlengine/queryset.rst b/docs/cqlengine/queryset.rst index 69574c85e4..fa99585141 100644 --- a/docs/cqlengine/queryset.rst +++ b/docs/cqlengine/queryset.rst @@ -6,113 +6,113 @@ Making Queries Retrieving objects ================== - Once you've populated Cassandra with data, you'll probably want to retrieve some of it. This is accomplished with QuerySet objects. This section will describe how to use QuerySet objects to retrieve the data you're looking for. +Once you've populated Cassandra with data, you'll probably want to retrieve some of it. This is accomplished with QuerySet objects. This section will describe how to use QuerySet objects to retrieve the data you're looking for. Retrieving all objects ---------------------- - The simplest query you can make is to return all objects from a table. +The simplest query you can make is to return all objects from a table. - This is accomplished with the ``.all()`` method, which returns a QuerySet of all objects in a table +This is accomplished with the ``.all()`` method, which returns a QuerySet of all objects in a table - Using the Person example model, we would get all Person objects like this: +Using the Person example model, we would get all Person objects like this: - .. code-block:: python +.. code-block:: python - all_objects = Person.objects.all() + all_objects = Person.objects.all() .. _retrieving-objects-with-filters: Retrieving objects with filters ------------------------------- - Typically, you'll want to query only a subset of the records in your database. +Typically, you'll want to query only a subset of the records in your database. - That can be accomplished with the QuerySet's ``.filter(\*\*)`` method. +That can be accomplished with the QuerySet's ``.filter(\*\*)`` method. - For example, given the model definition: +For example, given the model definition: - .. code-block:: python +.. code-block:: python - class Automobile(Model): - manufacturer = columns.Text(primary_key=True) - year = columns.Integer(primary_key=True) - model = columns.Text() - price = columns.Decimal() - options = columns.Set(columns.Text) + class Automobile(Model): + manufacturer = columns.Text(primary_key=True) + year = columns.Integer(primary_key=True) + model = columns.Text() + price = columns.Decimal() + options = columns.Set(columns.Text) - ...and assuming the Automobile table contains a record of every car model manufactured in the last 20 years or so, we can retrieve only the cars made by a single manufacturer like this: +...and assuming the Automobile table contains a record of every car model manufactured in the last 20 years or so, we can retrieve only the cars made by a single manufacturer like this: - .. code-block:: python +.. code-block:: python - q = Automobile.objects.filter(manufacturer='Tesla') + q = Automobile.objects.filter(manufacturer='Tesla') - You can also use the more convenient syntax: +You can also use the more convenient syntax: - .. code-block:: python +.. code-block:: python - q = Automobile.objects(Automobile.manufacturer == 'Tesla') + q = Automobile.objects(Automobile.manufacturer == 'Tesla') - We can then further filter our query with another call to **.filter** +We can then further filter our query with another call to **.filter** - .. code-block:: python +.. code-block:: python - q = q.filter(year=2012) + q = q.filter(year=2012) - *Note: all queries involving any filtering MUST define either an '=' or an 'in' relation to either a primary key column, or an indexed column.* +*Note: all queries involving any filtering MUST define either an '=' or an 'in' relation to either a primary key column, or an indexed column.* Accessing objects in a QuerySet =============================== - There are several methods for getting objects out of a queryset +There are several methods for getting objects out of a queryset - * iterating over the queryset - .. code-block:: python +* iterating over the queryset + .. code-block:: python - for car in Automobile.objects.all(): - #...do something to the car instance - pass + for car in Automobile.objects.all(): + #...do something to the car instance + pass - * list index - .. code-block:: python +* list index + .. code-block:: python - q = Automobile.objects.all() - q[0] #returns the first result - q[1] #returns the second result + q = Automobile.objects.all() + q[0] #returns the first result + q[1] #returns the second result - .. note:: + .. note:: - * CQL does not support specifying a start position in it's queries. Therefore, accessing elements using array indexing will load every result up to the index value requested - * Using negative indices requires a "SELECT COUNT()" to be executed. This has a performance cost on large datasets. + * CQL does not support specifying a start position in it's queries. Therefore, accessing elements using array indexing will load every result up to the index value requested + * Using negative indices requires a "SELECT COUNT()" to be executed. This has a performance cost on large datasets. - * list slicing - .. code-block:: python +* list slicing + .. code-block:: python - q = Automobile.objects.all() - q[1:] #returns all results except the first - q[1:9] #returns a slice of the results + q = Automobile.objects.all() + q[1:] #returns all results except the first + q[1:9] #returns a slice of the results - .. note:: + .. note:: - * CQL does not support specifying a start position in it's queries. Therefore, accessing elements using array slicing will load every result up to the index value requested - * Using negative indices requires a "SELECT COUNT()" to be executed. This has a performance cost on large datasets. + * CQL does not support specifying a start position in it's queries. Therefore, accessing elements using array slicing will load every result up to the index value requested + * Using negative indices requires a "SELECT COUNT()" to be executed. This has a performance cost on large datasets. - * calling :attr:`get() ` on the queryset - .. code-block:: python +* calling :attr:`get() ` on the queryset + .. code-block:: python - q = Automobile.objects.filter(manufacturer='Tesla') - q = q.filter(year=2012) - car = q.get() + q = Automobile.objects.filter(manufacturer='Tesla') + q = q.filter(year=2012) + car = q.get() - this returns the object matching the queryset + this returns the object matching the queryset - * calling :attr:`first() ` on the queryset - .. code-block:: python +* calling :attr:`first() ` on the queryset + .. code-block:: python - q = Automobile.objects.filter(manufacturer='Tesla') - q = q.filter(year=2012) - car = q.first() + q = Automobile.objects.filter(manufacturer='Tesla') + q = q.filter(year=2012) + car = q.first() - this returns the first value in the queryset + this returns the first value in the queryset .. _query-filtering-operators: @@ -121,198 +121,196 @@ Filtering Operators :attr:`Equal To ` - The default filtering operator. +The default filtering operator. - .. code-block:: python +.. code-block:: python - q = Automobile.objects.filter(manufacturer='Tesla') - q = q.filter(year=2012) #year == 2012 + q = Automobile.objects.filter(manufacturer='Tesla') + q = q.filter(year=2012) #year == 2012 In addition to simple equal to queries, cqlengine also supports querying with other operators by appending a ``__`` to the field name on the filtering call :attr:`in (__in) ` - .. code-block:: python +.. code-block:: python - q = Automobile.objects.filter(manufacturer='Tesla') - q = q.filter(year__in=[2011, 2012]) + q = Automobile.objects.filter(manufacturer='Tesla') + q = q.filter(year__in=[2011, 2012]) :attr:`> (__gt) ` - .. code-block:: python +.. code-block:: python - q = Automobile.objects.filter(manufacturer='Tesla') - q = q.filter(year__gt=2010) # year > 2010 + q = Automobile.objects.filter(manufacturer='Tesla') + q = q.filter(year__gt=2010) # year > 2010 - # or the nicer syntax + # or the nicer syntax - q.filter(Automobile.year > 2010) + q.filter(Automobile.year > 2010) :attr:`>= (__gte) ` - .. code-block:: python +.. code-block:: python - q = Automobile.objects.filter(manufacturer='Tesla') - q = q.filter(year__gte=2010) # year >= 2010 + q = Automobile.objects.filter(manufacturer='Tesla') + q = q.filter(year__gte=2010) # year >= 2010 - # or the nicer syntax + # or the nicer syntax - q.filter(Automobile.year >= 2010) + q.filter(Automobile.year >= 2010) :attr:`< (__lt) ` - .. code-block:: python +.. code-block:: python - q = Automobile.objects.filter(manufacturer='Tesla') - q = q.filter(year__lt=2012) # year < 2012 + q = Automobile.objects.filter(manufacturer='Tesla') + q = q.filter(year__lt=2012) # year < 2012 - # or... + # or... - q.filter(Automobile.year < 2012) + q.filter(Automobile.year < 2012) :attr:`<= (__lte) ` - .. code-block:: python +.. code-block:: python - q = Automobile.objects.filter(manufacturer='Tesla') - q = q.filter(year__lte=2012) # year <= 2012 + q = Automobile.objects.filter(manufacturer='Tesla') + q = q.filter(year__lte=2012) # year <= 2012 - q.filter(Automobile.year <= 2012) + q.filter(Automobile.year <= 2012) :attr:`CONTAINS (__contains) ` - The CONTAINS operator is available for all collection types (List, Set, Map). +The CONTAINS operator is available for all collection types (List, Set, Map). - .. code-block:: python +.. code-block:: python - q = Automobile.objects.filter(manufacturer='Tesla') - q.filter(options__contains='backup camera').allow_filtering() + q = Automobile.objects.filter(manufacturer='Tesla') + q.filter(options__contains='backup camera').allow_filtering() - Note that we need to use allow_filtering() since the *options* column has no secondary index. +Note that we need to use allow_filtering() since the *options* column has no secondary index. :attr:`LIKE (__like) ` - The LIKE operator is available for text columns that have a SASI secondary index. +The LIKE operator is available for text columns that have a SASI secondary index. - .. code-block:: python +.. code-block:: python - q = Automobile.objects.filter(model__like='%Civic%').allow_filtering() + q = Automobile.objects.filter(model__like='%Civic%').allow_filtering() :attr:`IS NOT NULL (IsNotNull(column_name)) ` - The IS NOT NULL operator is not yet supported for C*. +The IS NOT NULL operator is not yet supported for C*. - .. code-block:: python +.. code-block:: python + + q = Automobile.objects.filter(IsNotNull('model')) - q = Automobile.objects.filter(IsNotNull('model')) +Limitations: - Limitations: - - Currently, cqlengine does not support SASI index creation. To use this feature, - you need to create the SASI index using the core driver. - - Queries using LIKE must use allow_filtering() since the *model* column has no - standard secondary index. Note that the server will use the SASI index properly - when executing the query. +- Currently, cqlengine does not support SASI index creation. To use this feature, you need to create the SASI index using the core driver. +- Queries using LIKE must use allow_filtering() since the *model* column has no standard secondary index. Note that the server will use the SASI index properly when executing the query. TimeUUID Functions ================== - In addition to querying using regular values, there are two functions you can pass in when querying TimeUUID columns to help make filtering by them easier. Note that these functions don't actually return a value, but instruct the cql interpreter to use the functions in it's query. +In addition to querying using regular values, there are two functions you can pass in when querying TimeUUID columns to help make filtering by them easier. Note that these functions don't actually return a value, but instruct the cql interpreter to use the functions in it's query. - .. class:: MinTimeUUID(datetime) +.. class:: MinTimeUUID(datetime) - returns the minimum time uuid value possible for the given datetime + returns the minimum time uuid value possible for the given datetime - .. class:: MaxTimeUUID(datetime) +.. class:: MaxTimeUUID(datetime) - returns the maximum time uuid value possible for the given datetime + returns the maximum time uuid value possible for the given datetime - *Example* +*Example* - .. code-block:: python +.. code-block:: python - class DataStream(Model): - id = columns.UUID(partition_key=True) - time = columns.TimeUUID(primary_key=True) - data = columns.Bytes() + class DataStream(Model): + id = columns.UUID(partition_key=True) + time = columns.TimeUUID(primary_key=True) + data = columns.Bytes() - min_time = datetime(1982, 1, 1) - max_time = datetime(1982, 3, 9) + min_time = datetime(1982, 1, 1) + max_time = datetime(1982, 3, 9) - DataStream.filter(time__gt=functions.MinTimeUUID(min_time), time__lt=functions.MaxTimeUUID(max_time)) + DataStream.filter(time__gt=functions.MinTimeUUID(min_time), time__lt=functions.MaxTimeUUID(max_time)) Token Function ============== - Token functon may be used only on special, virtual column pk__token, representing token of partition key (it also works for composite partition keys). - Cassandra orders returned items by value of partition key token, so using cqlengine.Token we can easy paginate through all table rows. +Token functon may be used only on special, virtual column pk__token, representing token of partition key (it also works for composite partition keys). +Cassandra orders returned items by value of partition key token, so using cqlengine.Token we can easy paginate through all table rows. - See http://cassandra.apache.org/doc/cql3/CQL-3.0.html#tokenFun +See http://cassandra.apache.org/doc/cql3/CQL-3.0.html#tokenFun - *Example* +*Example* - .. code-block:: python +.. code-block:: python - class Items(Model): - id = columns.Text(primary_key=True) - data = columns.Bytes() + class Items(Model): + id = columns.Text(primary_key=True) + data = columns.Bytes() - query = Items.objects.all().limit(10) + query = Items.objects.all().limit(10) - first_page = list(query); - last = first_page[-1] - next_page = list(query.filter(pk__token__gt=cqlengine.Token(last.pk))) + first_page = list(query); + last = first_page[-1] + next_page = list(query.filter(pk__token__gt=cqlengine.Token(last.pk))) QuerySets are immutable ======================= - When calling any method that changes a queryset, the method does not actually change the queryset object it's called on, but returns a new queryset object with the attributes of the original queryset, plus the attributes added in the method call. +When calling any method that changes a queryset, the method does not actually change the queryset object it's called on, but returns a new queryset object with the attributes of the original queryset, plus the attributes added in the method call. - *Example* +*Example* - .. code-block:: python +.. code-block:: python - #this produces 3 different querysets - #q does not change after it's initial definition - q = Automobiles.objects.filter(year=2012) - tesla2012 = q.filter(manufacturer='Tesla') - honda2012 = q.filter(manufacturer='Honda') + #this produces 3 different querysets + #q does not change after it's initial definition + q = Automobiles.objects.filter(year=2012) + tesla2012 = q.filter(manufacturer='Tesla') + honda2012 = q.filter(manufacturer='Honda') Ordering QuerySets ================== - Since Cassandra is essentially a distributed hash table on steroids, the order you get records back in will not be particularly predictable. +Since Cassandra is essentially a distributed hash table on steroids, the order you get records back in will not be particularly predictable. - However, you can set a column to order on with the ``.order_by(column_name)`` method. +However, you can set a column to order on with the ``.order_by(column_name)`` method. - *Example* +*Example* - .. code-block:: python +.. code-block:: python - #sort ascending - q = Automobiles.objects.all().order_by('year') - #sort descending - q = Automobiles.objects.all().order_by('-year') + #sort ascending + q = Automobiles.objects.all().order_by('year') + #sort descending + q = Automobiles.objects.all().order_by('-year') - *Note: Cassandra only supports ordering on a clustering key. In other words, to support ordering results, your model must have more than one primary key, and you must order on a primary key, excluding the first one.* +*Note: Cassandra only supports ordering on a clustering key. In other words, to support ordering results, your model must have more than one primary key, and you must order on a primary key, excluding the first one.* - *For instance, given our Automobile model, year is the only column we can order on.* +*For instance, given our Automobile model, year is the only column we can order on.* Values Lists ============ - There is a special QuerySet's method ``.values_list()`` - when called, QuerySet returns lists of values instead of model instances. It may significantly speedup things with lower memory footprint for large responses. - Each tuple contains the value from the respective field passed into the ``values_list()`` call — so the first item is the first field, etc. For example: +There is a special QuerySet's method ``.values_list()`` - when called, QuerySet returns lists of values instead of model instances. It may significantly speedup things with lower memory footprint for large responses. +Each tuple contains the value from the respective field passed into the ``values_list()`` call — so the first item is the first field, etc. For example: - .. code-block:: python +.. code-block:: python - items = list(range(20)) - random.shuffle(items) - for i in items: - TestModel.create(id=1, clustering_key=i) + items = list(range(20)) + random.shuffle(items) + for i in items: + TestModel.create(id=1, clustering_key=i) - values = list(TestModel.objects.values_list('clustering_key', flat=True)) - # [19L, 18L, 17L, 16L, 15L, 14L, 13L, 12L, 11L, 10L, 9L, 8L, 7L, 6L, 5L, 4L, 3L, 2L, 1L, 0L] + values = list(TestModel.objects.values_list('clustering_key', flat=True)) + # [19L, 18L, 17L, 16L, 15L, 14L, 13L, 12L, 11L, 10L, 9L, 8L, 7L, 6L, 5L, 4L, 3L, 2L, 1L, 0L] Per Query Timeouts =================== @@ -323,47 +321,47 @@ A timeout is specified in seconds and can be an int, float or None. None means no timeout. - .. code-block:: python +.. code-block:: python - class Row(Model): - id = columns.Integer(primary_key=True) - name = columns.Text() + class Row(Model): + id = columns.Integer(primary_key=True) + name = columns.Text() - Fetch all objects with a timeout of 5 seconds +Fetch all objects with a timeout of 5 seconds - .. code-block:: python +.. code-block:: python - Row.objects().timeout(5).all() + Row.objects().timeout(5).all() - Create a single row with a 50ms timeout +Create a single row with a 50ms timeout - .. code-block:: python +.. code-block:: python - Row(id=1, name='Jon').timeout(0.05).create() + Row(id=1, name='Jon').timeout(0.05).create() - Delete a single row with no timeout +Delete a single row with no timeout - .. code-block:: python +.. code-block:: python - Row(id=1).timeout(None).delete() + Row(id=1).timeout(None).delete() - Update a single row with no timeout +Update a single row with no timeout - .. code-block:: python +.. code-block:: python - Row(id=1).timeout(None).update(name='Blake') + Row(id=1).timeout(None).update(name='Blake') - Batch query timeouts +Batch query timeouts - .. code-block:: python +.. code-block:: python - with BatchQuery(timeout=10) as b: - Row(id=1, name='Jon').create() + with BatchQuery(timeout=10) as b: + Row(id=1, name='Jon').create() - NOTE: You cannot set both timeout and batch at the same time, batch will use the timeout defined in it's constructor. - Setting the timeout on the model is meaningless and will raise an AssertionError. +NOTE: You cannot set both timeout and batch at the same time, batch will use the timeout defined in it's constructor. +Setting the timeout on the model is meaningless and will raise an AssertionError. .. _ttl-change: @@ -373,33 +371,33 @@ Default TTL and Per Query TTL Model default TTL now relies on the *default_time_to_live* feature, introduced in Cassandra 2.0. It is not handled anymore in the CQLEngine Model (cassandra-driver >=3.6). You can set the default TTL of a table like this: - Example: +Example: - .. code-block:: python +.. code-block:: python - class User(Model): - __options__ = {'default_time_to_live': 20} + class User(Model): + __options__ = {'default_time_to_live': 20} - user_id = columns.UUID(primary_key=True) - ... + user_id = columns.UUID(primary_key=True) + ... You can set TTL per-query if needed. Here are a some examples: - Example: +Example: - .. code-block:: python +.. code-block:: python - class User(Model): - __options__ = {'default_time_to_live': 20} + class User(Model): + __options__ = {'default_time_to_live': 20} - user_id = columns.UUID(primary_key=True) - ... + user_id = columns.UUID(primary_key=True) + ... - user = User.objects.create(user_id=1) # Default TTL 20 will be set automatically on the server + user = User.objects.create(user_id=1) # Default TTL 20 will be set automatically on the server - user.ttl(30).update(age=21) # Update the TTL to 30 - User.objects.ttl(10).create(user_id=1) # TTL 10 - User(user_id=1, age=21).ttl(10).save() # TTL 10 + user.ttl(30).update(age=21) # Update the TTL to 30 + User.objects.ttl(10).create(user_id=1) # TTL 10 + User(user_id=1, age=21).ttl(10).save() # TTL 10 Named Tables @@ -408,14 +406,14 @@ Named Tables Named tables are a way of querying a table without creating an class. They're useful for querying system tables or exploring an unfamiliar database. - .. code-block:: python +.. code-block:: python - from cassandra.cqlengine.connection import setup - setup("127.0.0.1", "cqlengine_test") + from cassandra.cqlengine.connection import setup + setup("127.0.0.1", "cqlengine_test") - from cassandra.cqlengine.named import NamedTable - user = NamedTable("cqlengine_test", "user") - user.objects() - user.objects()[0] + from cassandra.cqlengine.named import NamedTable + user = NamedTable("cqlengine_test", "user") + user.objects() + user.objects()[0] - # {u'pk': 1, u't': datetime.datetime(2014, 6, 26, 17, 10, 31, 774000)} + # {u'pk': 1, u't': datetime.datetime(2014, 6, 26, 17, 10, 31, 774000)} diff --git a/docs/index.rst b/docs/index.rst index 2c01d0b6d0..13fca1880c 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -20,33 +20,30 @@ Contents :doc:`getting_started` A guide through the first steps of connecting to Cassandra and executing queries -:doc:`object_mapper` - Introduction to the integrated object mapper, cqlengine - -:doc:`api/index` - The API documentation. - -:doc:`upgrading` - A guide to upgrading versions of the driver - :doc:`execution_profiles` An introduction to a more flexible way of configuring request execution +:doc:`lwt` + Working with results of conditional requests + +:doc:`object_mapper` + Introduction to the integrated object mapper, cqlengine + :doc:`performance` Tips for getting good performance. :doc:`query_paging` Notes on paging large query results -:doc:`lwt` - Working with results of conditional requests +:doc:`security` + An overview of the security features of the driver + +:doc:`upgrading` + A guide to upgrading versions of the driver :doc:`user_defined_types` Working with Cassandra 2.1's user-defined types -:doc:`security` - An overview of the security features of the driver - :doc:`dates_and_times` Some discussion on the driver's approach to working with timestamp, date, time types @@ -56,6 +53,9 @@ Contents :doc:`faq` A collection of Frequently Asked Questions +:doc:`api/index` + The API documentation. + .. toctree:: :hidden: From bbc0d311899797e503652bac9f1a3983a0dc29ef Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 23 Oct 2019 11:38:45 -0500 Subject: [PATCH 1676/2005] Version bump docs.yaml --- docs.yaml | 62 +++++++++++++++++++++++++++---------------------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/docs.yaml b/docs.yaml index 1de32844a9..df3e800510 100644 --- a/docs.yaml +++ b/docs.yaml @@ -23,37 +23,37 @@ sections: CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - name: '3.20' - ref: '38613962' -# - name: '3.19' -# ref: ac2471f9 -# - name: '3.18' -# ref: ec36b957 -# - name: '3.17' -# ref: 38e359e1 -# - name: '3.16' -# ref: '3.16.0' -# - name: '3.15' -# ref: '2ce0bd97' -# - name: '3.14' -# ref: '9af8bd19' -# - name: '3.13' -# ref: '3.13.0' -# - name: '3.12' -# ref: '43b9c995' -# - name: '3.11' -# ref: '3.11.0' -# - name: '3.10' -# ref: 64572368 -# - name: 3.9 -# ref: 3.9-doc -# - name: 3.8 -# ref: 3.8-doc -# - name: 3.7 -# ref: 3.7-doc -# - name: 3.6 -# ref: 3.6-doc -# - name: 3.5 -# ref: 3.5-doc + ref: babfe339 + - name: '3.19' + ref: ac2471f9 + - name: '3.18' + ref: ec36b957 + - name: '3.17' + ref: 38e359e1 + - name: '3.16' + ref: '3.16.0' + - name: '3.15' + ref: '2ce0bd97' + - name: '3.14' + ref: '9af8bd19' + - name: '3.13' + ref: '3.13.0' + - name: '3.12' + ref: '43b9c995' + - name: '3.11' + ref: '3.11.0' + - name: '3.10' + ref: 64572368 + - name: 3.9 + ref: 3.9-doc + - name: 3.8 + ref: 3.8-doc + - name: 3.7 + ref: 3.7-doc + - name: 3.6 + ref: 3.6-doc + - name: 3.5 + ref: 3.5-doc redirects: - \A\/(.*)/\Z: /\1.html rewrites: From 5331d4ab4b9b4fa38ca1589c74651bf3677f68a2 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 23 Oct 2019 18:06:10 -0400 Subject: [PATCH 1677/2005] Remove bad docs ref for live publishing test --- docs.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs.yaml b/docs.yaml index df3e800510..47b6598a4c 100644 --- a/docs.yaml +++ b/docs.yaml @@ -22,8 +22,6 @@ sections: # build extensions like libev CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - - name: '3.20' - ref: babfe339 - name: '3.19' ref: ac2471f9 - name: '3.18' From b22459df4c2b6feaea91c75d534bca69ec401464 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 24 Oct 2019 08:40:13 -0500 Subject: [PATCH 1678/2005] Disable pull requests for nightly master --- build.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/build.yaml b/build.yaml index e8b1147092..7f7743ea7e 100644 --- a/build.yaml +++ b/build.yaml @@ -1,6 +1,7 @@ schedules: nightly_master: schedule: nightly + disable_pull_requests: true branches: include: [master] env_vars: | From d30d166fe635573273dab5845d44bd486561adbf Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 25 Oct 2019 09:41:59 -0400 Subject: [PATCH 1679/2005] release 3.20.0: changelog and version --- CHANGELOG.rst | 2 +- cassandra/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 272aeb7d5e..a2166255fb 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,6 @@ 3.20.0 ====== -Not Released +October 28, 2019 Features -------- diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 13a64afbdc..96cc537663 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 19, 0, 'post0') +__version_info__ = (3, 20, 0) __version__ = '.'.join(map(str, __version_info__)) From 21086c3d165794c6da9c1fbc1fbfa43967b2679e Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 25 Oct 2019 09:43:06 -0400 Subject: [PATCH 1680/2005] release 3.20.0: docs --- docs.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs.yaml b/docs.yaml index 47b6598a4c..621269987a 100644 --- a/docs.yaml +++ b/docs.yaml @@ -22,6 +22,8 @@ sections: # build extensions like libev CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: + - name: '3.20' + ref: d30d166f - name: '3.19' ref: ac2471f9 - name: '3.18' From 02065de3b979ff0a5319b07eb873f1d4e1c41cc6 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 25 Oct 2019 16:07:10 -0400 Subject: [PATCH 1681/2005] Fix import error for old python installation without ssl --- cassandra/datastax/cloud/__init__.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/cassandra/datastax/cloud/__init__.py b/cassandra/datastax/cloud/__init__.py index d2b65f7cfb..ed9435e5e3 100644 --- a/cassandra/datastax/cloud/__init__.py +++ b/cassandra/datastax/cloud/__init__.py @@ -18,7 +18,13 @@ import tempfile import shutil from six.moves.urllib.request import urlopen -from ssl import SSLContext, PROTOCOL_TLSv1, CERT_REQUIRED + +_HAS_SSL = True +try: + from ssl import SSLContext, PROTOCOL_TLSv1, CERT_REQUIRED +except: + _HAS_SSL = False + from zipfile import ZipFile # 2.7 vs 3.x @@ -70,6 +76,9 @@ def from_dict(cls, d): def get_cloud_config(cloud_config): + if not _HAS_SSL: + raise DriverException("A Python installation with SSL is required to connect to a cloud cluster.") + if 'secure_connect_bundle' not in cloud_config: raise ValueError("The cloud config doesn't have a secure_connect_bundle specified.") From 14ba2d1178c037f67927e8a6eebffadd45f8161a Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 28 Oct 2019 14:10:05 -0500 Subject: [PATCH 1682/2005] [PYTHON-1161] Support Twisted context. Refactored Twisted code to use newer Endpoint APIs. This allowed us to remove ClientFactory implementation completely. Also used IOpenSSLClientConnectionCreator interface instead of implementing a ContextFactory. This allows us to use our own context and pyOpenSSL connection instead of relying on twisted to create it for us. --- cassandra/io/twistedreactor.py | 189 ++++++++++++--------------- test-requirements.txt | 1 + tests/integration/long/test_ssl.py | 186 +++++++++++++++++--------- tests/unit/io/test_twistedreactor.py | 41 +----- 4 files changed, 218 insertions(+), 199 deletions(-) diff --git a/cassandra/io/twistedreactor.py b/cassandra/io/twistedreactor.py index 1dbe9d8b5d..b44f224fda 100644 --- a/cassandra/io/twistedreactor.py +++ b/cassandra/io/twistedreactor.py @@ -16,16 +16,26 @@ ( https://twistedmatrix.com ). """ import atexit -from functools import partial import logging -from threading import Thread, Lock import time -from twisted.internet import reactor, protocol +from functools import partial +from threading import Thread, Lock import weakref -from cassandra.connection import Connection, ConnectionShutdown, Timer, TimerManager +from twisted.internet import reactor, protocol +from twisted.internet.endpoints import connectProtocol, TCP4ClientEndpoint, SSL4ClientEndpoint +from twisted.internet.interfaces import IOpenSSLClientConnectionCreator +from twisted.python.failure import Failure +from zope.interface import implementer +from cassandra.connection import Connection, ConnectionShutdown, Timer, TimerManager +try: + from OpenSSL import SSL + _HAS_SSL = True +except ImportError as e: + _HAS_SSL = False + import_exception = e log = logging.getLogger(__name__) @@ -42,8 +52,8 @@ class TwistedConnectionProtocol(protocol.Protocol): made events. """ - def __init__(self): - self.connection = None + def __init__(self, connection): + self.connection = connection def dataReceived(self, data): """ @@ -55,6 +65,7 @@ def dataReceived(self, data): """ self.connection._iobuf.write(data) self.connection.handle_read() + def connectionMade(self): """ Callback function that is called when a connection has succeeded. @@ -62,13 +73,6 @@ def connectionMade(self): Reaches back to the Connection object and confirms that the connection is ready. """ - try: - # Non SSL connection - self.connection = self.transport.connector.factory.conn - except AttributeError: - # SSL connection - self.connection = self.transport.connector.factory.wrappedFactory.conn - self.connection.client_connection_made(self.transport) def connectionLost(self, reason): @@ -76,43 +80,6 @@ def connectionLost(self, reason): self.connection.defunct(reason.value) -class TwistedConnectionClientFactory(protocol.ClientFactory): - - def __init__(self, connection): - # ClientFactory does not define __init__() in parent classes - # and does not inherit from object. - self.conn = connection - - def buildProtocol(self, addr): - """ - Twisted function that defines which kind of protocol to use - in the ClientFactory. - """ - return TwistedConnectionProtocol() - - def clientConnectionFailed(self, connector, reason): - """ - Overridden twisted callback which is called when the - connection attempt fails. - """ - log.debug("Connect failed: %s", reason) - self.conn.defunct(reason.value) - - def clientConnectionLost(self, connector, reason): - """ - Overridden twisted callback which is called when the - connection goes away (cleanly or otherwise). - - It should be safe to call defunct() here instead of just close, because - we can assume that if the connection was closed cleanly, there are no - requests to error out. If this assumption turns out to be false, we - can call close() instead of defunct() when "reason" is an appropriate - type. - """ - log.debug("Connect lost: %s", reason) - self.conn.defunct(reason.value) - - class TwistedLoop(object): _lock = None @@ -166,47 +133,46 @@ def _on_loop_timer(self): self._schedule_timeout(self._timers.next_timeout) -try: - from twisted.internet import ssl - import OpenSSL.crypto - from OpenSSL.crypto import load_certificate, FILETYPE_PEM - - class _SSLContextFactory(ssl.ClientContextFactory): - def __init__(self, ssl_options, check_hostname, host): - self.ssl_options = ssl_options - self.check_hostname = check_hostname - self.host = host - - def getContext(self): - # This version has to be OpenSSL.SSL.DESIRED_VERSION - # instead of ssl.DESIRED_VERSION as in other loops - self.method = self.ssl_options["ssl_version"] - context = ssl.ClientContextFactory.getContext(self) +@implementer(IOpenSSLClientConnectionCreator) +class SSLCreator(object): + def __init__(self, host, ssl_context, ssl_options, check_hostname, timeout): + self.host = host + self.ssl_options = ssl_options + self.check_hostname = check_hostname + self.timeout = timeout + + if ssl_context: + self.context = ssl_context + else: + self.context = SSL.Context(SSL.TLSv1_METHOD) if "certfile" in self.ssl_options: - context.use_certificate_file(self.ssl_options["certfile"]) + self.context.use_certificate_file(self.ssl_options["certfile"]) if "keyfile" in self.ssl_options: - context.use_privatekey_file(self.ssl_options["keyfile"]) + self.context.use_privatekey_file(self.ssl_options["keyfile"]) if "ca_certs" in self.ssl_options: - x509 = load_certificate(FILETYPE_PEM, open(self.ssl_options["ca_certs"]).read()) - store = context.get_cert_store() - store.add_cert(x509) + self.context.load_verify_locations(self.ssl_options["ca_certs"]) if "cert_reqs" in self.ssl_options: - # This expects OpenSSL.SSL.VERIFY_NONE/OpenSSL.SSL.VERIFY_PEER - # or OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT - context.set_verify(self.ssl_options["cert_reqs"], - callback=self.verify_callback) - return context - - def verify_callback(self, connection, x509, errnum, errdepth, ok): - if ok: - if self.check_hostname and self.host != x509.get_subject().commonName: - return False - return ok + self.context.set_verify( + self.ssl_options["cert_reqs"], + callback=self.verify_callback + ) + self.context.set_info_callback(self.info_callback) - _HAS_SSL = True + def verify_callback(self, connection, x509, errnum, errdepth, ok): + return ok -except ImportError as e: - _HAS_SSL = False + def info_callback(self, connection, where, ret): + if where & SSL.SSL_CB_HANDSHAKE_DONE: + if self.check_hostname and self.host != connection.get_peer_certificate().get_subject().commonName: + transport = connection.get_app_data() + transport.failVerification(Failure(Exception("Hostname verification failed"))) + + def clientConnectionForTLS(self, tlsProtocol): + connection = SSL.Connection(self.context, None) + connection.set_app_data(tlsProtocol) + if self.ssl_options and "server_hostname" in self.ssl_options: + connection.set_tlsext_host_name(self.ssl_options['server_hostname'].encode('ascii')) + return connection class TwistedConnection(Connection): @@ -246,29 +212,48 @@ def __init__(self, *args, **kwargs): reactor.callFromThread(self.add_connection) self._loop.maybe_start() - def add_connection(self): - """ - Convenience function to connect and store the resulting - connector. - """ + def _check_pyopenssl(self): if self.ssl_options: - if not _HAS_SSL: raise ImportError( - str(e) + + str(import_exception) + ', pyOpenSSL must be installed to enable SSL support with the Twisted event loop' ) - self.connector = reactor.connectSSL( - host=self.endpoint.address, port=self.port, - factory=TwistedConnectionClientFactory(self), - contextFactory=_SSLContextFactory(self.ssl_options, self._check_hostname, self.endpoint.address), - timeout=self.connect_timeout) + def add_connection(self): + """ + Convenience function to connect and store the resulting + connector. + """ + host, port = self.endpoint.resolve() + if self.ssl_context or self.ssl_options: + # Can't use optionsForClientTLS here because it *forces* hostname verification. + # Cool they enforce strong security, but we have to be able to turn it off + self._check_pyopenssl() + + ssl_options = SSLCreator( + self.endpoint.address, + self.ssl_context if self.ssl_context else None, + self.ssl_options, + self._check_hostname, + self.connect_timeout, + ) + + point = SSL4ClientEndpoint( + reactor, + host, + port, + sslContextFactory=ssl_options, + timeout=self.connect_timeout, + ) else: - self.connector = reactor.connectTCP( - host=self.endpoint.address, port=self.port, - factory=TwistedConnectionClientFactory(self), - timeout=self.connect_timeout) + point = TCP4ClientEndpoint( + reactor, + host, + port, + timeout=self.connect_timeout + ) + connectProtocol(point, TwistedConnectionProtocol(self)) def client_connection_made(self, transport): """ @@ -290,7 +275,7 @@ def close(self): self.is_closed = True log.debug("Closing connection (%s) to %s", id(self), self.endpoint) - reactor.callFromThread(self.connector.disconnect) + reactor.callFromThread(self.transport.connector.disconnect) log.debug("Closed socket to %s", self.endpoint) if not self.is_defunct: diff --git a/test-requirements.txt b/test-requirements.txt index 0eafa67648..4d88134126 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -15,3 +15,4 @@ cython>=0.20,<0.30 packaging futurist; python_version >= '3.7' asynctest; python_version > '3.4' +pyopenssl diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index 7f0a870718..da75b76693 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -19,8 +19,12 @@ import os, sys, traceback, logging, ssl, time, math, uuid from cassandra.cluster import Cluster, NoHostAvailable +from cassandra.connection import DefaultEndPoint from cassandra import ConsistencyLevel from cassandra.query import SimpleStatement + +from OpenSSL import SSL, crypto + from tests.integration import PROTOCOL_VERSION, get_cluster, remove_cluster, use_single_node, EVENT_LOOP_MANAGER log = logging.getLogger(__name__) @@ -28,28 +32,32 @@ DEFAULT_PASSWORD = "pythondriver" # Server keystore trust store locations -SERVER_KEYSTORE_PATH = "tests/integration/long/ssl/.keystore" -SERVER_TRUSTSTORE_PATH = "tests/integration/long/ssl/.truststore" +SERVER_KEYSTORE_PATH = os.path.abspath("tests/integration/long/ssl/.keystore") +SERVER_TRUSTSTORE_PATH = os.path.abspath("tests/integration/long/ssl/.truststore") # Client specific keys/certs -CLIENT_CA_CERTS = 'tests/integration/long/ssl/cassandra.pem' -DRIVER_KEYFILE = "tests/integration/long/ssl/driver.key" -DRIVER_KEYFILE_ENCRYPTED = "tests/integration/long/ssl/driver_encrypted.key" -DRIVER_CERTFILE = "tests/integration/long/ssl/driver.pem" -DRIVER_CERTFILE_BAD = "tests/integration/long/ssl/python_driver_bad.pem" +CLIENT_CA_CERTS = os.path.abspath("tests/integration/long/ssl/cassandra.pem") +DRIVER_KEYFILE = os.path.abspath("tests/integration/long/ssl/driver.key") +DRIVER_KEYFILE_ENCRYPTED = os.path.abspath("tests/integration/long/ssl/driver_encrypted.key") +DRIVER_CERTFILE = os.path.abspath("tests/integration/long/ssl/driver.pem") +DRIVER_CERTFILE_BAD = os.path.abspath("tests/integration/long/ssl/python_driver_bad.pem") +USES_PYOPENSSL = "twisted" in EVENT_LOOP_MANAGER if "twisted" in EVENT_LOOP_MANAGER: import OpenSSL ssl_version = OpenSSL.SSL.TLSv1_METHOD - verify_certs = {'cert_reqs': OpenSSL.SSL.VERIFY_PEER, + verify_certs = {'cert_reqs': SSL.VERIFY_PEER, 'check_hostname': True} - else: ssl_version = ssl.PROTOCOL_TLSv1 verify_certs = {'cert_reqs': ssl.CERT_REQUIRED, 'check_hostname': True} +def verify_callback(connection, x509, errnum, errdepth, ok): + return ok + + def setup_cluster_ssl(client_auth=False): """ We need some custom setup for this module. This will start the ccm cluster with basic @@ -60,20 +68,15 @@ def setup_cluster_ssl(client_auth=False): ccm_cluster = get_cluster() ccm_cluster.stop() - # Fetch the absolute path to the keystore for ccm. - abs_path_server_keystore_path = os.path.abspath(SERVER_KEYSTORE_PATH) - # Configure ccm to use ssl. - config_options = {'client_encryption_options': {'enabled': True, - 'keystore': abs_path_server_keystore_path, + 'keystore': SERVER_KEYSTORE_PATH, 'keystore_password': DEFAULT_PASSWORD}} if(client_auth): - abs_path_server_truststore_path = os.path.abspath(SERVER_TRUSTSTORE_PATH) client_encyrption_options = config_options['client_encryption_options'] client_encyrption_options['require_client_auth'] = True - client_encyrption_options['truststore'] = abs_path_server_truststore_path + client_encyrption_options['truststore'] = SERVER_TRUSTSTORE_PATH client_encyrption_options['truststore_password'] = DEFAULT_PASSWORD ccm_cluster.set_configuration_options(config_options) @@ -83,6 +86,7 @@ def setup_cluster_ssl(client_auth=False): def validate_ssl_options(**kwargs): ssl_options = kwargs.get('ssl_options', None) ssl_context = kwargs.get('ssl_context', None) + hostname = kwargs.get('hostname', '127.0.0.1') # find absolute path to client CA_CERTS tries = 0 @@ -90,8 +94,12 @@ def validate_ssl_options(**kwargs): if tries > 5: raise RuntimeError("Failed to connect to SSL cluster after 5 attempts") try: - cluster = Cluster(protocol_version=PROTOCOL_VERSION, - ssl_options=ssl_options, ssl_context=ssl_context) + cluster = Cluster( + contact_points=[DefaultEndPoint(hostname)], + protocol_version=PROTOCOL_VERSION, + ssl_options=ssl_options, + ssl_context=ssl_context + ) session = cluster.connect(wait_for_all_pools=True) break except Exception: @@ -145,8 +153,7 @@ def test_can_connect_with_ssl_ca(self): """ # find absolute path to client CA_CERTS - abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS) - ssl_options = {'ca_certs': abs_path_ca_cert_path,'ssl_version': ssl_version} + ssl_options = {'ca_certs': CLIENT_CA_CERTS,'ssl_version': ssl_version} validate_ssl_options(ssl_options=ssl_options) def test_can_connect_with_ssl_long_running(self): @@ -200,9 +207,7 @@ def test_can_connect_with_ssl_ca_host_match(self): @test_category connection:ssl """ - # find absolute path to client CA_CERTS - abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS) - ssl_options = {'ca_certs': abs_path_ca_cert_path, + ssl_options = {'ca_certs': CLIENT_CA_CERTS, 'ssl_version': ssl_version} ssl_options.update(verify_certs) @@ -235,14 +240,10 @@ def test_can_connect_with_ssl_client_auth(self): @test_category connection:ssl """ - # Need to get absolute paths for certs/key - abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS) - abs_driver_keyfile = os.path.abspath(DRIVER_KEYFILE) - abs_driver_certfile = os.path.abspath(DRIVER_CERTFILE) - ssl_options = {'ca_certs': abs_path_ca_cert_path, + ssl_options = {'ca_certs': CLIENT_CA_CERTS, 'ssl_version': ssl_version, - 'keyfile': abs_driver_keyfile, - 'certfile': abs_driver_certfile} + 'keyfile': DRIVER_KEYFILE, + 'certfile': DRIVER_CERTFILE} validate_ssl_options(ssl_options=ssl_options) def test_can_connect_with_ssl_client_auth_host_name(self): @@ -260,15 +261,10 @@ def test_can_connect_with_ssl_client_auth_host_name(self): @test_category connection:ssl """ - # Need to get absolute paths for certs/key - abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS) - abs_driver_keyfile = os.path.abspath(DRIVER_KEYFILE) - abs_driver_certfile = os.path.abspath(DRIVER_CERTFILE) - - ssl_options = {'ca_certs': abs_path_ca_cert_path, + ssl_options = {'ca_certs': CLIENT_CA_CERTS, 'ssl_version': ssl_version, - 'keyfile': abs_driver_keyfile, - 'certfile': abs_driver_certfile} + 'keyfile': DRIVER_KEYFILE, + 'certfile': DRIVER_CERTFILE} ssl_options.update(verify_certs) validate_ssl_options(ssl_options=ssl_options) @@ -286,10 +282,13 @@ def test_cannot_connect_without_client_auth(self): @test_category connection:ssl """ - abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS) - cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options={'ca_certs': abs_path_ca_cert_path, + cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options={'ca_certs': CLIENT_CA_CERTS, 'ssl_version': ssl_version}) # attempt to connect and expect an exception + if "twisted" in EVENT_LOOP_MANAGER: + exc = SSL.Error + else: + exc = NoHostAvailable with self.assertRaises(NoHostAvailable) as context: cluster.connect() @@ -309,19 +308,24 @@ def test_cannot_connect_with_bad_client_auth(self): @test_category connection:ssl """ - # Setup absolute paths to key/cert files - abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS) - abs_driver_keyfile = os.path.abspath(DRIVER_KEYFILE) - abs_driver_certfile = os.path.abspath(DRIVER_CERTFILE_BAD) - - cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options={'ca_certs': abs_path_ca_cert_path, + cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options={'ca_certs': CLIENT_CA_CERTS, 'ssl_version': ssl_version, - 'keyfile': abs_driver_keyfile, - 'certfile': abs_driver_certfile}) + 'keyfile': DRIVER_KEYFILE, + 'certfile': DRIVER_CERTFILE_BAD}) with self.assertRaises(NoHostAvailable) as context: cluster.connect() cluster.shutdown() + def test_cannot_connect_with_invalid_hostname(self): + ssl_options = {'ca_certs': CLIENT_CA_CERTS, + 'ssl_version': ssl_version, + 'keyfile': DRIVER_KEYFILE, + 'certfile': DRIVER_CERTFILE} + ssl_options.update(verify_certs) + + with self.assertRaises(Exception): + validate_ssl_options(ssl_options=ssl_options, hostname='localhost') + class SSLSocketErrorTests(unittest.TestCase): @@ -345,8 +349,7 @@ def test_ssl_want_write_errors_are_retried(self): @test_category connection:ssl """ - abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS) - ssl_options = {'ca_certs': abs_path_ca_cert_path, + ssl_options = {'ca_certs': CLIENT_CA_CERTS, 'ssl_version': ssl_version} cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options=ssl_options) session = cluster.connect(wait_for_all_pools=True) @@ -367,7 +370,6 @@ def test_ssl_want_write_errors_are_retried(self): class SSLConnectionWithSSLContextTests(unittest.TestCase): - @classmethod def setUpClass(cls): setup_cluster_ssl() @@ -388,9 +390,13 @@ def test_can_connect_with_sslcontext_certificate(self): @test_category connection:ssl """ - abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS) - ssl_context = ssl.SSLContext(ssl_version) - ssl_context.load_verify_locations(abs_path_ca_cert_path) + if USES_PYOPENSSL: + ssl_context = SSL.Context(SSL.TLSv1_METHOD) + ssl_context.load_verify_locations(CLIENT_CA_CERTS) + else: + ssl_context = ssl.SSLContext(ssl_version) + ssl_context.load_verify_locations(CLIENT_CA_CERTS) + ssl_context.verify_mode = ssl.CERT_REQUIRED validate_ssl_options(ssl_context=ssl_context) def test_can_connect_with_ssl_client_auth_password_private_key(self): @@ -406,8 +412,70 @@ def test_can_connect_with_ssl_client_auth_password_private_key(self): """ abs_driver_keyfile = os.path.abspath(DRIVER_KEYFILE_ENCRYPTED) abs_driver_certfile = os.path.abspath(DRIVER_CERTFILE) - ssl_context = ssl.SSLContext(ssl_version) - ssl_context.load_cert_chain(certfile=abs_driver_certfile, - keyfile=abs_driver_keyfile, - password='cassandra') - validate_ssl_options(ssl_context=ssl_context) + ssl_options = {} + + if USES_PYOPENSSL: + ssl_context = SSL.Context(SSL.TLSv1_METHOD) + ssl_context.use_certificate_file(abs_driver_certfile) + with open(abs_driver_keyfile) as keyfile: + key = crypto.load_privatekey(crypto.FILETYPE_PEM, keyfile.read(), b'cassandra') + ssl_context.use_privatekey(key) + ssl_context.set_verify(SSL.VERIFY_NONE, verify_callback) + else: + ssl_context = ssl.SSLContext(ssl_version) + ssl_context.load_cert_chain(certfile=abs_driver_certfile, + keyfile=abs_driver_keyfile, + password="cassandra") + ssl_context.verify_mode = ssl.CERT_NONE + validate_ssl_options(ssl_context=ssl_context, ssl_options=ssl_options) + + def test_can_connect_with_ssl_conext_ca_host_match(self): + """ + Test to validate that we are able to connect to a cluster using a SSLContext + using client auth, an encrypted keyfile, and host matching + """ + ssl_options = {} + if USES_PYOPENSSL: + ssl_context = SSL.Context(SSL.TLSv1_METHOD) + ssl_context.use_certificate_file(DRIVER_CERTFILE) + with open(DRIVER_KEYFILE_ENCRYPTED) as keyfile: + key = crypto.load_privatekey(crypto.FILETYPE_PEM, keyfile.read(), b'cassandra') + ssl_context.use_privatekey(key) + ssl_context.load_verify_locations(CLIENT_CA_CERTS) + ssl_options["check_hostname"] = True + else: + ssl_context = ssl.SSLContext(ssl_version) + ssl_context.verify_mode = ssl.CERT_REQUIRED + ssl_context.load_verify_locations(CLIENT_CA_CERTS) + ssl_context.load_cert_chain( + certfile=DRIVER_CERTFILE, + keyfile=DRIVER_KEYFILE_ENCRYPTED, + password="cassandra", + ) + ssl_context.verify_mode = ssl.CERT_REQUIRED + ssl_options["check_hostname"] = True + validate_ssl_options(ssl_context=ssl_context, ssl_options=ssl_options) + + def test_cannot_connect_ssl_context_with_invalid_hostname(self): + ssl_options = {} + if USES_PYOPENSSL: + ssl_context = SSL.Context(SSL.TLSv1_METHOD) + ssl_context.use_certificate_file(DRIVER_CERTFILE) + with open(DRIVER_KEYFILE_ENCRYPTED) as keyfile: + key = crypto.load_privatekey(crypto.FILETYPE_PEM, keyfile.read(), b"cassandra") + ssl_context.use_privatekey(key) + ssl_context.load_verify_locations(CLIENT_CA_CERTS) + ssl_options["check_hostname"] = True + else: + ssl_context = ssl.SSLContext(ssl_version) + ssl_context.verify_mode = ssl.CERT_REQUIRED + ssl_context.load_verify_locations(CLIENT_CA_CERTS) + ssl_context.load_cert_chain( + certfile=DRIVER_CERTFILE, + keyfile=DRIVER_KEYFILE_ENCRYPTED, + password="cassandra", + ) + ssl_context.verify_mode = ssl.CERT_REQUIRED + ssl_options["check_hostname"] = True + with self.assertRaises(Exception): + validate_ssl_options(ssl_context=ssl_context, ssl_options=ssl_options, hostname="localhost") diff --git a/tests/unit/io/test_twistedreactor.py b/tests/unit/io/test_twistedreactor.py index f0a1d737bc..f3a2f05328 100644 --- a/tests/unit/io/test_twistedreactor.py +++ b/tests/unit/io/test_twistedreactor.py @@ -65,9 +65,7 @@ def setUp(self): self.tr = proto_helpers.StringTransportWithDisconnection() self.tr.connector = Mock() self.mock_connection = Mock() - self.tr.connector.factory = twistedreactor.TwistedConnectionClientFactory( - self.mock_connection) - self.obj_ut = twistedreactor.TwistedConnectionProtocol() + self.obj_ut = twistedreactor.TwistedConnectionProtocol(self.mock_connection) self.tr.protocol = self.obj_ut def tearDown(self): @@ -92,32 +90,6 @@ def test_receiving_data(self): self.mock_connection._iobuf.write.assert_called_with("foobar") -class TestTwistedClientFactory(unittest.TestCase): - def setUp(self): - if twistedreactor is None: - raise unittest.SkipTest("Twisted libraries not available") - twistedreactor.TwistedConnection.initialize_reactor() - self.mock_connection = Mock() - self.obj_ut = twistedreactor.TwistedConnectionClientFactory( - self.mock_connection) - - def test_client_connection_failed(self): - """ - Verify that connection failed causes the connection object to close. - """ - exc = Exception('a test') - self.obj_ut.clientConnectionFailed(None, Failure(exc)) - self.mock_connection.defunct.assert_called_with(exc) - - def test_client_connection_lost(self): - """ - Verify that connection lost causes the connection object to close. - """ - exc = Exception('a test') - self.obj_ut.clientConnectionLost(None, Failure(exc)) - self.mock_connection.defunct.assert_called_with(exc) - - class TestTwistedConnection(unittest.TestCase): def setUp(self): if twistedreactor is None: @@ -143,15 +115,6 @@ def test_connection_initialization(self): self.obj_ut._loop._cleanup() self.mock_reactor_run.assert_called_with(installSignalHandlers=False) - @patch('twisted.internet.reactor.connectTCP') - def test_add_connection(self, mock_connectTCP): - """ - Verify that add_connection() gives us a valid twisted connector. - """ - self.obj_ut.add_connection() - self.assertTrue(self.obj_ut.connector is not None) - self.assertTrue(mock_connectTCP.called) - def test_client_connection_made(self): """ Verifiy that _send_options_message() is called in @@ -166,8 +129,10 @@ def test_close(self, mock_connectTCP): """ Verify that close() disconnects the connector and errors callbacks. """ + transport = Mock() self.obj_ut.error_all_requests = Mock() self.obj_ut.add_connection() + self.obj_ut.client_connection_made(transport) self.obj_ut.is_closed = False self.obj_ut.close() From fdab49fdd6cdeed4a1c33f3ea11de1f40b5cfcf2 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 30 Oct 2019 08:04:16 -0400 Subject: [PATCH 1683/2005] Fix default ExecutionProfile consistency_level value --- cassandra/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 1d1ebc7e0d..8450a081ce 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -357,7 +357,7 @@ class ExecutionProfile(object): _consistency_level_explicit = False def __init__(self, load_balancing_policy=_NOT_SET, retry_policy=None, - consistency_level=ConsistencyLevel._NOT_SET, serial_consistency_level=None, + consistency_level=_NOT_SET, serial_consistency_level=None, request_timeout=10.0, row_factory=named_tuple_factory, speculative_execution_policy=None, continuous_paging_options=None): From 81856d154bf6007c85bda472e528c4219dfdc58e Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 30 Oct 2019 16:04:12 -0400 Subject: [PATCH 1684/2005] Fix cloud and some simulacron tests --- cassandra/cluster.py | 2 +- tests/integration/simulacron/test_connection.py | 16 +++++++++------- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 8450a081ce..4621783040 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -47,7 +47,7 @@ OperationTimedOut, UnsupportedOperation, SchemaTargetType, DriverException, ProtocolVersion, UnresolvableContactPoints) -from cassandra.auth import _proxy_execute_key +from cassandra.auth import _proxy_execute_key, PlainTextAuthProvider from cassandra.connection import (ConnectionException, ConnectionShutdown, ConnectionHeartbeat, ProtocolVersionUnsupported, EndPoint, DefaultEndPoint, DefaultEndPointFactory, diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index 9fee9a2b93..08590eca63 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -27,9 +27,10 @@ from cassandra.policies import HostStateListener, RoundRobinPolicy from tests import connection_class, thread_pool_executor_class -from tests.integration import PROTOCOL_VERSION, requiressimulacron +from tests.integration import requiressimulacron from tests.integration.util import assert_quiescent_pool_state, late -from tests.integration.simulacron import SimulacronBase +# important to import the patch PROTOCOL_VERSION from the simulacron module +from tests.integration.simulacron import SimulacronBase, PROTOCOL_VERSION from cassandra.connection import DEFAULT_CQL_VERSION from tests.unit.cython.utils import cythontest from tests.integration.simulacron.utils import (NO_THEN, PrimeOptions, @@ -323,7 +324,7 @@ def test_retry_after_defunct(self): cluster_name="{}/{}".format(simulacron_cluster.cluster_name, last_host)) roundrobin_lbp = OrderedRoundRobinPolicy() - cluster = Cluster(compression=False, + cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False, idle_heartbeat_interval=idle_heartbeat_interval, idle_heartbeat_timeout=idle_heartbeat_timeout, execution_profiles={ @@ -377,7 +378,7 @@ def test_idle_connection_is_not_closed(self): idle_heartbeat_interval = 1 listener = TrackDownListener() - cluster = Cluster(compression=False, + cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False, idle_heartbeat_interval=idle_heartbeat_interval, idle_heartbeat_timeout=idle_heartbeat_timeout) session = cluster.connect(wait_for_all_pools=True) @@ -408,7 +409,7 @@ def test_host_is_not_set_to_down_after_query_oto(self): prime_query(query_to_prime, then=NO_THEN) listener = TrackDownListener() - cluster = Cluster(compression=False) + cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False) session = cluster.connect(wait_for_all_pools=True) cluster.register_listener(listener) @@ -429,7 +430,8 @@ def test_can_shutdown_connection_subclass(self): class ExtendedConnection(connection_class): pass - cluster = Cluster(contact_points=["127.0.0.2"], + cluster = Cluster(protocol_version=PROTOCOL_VERSION, + contact_points=["127.0.0.2"], connection_class=ExtendedConnection, compression=False) cluster.connect() @@ -443,7 +445,7 @@ def test_driver_recovers_nework_isolation(self): listener = TrackDownListener() - cluster = Cluster(['127.0.0.1'], + cluster = Cluster(protocol_version=PROTOCOL_VERSION, contact_points=['127.0.0.1'], idle_heartbeat_timeout=idle_heartbeat_timeout, idle_heartbeat_interval=idle_heartbeat_interval, executor_threads=16, From cded2c109e011cd4656cbc7d64afced30cab807f Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 28 Oct 2019 14:11:08 -0500 Subject: [PATCH 1685/2005] [PYTHON-1163] Twisted cloud support --- cassandra/cluster.py | 16 ++++++--- cassandra/datastax/cloud/__init__.py | 33 +++++++++++++++---- cassandra/io/twistedreactor.py | 27 +++++++-------- test-requirements.txt | 3 +- .../integration/advanced/cloud/test_cloud.py | 2 +- 5 files changed, 55 insertions(+), 26 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 8fcbe336d6..28cfb14c9f 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -36,6 +36,12 @@ import weakref from weakref import WeakValueDictionary + +try: + from cassandra.io.twistedreactor import TwistedConnection +except ImportError: + TwistedConnection = None + try: from weakref import WeakSet except ImportError: @@ -906,13 +912,18 @@ def __init__(self, Any of the mutable Cluster attributes may be set as keyword arguments to the constructor. """ + if connection_class is not None: + self.connection_class = connection_class if cloud is not None: if contact_points is not _NOT_SET or endpoint_factory or ssl_context or ssl_options: raise ValueError("contact_points, endpoint_factory, ssl_context, and ssl_options " "cannot be specified with a cloud configuration") - cloud_config = dscloud.get_cloud_config(cloud) + cloud_config = dscloud.get_cloud_config( + cloud, + create_pyopenssl_context=self.connection_class is TwistedConnection + ) ssl_context = cloud_config.ssl_context ssl_options = {'check_hostname': True} @@ -994,9 +1005,6 @@ def __init__(self, raise TypeError("address_translator should not be a class, it should be an instance of that class") self.address_translator = address_translator - if connection_class is not None: - self.connection_class = connection_class - if timestamp_generator is not None: if not callable(timestamp_generator): raise ValueError("timestamp_generator must be callable") diff --git a/cassandra/datastax/cloud/__init__.py b/cassandra/datastax/cloud/__init__.py index ed9435e5e3..ff3b047f0e 100644 --- a/cassandra/datastax/cloud/__init__.py +++ b/cassandra/datastax/cloud/__init__.py @@ -75,7 +75,7 @@ def from_dict(cls, d): return c -def get_cloud_config(cloud_config): +def get_cloud_config(cloud_config, create_pyopenssl_context=False): if not _HAS_SSL: raise DriverException("A Python installation with SSL is required to connect to a cloud cluster.") @@ -83,26 +83,29 @@ def get_cloud_config(cloud_config): raise ValueError("The cloud config doesn't have a secure_connect_bundle specified.") try: - config = read_cloud_config_from_zip(cloud_config) + config = read_cloud_config_from_zip(cloud_config, create_pyopenssl_context) except BadZipFile: raise ValueError("Unable to open the zip file for the cloud config. Check your secure connect bundle.") - return read_metadata_info(config, cloud_config) + config = read_metadata_info(config, cloud_config) + if create_pyopenssl_context: + config.ssl_context = config.pyopenssl_context + return config -def read_cloud_config_from_zip(cloud_config): +def read_cloud_config_from_zip(cloud_config, create_pyopenssl_context): secure_bundle = cloud_config['secure_connect_bundle'] with ZipFile(secure_bundle) as zipfile: base_dir = os.path.dirname(secure_bundle) tmp_dir = tempfile.mkdtemp(dir=base_dir) try: zipfile.extractall(path=tmp_dir) - return parse_cloud_config(os.path.join(tmp_dir, 'config.json'), cloud_config) + return parse_cloud_config(os.path.join(tmp_dir, 'config.json'), cloud_config, create_pyopenssl_context) finally: shutil.rmtree(tmp_dir) -def parse_cloud_config(path, cloud_config): +def parse_cloud_config(path, cloud_config, create_pyopenssl_context): with open(path, 'r') as stream: data = json.load(stream) @@ -116,7 +119,11 @@ def parse_cloud_config(path, cloud_config): ca_cert_location = os.path.join(config_dir, 'ca.crt') cert_location = os.path.join(config_dir, 'cert') key_location = os.path.join(config_dir, 'key') + # Regardless of if we create a pyopenssl context, we still need the builtin one + # to connect to the metadata service config.ssl_context = _ssl_context_from_cert(ca_cert_location, cert_location, key_location) + if create_pyopenssl_context: + config.pyopenssl_context = _pyopenssl_context_from_cert(ca_cert_location, cert_location, key_location) return config @@ -165,3 +172,17 @@ def _ssl_context_from_cert(ca_cert_location, cert_location, key_location): ssl_context.load_cert_chain(certfile=cert_location, keyfile=key_location) return ssl_context + + +def _pyopenssl_context_from_cert(ca_cert_location, cert_location, key_location): + try: + from OpenSSL import SSL + except ImportError: + return None + ssl_context = SSL.Context(SSL.TLSv1_METHOD) + ssl_context.set_verify(SSL.VERIFY_PEER, callback=lambda _1, _2, _3, _4, ok: ok) + ssl_context.use_certificate_file(cert_location) + ssl_context.use_privatekey_file(key_location) + ssl_context.load_verify_locations(ca_cert_location) + + return ssl_context \ No newline at end of file diff --git a/cassandra/io/twistedreactor.py b/cassandra/io/twistedreactor.py index b44f224fda..9b3ff09398 100644 --- a/cassandra/io/twistedreactor.py +++ b/cassandra/io/twistedreactor.py @@ -28,7 +28,7 @@ from twisted.python.failure import Failure from zope.interface import implementer -from cassandra.connection import Connection, ConnectionShutdown, Timer, TimerManager +from cassandra.connection import Connection, ConnectionShutdown, Timer, TimerManager, ConnectionException try: from OpenSSL import SSL @@ -77,6 +77,7 @@ def connectionMade(self): def connectionLost(self, reason): # reason is a Failure instance + log.debug("Connect lost: %s", reason) self.connection.defunct(reason.value) @@ -134,9 +135,9 @@ def _on_loop_timer(self): @implementer(IOpenSSLClientConnectionCreator) -class SSLCreator(object): - def __init__(self, host, ssl_context, ssl_options, check_hostname, timeout): - self.host = host +class _SSLCreator(object): + def __init__(self, endpoint, ssl_context, ssl_options, check_hostname, timeout): + self.endpoint = endpoint self.ssl_options = ssl_options self.check_hostname = check_hostname self.timeout = timeout @@ -163,9 +164,9 @@ def verify_callback(self, connection, x509, errnum, errdepth, ok): def info_callback(self, connection, where, ret): if where & SSL.SSL_CB_HANDSHAKE_DONE: - if self.check_hostname and self.host != connection.get_peer_certificate().get_subject().commonName: + if self.check_hostname and self.endpoint.address != connection.get_peer_certificate().get_subject().commonName: transport = connection.get_app_data() - transport.failVerification(Failure(Exception("Hostname verification failed"))) + transport.failVerification(Failure(ConnectionException("Hostname verification failed", self.endpoint))) def clientConnectionForTLS(self, tlsProtocol): connection = SSL.Connection(self.context, None) @@ -213,7 +214,7 @@ def __init__(self, *args, **kwargs): self._loop.maybe_start() def _check_pyopenssl(self): - if self.ssl_options: + if self.ssl_context or self.ssl_options: if not _HAS_SSL: raise ImportError( str(import_exception) + @@ -231,29 +232,29 @@ def add_connection(self): # Cool they enforce strong security, but we have to be able to turn it off self._check_pyopenssl() - ssl_options = SSLCreator( - self.endpoint.address, + ssl_connection_creator = _SSLCreator( + self.endpoint, self.ssl_context if self.ssl_context else None, self.ssl_options, self._check_hostname, self.connect_timeout, ) - point = SSL4ClientEndpoint( + endpoint = SSL4ClientEndpoint( reactor, host, port, - sslContextFactory=ssl_options, + sslContextFactory=ssl_connection_creator, timeout=self.connect_timeout, ) else: - point = TCP4ClientEndpoint( + endpoint = TCP4ClientEndpoint( reactor, host, port, timeout=self.connect_timeout ) - connectProtocol(point, TwistedConnectionProtocol(self)) + connectProtocol(endpoint, TwistedConnectionProtocol(self)) def client_connection_made(self, transport): """ diff --git a/test-requirements.txt b/test-requirements.txt index 4d88134126..709da4f37b 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -7,7 +7,7 @@ unittest2 pytz sure pure-sasl -twisted; python_version >= '3.5' +twisted[tls]; python_version >= '3.5' twisted[tls]==19.2.1; python_version < '3.5' gevent>=1.0 eventlet @@ -15,4 +15,3 @@ cython>=0.20,<0.30 packaging futurist; python_version >= '3.7' asynctest; python_version > '3.4' -pyopenssl diff --git a/tests/integration/advanced/cloud/test_cloud.py b/tests/integration/advanced/cloud/test_cloud.py index 0ad8ea7f13..eefcdfa3d5 100644 --- a/tests/integration/advanced/cloud/test_cloud.py +++ b/tests/integration/advanced/cloud/test_cloud.py @@ -107,7 +107,7 @@ def test_verify_hostname(self): with patch('cassandra.datastax.cloud.parse_metadata_info', wraps=self._bad_hostname_metadata): with self.assertRaises(NoHostAvailable) as e: self.connect(self.creds) - self.assertIn("hostname", str(e.exception)) + self.assertIn("hostname", str(e.exception).lower()) def test_error_when_bundle_doesnt_exist(self): try: From cccf3b0c97a05504506336c39332d247e90eb3fd Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 31 Oct 2019 10:37:10 -0500 Subject: [PATCH 1686/2005] Documentation for twisted SSL context --- docs/cloud.rst | 2 +- docs/security.rst | 35 +++++++++++++++++++++++++++++++++-- 2 files changed, 34 insertions(+), 3 deletions(-) diff --git a/docs/cloud.rst b/docs/cloud.rst index a7e2fb93c1..0da671151e 100644 --- a/docs/cloud.rst +++ b/docs/cloud.rst @@ -34,5 +34,5 @@ Limitations Event loops ^^^^^^^^^^^ -Twisted and Evenlet aren't supported yet. These event loops are still using the old way to configure +Evenlet isn't supported yet. Eventlet still uses the old way to configure SSL (ssl_options), which is not compatible with the secure connect bundle provided by Apollo. diff --git a/docs/security.rst b/docs/security.rst index 0353091eb7..1093c52df9 100644 --- a/docs/security.rst +++ b/docs/security.rst @@ -61,8 +61,8 @@ SSL should be used when client encryption is enabled in Cassandra. To give you as much control as possible over your SSL configuration, our SSL API takes a user-created `SSLContext` instance from the Python standard library. These docs will include some examples for how to achieve common configurations, -but the `ssl.SSLContext` documentation gives a more complete description of -what is possible. +but the `ssl.SSLContext `_ documentation +gives a more complete description of what is possible. To enable SSL with version 3.17.0 and higher, you will need to set :attr:`.Cluster.ssl_context` to a ``ssl.SSLContext`` instance to enable SSL. Optionally, you can also set :attr:`.Cluster.ssl_options` @@ -78,6 +78,15 @@ It might be also useful to learn about the different levels of identity verifica * `Using SSL in DSE drivers `_ +SSL with Twisted +^^^^^^^^^^^^^^^^ +Twisted uses an alternative SSL implementation called pyOpenSSL, so if your `Cluster`'s connection class is +:class:`~cassandra.io.twistedreactor.TwistedConnection`, you must pass a +`pyOpenSSL context `_ instead. +An example is provided in these docs, and more details can be found in the +`documentation `_. +pyOpenSSL is not installed by the driver and must be installed separately. + SSL Configuration Examples ^^^^^^^^^^^^^^^^^^^^^^^^^^ Here, we'll describe the server and driver configuration necessary to set up SSL to meet various goals, such as the client verifying the server and the server verifying the client. We'll also include Python code demonstrating how to use servers and drivers configured in these ways. @@ -239,6 +248,28 @@ The following driver code specifies that the connection should use two-way verif The driver uses ``SSLContext`` directly to give you many other options in configuring SSL. Consider reading the `Python SSL documentation `_ for more details about ``SSLContext`` configuration. +**Server verifies client and client verifies server using Twisted and pyOpenSSL** + +.. code-block:: python + + from OpenSSL import SSL, crypto + from cassandra.cluster import Cluster + from cassandra.io.twistedreactor import TwistedConnection + + ssl_context = SSL.Context(SSL.TLSv1_METHOD) + ssl_context.set_verify(SSL.VERIFY_PEER, callback=lambda _1, _2, _3, _4, ok: ok) + ssl_context.use_certificate_file('/path/to/client.crt_signed') + ssl_context.use_privatekey_file('/path/to/client.key') + ssl_context.load_verify_locations('/path/to/rootca.crt') + + cluster = Cluster( + contact_points=['127.0.0.1'], + connection_class=TwistedConnection, + ssl_context=ssl_context, + ssl_options={'check_hostname': True} + ) + session = cluster.connect() + Versions 3.16.0 and lower ^^^^^^^^^^^^^^^^^^^^^^^^^ From 97de684894f6d8f15917da5bda852f7bea82fd07 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 1 Nov 2019 09:16:07 -0400 Subject: [PATCH 1687/2005] Ngdg docs integration (#19) * Add iterate_step test * ngdg docs integration --- cassandra/datastax/graph/fluent/__init__.py | 2 +- docs/.nav | 3 + docs/api/cassandra/graph.rst | 27 + docs/api/cassandra/metadata.rst | 9 + docs/classic_graph.rst | 299 +++++++++++ docs/graph.rst | 501 +++++++++++------- docs/graph_fluent.rst | 41 +- docs/index.rst | 6 +- .../advanced/graph/fluent/test_graph.py | 14 + 9 files changed, 707 insertions(+), 195 deletions(-) create mode 100644 docs/classic_graph.rst diff --git a/cassandra/datastax/graph/fluent/__init__.py b/cassandra/datastax/graph/fluent/__init__.py index 7d1ba0b60c..0609172483 100644 --- a/cassandra/datastax/graph/fluent/__init__.py +++ b/cassandra/datastax/graph/fluent/__init__.py @@ -190,7 +190,7 @@ def query_from_traversal(traversal, graph_protocol=DSE_GRAPH_QUERY_PROTOCOL, con :param traversal: The GraphTraversal object :param graph_protocol: The graph protocol. Default is `DseGraph.DSE_GRAPH_QUERY_PROTOCOL`. :param context: The dict of the serialization context, needed for GraphSON3 (tuple, udt). - e.g: {'cluster': dse_cluster, 'graph_name': name} + e.g: {'cluster': cluster, 'graph_name': name} """ if isinstance(traversal, GraphTraversal): diff --git a/docs/.nav b/docs/.nav index 7b39d9001d..d5b54c4e13 100644 --- a/docs/.nav +++ b/docs/.nav @@ -3,6 +3,9 @@ getting_started execution_profiles lwt object_mapper +geo_types +graph +classic_graph performance query_paging security diff --git a/docs/api/cassandra/graph.rst b/docs/api/cassandra/graph.rst index 2211a25d96..43ddd3086c 100644 --- a/docs/api/cassandra/graph.rst +++ b/docs/api/cassandra/graph.rst @@ -11,6 +11,30 @@ .. autofunction:: graph_object_row_factory +.. autofunction:: graph_graphson2_row_factory + +.. autofunction:: graph_graphson3_row_factory + +.. function:: to_int(value) + + Wraps a value to be explicitly serialized as a graphson Int. + +.. function:: to_bigint(value) + + Wraps a value to be explicitly serialized as a graphson Bigint. + +.. function:: to_smallint(value) + + Wraps a value to be explicitly serialized as a graphson Smallint. + +.. function:: to_float(value) + + Wraps a value to be explicitly serialized as a graphson Float. + +.. function:: to_double(value) + + Wraps a value to be explicitly serialized as a graphson Double. + .. autoclass:: GraphProtocol :members: @@ -92,3 +116,6 @@ .. autoclass:: GraphSON2Reader :members: + +.. autoclass:: GraphSON3Reader + :members: diff --git a/docs/api/cassandra/metadata.rst b/docs/api/cassandra/metadata.rst index ed79d04f42..b5e6dae904 100644 --- a/docs/api/cassandra/metadata.rst +++ b/docs/api/cassandra/metadata.rst @@ -34,6 +34,9 @@ Schemas .. autoclass:: TableMetadata () :members: +.. autoclass:: TableMetadataV3 () + :members: + .. autoclass:: ColumnMetadata () :members: @@ -43,6 +46,12 @@ Schemas .. autoclass:: MaterializedViewMetadata () :members: +.. autoclass:: VertexMetadata () + :members: + +.. autoclass:: EdgeMetadata () + :members: + Tokens and Ring Topology ------------------------ diff --git a/docs/classic_graph.rst b/docs/classic_graph.rst new file mode 100644 index 0000000000..ef68c86359 --- /dev/null +++ b/docs/classic_graph.rst @@ -0,0 +1,299 @@ +DataStax Classic Graph Queries +============================== + +Getting Started +~~~~~~~~~~~~~~~ + +First, we need to create a graph in the system. To access the system API, we +use the system execution profile :: + + from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT + + cluster = Cluster() + session = cluster.connect() + + graph_name = 'movies' + session.execute_graph("system.graph(name).ifNotExists().engine(Classic).create()", {'name': graph_name}, + execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) + + +To execute requests on our newly created graph, we need to setup an execution +profile. Additionally, we also need to set the schema_mode to `development` +for the schema creation:: + + + from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT + from cassandra.graph import GraphOptions + + graph_name = 'movies' + ep = GraphExecutionProfile(graph_options=GraphOptions(graph_name=graph_name)) + + cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) + session = cluster.connect() + + session.execute_graph("schema.config().option('graph.schema_mode').set('development')") + + +We are ready to configure our graph schema. We will create a simple one for movies:: + + # properties are used to define a vertex + properties = """ + schema.propertyKey("genreId").Text().create(); + schema.propertyKey("personId").Text().create(); + schema.propertyKey("movieId").Text().create(); + schema.propertyKey("name").Text().create(); + schema.propertyKey("title").Text().create(); + schema.propertyKey("year").Int().create(); + schema.propertyKey("country").Text().create(); + """ + + session.execute_graph(properties) # we can execute multiple statements in a single request + + # A Vertex represents a "thing" in the world. + vertices = """ + schema.vertexLabel("genre").properties("genreId","name").create(); + schema.vertexLabel("person").properties("personId","name").create(); + schema.vertexLabel("movie").properties("movieId","title","year","country").create(); + """ + + session.execute_graph(vertices) + + # An edge represents a relationship between two vertices + edges = """ + schema.edgeLabel("belongsTo").single().connection("movie","genre").create(); + schema.edgeLabel("actor").connection("movie","person").create(); + """ + + session.execute_graph(edges) + + # Indexes to execute graph requests efficiently + indexes = """ + schema.vertexLabel("genre").index("genresById").materialized().by("genreId").add(); + schema.vertexLabel("genre").index("genresByName").materialized().by("name").add(); + schema.vertexLabel("person").index("personsById").materialized().by("personId").add(); + schema.vertexLabel("person").index("personsByName").materialized().by("name").add(); + schema.vertexLabel("movie").index("moviesById").materialized().by("movieId").add(); + schema.vertexLabel("movie").index("moviesByTitle").materialized().by("title").add(); + schema.vertexLabel("movie").index("moviesByYear").secondary().by("year").add(); + """ + +Next, we'll add some data:: + + session.execute_graph(""" + g.addV('genre').property('genreId', 1).property('name', 'Action').next(); + g.addV('genre').property('genreId', 2).property('name', 'Drama').next(); + g.addV('genre').property('genreId', 3).property('name', 'Comedy').next(); + g.addV('genre').property('genreId', 4).property('name', 'Horror').next(); + """) + + session.execute_graph(""" + g.addV('person').property('personId', 1).property('name', 'Mark Wahlberg').next(); + g.addV('person').property('personId', 2).property('name', 'Leonardo DiCaprio').next(); + g.addV('person').property('personId', 3).property('name', 'Iggy Pop').next(); + """) + + session.execute_graph(""" + g.addV('movie').property('movieId', 1).property('title', 'The Happening'). + property('year', 2008).property('country', 'United States').next(); + g.addV('movie').property('movieId', 2).property('title', 'The Italian Job'). + property('year', 2003).property('country', 'United States').next(); + + g.addV('movie').property('movieId', 3).property('title', 'Revolutionary Road'). + property('year', 2008).property('country', 'United States').next(); + g.addV('movie').property('movieId', 4).property('title', 'The Man in the Iron Mask'). + property('year', 1998).property('country', 'United States').next(); + + g.addV('movie').property('movieId', 5).property('title', 'Dead Man'). + property('year', 1995).property('country', 'United States').next(); + """) + +Now that our genre, actor and movie vertices are added, we'll create the relationships (edges) between them:: + + session.execute_graph(""" + genre_horror = g.V().hasLabel('genre').has('name', 'Horror').next(); + genre_drama = g.V().hasLabel('genre').has('name', 'Drama').next(); + genre_action = g.V().hasLabel('genre').has('name', 'Action').next(); + + leo = g.V().hasLabel('person').has('name', 'Leonardo DiCaprio').next(); + mark = g.V().hasLabel('person').has('name', 'Mark Wahlberg').next(); + iggy = g.V().hasLabel('person').has('name', 'Iggy Pop').next(); + + the_happening = g.V().hasLabel('movie').has('title', 'The Happening').next(); + the_italian_job = g.V().hasLabel('movie').has('title', 'The Italian Job').next(); + rev_road = g.V().hasLabel('movie').has('title', 'Revolutionary Road').next(); + man_mask = g.V().hasLabel('movie').has('title', 'The Man in the Iron Mask').next(); + dead_man = g.V().hasLabel('movie').has('title', 'Dead Man').next(); + + the_happening.addEdge('belongsTo', genre_horror); + the_italian_job.addEdge('belongsTo', genre_action); + rev_road.addEdge('belongsTo', genre_drama); + man_mask.addEdge('belongsTo', genre_drama); + man_mask.addEdge('belongsTo', genre_action); + dead_man.addEdge('belongsTo', genre_drama); + + the_happening.addEdge('actor', mark); + the_italian_job.addEdge('actor', mark); + rev_road.addEdge('actor', leo); + man_mask.addEdge('actor', leo); + dead_man.addEdge('actor', iggy); + """) + +We are all set. You can now query your graph. Here are some examples:: + + # Find all movies of the genre Drama + for r in session.execute_graph(""" + g.V().has('genre', 'name', 'Drama').in('belongsTo').valueMap();"""): + print(r) + + # Find all movies of the same genre than the movie 'Dead Man' + for r in session.execute_graph(""" + g.V().has('movie', 'title', 'Dead Man').out('belongsTo').in('belongsTo').valueMap();"""): + print(r) + + # Find all movies of Mark Wahlberg + for r in session.execute_graph(""" + g.V().has('person', 'name', 'Mark Wahlberg').in('actor').valueMap();"""): + print(r) + +To see a more graph examples, see `DataStax Graph Examples `_. + +Graph Types +~~~~~~~~~~~ + +Here are the supported graph types with their python representations: + +========== ================ +DSE Graph Python +========== ================ +boolean bool +bigint long, int (PY3) +int int +smallint int +varint int +float float +double double +uuid uuid.UUID +Decimal Decimal +inet str +timestamp datetime.datetime +date datetime.date +time datetime.time +duration datetime.timedelta +point Point +linestring LineString +polygon Polygon +blob bytearray, buffer (PY2), memoryview (PY3), bytes (PY3) +========== ================ + +Graph Row Factory +~~~~~~~~~~~~~~~~~ + +By default (with :class:`.GraphExecutionProfile.row_factory` set to :func:`.graph.graph_object_row_factory`), known graph result +types are unpacked and returned as specialized types (:class:`.Vertex`, :class:`.Edge`). If the result is not one of these +types, a :class:`.graph.Result` is returned, containing the graph result parsed from JSON and removed from its outer dict. +The class has some accessor convenience methods for accessing top-level properties by name (`type`, `properties` above), +or lists by index:: + + # dicts with `__getattr__` or `__getitem__` + result = session.execute_graph("[[key_str: 'value', key_int: 3]]", execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)[0] # Using system exec just because there is no graph defined + result # dse.graph.Result({u'key_str': u'value', u'key_int': 3}) + result.value # {u'key_int': 3, u'key_str': u'value'} (dict) + result.key_str # u'value' + result.key_int # 3 + result['key_str'] # u'value' + result['key_int'] # 3 + + # lists with `__getitem__` + result = session.execute_graph('[[0, 1, 2]]', execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)[0] + result # dse.graph.Result([0, 1, 2]) + result.value # [0, 1, 2] (list) + result[1] # 1 (list[1]) + +You can use a different row factory by setting :attr:`.Session.default_graph_row_factory` or passing it to +:meth:`.Session.execute_graph`. For example, :func:`.graph.single_object_row_factory` returns the JSON result string`, +unparsed. :func:`.graph.graph_result_row_factory` returns parsed, but unmodified results (such that all metadata is retained, +unlike :func:`.graph.graph_object_row_factory`, which sheds some as attributes and properties are unpacked). These results +also provide convenience methods for converting to known types (:meth:`~.Result.as_vertex`, :meth:`~.Result.as_edge`, :meth:`~.Result.as_path`). + +Vertex and Edge properties are never unpacked since their types are unknown. If you know your graph schema and want to +deserialize properties, use the :class:`.GraphSON1Deserializer`. It provides convenient methods to deserialize by types (e.g. +deserialize_date, deserialize_uuid, deserialize_polygon etc.) Example:: + + # ... + from cassandra.graph import GraphSON1Deserializer + + row = session.execute_graph("g.V().toList()")[0] + value = row.properties['my_property_key'][0].value # accessing the VertexProperty value + value = GraphSON1Deserializer.deserialize_timestamp(value) + + print(value) # 2017-06-26 08:27:05 + print(type(value)) # + + +Named Parameters +~~~~~~~~~~~~~~~~ + +Named parameters are passed in a dict to :meth:`.cluster.Session.execute_graph`:: + + result_set = session.execute_graph('[a, b]', {'a': 1, 'b': 2}, execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) + [r.value for r in result_set] # [1, 2] + +All python types listed in `Graph Types`_ can be passed as named parameters and will be serialized +automatically to their graph representation: + +Example:: + + session.execute_graph(""" + g.addV('person'). + property('name', text_value). + property('age', integer_value). + property('birthday', timestamp_value). + property('house_yard', polygon_value).toList() + """, { + 'text_value': 'Mike Smith', + 'integer_value': 34, + 'timestamp_value': datetime.datetime(1967, 12, 30), + 'polygon_value': Polygon(((30, 10), (40, 40), (20, 40), (10, 20), (30, 10))) + }) + + +As with all Execution Profile parameters, graph options can be set in the cluster default (as shown in the first example) +or specified per execution:: + + ep = session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, + graph_options=GraphOptions(graph_name='something-else')) + session.execute_graph(statement, execution_profile=ep) + +Using GraphSON2 Protocol +~~~~~~~~~~~~~~~~~~~~~~~~ + +The default graph protocol used is GraphSON1. However GraphSON1 may +cause problems of type conversion happening during the serialization +of the query to the DSE Graph server, or the deserialization of the +responses back from a string Gremlin query. GraphSON2 offers better +support for the complex data types handled by DSE Graph. + +DSE >=5.0.4 now offers the possibility to use the GraphSON2 protocol +for graph queries. Enabling GraphSON2 can be done by `changing the +graph protocol of the execution profile` and `setting the graphson2 row factory`:: + + from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT + from cassandra.graph import GraphOptions, GraphProtocol, graph_graphson2_row_factory + + # Create a GraphSON2 execution profile + ep = GraphExecutionProfile(graph_options=GraphOptions(graph_name='types', + graph_protocol=GraphProtocol.GRAPHSON_2_0), + row_factory=graph_graphson2_row_factory) + + cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) + session = cluster.connect() + session.execute_graph(...) + +Using GraphSON2, all properties will be automatically deserialized to +its Python representation. Note that it may bring significant +behavioral change at runtime. + +It is generally recommended to switch to GraphSON2 as it brings more +consistent support for complex data types in the Graph driver and will +be activated by default in the next major version (Python dse-driver +driver 3.0). diff --git a/docs/graph.rst b/docs/graph.rst index bd81f0cf4f..47dc53d38d 100644 --- a/docs/graph.rst +++ b/docs/graph.rst @@ -1,11 +1,11 @@ DataStax Graph Queries ====================== -The Cassandra driver executes graph queries over the Cassandra native protocol. Use +The driver executes graph queries over the Cassandra native protocol. Use :meth:`.Session.execute_graph` or :meth:`.Session.execute_graph_async` for -executing gremlin queries in DSE Graph. +executing gremlin queries in DataStax Graph. -Three Execution Profiles are provided suitable for graph execution: +The driver defines three Execution Profiles suitable for graph execution: * :data:`~.cluster.EXEC_PROFILE_GRAPH_DEFAULT` * :data:`~.cluster.EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT` @@ -14,8 +14,13 @@ Three Execution Profiles are provided suitable for graph execution: See :doc:`getting_started` and :doc:`execution_profiles` for more detail on working with profiles. -Getting Started -~~~~~~~~~~~~~~~ +In DSE 6.8.0, the Core graph engine has been introduced and is now the default. It +provides a better unified multi-model, performance and scale. This guide +is for graphs that use the core engine. If you work with previous versions of +DSE or existing graphs, see :doc:`classic_graph`. + +Getting Started with Graph and the Core Engine +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ First, we need to create a graph in the system. To access the system API, we use the system execution profile :: @@ -26,129 +31,204 @@ use the system execution profile :: session = cluster.connect() graph_name = 'movies' - session.execute_graph("system.graph(name).ifNotExists().create()", {'name': graph_name}, + session.execute_graph("system.graph(name).create()", {'name': graph_name}, execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) -To execute requests on our newly created graph, we need to setup an execution -profile. Additionally, we also need to set the schema_mode to `development` -for the schema creation:: - +Graphs that use the core engine only support GraphSON3. Since they are Cassandra tables under +the hood, we can automatically configure the execution profile with the proper options +(row_factory and graph_protocol) when executing queries. You only need to make sure that +the `graph_name` is set and GraphSON3 will be automatically used:: from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT - from cassandra.datastax.graph import GraphOptions graph_name = 'movies' ep = GraphExecutionProfile(graph_options=GraphOptions(graph_name=graph_name)) - cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) session = cluster.connect() - - session.execute_graph("schema.config().option('graph.schema_mode').set('development')") + session.execute_graph("g.addV(...)") -We are ready to configure our graph schema. We will create a simple one for movies:: +Note that this graph engine detection is based on the metadata. You might experience +some query errors if the graph has been newly created and is not yet in the metadata. This +would result to a badly configured execution profile. If you really want to avoid that, +configure your execution profile explicitly:: - # properties are used to define a vertex - properties = """ - schema.propertyKey("genreId").Text().create(); - schema.propertyKey("personId").Text().create(); - schema.propertyKey("movieId").Text().create(); - schema.propertyKey("name").Text().create(); - schema.propertyKey("title").Text().create(); - schema.propertyKey("year").Int().create(); - schema.propertyKey("country").Text().create(); - """ + from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT + from cassandra.graph import GraphOptions, GraphProtocol, graph_graphson3_row_factory - session.execute_graph(properties) # we can execute multiple statements in a single request + graph_name = 'movies' + ep_graphson3 = GraphExecutionProfile( + row_factory=graph_graphson3_row_factory, + graph_options=GraphOptions( + graph_protocol=GraphProtocol.GRAPHSON_3_0, + graph_name=graph_name)) + + cluster = Cluster(execution_profiles={'core': ep_graphson3}) + session = cluster.connect() + session.execute_graph("g.addV(...)", execution_profile='core') + + +We are ready to configure our graph schema. We will create a simple one for movies:: # A Vertex represents a "thing" in the world. - vertices = """ - schema.vertexLabel("genre").properties("genreId","name").create(); - schema.vertexLabel("person").properties("personId","name").create(); - schema.vertexLabel("movie").properties("movieId","title","year","country").create(); + # Create the genre vertex + query = """ + schema.vertexLabel('genre') + .partitionBy('genreId', Int) + .property('name', Text) + .create() """ - - session.execute_graph(vertices) + session.execute_graph(query) + + # Create the person vertex + query = """ + schema.vertexLabel('person') + .partitionBy('personId', Int) + .property('name', Text) + .create() + """ + session.execute_graph(query) + + # Create the movie vertex + query = """ + schema.vertexLabel('movie') + .partitionBy('movieId', Int) + .property('title', Text) + .property('year', Int) + .property('country', Text) + .create() + """ + session.execute_graph(query) # An edge represents a relationship between two vertices - edges = """ - schema.edgeLabel("belongsTo").single().connection("movie","genre").create(); - schema.edgeLabel("actor").connection("movie","person").create(); + # Create our edges + queries = """ + schema.edgeLabel('belongsTo').from('movie').to('genre').create(); + schema.edgeLabel('actor').from('movie').to('person').create(); """ - - session.execute_graph(edges) + session.execute_graph(queries) # Indexes to execute graph requests efficiently + + # If you have a node with the search workload enabled (solr), use the following: + indexes = """ + schema.vertexLabel('genre').searchIndex() + .by("name") + .create(); + + schema.vertexLabel('person').searchIndex() + .by("name") + .create(); + + schema.vertexLabel('movie').searchIndex() + .by('title') + .by("year") + .create(); + """ + session.execute_graph(indexes) + + # Otherwise, use secondary indexes: indexes = """ - schema.vertexLabel("genre").index("genresById").materialized().by("genreId").add(); - schema.vertexLabel("genre").index("genresByName").materialized().by("name").add(); - schema.vertexLabel("person").index("personsById").materialized().by("personId").add(); - schema.vertexLabel("person").index("personsByName").materialized().by("name").add(); - schema.vertexLabel("movie").index("moviesById").materialized().by("movieId").add(); - schema.vertexLabel("movie").index("moviesByTitle").materialized().by("title").add(); - schema.vertexLabel("movie").index("moviesByYear").secondary().by("year").add(); + schema.vertexLabel('genre') + .secondaryIndex('by_genre') + .by('name') + .create() + + schema.vertexLabel('person') + .secondaryIndex('by_name') + .by('name') + .create() + + schema.vertexLabel('movie') + .secondaryIndex('by_title') + .by('title') + .create() """ + session.execute_graph(indexes) + +Add some edge indexes (materialized views):: + + indexes = """ + schema.edgeLabel('belongsTo') + .from('movie') + .to('genre') + .materializedView('movie__belongsTo__genre_by_in_genreId') + .ifNotExists() + .partitionBy(IN, 'genreId') + .clusterBy(OUT, 'movieId', Asc) + .create() + + schema.edgeLabel('actor') + .from('movie') + .to('person') + .materializedView('movie__actor__person_by_in_personId') + .ifNotExists() + .partitionBy(IN, 'personId') + .clusterBy(OUT, 'movieId', Asc) + .create() + """ + session.execute_graph(indexes) Next, we'll add some data:: session.execute_graph(""" - g.addV('genre').property('genreId', 1).property('name', 'Action').next(); - g.addV('genre').property('genreId', 2).property('name', 'Drama').next(); - g.addV('genre').property('genreId', 3).property('name', 'Comedy').next(); - g.addV('genre').property('genreId', 4).property('name', 'Horror').next(); + g.addV('genre').property('genreId', 1).property('name', 'Action').next(); + g.addV('genre').property('genreId', 2).property('name', 'Drama').next(); + g.addV('genre').property('genreId', 3).property('name', 'Comedy').next(); + g.addV('genre').property('genreId', 4).property('name', 'Horror').next(); """) session.execute_graph(""" - g.addV('person').property('personId', 1).property('name', 'Mark Wahlberg').next(); - g.addV('person').property('personId', 2).property('name', 'Leonardo DiCaprio').next(); - g.addV('person').property('personId', 3).property('name', 'Iggy Pop').next(); + g.addV('person').property('personId', 1).property('name', 'Mark Wahlberg').next(); + g.addV('person').property('personId', 2).property('name', 'Leonardo DiCaprio').next(); + g.addV('person').property('personId', 3).property('name', 'Iggy Pop').next(); """) session.execute_graph(""" - g.addV('movie').property('movieId', 1).property('title', 'The Happening'). - property('year', 2008).property('country', 'United States').next(); - g.addV('movie').property('movieId', 2).property('title', 'The Italian Job'). - property('year', 2003).property('country', 'United States').next(); - - g.addV('movie').property('movieId', 3).property('title', 'Revolutionary Road'). - property('year', 2008).property('country', 'United States').next(); - g.addV('movie').property('movieId', 4).property('title', 'The Man in the Iron Mask'). - property('year', 1998).property('country', 'United States').next(); - - g.addV('movie').property('movieId', 5).property('title', 'Dead Man'). - property('year', 1995).property('country', 'United States').next(); + g.addV('movie').property('movieId', 1).property('title', 'The Happening'). + property('year', 2008).property('country', 'United States').next(); + g.addV('movie').property('movieId', 2).property('title', 'The Italian Job'). + property('year', 2003).property('country', 'United States').next(); + + g.addV('movie').property('movieId', 3).property('title', 'Revolutionary Road'). + property('year', 2008).property('country', 'United States').next(); + g.addV('movie').property('movieId', 4).property('title', 'The Man in the Iron Mask'). + property('year', 1998).property('country', 'United States').next(); + + g.addV('movie').property('movieId', 5).property('title', 'Dead Man'). + property('year', 1995).property('country', 'United States').next(); """) Now that our genre, actor and movie vertices are added, we'll create the relationships (edges) between them:: session.execute_graph(""" - genre_horror = g.V().hasLabel('genre').has('name', 'Horror').next(); - genre_drama = g.V().hasLabel('genre').has('name', 'Drama').next(); - genre_action = g.V().hasLabel('genre').has('name', 'Action').next(); - - leo = g.V().hasLabel('person').has('name', 'Leonardo DiCaprio').next(); - mark = g.V().hasLabel('person').has('name', 'Mark Wahlberg').next(); - iggy = g.V().hasLabel('person').has('name', 'Iggy Pop').next(); - - the_happening = g.V().hasLabel('movie').has('title', 'The Happening').next(); - the_italian_job = g.V().hasLabel('movie').has('title', 'The Italian Job').next(); - rev_road = g.V().hasLabel('movie').has('title', 'Revolutionary Road').next(); - man_mask = g.V().hasLabel('movie').has('title', 'The Man in the Iron Mask').next(); - dead_man = g.V().hasLabel('movie').has('title', 'Dead Man').next(); - - the_happening.addEdge('belongsTo', genre_horror); - the_italian_job.addEdge('belongsTo', genre_action); - rev_road.addEdge('belongsTo', genre_drama); - man_mask.addEdge('belongsTo', genre_drama); - man_mask.addEdge('belongsTo', genre_action); - dead_man.addEdge('belongsTo', genre_drama); - - the_happening.addEdge('actor', mark); - the_italian_job.addEdge('actor', mark); - rev_road.addEdge('actor', leo); - man_mask.addEdge('actor', leo); - dead_man.addEdge('actor', iggy); + genre_horror = g.V().hasLabel('genre').has('name', 'Horror').id().next(); + genre_drama = g.V().hasLabel('genre').has('name', 'Drama').id().next(); + genre_action = g.V().hasLabel('genre').has('name', 'Action').id().next(); + + leo = g.V().hasLabel('person').has('name', 'Leonardo DiCaprio').id().next(); + mark = g.V().hasLabel('person').has('name', 'Mark Wahlberg').id().next(); + iggy = g.V().hasLabel('person').has('name', 'Iggy Pop').id().next(); + + the_happening = g.V().hasLabel('movie').has('title', 'The Happening').id().next(); + the_italian_job = g.V().hasLabel('movie').has('title', 'The Italian Job').id().next(); + rev_road = g.V().hasLabel('movie').has('title', 'Revolutionary Road').id().next(); + man_mask = g.V().hasLabel('movie').has('title', 'The Man in the Iron Mask').id().next(); + dead_man = g.V().hasLabel('movie').has('title', 'Dead Man').id().next(); + + g.addE('belongsTo').from(__.V(the_happening)).to(__.V(genre_horror)).next(); + g.addE('belongsTo').from(__.V(the_italian_job)).to(__.V(genre_action)).next(); + g.addE('belongsTo').from(__.V(rev_road)).to(__.V(genre_drama)).next(); + g.addE('belongsTo').from(__.V(man_mask)).to(__.V(genre_drama)).next(); + g.addE('belongsTo').from(__.V(man_mask)).to(__.V(genre_action)).next(); + g.addE('belongsTo').from(__.V(dead_man)).to(__.V(genre_drama)).next(); + + g.addE('actor').from(__.V(the_happening)).to(__.V(mark)).next(); + g.addE('actor').from(__.V(the_italian_job)).to(__.V(mark)).next(); + g.addE('actor').from(__.V(rev_road)).to(__.V(leo)).next(); + g.addE('actor').from(__.V(man_mask)).to(__.V(leo)).next(); + g.addE('actor').from(__.V(dead_man)).to(__.V(iggy)).next(); """) We are all set. You can now query your graph. Here are some examples:: @@ -157,7 +237,7 @@ We are all set. You can now query your graph. Here are some examples:: for r in session.execute_graph(""" g.V().has('genre', 'name', 'Drama').in('belongsTo').valueMap();"""): print(r) - + # Find all movies of the same genre than the movie 'Dead Man' for r in session.execute_graph(""" g.V().has('movie', 'title', 'Dead Man').out('belongsTo').in('belongsTo').valueMap();"""): @@ -170,81 +250,40 @@ We are all set. You can now query your graph. Here are some examples:: To see a more graph examples, see `DataStax Graph Examples `_. -.. _graph-types: - -Graph Types -~~~~~~~~~~~ +Graph Types for the Core Engine +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Here are the supported graph types with their python representations: -========== ================ -DSE Graph Python -========== ================ -boolean bool -bigint long, int (PY3) -int int -smallint int -varint int -float float -double double -uuid uuid.UUID -Decimal Decimal -inet str -timestamp datetime.datetime -date datetime.date -time datetime.time -duration datetime.timedelta -point Point -linestring LineString -polygon Polygon -blob bytearray, buffer (PY2), memoryview (PY3), bytes (PY3) -========== ================ - -Graph Row Factory -~~~~~~~~~~~~~~~~~ - -By default (with :class:`.GraphExecutionProfile.row_factory` set to :func:`.datastax.graph.graph_object_row_factory`), known graph result -types are unpacked and returned as specialized types (:class:`.Vertex`, :class:`.Edge`). If the result is not one of these -types, a :class:`.datastax.graph.Result` is returned, containing the graph result parsed from JSON and removed from its outer dict. -The class has some accessor convenience methods for accessing top-level properties by name (`type`, `properties` above), -or lists by index:: - - # dicts with `__getattr__` or `__getitem__` - result = session.execute_graph("[[key_str: 'value', key_int: 3]]", execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)[0] # Using system exec just because there is no graph defined - result # cassandra.datastax.graph.Result({u'key_str': u'value', u'key_int': 3}) - result.value # {u'key_int': 3, u'key_str': u'value'} (dict) - result.key_str # u'value' - result.key_int # 3 - result['key_str'] # u'value' - result['key_int'] # 3 - - # lists with `__getitem__` - result = session.execute_graph('[[0, 1, 2]]', execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)[0] - result # cassandra.datastax.graph.Result([0, 1, 2]) - result.value # [0, 1, 2] (list) - result[1] # 1 (list[1]) - -You can use a different row factory by setting :attr:`.cluster.ExecutionProfile.row_factory` or passing it to -:meth:`cluster.Session.execute_graph`. For example, :func:`.datastax.graph.single_object_row_factory` returns the JSON result string, -unparsed. :func:`.datastax.graph.graph_result_row_factory` returns parsed, but unmodified results (such that all metadata is retained, -unlike :func:`.datastax.graph.graph_object_row_factory`, which sheds some as attributes and properties are unpacked). These results -also provide convenience methods for converting to known types (:meth:`.datastax.graph.Result.as_vertex`, :meth:`.datastax.graph.Result.as_edge`, - :meth:`.datastax.graph.Result.as_path`). - -Vertex and Edge properties are never unpacked since their types are unknown. If you know your graph schema and want to -deserialize properties, use the :class:`.datastax.graph.GraphSON1Deserializer`. It provides convenient methods to deserialize by types (e.g. -deserialize_date, deserialize_uuid, deserialize_polygon etc.) Example:: - - # ... - from cassandra.datastax.graph import GraphSON1Deserializer - - row = session.execute_graph("g.V().toList()")[0] - value = row.properties['my_property_key'][0].value # accessing the VertexProperty value - value = GraphSON1Deserializer.deserialize_timestamp(value) - - print value # 2017-06-26 08:27:05 - print type(value) # - +============ ================= +DSE Graph Python Driver +============ ================= +text str +boolean bool +bigint long +int int +smallint int +varint long +double float +float float +uuid UUID +bigdecimal Decimal +duration Duration (cassandra.util) +inet str or IPV4Address/IPV6Address (if available) +timestamp datetime.datetime +date datetime.date +time datetime.time +polygon Polygon +point Point +linestring LineString +blob bytearray, buffer (PY2), memoryview (PY3), bytes (PY3) +list list +map dict +set set or list + (Can return a list due to numerical values returned by Java) +tuple tuple +udt class or namedtuple +============ ================= Named Parameters ~~~~~~~~~~~~~~~~ @@ -254,17 +293,17 @@ Named parameters are passed in a dict to :meth:`.cluster.Session.execute_graph`: result_set = session.execute_graph('[a, b]', {'a': 1, 'b': 2}, execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) [r.value for r in result_set] # [1, 2] -All python types listed in `Graph Types`_ can be passed as named parameters and will be serialized +All python types listed in `Graph Types for the Core Engine`_ can be passed as named parameters and will be serialized automatically to their graph representation: Example:: - s.execute_graph(""" + session.execute_graph(""" g.addV('person'). property('name', text_value). property('age', integer_value). property('birthday', timestamp_value). - property('house_yard', polygon_value).toList() + property('house_yard', polygon_value).next() """, { 'text_value': 'Mike Smith', 'integer_value': 34, @@ -280,36 +319,116 @@ or specified per execution:: graph_options=GraphOptions(graph_name='something-else')) session.execute_graph(statement, execution_profile=ep) -Using GraphSON2 Protocol -~~~~~~~~~~~~~~~~~~~~~~~~ +CQL collections, Tuple and UDT +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The default graph protocol used is GraphSON1. However GraphSON1 may -cause problems of type conversion happening during the serialization -of the query to the DSE Graph server, or the deserialization of the -responses back from a string Gremlin query. GraphSON2 offers better -support for the complex data types handled by DSE Graph. +This is a very interesting feature of the core engine: we can use all CQL data types, including +list, map, set, tuple and udt. Here is an example using all these types:: -DSE >=5.0.4 now offers the possibility to use the GraphSON2 protocol -for graph queries. Enabling GraphSON2 can be done by `changing the -graph protocol of the execution profile` and `setting the graphson2 row factory`:: + query = """ + schema.type('address') + .property('address', Text) + .property('city', Text) + .property('state', Text) + .create(); + """ + session.execute_graph(query) + + # It works the same way than normal CQL UDT, so we + # can create an udt class and register it + class Address(object): + def __init__(self, address, city, state): + self.address = address + self.city = city + self.state = state + + session.cluster.register_user_type(graph_name, 'address', Address) + + query = """ + schema.vertexLabel('person') + .partitionBy('personId', Int) + .property('address', typeOf('address')) + .property('friends', listOf(Text)) + .property('skills', setOf(Text)) + .property('scores', mapOf(Text, Int)) + .property('last_workout', tupleOf(Text, Date)) + .create() + """ + session.execute_graph(query) + + # insertion example + query = """ + g.addV('person') + .property('personId', pid) + .property('address', address) + .property('friends', friends) + .property('skills', skills) + .property('scores', scores) + .property('last_workout', last_workout) + .next() + """ - from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT - from cassandra.datastax.graph import GraphOptions, GraphProtocol, graph_graphson2_row_factory + session.execute_graph(query, { + 'pid': 3, + 'address': Address('42 Smith St', 'Quebec', 'QC'), + 'friends': ['Al', 'Mike', 'Cathy'], + 'skills': {'food', 'fight', 'chess'}, + 'scores': {'math': 98, 'french': 3}, + 'last_workout': ('CrossFit', datetime.date(2018, 11, 20)) + }) - # Create a GraphSON2 execution profile - ep = GraphExecutionProfile(graph_options=GraphOptions(graph_name='types', - graph_protocol=GraphProtocol.GRAPHSON_2_0), - row_factory=graph_graphson2_row_factory) +Limitations +----------- - cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) - s = cluster.connect() - s.execute_graph(...) +Since Python is not a strongly-typed language and the UDT/Tuple graphson representation is, you might +get schema errors when trying to write numerical data. Example:: + + session.execute_graph(""" + schema.vertexLabel('test_tuple').partitionBy('id', Int).property('t', tupleOf(Text, Bigint)).create() + """) -Using GraphSON2, all properties will be automatically deserialized to -its Python representation. Note that it may bring significant -behavioral change at runtime. + session.execute_graph(""" + g.addV('test_tuple').property('id', 0).property('t', t) + """, + {'t': ('Test', 99))} + ) + + # error: [Invalid query] message="Value component 1 is of type int, not bigint" + +This is because the server requires the client to include a GraphSON schema definition +with every UDT or tuple query. In the general case, the driver can't determine what Graph type +is meant by, e.g., an int value, and so it can't serialize the value with the correct type in the schema. +The driver provides some numerical type-wrapper factories that you can use to specify types: + +* :func:`~.to_int` +* :func:`~.to_bigint` +* :func:`~.to_smallint` +* :func:`~.to_float` +* :func:`~.to_double` + +Here's the working example of the case above:: + + from cassandra.graph import to_bigint + + session.execute_graph(""" + g.addV('test_tuple').property('id', 0).property('t', t) + """, + {'t': ('Test', to_bigint(99))} + ) -It is generally recommended to switch to GraphSON2 as it brings more -consistent support for complex data types in the Graph driver and will -be activated by default in the next major version (Python dse-driver -driver 3.0). +Continuous Paging +~~~~~~~~~~~~~~~~~ + +This is another nice feature that comes with the core engine: continuous paging with +graph queries. If all nodes of the cluster are >= DSE 6.8.0, it is automatically +enabled under the hood to get the best performance. If you want to explicitly +enable/disable it, you can do it through the execution profile:: + + # Disable it + ep = GraphExecutionProfile(..., continuous_paging_options=None)) + cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) + + # Enable with a custom max_pages option + ep = GraphExecutionProfile(..., + continuous_paging_options=ContinuousPagingOptions(max_pages=10))) + cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) diff --git a/docs/graph_fluent.rst b/docs/graph_fluent.rst index da5eee8db7..876d69a269 100644 --- a/docs/graph_fluent.rst +++ b/docs/graph_fluent.rst @@ -11,7 +11,7 @@ The fluent API adds graph features to the core driver:: The Graph fluent API depends on Apache TinkerPop and is not installed by default. Make sure you have the Graph requirements are properly :ref:`installed `. -You might be interested in reading the :doc:`DSE Graph Getting Started documentation ` to +You might be interested in reading the :doc:`DataStax Graph Getting Started documentation ` to understand the basics of creating a graph and its schema. Graph Traversal Queries @@ -31,7 +31,7 @@ a `Session` object, or implicitly:: g.addV('genre').property('genreId', 1).property('name', 'Action').next() # implicit execution caused by iterating over results - for v in g.V().has('genre', 'name', 'Drama').in('belongsTo').valueMap(): + for v in g.V().has('genre', 'name', 'Drama').in_('belongsTo').valueMap(): print(v) These :ref:`Python types ` are also supported transparently:: @@ -71,6 +71,27 @@ If you want to change execution property defaults, please see the :doc:`Executio for a more generalized discussion of the API. Graph traversal queries use the same execution profile defined for DSE graph. If you need to change the default properties, please refer to the :doc:`DSE Graph query documentation page ` +Configuring a Traversal Execution Profile for the Core graph engine +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To execute a traversal query with graphs that use the core engine, you need to configure +a graphson3 execution profile: + +.. code-block:: python + + from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_DEFAULT + from cassandra.datastax.graph import GraphProtocol + from cassandra.datastax.graph.fluent import DseGraph + + ep_graphson3 = DseGraph.create_execution_profile( + 'my_core_graph_name', + graph_protocol=GraphProtocol.GRAPHSON_3_0 + ) + cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep_graphson3}) + + g = DseGraph.traversal_source(session) + print g.V().toList() + Explicit Graph Traversal Execution with a DSE Session ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -100,6 +121,22 @@ Below is an example of explicit execution. For this example, assume the schema h for result in session.execute_graph(v_query): pprint(result.value) +Converting a traversal to a bytecode query for core graphs require some more work, because we +need the cluster context for UDT and tuple types: + +.. code-block:: python + + g = DseGraph.traversal_source(session=session) + context = { + 'cluster': cluster, + 'graph_name': 'the_graph_for_the_query' + } + addV_query = DseGraph.query_from_traversal( + g.addV('genre').property('genreId', 1).property('name', 'Action'), + graph_protocol=GraphProtocol.GRAPHSON_3_0, + context=context + ) + Implicit Graph Traversal Execution with TinkerPop ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/docs/index.rst b/docs/index.rst index 8ea11c6088..7eca865755 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -54,7 +54,10 @@ Contents Working with DSE geometry types :doc:`graph` - Graph queries with DSE Graph + Graph queries with the Core engine + +:doc:`classic_graph` + Graph queries with the Classic engine :doc:`graph_fluent` DataStax Graph Fluent API @@ -84,6 +87,7 @@ Contents object_mapper geo_types graph + classic_graph graph_fluent dse_auth dates_and_times diff --git a/tests/integration/advanced/graph/fluent/test_graph.py b/tests/integration/advanced/graph/fluent/test_graph.py index 174c15277b..b2fc5d62f6 100644 --- a/tests/integration/advanced/graph/fluent/test_graph.py +++ b/tests/integration/advanced/graph/fluent/test_graph.py @@ -654,6 +654,20 @@ def _validate_type(self, g, vertex): key = prop.key _validate_prop(key, value, self) + def test_iterate_step(self): + """ + Test to validate that the iterate() step work on all dse versions. + + @jira_ticket PYTHON-1155 + @expected_result iterate step works + + @test_category dse graph + """ + + g = self.fetch_traversal_source() + generate_classic(self.session) + g.addV('person').property('name', 'Person1').iterate() + class ExplicitExecutionBase(GraphUnitTestCase): From 711d4fdd1f3f107c0f0529f9feafd95ce129a48e Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 1 Nov 2019 10:11:07 -0400 Subject: [PATCH 1688/2005] Move cloud tests out of advanced/ --- build.yaml | 4 ++-- tests/integration/{advanced => }/cloud/__init__.py | 0 tests/integration/{advanced => }/cloud/test_cloud.py | 0 tests/integration/{advanced => }/cloud/test_cloud_schema.py | 0 4 files changed, 2 insertions(+), 2 deletions(-) rename tests/integration/{advanced => }/cloud/__init__.py (100%) rename tests/integration/{advanced => }/cloud/test_cloud.py (100%) rename tests/integration/{advanced => }/cloud/test_cloud_schema.py (100%) diff --git a/build.yaml b/build.yaml index beaeec3d98..7963d2257f 100644 --- a/build.yaml +++ b/build.yaml @@ -274,8 +274,8 @@ build: EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_DIR=$CCM_INSTALL_DIR DSE_VERSION=$DSE_VERSION ADS_HOME=$HOME/ VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=dse_results.xml tests/integration/advanced/ || true fi - echo "==========RUNNING ADVANCED AND CLOUD TESTS==========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CLOUD_PROXY_PATH="$HOME/proxy/" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=advanced_results.xml tests/integration/advanced/ || true + echo "==========RUNNING CLOUD TESTS==========" + EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CLOUD_PROXY_PATH="$HOME/proxy/" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=advanced_results.xml tests/integration/cloud/ || true if [ -z "$EXCLUDE_LONG" ]; then echo "==========RUNNING LONG INTEGRATION TESTS==========" diff --git a/tests/integration/advanced/cloud/__init__.py b/tests/integration/cloud/__init__.py similarity index 100% rename from tests/integration/advanced/cloud/__init__.py rename to tests/integration/cloud/__init__.py diff --git a/tests/integration/advanced/cloud/test_cloud.py b/tests/integration/cloud/test_cloud.py similarity index 100% rename from tests/integration/advanced/cloud/test_cloud.py rename to tests/integration/cloud/test_cloud.py diff --git a/tests/integration/advanced/cloud/test_cloud_schema.py b/tests/integration/cloud/test_cloud_schema.py similarity index 100% rename from tests/integration/advanced/cloud/test_cloud_schema.py rename to tests/integration/cloud/test_cloud_schema.py From 04ea944aa29e8b96646a886a615c8bb75f049db7 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 1 Nov 2019 11:02:11 -0400 Subject: [PATCH 1689/2005] Fix cloud tests imports --- tests/integration/cloud/test_cloud.py | 2 +- tests/integration/cloud/test_cloud_schema.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/cloud/test_cloud.py b/tests/integration/cloud/test_cloud.py index 0ad8ea7f13..4a0713eb52 100644 --- a/tests/integration/cloud/test_cloud.py +++ b/tests/integration/cloud/test_cloud.py @@ -32,7 +32,7 @@ from tests.integration import requirescloudproxy from tests.integration.util import wait_until_not_raised -from tests.integration.advanced.cloud import CloudProxyCluster, CLOUD_PROXY_SERVER +from tests.integration.cloud import CloudProxyCluster, CLOUD_PROXY_SERVER DISALLOWED_CONSISTENCIES = [ ConsistencyLevel.ANY, diff --git a/tests/integration/cloud/test_cloud_schema.py b/tests/integration/cloud/test_cloud_schema.py index 0a70fd0c2a..1d52e8e428 100644 --- a/tests/integration/cloud/test_cloud_schema.py +++ b/tests/integration/cloud/test_cloud_schema.py @@ -25,7 +25,7 @@ from cassandra.query import SimpleStatement from tests.integration import execute_until_pass -from tests.integration.advanced.cloud import CloudProxyCluster +from tests.integration.cloud import CloudProxyCluster log = logging.getLogger(__name__) From 76fdd52ab6f08c2bb3d35e32f844b37540382779 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 4 Nov 2019 12:39:38 -0500 Subject: [PATCH 1690/2005] PYTHON-1157: upgrade notes for dse-driver (#22) Add upgrade notes for dse-driver users --- docs/api/cassandra/policies.rst | 6 +++ docs/upgrading.rst | 78 ++++++++++++++++++++++++++++++++- 2 files changed, 83 insertions(+), 1 deletion(-) diff --git a/docs/api/cassandra/policies.rst b/docs/api/cassandra/policies.rst index b662755caf..387b19ed95 100644 --- a/docs/api/cassandra/policies.rst +++ b/docs/api/cassandra/policies.rst @@ -32,6 +32,12 @@ Load Balancing .. automethod:: distance .. automethod:: make_query_plan +.. autoclass:: DefaultLoadBalancingPolicy + :members: + +.. autoclass:: DSELoadBalancingPolicy + :members: + Translating Server Node Addresses --------------------------------- diff --git a/docs/upgrading.rst b/docs/upgrading.rst index 9ab8eb31f4..a0342206b6 100644 --- a/docs/upgrading.rst +++ b/docs/upgrading.rst @@ -4,6 +4,82 @@ Upgrading .. toctree:: :maxdepth: 1 +Upgrading from dse-driver +------------------------- + +Since 3.21.0, cassandra-driver fully supports DataStax products. dse-driver and +dse-graph users should now migrate to cassandra-driver to benefit from latest bug fixes +and new features. The upgrade to this new unified driver version is straightforward +with no major API changes. + +Installation +^^^^^^^^^^^^ + +Only the `cassandra-driver` package should be installed. `dse-driver` and `dse-graph` +are not required anymore:: + + pip install cassandra-driver + +If you need the Graph *Fluent* API (features provided by dse-graph):: + + pip install cassandra-driver[graph] + +See :doc:`installation` for more details. + +Import from the cassandra module +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +There is no `dse` module, so you should import from the `cassandra` module. You +need to change only the first module of your import statements, not the submodules. + +.. code-block:: python + + from dse.cluster import Cluster, EXEC_PROFILE_GRAPH_DEFAULT + from dse.auth import PlainTextAuthProvider + from dse.policies import WhiteListRoundRobinPolicy + + # becomes + + from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_DEFAULT + from cassandra.auth import PlainTextAuthProvider + from cassandra.policies import WhiteListRoundRobinPolicy + +dse-graph +^^^^^^^^^ + +dse-graph features are now built-in in cassandra-driver. The only change you need +to do is your import statements: + +.. code-block:: python + + from dse_graph import .. + from dse_graph.query import .. + + # becomes + + from cassandra.datastax.graph.fluent import .. + from cassandra.datastax.graph.fluent.query import .. + +See :mod:`~.datastax.graph.fluent`. + +Session.execute and Session.execute_async API +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Although it is not common to use this API with positional arguments, it is +important to be aware that the `host` and `execute_as` parameters have had +their positional order swapped. This is only because `execute_as` was added +in dse-driver before `host`. + +See :meth:`.Session.execute`. + +Deprecations +^^^^^^^^^^^^ + +These changes are optional, but recommended: + +* Importing from `cassandra.graph` is deprecated. Consider importing from `cassandra.datastax.graph`. +* Use :class:`~.policies.DefaultLoadBalancingPolicy` instead of DSELoadBalancingPolicy. + Upgrading to 3.0 ---------------- Version 3.0 of the DataStax Python driver for Apache Cassandra @@ -46,7 +122,7 @@ materialize a list using the iterator: results = session.execute("SELECT * FROM system.local") row_list = list(results) -For backward compatability, :class:`~.ResultSet` supports indexing. When +For backward compatibility, :class:`~.ResultSet` supports indexing. When accessed at an index, a `~.ResultSet` object will materialize all its pages: .. code-block:: python From c9b567a04b85379c803c0c53f0909b00b062b39f Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 4 Nov 2019 16:41:12 -0500 Subject: [PATCH 1691/2005] Fix test_iterate_step test --- tests/integration/advanced/graph/fluent/test_graph.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/advanced/graph/fluent/test_graph.py b/tests/integration/advanced/graph/fluent/test_graph.py index b2fc5d62f6..15630aa9b4 100644 --- a/tests/integration/advanced/graph/fluent/test_graph.py +++ b/tests/integration/advanced/graph/fluent/test_graph.py @@ -654,7 +654,7 @@ def _validate_type(self, g, vertex): key = prop.key _validate_prop(key, value, self) - def test_iterate_step(self): + def _test_iterate_step(self, schema, graphson): """ Test to validate that the iterate() step work on all dse versions. @@ -664,7 +664,7 @@ def test_iterate_step(self): @test_category dse graph """ - g = self.fetch_traversal_source() + g = self.fetch_traversal_source(graphson) generate_classic(self.session) g.addV('person').property('name', 'Person1').iterate() From 64ae6760938f4ddac64bc6a527e0974553f6df57 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 5 Nov 2019 15:57:41 -0600 Subject: [PATCH 1692/2005] Changelog for twisted --- CHANGELOG.rst | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index a2166255fb..fd0806a7c3 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,12 @@ +3.21.0 +====== +Unreleased + +Features +-------- +* Allow passing ssl context for Twisted (PYTHON-1161) +* Cloud Twisted support (PYTHON-1163) + 3.20.0 ====== October 28, 2019 From 279ec278fac906d0c8e44b6311a2589e2e2bb5ba Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 6 Nov 2019 10:20:58 -0600 Subject: [PATCH 1693/2005] [PYTHON-1172] Fix version parsing when two dashes in server version --- CHANGELOG.rst | 8 ++++++++ cassandra/util.py | 2 +- tests/unit/test_util_types.py | 2 ++ 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index a2166255fb..d85aea760e 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,11 @@ +3.20.1 +====== +November 6, 2019 + +Bug Fixes +--------- +* ValueError: too many values to unpack (expected 2)" when there are two dashes in server version number (PYTHON-1172) + 3.20.0 ====== October 28, 2019 diff --git a/cassandra/util.py b/cassandra/util.py index 2b9e9161a3..efb3a9586c 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -1246,7 +1246,7 @@ class Version(object): def __init__(self, version): self._version = version if '-' in version: - version_without_prerelease, self.prerelease = version.split('-') + version_without_prerelease, self.prerelease = version.split('-', 1) else: version_without_prerelease = version parts = list(reversed(version_without_prerelease.split('.'))) diff --git a/tests/unit/test_util_types.py b/tests/unit/test_util_types.py index 2cff3362c0..8c60bfeba1 100644 --- a/tests/unit/test_util_types.py +++ b/tests/unit/test_util_types.py @@ -292,3 +292,5 @@ def test_version_compare(self): self.assertTrue(Version('4.0.0.build6-SNAPSHOT') > Version('4.0.0.build5-SNAPSHOT')) self.assertTrue(Version('4.0-SNAPSHOT2') > Version('4.0-SNAPSHOT1')) self.assertTrue(Version('4.0-SNAPSHOT2') > Version('4.0.0-SNAPSHOT1')) + + self.assertTrue(Version('4.0.0-alpha1-SNAPSHOT') > Version('4.0.0-SNAPSHOT')) From 1a9eacaabf888eea028112aa25d56694e376ff96 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 6 Nov 2019 14:02:03 -0500 Subject: [PATCH 1694/2005] release 3.20.1: version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 96cc537663..2a29332c05 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 20, 0) +__version_info__ = (3, 20, 1) __version__ = '.'.join(map(str, __version_info__)) From 1e5633f7167d495e567b3179f0332c8def732ec3 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 1 Nov 2019 09:49:30 -0500 Subject: [PATCH 1695/2005] [PYTHON-1140] Update tests/docs to remove read_repair_chance in c* 4.0+ --- CHANGELOG.rst | 1 + docs/api/cassandra/cqlengine/models.rst | 1 - tests/integration/cqlengine/management/test_management.py | 2 +- tests/integration/simulacron/test_empty_column.py | 4 ---- 4 files changed, 2 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index b818cd2bce..60bca7d34b 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,6 +4,7 @@ Unreleased Features -------- +* Remove *read_repair_chance table options (PYTHON-1140) Bug Fixes --------- diff --git a/docs/api/cassandra/cqlengine/models.rst b/docs/api/cassandra/cqlengine/models.rst index fbcec06e5e..60b1471184 100644 --- a/docs/api/cassandra/cqlengine/models.rst +++ b/docs/api/cassandra/cqlengine/models.rst @@ -64,7 +64,6 @@ Model __options__ = {'compaction': {'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb': '64', 'tombstone_threshold': '.2'}, - 'read_repair_chance': '0.5', 'comment': 'User data stored here'} user_id = columns.UUID(primary_key=True) diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index f255a8bb8e..7edb3e71dd 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -33,7 +33,7 @@ from tests.integration.cqlengine import DEFAULT_KEYSPACE -INCLUDE_REPAIR = not (DSE_VERSION and DSE_VERSION >= Version('6.0.7')) +INCLUDE_REPAIR = not CASSANDRA_VERSION >= Version('4-a') # This should cover DSE 6.0+ class KeyspaceManagementTest(BaseCassEngTestCase): diff --git a/tests/integration/simulacron/test_empty_column.py b/tests/integration/simulacron/test_empty_column.py index 5a6ac38b4d..bd7fe6ead0 100644 --- a/tests/integration/simulacron/test_empty_column.py +++ b/tests/integration/simulacron/test_empty_column.py @@ -124,8 +124,6 @@ def test_empty_columns_in_system_schema(self): "caching": {"keys": "ALL", "rows_per_partition": "NONE"}, "comment": "comment", "gc_grace_seconds": 60000, - "dclocal_read_repair_chance": 0.1, - "read_repair_chance": 0.1, "keyspace_name": "testks", "table_name": "testtable", "columnfamily_name": "testtable", # C* 2.2 @@ -140,8 +138,6 @@ def test_empty_columns_in_system_schema(self): "caching": "map", "comment": "ascii", "gc_grace_seconds": "int", - "dclocal_read_repair_chance": "double", - "read_repair_chance": "double", "keyspace_name": "ascii", "table_name": "ascii", "columnfamily_name": "ascii", From 59a71be4e2e237aa3f26227fd1500ef3a7d0f8c2 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 1 Nov 2019 11:19:27 -0500 Subject: [PATCH 1696/2005] [PYTHON-1048] Add additional_write_policy and read_repair options to v4 metadata --- CHANGELOG.rst | 1 + cassandra/metadata.py | 24 ++++++++++++++++-------- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 60bca7d34b..b9f451915e 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,6 +4,7 @@ Unreleased Features -------- +* Add additional_write_policy and read_repair to system schema parsing (PYTHON-1048) * Remove *read_repair_chance table options (PYTHON-1140) Bug Fixes diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 453cc064ab..aade3996ba 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2570,14 +2570,22 @@ class SchemaParserDSE60(SchemaParserV3): class SchemaParserV4(SchemaParserV3): - recognized_table_options = tuple( - opt for opt in - SchemaParserV3.recognized_table_options - if opt not in ( - # removed in V4: CASSANDRA-13910 - 'dclocal_read_repair_chance', 'read_repair_chance' - ) - ) + recognized_table_options = ( + 'additional_write_policy', + 'bloom_filter_fp_chance', + 'caching', + 'cdc', + 'comment', + 'compaction', + 'compression', + 'crc_check_chance', + 'default_time_to_live', + 'gc_grace_seconds', + 'max_index_interval', + 'memtable_flush_period_in_ms', + 'min_index_interval', + 'read_repair', + 'speculative_retry') _SELECT_VIRTUAL_KEYSPACES = 'SELECT * from system_virtual_schema.keyspaces' _SELECT_VIRTUAL_TABLES = 'SELECT * from system_virtual_schema.tables' From 9a1c04d1c2f68bf172cbe5b3b3d3e992eba92d00 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 11 Nov 2019 12:05:22 -0500 Subject: [PATCH 1697/2005] PYTHON-1129: Ability to execute Fluent Graph queries asynchronously (#24) Ability to execute Fluent Graph queries asynchronously --- CHANGELOG.rst | 3 +- cassandra/datastax/graph/fluent/__init__.py | 38 +++++++++- docs/graph_fluent.rst | 35 ++++++++- .../advanced/graph/fluent/test_graph.py | 72 ++++++++++++++++++- 4 files changed, 139 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 900dff0ff7..c0ce4b50b0 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -7,7 +7,8 @@ Features * Allow passing ssl context for Twisted (PYTHON-1161) * Cloud Twisted support (PYTHON-1163) * Add additional_write_policy and read_repair to system schema parsing (PYTHON-1048) -* Remove *read_repair_chance table options (PYTHON-1140) +* Remove *read_repair_chance* table options (PYTHON-1140) +* [GRAPH] Ability to execute Fluent Graph queries asynchronously (PYTHON-1129) Bug Fixes --------- diff --git a/cassandra/datastax/graph/fluent/__init__.py b/cassandra/datastax/graph/fluent/__init__.py index 193272d986..5365a59a06 100644 --- a/cassandra/datastax/graph/fluent/__init__.py +++ b/cassandra/datastax/graph/fluent/__init__.py @@ -15,6 +15,8 @@ import logging import copy +from concurrent.futures import Future + HAVE_GREMLIN = False try: import gremlin_python @@ -117,8 +119,7 @@ def __init__(self, session, graph_name=None, execution_profile=EXEC_PROFILE_GRAP self.graph_name = graph_name self.execution_profile = execution_profile - def submit(self, bytecode): - + def _prepare_query(self, bytecode): query = DseGraph.query_from_traversal(bytecode) ep = self.session.execution_profile_clone_update(self.execution_profile, row_factory=graph_traversal_row_factory) @@ -126,13 +127,44 @@ def submit(self, bytecode): graph_options.graph_language = DseGraph.DSE_GRAPH_QUERY_LANGUAGE if self.graph_name: graph_options.graph_name = self.graph_name - ep.graph_options = graph_options + return query, ep + + @staticmethod + def _handle_query_results(result_set, gremlin_future): + try: + traversers = [Traverser(t) for t in result_set] + gremlin_future.set_result( + RemoteTraversal(iter(traversers), TraversalSideEffects()) + ) + except Exception as e: + gremlin_future.set_exception(e) + + @staticmethod + def _handle_query_error(response, gremlin_future): + gremlin_future.set_exception(response) + + def submit(self, bytecode): + # the only reason I don't use submitAsync here + # is to avoid an unuseful future wrap + query, ep = self._prepare_query(bytecode) + traversers = self.session.execute_graph(query, execution_profile=ep) traversers = [Traverser(t) for t in traversers] return RemoteTraversal(iter(traversers), TraversalSideEffects()) + def submitAsync(self, bytecode): + query, ep = self._prepare_query(bytecode) + + # to be compatible with gremlinpython, we need to return a concurrent.futures.Future + gremlin_future = Future() + response_future = self.session.execute_graph_async(query, execution_profile=ep) + response_future.add_callback(self._handle_query_results, gremlin_future) + response_future.add_errback(self._handle_query_error, gremlin_future) + + return gremlin_future + def __str__(self): return "".format(self.graph_name) diff --git a/docs/graph_fluent.rst b/docs/graph_fluent.rst index da5eee8db7..ecb79ab647 100644 --- a/docs/graph_fluent.rst +++ b/docs/graph_fluent.rst @@ -109,7 +109,8 @@ that session. We call this *implicit execution* because the `Session` is not explicitly involved. Everything is managed internally by TinkerPop while traversing the graph and the results are TinkerPop types as well. -For example: +Synchronous Example +------------------- .. code-block:: python @@ -117,8 +118,36 @@ For example: g = DseGraph.traversal_source(session) # implicitly execute the query by traversing the TraversalSource g.addV('genre').property('genreId', 1).property('name', 'Action').next() - # view the results of the execution - pprint(g.V().toList()) + + # blocks until the query is completed and return the results + results = g.V().toList() + pprint(results) + +Asynchronous Exemple +-------------------- + +You can execute a graph traversal query asynchronously by using `.promise()`. It returns a +python `Future `_. + +.. code-block:: python + + # Build the GraphTraversalSource + g = DseGraph.traversal_source(session) + # implicitly execute the query by traversing the TraversalSource + g.addV('genre').property('genreId', 1).property('name', 'Action').next() # not async + + # get a future and wait + future = g.V().promise() + results = list(future.result()) + pprint(results) + + # or set a callback + def cb(f): + results = list(f.result()) + pprint(results) + future = g.V().promise() + future.add_done_callback(cb) + # do other stuff... Specify the Execution Profile explicitly ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/tests/integration/advanced/graph/fluent/test_graph.py b/tests/integration/advanced/graph/fluent/test_graph.py index 623e275352..67ac17e51d 100644 --- a/tests/integration/advanced/graph/fluent/test_graph.py +++ b/tests/integration/advanced/graph/fluent/test_graph.py @@ -14,6 +14,9 @@ import sys + +from concurrent.futures import Future + from cassandra.datastax.graph.fluent import DseGraph from gremlin_python.process.graph_traversal import GraphTraversal, GraphTraversalSource from gremlin_python.process.traversal import P @@ -35,6 +38,7 @@ if six.PY3: import ipaddress + def setup_module(): if DSE_VERSION: dse_options = {'graph': {'realtime_evaluation_timeout_in_seconds': 60}} @@ -398,13 +402,13 @@ def _check_equality(self, g, original, read_value): @requiredse -class ImplicitExecutionTest(AbstractTraversalTest, BasicGraphUnitTestCase): +class BaseImplicitExecutionTest(BasicGraphUnitTestCase): """ This test class will execute all tests of the AbstractTraversalTestClass using implicit execution This all traversal will be run directly using toList() """ def setUp(self): - super(ImplicitExecutionTest, self).setUp() + super(BaseImplicitExecutionTest, self).setUp() if DSE_VERSION: self.ep = DseGraph().create_execution_profile(self.graph_name) self.cluster.add_execution_profile(self.graph_name, self.ep) @@ -487,6 +491,70 @@ def _validate_type(self, g, vertex): _validate_prop(key, value, self) +@requiredse +class ImplicitExecutionTest(BaseImplicitExecutionTest, AbstractTraversalTest): + pass + + +@requiredse +class ImplicitAsyncExecutionTest(BaseImplicitExecutionTest): + """ + Test to validate that the traversal async execution works properly. + + @since 3.21.0 + @jira_ticket PYTHON-1129 + + @test_category dse graph + """ + + def _validate_results(self, results): + results = list(results) + self.assertEqual(len(results), 2) + self.assertIn('vadas', results) + self.assertIn('josh', results) + + def test_promise(self): + generate_classic(self.session) + g = self.fetch_traversal_source() + traversal_future = g.V().has('name', 'marko').out('knows').values('name').promise() + self._validate_results(traversal_future.result()) + + def test_promise_error_is_propagated(self): + generate_classic(self.session) + g = DseGraph().traversal_source(self.session, 'wrong_graph', execution_profile=self.ep) + traversal_future = g.V().has('name', 'marko').out('knows').values('name').promise() + with self.assertRaises(Exception): + traversal_future.result() + + def test_promise_callback(self): + generate_classic(self.session) + g = self.fetch_traversal_source() + future = Future() + + def cb(f): + future.set_result(f.result()) + + traversal_future = g.V().has('name', 'marko').out('knows').values('name').promise() + traversal_future.add_done_callback(cb) + self._validate_results(future.result()) + + def test_promise_callback_on_error(self): + generate_classic(self.session) + g = DseGraph().traversal_source(self.session, 'wrong_graph', execution_profile=self.ep) + future = Future() + + def cb(f): + try: + f.result() + except Exception as e: + future.set_exception(e) + + traversal_future = g.V().has('name', 'marko').out('knows').values('name').promise() + traversal_future.add_done_callback(cb) + with self.assertRaises(Exception): + future.result() + + @requiredse class ExplicitExecutionBase(BasicGraphUnitTestCase): def setUp(self): From 5f13adf6dadf0d1f859a8bae3704aceb70f9c3c8 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 11 Nov 2019 20:20:01 -0600 Subject: [PATCH 1698/2005] [PYTHON-1162] ssl context and cloud support for Eventlet (#1051) --- CHANGELOG.rst | 1 + build.yaml | 1 + cassandra/cluster.py | 32 ++++++++++++---------- cassandra/connection.py | 16 ++++++++--- cassandra/datastax/cloud/__init__.py | 10 +++++-- cassandra/io/eventletreactor.py | 41 +++++++++++++++++++++++++++- docs/cloud.rst | 3 +- docs/security.rst | 12 +++++--- tests/integration/long/test_ssl.py | 2 +- 9 files changed, 90 insertions(+), 28 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f704331105..f4725d5996 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,6 +5,7 @@ Unreleased Features -------- * Allow passing ssl context for Twisted (PYTHON-1161) +* ssl context and cloud support for Eventlet (PYTHON-1162) * Cloud Twisted support (PYTHON-1163) 3.20.1 diff --git a/build.yaml b/build.yaml index 29a0f0395c..158b4ea35d 100644 --- a/build.yaml +++ b/build.yaml @@ -227,6 +227,7 @@ build: "tests/integration/standard/test_metrics.py" "tests/integration/standard/test_query.py" "tests/integration/simulacron/test_endpoint.py" + "tests/integration/long/test_ssl.py" ) EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml ${EVENT_LOOP_TESTS[@]} || true exit 0 diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 28cfb14c9f..56abaed8e2 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -37,16 +37,6 @@ import weakref from weakref import WeakValueDictionary -try: - from cassandra.io.twistedreactor import TwistedConnection -except ImportError: - TwistedConnection = None - -try: - from weakref import WeakSet -except ImportError: - from cassandra.util import WeakSet # NOQA - from cassandra import (ConsistencyLevel, AuthenticationFailed, OperationTimedOut, UnsupportedOperation, SchemaTargetType, DriverException, ProtocolVersion, @@ -85,6 +75,21 @@ from cassandra.compat import Mapping from cassandra.datastax import cloud as dscloud +try: + from cassandra.io.twistedreactor import TwistedConnection +except ImportError: + TwistedConnection = None + +try: + from cassandra.io.eventletreactor import EventletConnection +except ImportError: + EventletConnection = None + +try: + from weakref import WeakSet +except ImportError: + from cassandra.util import WeakSet # NOQA + def _is_eventlet_monkey_patched(): if 'eventlet.patcher' not in sys.modules: @@ -920,10 +925,9 @@ def __init__(self, raise ValueError("contact_points, endpoint_factory, ssl_context, and ssl_options " "cannot be specified with a cloud configuration") - cloud_config = dscloud.get_cloud_config( - cloud, - create_pyopenssl_context=self.connection_class is TwistedConnection - ) + uses_twisted = TwistedConnection and issubclass(self.connection_class, TwistedConnection) + uses_eventlet = EventletConnection and issubclass(self.connection_class, EventletConnection) + cloud_config = dscloud.get_cloud_config(cloud, create_pyopenssl_context=uses_twisted or uses_eventlet) ssl_context = cloud_config.ssl_context ssl_options = {'check_hostname': True} diff --git a/cassandra/connection.py b/cassandra/connection.py index ba08ae2960..0c0259f1da 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -615,6 +615,15 @@ def factory(cls, endpoint, timeout, *args, **kwargs): else: return conn + def _wrap_socket_from_context(self): + self._socket = self.ssl_context.wrap_socket(self._socket, **(self.ssl_options or {})) + + def _initiate_connection(self, sockaddr): + self._socket.connect(sockaddr) + + def _match_hostname(self): + ssl.match_hostname(self._socket.getpeercert(), self.endpoint.address) + def _get_socket_addresses(self): address, port = self.endpoint.resolve() @@ -634,17 +643,16 @@ def _connect_socket(self): try: self._socket = self._socket_impl.socket(af, socktype, proto) if self.ssl_context: - self._socket = self.ssl_context.wrap_socket(self._socket, - **(self.ssl_options or {})) + self._wrap_socket_from_context() elif self.ssl_options: if not self._ssl_impl: raise RuntimeError("This version of Python was not compiled with SSL support") self._socket = self._ssl_impl.wrap_socket(self._socket, **self.ssl_options) self._socket.settimeout(self.connect_timeout) - self._socket.connect(sockaddr) + self._initiate_connection(sockaddr) self._socket.settimeout(None) if self._check_hostname: - ssl.match_hostname(self._socket.getpeercert(), self.endpoint.address) + self._match_hostname() sockerr = None break except socket.error as err: diff --git a/cassandra/datastax/cloud/__init__.py b/cassandra/datastax/cloud/__init__.py index ff3b047f0e..d16cff0855 100644 --- a/cassandra/datastax/cloud/__init__.py +++ b/cassandra/datastax/cloud/__init__.py @@ -15,8 +15,10 @@ import os import logging import json +import sys import tempfile import shutil +import six from six.moves.urllib.request import urlopen _HAS_SSL = True @@ -177,8 +179,12 @@ def _ssl_context_from_cert(ca_cert_location, cert_location, key_location): def _pyopenssl_context_from_cert(ca_cert_location, cert_location, key_location): try: from OpenSSL import SSL - except ImportError: - return None + except ImportError as e: + six.reraise( + ImportError, + ImportError("PyOpenSSL must be installed to connect to Apollo with the Eventlet or Twisted event loops"), + sys.exc_info()[2] + ) ssl_context = SSL.Context(SSL.TLSv1_METHOD) ssl_context.set_verify(SSL.VERIFY_PEER, callback=lambda _1, _2, _3, _4, ok: ok) ssl_context.use_certificate_file(cert_location) diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index 2b16ef69af..162661f468 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -15,7 +15,6 @@ # Originally derived from MagnetoDB source: # https://github.com/stackforge/magnetodb/blob/2015.1.0b1/magnetodb/common/cassandra/io/eventletreactor.py - import eventlet from eventlet.green import socket from eventlet.queue import Queue @@ -27,11 +26,25 @@ from six.moves import xrange from cassandra.connection import Connection, ConnectionShutdown, Timer, TimerManager +try: + from eventlet.green.OpenSSL import SSL + _PYOPENSSL = True +except ImportError as e: + _PYOPENSSL = False + no_pyopenssl_error = e log = logging.getLogger(__name__) +def _check_pyopenssl(): + if not _PYOPENSSL: + raise ImportError( + "{}, pyOpenSSL must be installed to enable " + "SSL support with the Eventlet event loop".format(str(no_pyopenssl_error)) + ) + + class EventletConnection(Connection): """ An implementation of :class:`.Connection` that utilizes ``eventlet``. @@ -81,6 +94,7 @@ def service_timeouts(cls): def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) + self.uses_legacy_ssl_options = self.ssl_options and not self.ssl_context self._write_queue = Queue() self._connect_socket() @@ -89,6 +103,31 @@ def __init__(self, *args, **kwargs): self._write_watcher = eventlet.spawn(lambda: self.handle_write()) self._send_options_message() + def _wrap_socket_from_context(self): + _check_pyopenssl() + self._socket = SSL.Connection(self.ssl_context, self._socket) + self._socket.set_connect_state() + if self.ssl_options and 'server_hostname' in self.ssl_options: + # This is necessary for SNI + self._socket.set_tlsext_host_name(self.ssl_options['server_hostname'].encode('ascii')) + + def _initiate_connection(self, sockaddr): + if self.uses_legacy_ssl_options: + super(EventletConnection, self)._initiate_connection(sockaddr) + else: + self._socket.connect(sockaddr) + if self.ssl_context or self.ssl_options: + self._socket.do_handshake() + + def _match_hostname(self): + if self.uses_legacy_ssl_options: + super(EventletConnection, self)._match_hostname() + else: + cert_name = self._socket.get_peer_certificate().get_subject().commonName + if cert_name != self.endpoint.address: + raise Exception("Hostname verification failed! Certificate name '{}' " + "doesn't endpoint '{}'".format(cert_name, self.endpoint.address)) + def close(self): with self.lock: if self.is_closed: diff --git a/docs/cloud.rst b/docs/cloud.rst index 0da671151e..7ddb763a42 100644 --- a/docs/cloud.rst +++ b/docs/cloud.rst @@ -34,5 +34,4 @@ Limitations Event loops ^^^^^^^^^^^ -Evenlet isn't supported yet. Eventlet still uses the old way to configure -SSL (ssl_options), which is not compatible with the secure connect bundle provided by Apollo. +Evenlet isn't yet supported for python 3.7+ due to an `issue in Eventlet `_. diff --git a/docs/security.rst b/docs/security.rst index 1093c52df9..424afdea27 100644 --- a/docs/security.rst +++ b/docs/security.rst @@ -78,10 +78,10 @@ It might be also useful to learn about the different levels of identity verifica * `Using SSL in DSE drivers `_ -SSL with Twisted -^^^^^^^^^^^^^^^^ -Twisted uses an alternative SSL implementation called pyOpenSSL, so if your `Cluster`'s connection class is -:class:`~cassandra.io.twistedreactor.TwistedConnection`, you must pass a +SSL with Twisted or Eventlet +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Twisted and Eventlet both use an alternative SSL implementation called pyOpenSSL, so if your `Cluster`'s connection class is +:class:`~cassandra.io.twistedreactor.TwistedConnection` or :class:`~cassandra.io.eventletreactor.EventletConnection`, you must pass a `pyOpenSSL context `_ instead. An example is provided in these docs, and more details can be found in the `documentation `_. @@ -270,6 +270,10 @@ for more details about ``SSLContext`` configuration. ) session = cluster.connect() + +Connecting using Eventlet would look similar except instead of importing and using ``TwistedConnection``, you would +import and use ``EventletConnection``, including the appropriate monkey-patching. + Versions 3.16.0 and lower ^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index da75b76693..d7788f0f8a 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -42,7 +42,7 @@ DRIVER_CERTFILE = os.path.abspath("tests/integration/long/ssl/driver.pem") DRIVER_CERTFILE_BAD = os.path.abspath("tests/integration/long/ssl/python_driver_bad.pem") -USES_PYOPENSSL = "twisted" in EVENT_LOOP_MANAGER +USES_PYOPENSSL = "twisted" in EVENT_LOOP_MANAGER or "eventlet" in EVENT_LOOP_MANAGER if "twisted" in EVENT_LOOP_MANAGER: import OpenSSL ssl_version = OpenSSL.SSL.TLSv1_METHOD From e2c5873b629ea1ec07f735a18431e85b66568011 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 8 Nov 2019 15:36:23 -0600 Subject: [PATCH 1699/2005] [PYTHON-1174] Allow server versions as long as we recognize the major version --- CHANGELOG.rst | 3 ++- cassandra/util.py | 34 +++++++++++++++++++++++----------- tests/unit/test_util_types.py | 14 +++++--------- 3 files changed, 30 insertions(+), 21 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 534303b4df..1d419736a9 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -8,7 +8,8 @@ Features * ssl context and cloud support for Eventlet (PYTHON-1162) * Cloud Twisted support (PYTHON-1163) * Add additional_write_policy and read_repair to system schema parsing (PYTHON-1048) -* Remove *read_repair_chance* table options (PYTHON-1140) +* Remove *read_repair_chance table options (PYTHON-1140) +* Flexible version parsing (PYTHON-1174) * [GRAPH] Ability to execute Fluent Graph queries asynchronously (PYTHON-1129) Bug Fixes diff --git a/cassandra/util.py b/cassandra/util.py index 9cc0a54702..0651591203 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -1929,19 +1929,31 @@ def __init__(self, version): version_without_prerelease = version parts = list(reversed(version_without_prerelease.split('.'))) if len(parts) > 4: - raise ValueError("Invalid version: {}. Only 4 " - "components plus prerelease are supported".format(version)) + prerelease_string = "-{}".format(self.prerelease) if self.prerelease else "" + log.warning("Unrecognized version: {}. Only 4 components plus prerelease are supported. " + "Assuming version as {}{}".format(version, '.'.join(parts[:-5:-1]), prerelease_string)) - self.major = int(parts.pop()) - self.minor = int(parts.pop()) if parts else 0 - self.patch = int(parts.pop()) if parts else 0 + try: + self.major = int(parts.pop()) + except ValueError: + six.reraise( + ValueError, + ValueError("Couldn't parse version {}. Version should start with a number".format(version)), + sys.exc_info()[2] + ) + try: + self.minor = int(parts.pop()) if parts else 0 + self.patch = int(parts.pop()) if parts else 0 - if parts: # we have a build version - build = parts.pop() - try: - self.build = int(build) - except ValueError: - self.build = build + if parts: # we have a build version + build = parts.pop() + try: + self.build = int(build) + except ValueError: + self.build = build + except ValueError: + assumed_version = "{}.{}.{}.{}-{}".format(self.major, self.minor, self.patch, self.build, self.prerelease) + log.warning("Unrecognized version {}. Assuming version as {}".format(version, assumed_version)) def __hash__(self): return self._version diff --git a/tests/unit/test_util_types.py b/tests/unit/test_util_types.py index 8c60bfeba1..b7dc837249 100644 --- a/tests/unit/test_util_types.py +++ b/tests/unit/test_util_types.py @@ -217,6 +217,11 @@ def test_version_parsing(self): ('3.55.1.build12', (3, 55, 1, 'build12', 0)), ('3.55.1.20190429-TEST', (3, 55, 1, 20190429, 'TEST')), ('4.0-SNAPSHOT', (4, 0, 0, 0, 'SNAPSHOT')), + ('1.0.5.4.3', (1, 0, 5, 4, 0)), + ('1-SNAPSHOT', (1, 0, 0, 0, 'SNAPSHOT')), + ('4.0.1.2.3.4.5-ABC-123-SNAP-TEST.blah', (4, 0, 1, 2, 'ABC-123-SNAP-TEST.blah')), + ('2.1.hello', (2, 1, 0, 0, 0)), + ('2.test.1', (2, 0, 0, 0, 0)), ] for str_version, expected_result in versions: @@ -229,18 +234,9 @@ def test_version_parsing(self): self.assertEqual(v.prerelease, expected_result[4]) # not supported version formats - with self.assertRaises(ValueError): - Version('2.1.hello') - - with self.assertRaises(ValueError): - Version('2.test.1') - with self.assertRaises(ValueError): Version('test.1.0') - with self.assertRaises(ValueError): - Version('1.0.0.0.1') - def test_version_compare(self): # just tests a bunch of versions From 90a231f3c9bcf680b41b0ba548eb2ba9ac027175 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 13 Nov 2019 18:45:27 -0500 Subject: [PATCH 1700/2005] Support NULL in collection deserializer --- CHANGELOG.rst | 1 + cassandra/cqltypes.py | 30 +++++++++++++++++++--------- tests/unit/test_types.py | 43 +++++++++++++++++++++++++++++++++++++++- 3 files changed, 64 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 534303b4df..e6cf44fdee 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -10,6 +10,7 @@ Features * Add additional_write_policy and read_repair to system schema parsing (PYTHON-1048) * Remove *read_repair_chance* table options (PYTHON-1140) * [GRAPH] Ability to execute Fluent Graph queries asynchronously (PYTHON-1129) +* Support NULL in collection deserializer (PYTHON-1123) Bug Fixes --------- diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index a6c78c55e1..7946a63af8 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -813,9 +813,12 @@ def deserialize_safe(cls, byts, protocol_version): for _ in range(numelements): itemlen = unpack(byts[p:p + length]) p += length - item = byts[p:p + itemlen] - p += itemlen - result.append(subtype.from_binary(item, inner_proto)) + if itemlen < 0: + result.append(None) + else: + item = byts[p:p + itemlen] + p += itemlen + result.append(subtype.from_binary(item, inner_proto)) return cls.adapter(result) @classmethod @@ -867,14 +870,23 @@ def deserialize_safe(cls, byts, protocol_version): for _ in range(numelements): key_len = unpack(byts[p:p + length]) p += length - keybytes = byts[p:p + key_len] - p += key_len + if key_len < 0: + keybytes = None + key = None + else: + keybytes = byts[p:p + key_len] + p += key_len + key = key_type.from_binary(keybytes, inner_proto) + val_len = unpack(byts[p:p + length]) p += length - valbytes = byts[p:p + val_len] - p += val_len - key = key_type.from_binary(keybytes, inner_proto) - val = value_type.from_binary(valbytes, inner_proto) + if val_len < 0: + val = None + else: + valbytes = byts[p:p + val_len] + p += val_len + val = value_type.from_binary(valbytes, inner_proto) + themap._insert_unchecked(key, keybytes, val) return themap diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index e638bd48c9..562fd2c899 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -29,7 +29,8 @@ CassandraType, DateRangeType, DateType, DecimalType, EmptyValue, LongType, SetType, UTF8Type, cql_typename, int8_pack, int64_pack, lookup_casstype, - lookup_casstype_simple, parse_casstype_args + lookup_casstype_simple, parse_casstype_args, + int32_pack, Int32Type, ListType, MapType ) from cassandra.encoder import cql_quote from cassandra.pool import Host @@ -226,6 +227,46 @@ def test_datetype(self): expected = 2177403010.123 self.assertEqual(DateType.deserialize(int64_pack(int(1000 * expected)), 0), datetime.datetime(2038, 12, 31, 10, 10, 10, 123000)) + def test_collection_null_support(self): + """ + Test that null values in collection are decoded properly. + + @jira_ticket PYTHON-1123 + """ + int_list = ListType.apply_parameters([Int32Type]) + value = ( + int32_pack(2) + # num items + int32_pack(-1) + # size of item1 + int32_pack(4) + # size of item2 + int32_pack(42) # item2 + ) + self.assertEqual( + [None, 42], + int_list.deserialize(value, 3) + ) + + set_list = SetType.apply_parameters([Int32Type]) + self.assertEqual( + {None, 42}, + set(set_list.deserialize(value, 3)) + ) + + value = ( + int32_pack(2) + # num items + int32_pack(4) + # key size of item1 + int32_pack(42) + # key item1 + int32_pack(-1) + # value size of item1 + int32_pack(-1) + # key size of item2 + int32_pack(4) + # value size of item2 + int32_pack(42) # value of item2 + ) + + map_list = MapType.apply_parameters([Int32Type, Int32Type]) + self.assertEqual( + [(42, None), (None, 42)], + map_list.deserialize(value, 3)._items # OrderedMapSerializedKey + ) + def test_write_read_string(self): with tempfile.TemporaryFile() as f: value = u'test' From f0883532a18a60d571f3bbda0bdff02cc9a0e9de Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 8 Nov 2019 09:25:19 -0600 Subject: [PATCH 1701/2005] [PYTHON-1124] Fail fast if prepared id mismatch. See CASSANDRA-15252 --- CHANGELOG.rst | 1 + cassandra/cluster.py | 10 ++++++++++ .../standard/test_prepared_statements.py | 18 +++++++++++++++++- tests/unit/test_response_future.py | 3 +++ 4 files changed, 31 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 66b9cd2968..fd5e2c6e4f 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -8,6 +8,7 @@ Features * ssl context and cloud support for Eventlet (PYTHON-1162) * Cloud Twisted support (PYTHON-1163) * Add additional_write_policy and read_repair to system schema parsing (PYTHON-1048) +* Handle prepared id mismatch when repreparing on the fly (PYTHON-1124) * Remove *read_repair_chance table options (PYTHON-1140) * Flexible version parsing (PYTHON-1174) * [GRAPH] Ability to execute Fluent Graph queries asynchronously (PYTHON-1129) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 363272e307..18e68094a0 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -19,6 +19,7 @@ from __future__ import absolute_import import atexit +from binascii import hexlify from collections import defaultdict from concurrent.futures import ThreadPoolExecutor, FIRST_COMPLETED, wait as wait_futures from copy import copy @@ -4559,6 +4560,15 @@ def _execute_after_prepare(self, host, connection, pool, response): if isinstance(response, ResultMessage): if response.kind == RESULT_KIND_PREPARED: if self.prepared_statement: + if self.prepared_statement.query_id != response.query_id: + self._set_final_exception(DriverException( + "ID mismatch while trying to reprepare (expected {expected}, got {got}). " + "This prepared statement won't work anymore. " + "This usually happens when you run a 'USE...' " + "query after the statement was prepared.".format( + expected=hexlify(self.prepared_statement.query_id), got=hexlify(response.query_id) + ) + )) self.prepared_statement.result_metadata = response.column_metadata new_metadata_id = response.result_metadata_id if new_metadata_id is not None: diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index dffa906967..d314846e51 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -19,7 +19,7 @@ import unittest2 as unittest except ImportError: import unittest # noqa -from cassandra import InvalidRequest +from cassandra import InvalidRequest, DriverException from cassandra import ConsistencyLevel, ProtocolVersion from cassandra.cluster import Cluster @@ -397,6 +397,22 @@ def test_raise_error_on_prepared_statement_execution_dropped_table(self): with self.assertRaises(InvalidRequest): self.session.execute(prepared, [0]) + def test_fail_if_different_query_id_on_reprepare(self): + """ PYTHON-1124 and CASSANDRA-15252 """ + keyspace = "test_fail_if_different_query_id_on_reprepare" + self.session.execute( + "CREATE KEYSPACE IF NOT EXISTS {} WITH replication = " + "{{'class': 'SimpleStrategy', 'replication_factor': 1}}".format(keyspace) + ) + self.session.execute("CREATE TABLE IF NOT EXISTS {}.foo(k int PRIMARY KEY)".format(keyspace)) + prepared = self.session.prepare("SELECT * FROM {}.foo WHERE k=?".format(keyspace)) + self.session.execute("DROP TABLE {}.foo".format(keyspace)) + self.session.execute("CREATE TABLE {}.foo(k int PRIMARY KEY)".format(keyspace)) + self.session.execute("USE {}".format(keyspace)) + with self.assertRaises(DriverException) as e: + self.session.execute(prepared, [0]) + self.assertIn("ID mismatch", str(e.exception)) + @greaterthanorequalcass40 class PreparedStatementInvalidationTest(BasicSharedKeyspaceUnitTestCase): diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index a0db0f6341..d6568012f4 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -563,6 +563,7 @@ def test_prepared_query_not_found_bad_keyspace(self): self.assertRaises(ValueError, rf.result) def test_repeat_orig_query_after_succesful_reprepare(self): + query_id = b'abc123' # Just a random binary string so we don't hit id mismatch exception session = self.make_session() rf = self.make_response_future(session) @@ -570,12 +571,14 @@ def test_repeat_orig_query_after_succesful_reprepare(self): kind=RESULT_KIND_PREPARED, result_metadata_id='foo') response.results = (None, None, None, None, None) + response.query_id = query_id rf._query = Mock(return_value=True) rf._execute_after_prepare('host', None, None, response) rf._query.assert_called_once_with('host') rf.prepared_statement = Mock() + rf.prepared_statement.query_id = query_id rf._query = Mock(return_value=True) rf._execute_after_prepare('host', None, None, response) rf._query.assert_called_once_with('host') From 43019911445171bdf5b05054da279aa77960afc8 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 14 Nov 2019 13:47:56 -0500 Subject: [PATCH 1702/2005] Avoid warnings about unspecified load balancing policy when connecting to a cloud cluster --- CHANGELOG.rst | 3 ++- cassandra/cluster.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index fd5e2c6e4f..2e55328b7a 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -11,8 +11,9 @@ Features * Handle prepared id mismatch when repreparing on the fly (PYTHON-1124) * Remove *read_repair_chance table options (PYTHON-1140) * Flexible version parsing (PYTHON-1174) -* [GRAPH] Ability to execute Fluent Graph queries asynchronously (PYTHON-1129) * Support NULL in collection deserializer (PYTHON-1123) +* Avoid warnings about unspecified load balancing policy when connecting to a cloud cluster (PYTHON-1177) +* [GRAPH] Ability to execute Fluent Graph queries asynchronously (PYTHON-1129) Bug Fixes --------- diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 18e68094a0..8ad7b69277 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1106,6 +1106,7 @@ def __init__(self, self.connection_class = connection_class if cloud is not None: + self.cloud = cloud if contact_points is not _NOT_SET or endpoint_factory or ssl_context or ssl_options: raise ValueError("contact_points, endpoint_factory, ssl_context, and ssl_options " "cannot be specified with a cloud configuration") @@ -1242,7 +1243,7 @@ def __init__(self, profiles.setdefault(EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT, GraphAnalyticsExecutionProfile(load_balancing_policy=lbp)) - if self._contact_points_explicit: + if self._contact_points_explicit and not self.cloud: # avoid this warning for cloud users. if self._config_mode is _ConfigMode.PROFILES: default_lbp_profiles = self.profile_manager._profiles_without_explicit_lbps() if default_lbp_profiles: From 83066e2bce51df4e1cde1738b4a52a10415f93f0 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 15 Nov 2019 10:40:35 -0500 Subject: [PATCH 1703/2005] changelog: re-raising the CQLEngineException will fail on Python 3 --- CHANGELOG.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f4725d5996..c67647bc20 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -8,6 +8,10 @@ Features * ssl context and cloud support for Eventlet (PYTHON-1162) * Cloud Twisted support (PYTHON-1163) +Bug Fixes +--------- +* re-raising the CQLEngineException will fail on Python 3 (PYTHON-1166) + 3.20.1 ====== November 6, 2019 From 86c6e3d208b24a009ba372086558dd3597dcafcc Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 15 Nov 2019 10:42:38 -0500 Subject: [PATCH 1704/2005] changelog python-1166 --- CHANGELOG.rst | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 6a3efe33d6..e317e7f03c 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -16,6 +16,7 @@ Features Bug Fixes --------- +* re-raising the CQLEngineException will fail on Python 3 (PYTHON-1166) Others ------ @@ -80,10 +81,6 @@ Other * Write documentation examples for DSE 2.0 features (PYTHON-732) * DSE_V1 protocol should not include all of protocol v5 (PYTHON-694) -Bug Fixes ---------- -* re-raising the CQLEngineException will fail on Python 3 (PYTHON-1166) - 3.20.1 ====== November 6, 2019 From 628a85aba5d5f911f0e2b25638ac924bde9119ba Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 19 Nov 2019 16:13:08 -0500 Subject: [PATCH 1705/2005] release 3.20.2: changelog & version --- CHANGELOG.rst | 8 ++++++++ cassandra/__init__.py | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index d85aea760e..0ac2aebb7c 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,11 @@ +3.20.2 +====== +November 19, 2019 + +Bug Fixes +--------- +* Fix import error for old python installation without SSLContext (PYTHON-1183) + 3.20.1 ====== November 6, 2019 diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 2a29332c05..38aef2f5d8 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 20, 1) +__version_info__ = (3, 20, 2) __version__ = '.'.join(map(str, __version_info__)) From 401e40fb4d4c6efbebadfc6208f09e2e4354905d Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 18 Nov 2019 11:20:41 -0500 Subject: [PATCH 1706/2005] Fix asyncio message chunks can be processed discontinuously --- CHANGELOG.rst | 1 + build.yaml | 4 ++-- cassandra/io/asyncioreactor.py | 20 ++++++++++++++----- tests/integration/__init__.py | 1 + .../integration/simulacron/test_connection.py | 3 ++- 5 files changed, 21 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index b291a0694b..2fa9973cca 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -18,6 +18,7 @@ Features Bug Fixes --------- * re-raising the CQLEngineException will fail on Python 3 (PYTHON-1166) +* asyncio message chunks can be processed discontinuously (PYTHON-1185) Others ------ diff --git a/build.yaml b/build.yaml index 750e7544ff..e498b511c2 100644 --- a/build.yaml +++ b/build.yaml @@ -46,8 +46,8 @@ schedules: EXCLUDE_LONG=1 matrix: exclude: - - python: [2.7, 3.4, 3.6, 3.7] - - cassandra: ['2.0', '2.1', '2.2', '3.0', 'test-dse'] + - python: [2.7, 3.4, 3.7] + - cassandra: ['2.0', '2.1', '2.2', '3.0', 'test-dse', dse-4.8', 'dse-5.0'] release_test: schedule: per_commit diff --git a/cassandra/io/asyncioreactor.py b/cassandra/io/asyncioreactor.py index b386388da3..7cb0444a32 100644 --- a/cassandra/io/asyncioreactor.py +++ b/cassandra/io/asyncioreactor.py @@ -83,6 +83,7 @@ class AsyncioConnection(Connection): _loop_thread = None _write_queue = None + _write_queue_lock = None def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) @@ -91,6 +92,7 @@ def __init__(self, *args, **kwargs): self._socket.setblocking(0) self._write_queue = asyncio.Queue(loop=self._loop) + self._write_queue_lock = asyncio.Lock(loop=self._loop) # see initialize_reactor -- loop is running in a separate thread, so we # have to use a threadsafe call @@ -157,20 +159,28 @@ def _close(self): def push(self, data): buff_size = self.out_buffer_size if len(data) > buff_size: + chunks = [] for i in range(0, len(data), buff_size): - self._push_chunk(data[i:i + buff_size]) + chunks.append(data[i:i + buff_size]) else: - self._push_chunk(data) + chunks = [data] - def _push_chunk(self, chunk): if self._loop_thread.ident != get_ident(): asyncio.run_coroutine_threadsafe( - self._write_queue.put(chunk), + self._push_msg(chunks), loop=self._loop ) else: # avoid races/hangs by just scheduling this, not using threadsafe - self._loop.create_task(self._write_queue.put(chunk)) + self._loop.create_task(self._push_msg(chunks)) + + @asyncio.coroutine + def _push_msg(self, chunks): + # This lock ensures all chunks of a message are sequential in the Queue + with (yield from self._write_queue_lock): + for chunk in chunks: + self._write_queue.put_nowait(chunk) + @asyncio.coroutine def handle_write(self): diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 251bf8562a..7c89ebcfc6 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -355,6 +355,7 @@ def _id_and_mark(f): requiredse = unittest.skipUnless(DSE_VERSION, "DSE required") requirescloudproxy = unittest.skipIf(CLOUD_PROXY_PATH is None, "Cloud Proxy path hasn't been specified") +libevtest = unittest.skipUnless(EVENT_LOOP_MANAGER=="libev", "Test timing designed for libev loop") def wait_for_node_socket(node, timeout): binary_itf = node.network_interfaces['binary'] diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index 08590eca63..25824cc599 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -27,7 +27,7 @@ from cassandra.policies import HostStateListener, RoundRobinPolicy from tests import connection_class, thread_pool_executor_class -from tests.integration import requiressimulacron +from tests.integration import requiressimulacron, libevtest from tests.integration.util import assert_quiescent_pool_state, late # important to import the patch PROTOCOL_VERSION from the simulacron module from tests.integration.simulacron import SimulacronBase, PROTOCOL_VERSION @@ -194,6 +194,7 @@ def test_callbacks_and_pool_when_oto(self): callback.assert_not_called() @cythontest + @libevtest def test_heartbeat_defunct_deadlock(self): """ Ensure that there is no deadlock when request is in-flight and heartbeat defuncts connection From 0efb1aeedcc626a75f2cb515c188b4be39b31619 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 21 Nov 2019 15:12:13 -0500 Subject: [PATCH 1707/2005] Remove deprecated option ssl imports --- cassandra/connection.py | 6 +----- cassandra/io/asyncorereactor.py | 8 ++------ cassandra/io/libevreactor.py | 2 +- 3 files changed, 4 insertions(+), 12 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index bf4a873351..cafbba80be 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -26,11 +26,7 @@ import sys from threading import Thread, Event, RLock, Condition import time - -try: - import ssl -except ImportError: - ssl = None # NOQA +import ssl if 'gevent.monkey' in sys.modules: from gevent.queue import Queue, Empty diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index c5737d191d..1a6b9fd3e9 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -22,6 +22,7 @@ import time import weakref import sys +import ssl from six.moves import range @@ -32,11 +33,6 @@ import asyncore -try: - import ssl -except ImportError: - ssl = None # NOQA - from cassandra.connection import Connection, ConnectionShutdown, NONBLOCKING, Timer, TimerManager log = logging.getLogger(__name__) @@ -424,7 +420,7 @@ def handle_read(self): if len(buf) < self.in_buffer_size: break except socket.error as err: - if ssl and isinstance(err, ssl.SSLError): + if isinstance(err, ssl.SSLError): if err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): if not self._iobuf.tell(): return diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 7d4bf8e01f..2487419784 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -343,7 +343,7 @@ def handle_read(self, watcher, revents, errno=None): if len(buf) < self.in_buffer_size: break except socket.error as err: - if ssl and isinstance(err, ssl.SSLError): + if isinstance(err, ssl.SSLError): if err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): if not self._iobuf.tell(): return From a8c37f87524a102f1ddd8a099124b73e0b3ad812 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 21 Nov 2019 14:18:41 -0600 Subject: [PATCH 1708/2005] [PYTHON-1179] Connection errors for possibly parked cloud instance --- cassandra/datastax/cloud/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cassandra/datastax/cloud/__init__.py b/cassandra/datastax/cloud/__init__.py index d16cff0855..46fd822b87 100644 --- a/cassandra/datastax/cloud/__init__.py +++ b/cassandra/datastax/cloud/__init__.py @@ -137,7 +137,8 @@ def read_metadata_info(config, cloud_config): response = urlopen(url, context=config.ssl_context, timeout=timeout) except Exception as e: log.exception(e) - raise DriverException("Unable to connect to the metadata service at %s" % url) + raise DriverException("Unable to connect to the metadata service at %s. " + "Check the cluster status in the Constellation cloud console. " % url) if response.code != 200: raise DriverException(("Error while fetching the metadata at: %s. " From df6e69ed957487b7c778b641e7d07a3818e61c90 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 25 Nov 2019 10:10:42 -0600 Subject: [PATCH 1709/2005] [PYTHON-1171] Fix paging tests by querying at local quorum --- tests/integration/standard/test_query_paging.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/integration/standard/test_query_paging.py b/tests/integration/standard/test_query_paging.py index dfe9f70df8..b1f7b39fc6 100644 --- a/tests/integration/standard/test_query_paging.py +++ b/tests/integration/standard/test_query_paging.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from tests.integration import use_singledc, PROTOCOL_VERSION import logging @@ -26,7 +25,8 @@ from six.moves import range from threading import Event -from cassandra.cluster import Cluster +from cassandra import ConsistencyLevel +from cassandra.cluster import Cluster, EXEC_PROFILE_DEFAULT, ExecutionProfile from cassandra.concurrent import execute_concurrent, execute_concurrent_with_args from cassandra.policies import HostDistance from cassandra.query import SimpleStatement @@ -44,7 +44,10 @@ def setUp(self): "Protocol 2.0+ is required for Paging state, currently testing against %r" % (PROTOCOL_VERSION,)) - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.cluster = Cluster( + protocol_version=PROTOCOL_VERSION, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(consistency_level=ConsistencyLevel.LOCAL_QUORUM)} + ) if PROTOCOL_VERSION < 3: self.cluster.set_core_connections_per_host(HostDistance.LOCAL, 1) self.session = self.cluster.connect(wait_for_all_pools=True) From cc49ab25ac40cdb66d74e6d98664acb8faafaa02 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 21 Nov 2019 11:11:25 -0600 Subject: [PATCH 1710/2005] Fix long tests comparing DSE_VERSION when DSE_VERSION is None --- tests/integration/long/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/long/utils.py b/tests/integration/long/utils.py index 1829d05cd0..421e694a9a 100644 --- a/tests/integration/long/utils.py +++ b/tests/integration/long/utils.py @@ -93,7 +93,7 @@ def force_stop(node): def decommission(node): - if DSE_VERSION >= Version("5.1") or CASSANDRA_VERSION >= Version("4.0"): + if (DSE_VERSION and DSE_VERSION >= Version("5.1")) or CASSANDRA_VERSION >= Version("4.0"): # CASSANDRA-12510 get_node(node).decommission(force=True) else: From d0241ad41691099b850b33bc573642769e954965 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 22 Nov 2019 09:57:18 -0600 Subject: [PATCH 1711/2005] [PYTHON-1181] Stop reconnection attempts after node removal --- CHANGELOG.rst | 1 + cassandra/cluster.py | 4 ++ .../integration/long/test_topology_change.py | 49 +++++++++++++++++++ 3 files changed, 54 insertions(+) create mode 100644 tests/integration/long/test_topology_change.py diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 2fa9973cca..4c51d017c1 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -19,6 +19,7 @@ Bug Fixes --------- * re-raising the CQLEngineException will fail on Python 3 (PYTHON-1166) * asyncio message chunks can be processed discontinuously (PYTHON-1185) +* Reconnect attempts persist after downed node removed from peers (PYTHON-1181) Others ------ diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 8ad7b69277..e96e459621 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2029,6 +2029,10 @@ def on_remove(self, host): listener.on_remove(host) self.control_connection.on_remove(host) + reconnection_handler = host.get_and_set_reconnection_handler(None) + if reconnection_handler: + reconnection_handler.cancel() + def signal_connection_failure(self, host, connection_exc, is_host_addition, expect_host_to_be_down=False): is_down = host.signal_connection_failure(connection_exc) if is_down: diff --git a/tests/integration/long/test_topology_change.py b/tests/integration/long/test_topology_change.py new file mode 100644 index 0000000000..ccd20779a5 --- /dev/null +++ b/tests/integration/long/test_topology_change.py @@ -0,0 +1,49 @@ +from unittest import TestCase + +from cassandra.cluster import Cluster +from cassandra.policies import HostStateListener +from tests.integration import PROTOCOL_VERSION, get_node, use_cluster, local +from tests.integration.long.utils import decommission +from tests.integration.util import wait_until + + +class StateListener(HostStateListener): + def __init__(self): + self.downed_host = None + self.removed_host = None + + def on_remove(self, host): + self.removed_host = host + + def on_up(self, host): + pass + + def on_down(self, host): + self.downed_host = host + + def on_add(self, host): + pass + + +class TopologyChangeTests(TestCase): + @local + def test_removed_node_stops_reconnecting(self): + """ Ensure we stop reconnecting after a node is removed. PYTHON-1181 """ + use_cluster("test_down_then_removed", [3], start=True) + + state_listener = StateListener() + cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.addCleanup(cluster.shutdown) + cluster.register_listener(state_listener) + session = cluster.connect(wait_for_all_pools=True) + + get_node(3).nodetool("disablebinary") + + wait_until(condition=lambda: state_listener.downed_host is not None, delay=2, max_attempts=50) + self.assertTrue(state_listener.downed_host.is_currently_reconnecting()) + + decommission(3) + + wait_until(condition=lambda: state_listener.removed_host is not None, delay=2, max_attempts=50) + self.assertIs(state_listener.downed_host, state_listener.removed_host) # Just a sanity check + self.assertFalse(state_listener.removed_host.is_currently_reconnecting()) From 37128031b35212bb22121314cae16f5c8a5f49e1 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 27 Nov 2019 22:07:44 -0600 Subject: [PATCH 1712/2005] tmp build.yaml --- build.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/build.yaml b/build.yaml index 7c8020cb80..50fb8c1b70 100644 --- a/build.yaml +++ b/build.yaml @@ -57,17 +57,17 @@ schedules: env_vars: | EVENT_LOOP_MANAGER='libev' -ngdg: + tmpngdg: schedule: adhoc branches: - include: [ngdg_master_ft] + include: [merge_ossnext_ngdg] env_vars: | EVENT_LOOP_MANAGER='libev' EXCLUDE_LONG=1 matrix: exclude: - python: [2.7, 3.4, 3.6, 3.7] - - cassandra: ['dse-4.8', 'dse-5.0', dse-6.0', 'dse-6.7'] + - cassandra: ['2.1', '2.2', '3.0', '3.11', 'dse-4.8', 'dse-5.0', 'dse-5.1', 'dse-6.0', 'dse-6.7'] weekly_master: schedule: 0 10 * * 6 From 6bfdb6853b1d21443134c1a3172ebb6a7e9329b8 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 2 Dec 2019 08:58:05 -0600 Subject: [PATCH 1713/2005] pinning gremlinpython to 3.3.4 --- setup.py | 2 +- test-requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index a43eea7d05..c310f1fa65 100644 --- a/setup.py +++ b/setup.py @@ -410,7 +410,7 @@ def run_setup(extensions): dependencies.append('futures') _EXTRAS_REQUIRE = { - 'graph': ['gremlinpython>=3.3.4,<3.3.9'] + 'graph': ['gremlinpython==3.3.4'] } setup( diff --git a/test-requirements.txt b/test-requirements.txt index 07bdcdf9ce..87e6ec745e 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -18,4 +18,4 @@ requests backports.ssl_match_hostname; python_version < '2.7.9' futurist; python_version >= '3.7' asynctest; python_version > '3.4' -gremlinpython>=3.3.4,<3.3.9 +gremlinpython==3.3.4 From 23cbf907f08091f18ca06cc93b768a9d5ed2cc8d Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 2 Dec 2019 11:28:22 -0500 Subject: [PATCH 1714/2005] PYTHON_1186: Connection fails to validate ssl certificate hostname when SSLContext.check_hostname is set (#1059) * Fix Connection fails to validate ssl certificate hostname when SSLContext.check_hostname is set --- CHANGELOG.rst | 1 + cassandra/connection.py | 9 ++- docs/security.rst | 8 +++ tests/integration/long/ssl/.keystore | Bin 2287 -> 0 bytes tests/integration/long/ssl/127.0.0.1.keystore | Bin 0 -> 4317 bytes tests/integration/long/ssl/cassandra.pem | 19 ------ .../integration/long/ssl/cassandra.truststore | Bin 0 -> 1074 bytes tests/integration/long/ssl/client.crt_signed | 19 ++++++ tests/integration/long/ssl/client.key | 28 +++++++++ tests/integration/long/ssl/client_bad.key | 28 +++++++++ .../integration/long/ssl/client_encrypted.key | 30 ++++++++++ tests/integration/long/ssl/driver.crt | Bin 931 -> 0 bytes tests/integration/long/ssl/driver.key | 27 --------- tests/integration/long/ssl/driver.pem | 19 ------ .../integration/long/ssl/driver_encrypted.key | 30 ---------- tests/integration/long/ssl/rootCa.crt | 19 ++++++ tests/integration/long/test_ssl.py | 56 +++++++++++------- 17 files changed, 176 insertions(+), 117 deletions(-) delete mode 100644 tests/integration/long/ssl/.keystore create mode 100644 tests/integration/long/ssl/127.0.0.1.keystore delete mode 100644 tests/integration/long/ssl/cassandra.pem create mode 100644 tests/integration/long/ssl/cassandra.truststore create mode 100644 tests/integration/long/ssl/client.crt_signed create mode 100644 tests/integration/long/ssl/client.key create mode 100644 tests/integration/long/ssl/client_bad.key create mode 100644 tests/integration/long/ssl/client_encrypted.key delete mode 100644 tests/integration/long/ssl/driver.crt delete mode 100644 tests/integration/long/ssl/driver.key delete mode 100644 tests/integration/long/ssl/driver.pem delete mode 100644 tests/integration/long/ssl/driver_encrypted.key create mode 100644 tests/integration/long/ssl/rootCa.crt diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 2e86d6fa95..c23cdea47e 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -11,6 +11,7 @@ Features Bug Fixes --------- * re-raising the CQLEngineException will fail on Python 3 (PYTHON-1166) +* Connection fails to validate ssl certificate hostname when SSLContext.check_hostname is set 3.20.2 ====== diff --git a/cassandra/connection.py b/cassandra/connection.py index 0c0259f1da..bb11248e25 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -616,7 +616,14 @@ def factory(cls, endpoint, timeout, *args, **kwargs): return conn def _wrap_socket_from_context(self): - self._socket = self.ssl_context.wrap_socket(self._socket, **(self.ssl_options or {})) + ssl_options = self.ssl_options or {} + # PYTHON-1186: set the server_hostname only if the SSLContext has + # check_hostname enabled and it is not already provided by the EndPoint ssl options + if (self.ssl_context.check_hostname and + 'server_hostname' not in ssl_options): + ssl_options = ssl_options.copy() + ssl_options['server_hostname'] = self.endpoint.address + self._socket = self.ssl_context.wrap_socket(self._socket, **ssl_options) def _initiate_connection(self, sockaddr): self._socket.connect(sockaddr) diff --git a/docs/security.rst b/docs/security.rst index 424afdea27..5ae4404330 100644 --- a/docs/security.rst +++ b/docs/security.rst @@ -69,6 +69,14 @@ To enable SSL with version 3.17.0 and higher, you will need to set :attr:`.Clust to a dict of options. These will be passed as kwargs to ``ssl.SSLContext.wrap_socket()`` when new sockets are created. +If you create your SSLContext using `ssl.create_default_context `_, +be aware that SSLContext.check_hostname is set to True by default, so the hostname validation will be done +by Python and not the driver. For this reason, we need to set the server_hostname at best effort, which is the +resolved ip address. If this validation needs to be done against the FQDN, consider enabling it using the ssl_options +as described in the following examples or implement your own :class:`~.connection.EndPoint` and +:class:`~.connection.EndPointFactory`. + + The following examples assume you have generated your Cassandra certificate and keystore files with these intructions: diff --git a/tests/integration/long/ssl/.keystore b/tests/integration/long/ssl/.keystore deleted file mode 100644 index 58ab9696b565276d128f5c4070953bc4591affc1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2287 zcmc(g`8(8$7sqGD%rJIlY*|Jqgz=fN6QjYsQuZ|?nT+haJGSfEh3xyDLMS2Blr~Fb zkfq2Lg=F6cH)>qn=ljF`55CWN-ankz`Q<$CbDnb;>x^{}2n0D1_+KHMNr8bRA94Wc zgjgwxy&469U=Sb%$^bawta5NL6nqZC0|vuD&=}}Sc(1)<544v3b!1jau5RL{^rhg~rjBt=qlhn6&Qnq7 zP3w3|CIvi98S)BeX9gn->6%KveJZ`?m4kSAyYDRglxc|LV=m$}o?bA(+f6N8H5H78 zcjkQUoM>N|HC~919=zAbZdA2XD>0u`Iw|a^MOf^)KlM%?VJjneTAPod6SAX_7I020 z+DoZWJoHC!pIeGtvy>N|QZnRcJDTuudp0_Nsz1k=P;^ruwm4oSZr)W_>3it*mNiqJ zp!>AhSZq@JZ&#<<7eKGVWpkmUzt?Lq$?GeGv>;YG6x%z~;Rfm!V*mh*8U-$E; z)D!}3EWBE>G2l^^xt|xznW52fOKb&kR&F;jJ=T`g1Z&qHR+?+Q<3E1>lGn?-IJfgd zv>0ErL+-=G-}v=W#W`>8k%hIzUr&Cs8q8+|jedR{Il$sfU|85b$~6&_Z7uaPC7Nw= z?lM0^3ktkaGit~>YH#R;el)4m5JJ*%U6lO+OMLXHO6#uO%pe&_jddR7RD5ex_{cHm zs-#-@la~Ag%pA-2A+<&$6CcE^>0jt!fxCiLN(PCy>Wq3)jq01P z(mVzWcP%&Q+uMuHy9RuDy^DbhUz@8A9v8OgY`c(4tJ}IPNT%DbXWS}9yR#D)JB}OL z><4+2DO#nxQKr#n)-+^3XSPbn{_L>(9=0f`khT@lpO;dbLt;;`-gLG%R}6yP5>;v! zaF5TPYs%;nY~thZrq}UN-^EWE&Wj&t&{a2Wp$$*EBHAKCe`LrUXy}N zb9uT0)^$>UqIhZJ0cyvRiZ7VzIzRnwSY$&e5FspeUxzXJ$FTbB`s&+T(H-g_sz=cbpazwJ@Ir8vN$@=&hd&-ruHdvp~xrpQio9Br>IO$G)+W&b39DwB0fJ08858zB&ZAJl_AtUsLtwIIhyc zK*d+6e_B%p)*{dI7GHhsO_{E)f3@SDauYh$EH2q)9jyQMyfS2sRc(@&i(o!BEbt-H zWJ-6r@S$qM~#-|21InZhlWPxLYlfcbA`rz>YC4t@i@tEiLKOHs@v*Vof14-FpK zmmIi6l*!p0zFog6vdY@o)R%)Z>-nPsC!(nB$$tr-cLl-(27xP1186|>WW1uBP<|*X za)?YxA^YHa*9rt`sg7UTgL%UKYvRf7otOv)g-1+|Uq=Qv4%$0)@Er-&i1&B}`;W4t z1NJhjWak~TLvU-W)VL1)4D5=Y8Cj!yqT+IpOF~);=&BxDM*jQT5Zi0E2bi(ch-H4} z@5U--@W=oF diff --git a/tests/integration/long/ssl/127.0.0.1.keystore b/tests/integration/long/ssl/127.0.0.1.keystore new file mode 100644 index 0000000000000000000000000000000000000000..208166d99a626af52fecc9275c0314103823cf31 GIT binary patch literal 4317 zcmY+EWmFUno5g1sIs}nW8UbmTAqJ2J34s}q?natnkY?x(2kGwa5J_o}k`C#XkdO{R z*#GW%_uYLz-1D4s?|tsaA2^6)0RspE2a&{M<8g#3hFuc@ae%o&B*Bw-{KHwlM z#D7FsrkFukhX2^Q|40*?;9p;a_&|)@Aj~^(5auQ$ z7{vsUEg!rl?P9?I0eaOigD?jlx!eS`yPokTcN;%-DO}}1uzJ$G~rEM_WSCiBC zt?LT*-#0t!Kf{0X!AyQ^GqZMBcx*f;#K1>e3!nVPa+=U^CVJvA(l42(KxEZ1Q(NE{#a@vA zK2{5FE3S$lGkY2R2g>_R7}{MueehYgs^UOU&MRD)bS(!WfmI-eBfVZ+O<{{Q!9w*^^MHjAgW)o`rNC0*f!AtlqvP>k#;%Y8K?BA2+^1E zK1}7+n%Ue}*fNHp%mUI9N}A6xdMa^nHZDr3R_|AoGPIvk&`6|+y+O=Uesu|ddNN1( zQPqzAw`I40K+_HT3dl-w)!4t2bA`a>T}<;t+8j41bi1daByWPOn1vD-ZkAM1av#F9 z?GI=pYdnP|+4g4_=Qf5X@|nw0U&t7pFK8V|&Eq}77}KfDVPUtZVC?xPO`?V8Y2ihB z>wMm()N}HUes#JM#~;Fz3NdsaFa)lbKCmtw=#OTKH7MO0zeLma)_&h-#oFU9mGNo; zAN(eW5ikC{Tfa)KY1I!+^ENLrzAreE(D5>e% zz!F#cZ^Vp~X72WJmKbsO*EKB+-%J7X6HiWpWrrmwN+Ql|j=8+bPOw0>tNO;Fs3e1> z&7*CG<&8llQLyiyLltuPi=sFuBG@W1AcWV`(qTDGK3_4*xe)obPvJ*mM-;24s9Wi% zCZHcAC_N-6x=2Z6$Mt%A_T>)5Q$6dUg#C%ogZuu<_ehaspY|aaRy^p#I0XVQfcq1T z`=o=tAl3T>1n+qJzFNOsP2X#axmW^_IFSyq3Yk)rqK^o-zIMY#>=#igRn#8hVwU3N zzzQTiMSJV=bJa+a8)#lwR3kim;@Wc9{=DnvJZxEicT>64C1A!EhXRTIx+;*noX<<3 z{)Dt>L3D%ft8nA5_7(>7T2Ccg=Q+Rc+E1%NDK<95WTVX27JDXY|LS5LPr={A(k=qfKIvK5~Q(5I`*2wc!Y1;WbF6L%@ zRj$W1*|!#u$*X_^Tvj1I8L`k8dW$tPbQA?TwrmbW+>SbQn4U4%*gol>t~(*}V8(x= z5|0c*Mn?*O0Qdkx03HDNUxNJq3OR%flT_2z(TV{gC?p^(EC3aTLIs7vK?Iop-oe4n z4Z^h8jLdYCsQH6V)>}c8s#d#1b{uBsdya^ zhP3q4P}2&2XL>7LQOqmXMo$uDe}ahi;ZANpdhSzbWE7pMSByBY1DfwKOmg5ZJ%pUy zv!ek=D}JnygWt~>Pn>=$ccc=${rGj!@1_0KIvjndnu#hV3}u^(of&o+I4#($z-ihN1%lOVN3NP#cMDZvCn-8-XO15D z9-RHItmN^fYqM)8xkI+(fWyi;SH25XQ`_*T)8gx&U9rd^LV8wO8nKVPT=rML8t_DOynF~O+lfe)euD&{~>FV#PiK$4qmKD5qpp4 z;z4cJ@X<=ykcKKOZ*f}B!thmqZG$3@YJEa+%;jCTRqECZ;L&I!N@JPZGAj8>H^5cZ zhMzRPZ@LMm+_n6+Uo2BdG4C?z3b{1(Q}0yT?8 zQ>bKn@o>VE@eX}RBG+q@a;at%4G^Vy)17EDty`^!=MZxuCY5;?9j_L7Q>+zJxwV}_ z4#H${gTX_wM0F~BV|5bI(VttXCn0#faX3QBaZS3knMh6t3k6goebGx+xQ8}MU+cBp z$)m))b$`xl>_#1-!H>O;Xe#TT@Y<9BiFZ?dw*c3hP)m3PR-jO>) zTuu)IFLT4{K84||*df(@0lTXh_Y_=V$#hQhPi$qS4&f9zBKyrp{Uk| zOWEGo(vU6t{J0iI-&BP8HiD8_D1!{6=QYqbup^FYJCiOiNchNYpyn&Sn$ogcMe6Ov zO%A0$xsEJ|s4=3f)FF}EC0xNFqydXTgQ{M5!XGmz=L8(30Jn+AF@c-LFQ0@Tc=`L<|$>FspLV0DKNs?M+;zfbv zW=C@v^-uE0+#s+``7_5h0f7#gkOxQ==M0JYl`Bk(kxQ&041?@_063PWzP`7z$yR^* zF!i!N-+*L_fY)=tTL(Rfi$ka>h0mibz`E%m>+=eDuF-L+xR&m1rM2EC93Hyh9Cj$_mlcQhg98Ts52U`OQqXboVY5bXL${HseBCP6RQO>F)0;Hd6ep4L zd>mO9{~WA?U)uoRb~!08bH`71kYrONK{Njqe6;$HubDHcTg_}EVvtDbwJq3-U>1uB zS^4FhraL@WYR>HIsXjEyC4bty0JkMD=a7at>|LO>Hcl; z^0n;E%WUS z7*352Gi%!M+S1%Exg8HM+lHgOt(nEIztgL9Tj;M<7^ zegz>+LIM~R!aTmdyJhLNKQ9l&3{iXtuTT8?n1zT4iVk8Ys- ztl0kJ0A>@4&#uvYTT72)L{(f;Vb7D2JE>`*^*US68@)SI54pY0Orm2c`i^A=WXMVC zm1h{BnXx5es?yUX91P~xYDZL232lExoRWk?97#U;w9}l(S=g;2sNWI8@dOe-`YFU4 z^GbBO#pexns!U+fvv{}e^TiR)a;LmHyR;nm8rdu+!o6z^_b2Xkl2g5v-FHGSt_I&{ zX4ij0&SIje5;mXjeLG>PP6mxB8fjGe({eS@Mhqg2>qcl?(?gk5pzB8_q2_t1w7IYI?sCD z^erl<342ORS?ao8?*i*`92U4}`4}Z?5+R(XdQVk~4Xu!pz$l@rMvPBU%9VdCxIEz7 z_Rmm6Rm@;jQl~FANoDw3L(Oh?hz(?{)iCm$A)3wS93c zR+}-WlEQUMtIyF=Rs%AnD#Nw~wIG|mhWN;LvdKhxY9@qeo04zP=nDD1aKt+kdPCi~ zfmam7snGc!EUPIx$*X&kn|$`v%X4EvhjnX=r_P#DBU|I|vWe_uE0&tKTF3zDb z>TJAj9ZJq-oL~$0&l{XxW-10}(#@<1d*ev31eg(whmFPg1cX6_1;8ZR6cW{?;iX|^ rN5(@1Ma-siQTcwDWDadr8&zvBRKn{{%3(Y!T%W}|<}pCPu-yLw^bQOH literal 0 HcmV?d00001 diff --git a/tests/integration/long/ssl/cassandra.pem b/tests/integration/long/ssl/cassandra.pem deleted file mode 100644 index 43c0a3e4a7..0000000000 --- a/tests/integration/long/ssl/cassandra.pem +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDnzCCAoegAwIBAgIEG7jtLDANBgkqhkiG9w0BAQsFADB/MQswCQYDVQQGEwJVUzETMBEGA1UE -CBMKQ2FsaWZvcm5pYTEUMBIGA1UEBxMLU2FudGEgQ2xhcmExFjAUBgNVBAoTDURhdGFTdGF4IElu -Yy4xGTAXBgNVBAsTEFBIUCBEcml2ZXIgVGVzdHMxEjAQBgNVBAMTCTEyNy4wLjAuMTAgFw0xNjA0 -MTkxNTIzNDBaGA8yMTE2MDMyNjE1MjM0MFowfzELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm -b3JuaWExFDASBgNVBAcTC1NhbnRhIENsYXJhMRYwFAYDVQQKEw1EYXRhU3RheCBJbmMuMRkwFwYD -VQQLExBQSFAgRHJpdmVyIFRlc3RzMRIwEAYDVQQDEwkxMjcuMC4wLjEwggEiMA0GCSqGSIb3DQEB -AQUAA4IBDwAwggEKAoIBAQCkFMlcxnl+k5KAzt6g1GSQ/kXzHtQXcf0//x6BQTRYdOEeiBnMcI+o -HYefiwGpDslr3YxcWwJSfKjgUhGj+2wyl4O8PP0Up47tX5HUQuIlIjjBZd465VhQh6DqaJky3YHX -KD+8eHuVMnEyAImsNh9laQkUOHsGT/POpI77IBxS1hVVOu6A5bYz17D0RAzZBel3eZBWLSSgbkSG -jUPIDY+078qRJI56xY/6lEM5Zr6DJ96jTdqjRPFv3fHJJZnwCNz0ng0wB/gHYFkm2fdGAM2jrCdD -jC+VZK6uyXatRbFanPdlfZ4rWPrH7V0c6wrDssuUMlDIdaMyHC89axZLP5ZZAgMBAAGjITAfMB0G -A1UdDgQWBBR/0GR2jGRuP8rsmBept17gwXcRejANBgkqhkiG9w0BAQsFAAOCAQEAe33a2GITd7DP -WmDCFfsZqCDIbHdx1QwfcvWRpSnjXpN4Muzrt4TCRJAx6kNQZDnG2aNMdGWGjrQAXUMLkJfSnwJX -X1f3LteNfekWB/lN6kVbPy5lSuLT45q3xSLpTUE51S3zG/c+qyi3FZgYA729ieuDW8VTQdF9hClN -Ik8Wy5Gg87AdYY71DvG9IQkg9eAGVeZSnfMUDYpAiZaavsYJwFfYqIT/WCz9FNXPYN1tDNoV3u71 -GTPrywMLZlmmGn3EBvbh1mR25EmPkxAw6LkWyfbbzrskhXmzN+j9TZNN9MiYMNtisWBR9afsVpLq -Bw4ukuih6CyqUxwuVBNhns8iFA== ------END CERTIFICATE----- diff --git a/tests/integration/long/ssl/cassandra.truststore b/tests/integration/long/ssl/cassandra.truststore new file mode 100644 index 0000000000000000000000000000000000000000..afea1c9431b2a6361eec3bbbb0b2679daddb0be9 GIT binary patch literal 1074 zcmV-21kL*}f&?xC0Ru3C1Lp<_Duzgg_YDCD0ic2d*aU(D)G&er&@h4n%mxW6hDe6@ z4FLxRpn?OvFoFZQ0s#Opf&;P!2`Yw2hW8Bt2LUiC1_~;MNQU)s0jX8@*OgWRPAVTkgkREDbII z(t1;<(+V3RSd$cU*5gO+mH@nUfLR3&2uK)uLWDS42vx`sGHm1QTt|qS1~Wz`UuSv~zqnZ`yt}}_9ri7IX|1-cnlXH{Z!MR|l5h=jc z9B1EXrtlYfix_OD8|ia;`RLwWMZNLaN+NJna|Qu(k@sH)S2OMfnYx~?24h3U1hy@0 z`xL09hAd~rnm~!^Wj5k)@a0HRtgzJWg>Xdr&mO(S4c?0INB)5}m_etB zlY!jH+2uymd#i2-=Aw|Vra`e^|7x_xin~I4EeE@GM!r0cQAigh5bby_1Q_(_hWcg= ztKr2?SS{?{0hYCj!(6NwXH|(7`ZuQUbZm;dd z$ojj?t>tqF9j$u>VkKKOF<8OMr@?pLZPC|$V)0hymbNV1xezPpUgg2}9&GSJoe**~ zZLZXRH5`k?8Xg0k!0iy!8o8?MprEH=w^r&*2@S8qo>#-bm~`TZ&(A&!{axFu|1SjT ztO(>iGNqov`DS`@*OK$TIVyt{X!8#DW;Tq0OvP@G4OeIi`D*7_mtV?)+5XEXh`Oy( z0^lI6J7lC4&=m?5iIzZArU|DB&)OIh(E_8>J1{;lAutIB1uG5%0vZJX1QZBfa{SH! sS@wS%XrS`CWf$9DpkV|QqkYvE6XIGn=LZsoWSKgC#5Ing0s{etpm$;JhX4Qo literal 0 HcmV?d00001 diff --git a/tests/integration/long/ssl/client.crt_signed b/tests/integration/long/ssl/client.crt_signed new file mode 100644 index 0000000000..7338adcf69 --- /dev/null +++ b/tests/integration/long/ssl/client.crt_signed @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDCzCCAfMCFHXm/9f6KJL3965bBgZMFdd4Xs0gMA0GCSqGSIb3DQEBCwUAMEIx +CzAJBgNVBAYTAlVTMREwDwYDVQQKDAhkYXRhc3RheDEPMA0GA1UECwwGZmllbGRz +MQ8wDQYDVQQDDAZyb290Q2EwHhcNMTkxMTI3MTk1ODA3WhcNMjAxMTI2MTk1ODA3 +WjBCMQswCQYDVQQGEwJVUzERMA8GA1UECgwIZGF0YXN0YXgxDzANBgNVBAsMBmZp +ZWxkczEPMA0GA1UEAwwGY2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA4SEvwK9IbwdVEnBEf6UbYyaGh0Ao4VGyIA2Z2yDTmSIqBgBZjbBdYr1w +FC0nMqKgAI0NOqZHNRiuOUojweHZLwrYhVPg2e/Va+vGslQY+cKmAYDTCsxhEwv7 +4PXiI6QhGwmfhnSnA0+nzPKZU1SFFWl9Od07X8QJWJMLHDOG14OnsWI2t4wetkIw +6yyTXfyAD9mSsQeqyu5P6E94E2VAGtQvP4yeAJX/dWPYhFEBctsQ0H+Jk2GK3kBe +9YX9Jhk4Ono8cdUy3WfUFQRXgi01YmF1vI+Z0fS0olkdqw8mDonFpMS0ly97Lnze +aOsTv3s/SONdYa3BslbEAwUT7kvE4QIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQAH +QZDTPBQgMbTCp8IJvDRbXKQAfjEE7FjYani1qWOMx5JpOJIoYHnnbLNRxCWowWRj +zsyOa5Sqs0TVOS5/4QHqu+cSdZsUNL9TvESor+BwgSf81JViD0r//xK+j58pLqB1 +F+sPIltIqclWRN/QZH1VNZ+G0WQiXoT+YGNYRzJtXNBQAi3cxZSXLaeZ6yViWzw3 +m8IL0nUTX+uNxJSaFqZw1vTHBe0hC/l+P3YfnM1k6lg4lsWDvzdwNKfmohFlo5eV +OSG89YncwigG1KUHfQ1ECX7aaI8+cTFEg5XWTMqwVko1pRVVL9mVF/YEwI/+ydaJ +whmL77CEP1AvHIjdiafh +-----END CERTIFICATE----- diff --git a/tests/integration/long/ssl/client.key b/tests/integration/long/ssl/client.key new file mode 100644 index 0000000000..b2934eadd5 --- /dev/null +++ b/tests/integration/long/ssl/client.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDhIS/Ar0hvB1US +cER/pRtjJoaHQCjhUbIgDZnbINOZIioGAFmNsF1ivXAULScyoqAAjQ06pkc1GK45 +SiPB4dkvCtiFU+DZ79Vr68ayVBj5wqYBgNMKzGETC/vg9eIjpCEbCZ+GdKcDT6fM +8plTVIUVaX053TtfxAlYkwscM4bXg6exYja3jB62QjDrLJNd/IAP2ZKxB6rK7k/o +T3gTZUAa1C8/jJ4Alf91Y9iEUQFy2xDQf4mTYYreQF71hf0mGTg6ejxx1TLdZ9QV +BFeCLTViYXW8j5nR9LSiWR2rDyYOicWkxLSXL3sufN5o6xO/ez9I411hrcGyVsQD +BRPuS8ThAgMBAAECggEBAN6HezgvCp4U1VZgepThkuyyzA8ssDvoEPX1+7rJ+95G +EtvTxLF1Pxm2vu5yo2g9r4Jb6lOqwIAPYUdnrWib4Z3KTrObcYp6sq72Y3UqA3X9 +nTGnMPRfiSTWl6aJ5XntZnvfSzwQWnW/atH+iuf/h3zexNVJhMLod4SB9F1v4T1s +HgVjDJ/4cLomtNDkB0CwhgNr6elASL/DLnWC4Fb+C7HpJtu4A4U/08DWD6kAfs5a +zaSTywXVtxpp4NSMJUUI6KQZEfA4uLP8dLX5QMYgdwfpKXkVturTWMIUmU73IWCk +CaDMdXNeKU7d2iJIQi3DlHrcVZm+MaQKW79N5XUo3TECgYEA/u28rQM7AAWNNEJI +Gj/iGGFF6lE2V+o2uWySxEZeb0noWzuyUP2bKU+CbYhaS6YCAtEEkLCHSt6D4jZs +vUxJTz3H99F4jqLbI0iZE+TKh0ff0oVDq8odmn03QylbZ3+H0xgfumjyOtQ9/CKs +1fIA7pXXu0M2J7znYes/tUsINs8CgYEA4hNj9gk2xCrKE/onmzodWa1F4VrsrajC +aDZmm7P+gZb/7p2JRn54K0SWSJthVNu3WnnzNFjR4lsRN7jVxscWb5Uq0ppYNjj6 +5Y3IU7F8zhib1zaasqPpvZpq2rK0AwFeQN8aZrLhWEPVii30HnLc9cQRFcHiZfWj +/amGJzB2NU8CgYAs1Jf6gPfuMAu95e4SF6vmB60o4LFC0qBfCVXiCFHxFjkzWhMo +pQCVSjMwmervJLlzz1gTLcgNBYaB0Hpc6750nfO9g+vEgaUx1kF9Ox3WnnAg8GiP +HFMKaPy/5dT6JXY4TCTAPlZivBJOdbaZRR6e1mECwHYUlObH1Hv2oMRIBwKBgHzW +UkOBMQG/0xiW8RnxTFXBra3URI9cegWLzLs7+FTc6fR8f1oy6e6SkB4F1whBz4yh +fJ+yUCubN/W6FioOs5/oEd57pixC6KCr2ywD/TPdOOjtWR0+EAtH4qtjNK3YKpDN +4clGC9NumdMUrxHFQahgnUKTbo34x5aB7vdi9lNXAoGBAJrYYiVUe46FDAGZUzKG +xMuJS693kx23vC/PNaIP9wMa58aOEtTw+zLdyioiQIvxmJXTnEKWFK25z0n0gDJM +hMar/CFrERHooWRjn+a5kyKXppJ1DHtoSho67wOjejsnikjQvmppEBFr1YjvyDhD +kY44x2EM9WzqlrwHtBeblQWE +-----END PRIVATE KEY----- diff --git a/tests/integration/long/ssl/client_bad.key b/tests/integration/long/ssl/client_bad.key new file mode 100644 index 0000000000..5d810f25cf --- /dev/null +++ b/tests/integration/long/ssl/client_bad.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDha8+NBvpTmTbw +D2EIodXlaaAEtLmXTGoH8pdBm3JxzMuUEkYbGig3YjQ1BAKQgCB1TJGPINcHz7Jo +5aW5To1jrxhhohZYQLCNKlAONDhgJbHEPf7s8dreQ/q5ISt/2I3z68c9I0j3VoRz +AxxcNktl/x+6YkXe9tXf/LWmJk/gHlu72/HuJ5oNyqOKaCCoMoib3jLTlR+lslTy +Qy/CJZH6WJabLOPmppFLaxJMlSGDSTE/Xktt7+H5ssHnfQtyWyylVjZkzChJfBgh +HrLpm3hO5rmqVwOhoKLKVDFMmX3aMGX2S+3KpXQ8gLnPXwfLI9J9fDg5jp7bya4k +OXlZfB5hAgMBAAECggEBANQVFbmudfgPL4PeREHV2SM1JCspSW9SonOFxs8gDCWL +M4HFS5YWHv40c7/pXOxMz7zsZApQMF8WBtnwLeJRSG8f/oVk9Tbk7fZyd81VTjEP +ZdenKGAPEAeL16kzzvRCbxOtoc8gkna6PHTk2VrcbkWxKU23RduHSiOpY9HFO+Mz +iI69tB7657NOiZCQ6xDIjKv+jR63m7VAWKT5jkN+tYpvx4K20na5t8RO1s0shqNE +e2zMG8WXVl6lW4btfkt/lwWUNXu8olMTk9qN2b5Rq7BEJfKwn3lb9vCpUMyewtRB +/8U+Zu7Tlwni5QagOqAUEkjuOJ8cR/Jgwu1mqV2sXxECgYEA9zXi0PjWAe2ZIALd +1iWPZCvvT7yEjt4ulhAYPqi8T38B4K5f//m5SuYPS2ebmSAd2WBTeIX2A6mHc9lk +53gnwvsgAqaFgjYeDqBThpCE8icFXEZnJbtnJyC8zC7pYjUovAHkFEdLw5kQoI6Y +i9HNOS9ugSut8RnF0oSv/E2mahUCgYEA6W+ZAEneBBCsOQclVfVPLm7D+y+5SZEt +zWr2b7CCnGCev/qRCllIEwQ2+W1ACEHof9xjE+aWwEQjX8YnoVbAJo2ru6FFQfI+ +f/SQx7beX8jUAeJGo+CFr2ijdVmcCCbMGeAm8mpACUIQfWPHVqjtGS/CayxdfwA+ +lbWPbkXCMh0CgYBfUgHRPgGW4LyoYTKUfgsaPu6ZukEKrZUc+7u9fWaO6JQaxGHz +26CcxrSjCKIwmvend8L3t/+yTc4S14JW1jfOsPIY04irOp7AWQWb32HD1VP1zpe7 +LtWJetARkw0edwzr4XbGcu89zmlg31rmntEY+bcMS4FYc+2ZTNxm1rISOQKBgGQZ +lct44Xpux9tghBMbMUwg9WtWKKcyWSi4EFsOnsN97zU1tlJwvKZi7UwCHC4uTQvf +LqFPBSAHV//u0fmuYJFnuNeprTA9N63Y6uipMyxxyu/P3yjQ06LHRSjCN1WLhYQn +Cax0AWe266lJSyaPI7TkNQOOL72RFkVOaOYJhd/FAoGAPtpVPTiVK0RYwLnZqaWB +fxyI6w+UjOEbP88vD7N7FEI2kQSGQ6F3pMzDK37NglJVtwjgzEIF9x9BIE8XSf16 +shc0U73Vg9ZsXDNPUz21hhAwYL1cCgnx0mfL88F1Icb5FfxlT/1BPHNHKowA9vST +ihbxCJg/JJBzwXTxPocQisk= +-----END PRIVATE KEY----- diff --git a/tests/integration/long/ssl/client_encrypted.key b/tests/integration/long/ssl/client_encrypted.key new file mode 100644 index 0000000000..88c8f906ba --- /dev/null +++ b/tests/integration/long/ssl/client_encrypted.key @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: AES-256-CBC,D33271042DDB319A9A83747391E3B8B0 + +NTq5xNw+6QBZsuXsM4ymjwtRHmEmb5T/DMUsWYZmJk8Eb12NBveWub1ltdkjOe9C +vqmYKG7ZGgEeRynia93P3+LY+K+8O7vH81jceYNaFYP7OEr8mSblrotjdQkcSEBs +mNGhZ+7XhlGGmxtMnOodYchwBsKHgBOpPzGB6iuwtM5b80wQ3jh7w6NdsPQzeSBS +B4RalAsBtU6dxfdl9NfjKisNrfGHec6uE5SfpYqUQeZ471JYl8pBkcJfATdlNFkC +uKgUJSxK5kV1KF5ClO32Ekq1IUgeNZUFHE/YA2xK/YbmDvFwOTfTpozoj7c2VRDs +2AWaYwIazz9dH+i03/bFZtZUCARVj1jig+FVVWSIF5PNUrgxTEMpG7Q8u2Tr4nCW +x0/KQKLOkXd3cgNAJDrnWrfaA3zE9VB3dL0sbFG/NFU7HUxwdt3q7k3xU1raGsPT +539ydrvAaxiZswGH/UYMAbXCEM2e1mx1M/HVgNX8/3auERxPrMBO6YEM3cNd7ktX +q/JU69XXiuncl4uQJNQfADFlK5PD9KR8PLfIg6qIRmwI/rtp4M1+tLXeNt7apU/l +6kDyI0pjPH7eouUWFr1wqToAHHi2JU1e0gN8CP59Q8EzeJUtzOxTablGLZbz4FeN +ymFgCFrd3HCVo16Svepy10Ka7ZVlludHVC+IKy47VjkevQtXW9u4iUiwL2+S4gAJ +AfylzmRQNPIzwAM4CWGdb5pgXYzCNsE/WKuV/nGjIKz/noiXoJCTnLRI+0rnUcF1 +lNjZls9XzdPX67WPQZqs/6f47YChw5+yrpZ8NRnC8aPouR1BgpLWg0u0Xqa9NfDl +opRIDjCD1dkUwA0ByotM/eeqYLGnnqaQ+qbFyf5WMaEwngRanpX1dxh/W+kaYcEm +CeLWwmyL17uGpxT1ULsx9RFjUCuPC3IbHUZc6l9UtsCShCtK27wpkccaF9OpGM4k +cokXQNV9iP+ve4ZNaEdAZck4HgR/v0dYRV+QYaNeOaozRFIqrumKNIDah6JDHcI4 +ZorCnSdlX8sdCjJ71ap9572msK4L6HT/sSop/fORw7wwIvNmSPOJGKxNCacsfB9I +jRcU0WUWiWWVhqUC+AhnImB2wLYFL0uSMo8Ww5crdC6cWbmJdMHfI5Uag7ri3SAs +Y5hPc3/DXrgpFJ3WOJf9dpoCx0s6V4YERkZZvufD1Nz2DoGrw0NUAU8G7cBqImTG +ExsU1z6DwI0fuEZYT5xYtpO8EjCbPY7Ucz4gp3PyCWDAOiulHaH9JMli8rsjglY8 +nUTfkAA1kWSJbTQQxG0flgzOdQoPoezP4JKwGEaUJ7B9MGm3cKFmhefspk/UaavO +F9+35wsPhPhiA22+pKSCmoRnanq+Pi2+kC9M0DEY9O1UDFKeDg3U6sn4tVy5H9mb +qEHRnK06q8LAbUrKopSZDnmRt6bjjANm7h6ekasWndmM8VKZ77dhCyhTTBCL2jke +OmMJLBnfmE712XwZNV6ewHv52iV/2KE7DMhhY0TtISVSPdvE4/rbkmjH73UOPwZg +ufJDT8XnwqQEEzgOZUciUw5Xjc7xRikg3MCHEy2SHvT36/6UYRlI+ZkRZ1wYyXJX +-----END RSA PRIVATE KEY----- diff --git a/tests/integration/long/ssl/driver.crt b/tests/integration/long/ssl/driver.crt deleted file mode 100644 index 76c999903e5ced1fbb2f101ee23067bbe10caede..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 931 zcmXqLVxDi%#MHiknTe5!iABZh)CL1yHcqWJkGAi;jEvl@3Pr$|cO}l30=$ zT#{I!;F*`KXDDeP4ie`U76|YNP;e>AEK4m?2uUq2DK-=`5CBOr3v(J8nd=zj*dGY?tPX+&vz3yB7zTGip!n_q~QYs%0-t%~Hd0Ie= z=uEN0509Ktd~ba@V@F4w(O#hk?^*uOZ5O&7sS#eVpV#c2W$9-N)@Sz)%A0BVX)_#r zz__&8&hpdZ){{R>d8QT{=bSp^Uzc)Gc5XR$i}>e`mzUHT>#v4b`(>sr;`rCd-Szg{ z-c46MPCgerp#AK8@9W>mA+J5}t7V!9iyKdz8WJ^|D`3mf!>V65W!2TK-5}q+Phj#& z#+_$(rY5aSSd^-f!G8MM-_QHp-Y?p9cb9ne3FiVs(-{-b35w7DS66=L>9Vb+?6>X1 z&VTc+@jLSV9P?HtW=00a#fk><2C~5DmE~g*V-cyzm~_~SQ^fGpo685HkBK;3XYx=+ z4n$zu0|p`^!-A58X0;y8?TxQ%-=y&z|6uce9?yY)pO}A^Htc`tBzZ}~%nf137IEete2W^Y8>R(Z~QQL`kJI0lOB0rIlD^DQuXeOl=#!d znl;TzTsxKqtk-gkba&GI$!hcF9%0F2UW|H-zEJbGa*Q Xdl+YNCi#Cmwp8Yy4dc`#JHL1UE7@EN diff --git a/tests/integration/long/ssl/driver.key b/tests/integration/long/ssl/driver.key deleted file mode 100644 index bcb033d338..0000000000 --- a/tests/integration/long/ssl/driver.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAohKfXUHM5AQvwgTPIretqenYVJzo78z5IE/xjUe0/e24lJCe -qCYaJPHB3kjg05ZQhBWZFsPhxMwh7zvTaLiIfjK9EuDvBP+dhxLbWShXcL8NNu45 -dfM4BebewR82Kk4rAMHgAaWDPjnyo4XJ+DUMlXMzbMrCT35k0R2ddwuEF/OI6dIn -AX/VVDtOaWaiCP6BC4rt9r2y1UjJ5xHAK+bPjev7Y1TrSd8maTQTFzORlVRamwpQ -tMXDJfWyan5+rbAfi74Qk6kBuc25ZWKpYKJlKGgHy9b9875G76K63boXe8hDcDE1 -mJHOEReb/n533OWmtTUH2z9Wz/ZLfE7E784DtQIDAQABAoIBAEjCDXMDMCUI0BvM -puSSCs9loG1Tx6qnnQtpeCoRj+oTBzMCvYcFG+WL9pmApBW4vEqfm4tBwp/jeDR6 -6gxwEy58Pq8z9XmL+z8BMyGSX7bIvfjz9y3BU1wq6cNO7LUPRHThlFuI3yNXRlss -m5/myNBq2CI14Adp2X6bSe6AZ/uL6+Ez8ZLpNiMUtGzTDILyH8bkgRHsyDxoDpF+ -h1r42dZG3ue4JC3oqUE2epvNE5Rzh02OTBSYYbmOmFWkhV2lm6DH46lycFHjkzcm -2meU7q9I7nZrf0+ZkkQXhWGQWcGzHd3si+XMhaHT0BnH8gGFZDzpC/qnvZdIn//y -zDu2B/0CgYEA43kkgZ1YGqi5Vi4ngek/uk4sJ9dOvYIaAag+u5ybRinLhCFDGhr9 -MzksqFY7naq9oeZZrLbftw2FsKsCMYAOrrxZIbhvGm538jwWmBefEmD0Ww+k/WKG -AAv0z4sSnSOq2/sZBQJlOrk8K6wQ+FcyEddiy16zyj9UCzEPSDQoNj8CgYEAtmXY -xYALbGz0peDGfzPq/WjeqCJtjlXIKOB7+vdyi88YE4Tea2vfBmAAWmbTf/el/o8R -txDfP6+RKxXs2jCFCY640W83ar3RjXcwtu+oH4Aqa8S21o6Dx2sx3HOgKoJ3DGGB -HHmYczuDZmN/zHseBrYUf1UNcIDQIWZCLKImkQsCgYEAuY735bfJyC18yr1pwwLX -1o2wXWu4ssSvnEx3qCmvTIQJnaZYi7IafC+Wq1d02EAQ40H6QvcG9ddVCHHnnyOc -VpQUjXpbP49/zx2PPNCAFcj7bFENwRkewGkAclE7hO40kbB6j2iN1WKHoASD72GJ -2Z3+3HFLbp9MWrjRg4/wgZcCgYAk2IgkxYwJOC1nwPN4SM2kqWWpQ2MsSKnpkAeb -mBccpRYxAztHKgBgsk3/9RuukyGGPfKPL6pZnCbQNFqnbPvDBYDSVgw01OmbEUPX -AKzOyD5JjPB+gUWfqEjnRrhJPhe8eYnybaHdTV1q9piffxN+uZOEcXMIkgz5YkXl -7E+sJwKBgFLA2CS19lbcoi5R8AJbUydZJr/LNAbFknWnliiq3GxISfWydA9cG/dI -CxV3297awLhHrCWqziC0zITjEcAhsNkfG/VQlYGJOS3sfvMSrLuW/9bAL8o4VCpC -cOs9e+svbJukJB6UQu4vpROMmv+0quXM325VlCZNel7DPAovYwjW ------END RSA PRIVATE KEY----- diff --git a/tests/integration/long/ssl/driver.pem b/tests/integration/long/ssl/driver.pem deleted file mode 100644 index f5aaa25b3c..0000000000 --- a/tests/integration/long/ssl/driver.pem +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDnzCCAoegAwIBAgIEJErKsDANBgkqhkiG9w0BAQsFADB/MQswCQYDVQQGEwJVUzETMBEGA1UE -CBMKQ2FsaWZvcm5pYTEUMBIGA1UEBxMLU2FudGEgQ2xhcmExFjAUBgNVBAoTDURhdGFTdGF4IElu -Yy4xGTAXBgNVBAsTEFBIUCBEcml2ZXIgVGVzdHMxEjAQBgNVBAMTCTEyNy4wLjAuMTAgFw0xNjA0 -MTkxNTIzNDVaGA8yMTE2MDMyNjE1MjM0NVowfzELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm -b3JuaWExFDASBgNVBAcTC1NhbnRhIENsYXJhMRYwFAYDVQQKEw1EYXRhU3RheCBJbmMuMRkwFwYD -VQQLExBQSFAgRHJpdmVyIFRlc3RzMRIwEAYDVQQDEwkxMjcuMC4wLjEwggEiMA0GCSqGSIb3DQEB -AQUAA4IBDwAwggEKAoIBAQCiEp9dQczkBC/CBM8it62p6dhUnOjvzPkgT/GNR7T97biUkJ6oJhok -8cHeSODTllCEFZkWw+HEzCHvO9NouIh+Mr0S4O8E/52HEttZKFdwvw027jl18zgF5t7BHzYqTisA -weABpYM+OfKjhcn4NQyVczNsysJPfmTRHZ13C4QX84jp0icBf9VUO05pZqII/oELiu32vbLVSMnn -EcAr5s+N6/tjVOtJ3yZpNBMXM5GVVFqbClC0xcMl9bJqfn6tsB+LvhCTqQG5zbllYqlgomUoaAfL -1v3zvkbvorrduhd7yENwMTWYkc4RF5v+fnfc5aa1NQfbP1bP9kt8TsTvzgO1AgMBAAGjITAfMB0G -A1UdDgQWBBR8aJLDSgkUMcrs08BbxhRA1wJIIzANBgkqhkiG9w0BAQsFAAOCAQEAoHRggyaMKbeB -633sZgzH8DzvngzA/vID+XWAv+lCGdIYNkbu9VJ8IaYsa9JsMvvhp7UFL1mYm32QacjorxqfNTNS -To8z4VOXrGLYkuJL1M2qJjkl3ehkX8tzKXyDIgq4pVCvKkFZR0It+QU87MnHUL1/HIOy+zdNW6ZU -Q7sRCUMtqstiQ4scbispsVevfEBkGNjHIp6M/5Qe6skntRvdNMWZILz82GLym+NppTgcNcwDf7lq -g/syNznM0KAE1VUAJ2y8tArvAZ/XugC2RmZGwY3q/qw1B7kaoTqu7KSdLuWzol5gR0NNVADU5x7U -BrwBmgliT/bGpRz+PAGVYj5OXw== ------END CERTIFICATE----- diff --git a/tests/integration/long/ssl/driver_encrypted.key b/tests/integration/long/ssl/driver_encrypted.key deleted file mode 100644 index 91ae5efecb..0000000000 --- a/tests/integration/long/ssl/driver_encrypted.key +++ /dev/null @@ -1,30 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -Proc-Type: 4,ENCRYPTED -DEK-Info: AES-256-CBC,4B3084A6FB74C60EF6AD51C4677FE509 - -PRmOOSwQNcBYCBbGuv/YgKgSep00GGqCeyhej2soLaGKgSjPhe/cE7sFvm6jk72+ -wDJNpuY6gLoKmizvxrgi5iHnoVdaPkRXKkcQHgaLt4TjB2EUfPp/+V5N4FA4DxN2 -/jqfWZ5Fw8ltHbW1hEkxQ58cYWd5OQB8EesQ865Pv364px5GIcDyr9FEgTiE/Ulc -wS5V1DSgytL/iwBP7FE/zejvZLiS0OoqNGq/G0nBSsv68CH7/lCs3i0Gn2E8YNW8 -CYdMnRzsxwPgbvWYYFhNeK1mrXC+RZa7FgLeeXWR6K28aSB8wc35PePBbZT6TViz -jZkmBFvbpLrLP7KNogLdzhFLqBzkP0NgkYtseYFFcuUAmIrWPVrsoA4xHWMkdcB6 -OLMz+zpb5u8rjpa3gWh5xQJbdVDxXGJNxCYzT7sc28wzr8vHFJFg6U2DJL9x+ufW -BXwBafb8biS1TBWGSDsc36NUwkyvv/2zjSytEIkdJ4lp4S69TGcU0Qvr1gA+8zLm -o6/1G5wUTt461mAAlvcy/9pHZlm8K1VShBvBCYouoVfUyIYAKhq0bBm64E/jdE3v -/Rxd/hom0d+m595wbH7f2AZCBVmII+hEyaagtKEO87qztq/WVMjN6h6BtXR5Xjsv -fXg71mMlQvqYuTB7hOhpyPdux0vvGzCdlGmvUsAC+urqCfkUseLAFfTnD7IrfYK8 -lzRyFJPABtrDiVqQpCBsOq/ACERjVpn/XcJgEBui2jhaTodwWIVD9DPlf4o93Fhf -oKE2o2W0TkaU/uaTLNt4TLJ0EXJ8S0XIqXnEvt3eRu0jOcC3ZWlFINdJISjm53LW -o1q/6mlvj2COLxCipUfGLVmIRuuMDybndSiXH+vddLQDZSw9mynYzJUvMcurBYyL -Lb9t+8hSRftRNqnM9ojDB+gmCJ0uQUlSAt47SZHGIi91S5pM7IELgP55SUG02mGn -Pr+BBtQCVNbd1szPXH7dlUfD9eUHZ34uL/2wOMtNT7da4ajviR6HIyLiz3lrI+e+ -2X62fMFD5tnxffMIZLVPhAHHlT8RPzvspA7QHdJzWCsdxZ3VVmWCFL0WHKuP3Hhh -0jIBOh0Y9eFCMcL7yP0rbjv/3MyBqqM4T5RfVkoRE3M/F8+kHObQzdUqy4JxApyb -cb0ipAU3JxgEy2rOkXDoeYeRoCKVNU3MZ3x3/+oTZXBhlP+oIbqMo/bTr8JJFC6y -y38YcWcVdUzYBI3KbH2aRCtUXNK404zm8GNs6hTuB95IrpI6APspNkQS9yhcxBdr -zOjZh8snZ52gVDtgrNCSfSgflPEs1CNeg33PpVfZWCai/zwSVFwJuL4iyQqkTVlL -wmbLUFdZN1AmYC2dJpHm30+cykXWJr/xFpAlZXmtgHlttD02pnTVl98G1rP3Oq8u -NplSV/TOXVc3keBnQ7N5BohnQ1NSLq4wusVd7n9UEoCE6HxCUbWWkAKQIHom+5HI -7PgRw0RJRUFbydBz+bDXz3KEFcTOT5ihtI/qq6tJpXMKz7uKGLAnzTIk2DecQYm4 -c40zTUKGVISky357ZIYB6uG9NRVffqsm9M1oIeprwYNpVzyqxz6Yyen+VCxdPq9w ------END RSA PRIVATE KEY----- diff --git a/tests/integration/long/ssl/rootCa.crt b/tests/integration/long/ssl/rootCa.crt new file mode 100644 index 0000000000..3fd24df8ac --- /dev/null +++ b/tests/integration/long/ssl/rootCa.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDCzCCAfMCFEmF5bV72tooSzGiFznZmtdMNDpLMA0GCSqGSIb3DQEBCwUAMEIx +CzAJBgNVBAYTAlVTMREwDwYDVQQKDAhkYXRhc3RheDEPMA0GA1UECwwGZmllbGRz +MQ8wDQYDVQQDDAZyb290Q2EwHhcNMTkxMTI3MTk1ODA0WhcNMjkxMTI0MTk1ODA0 +WjBCMQswCQYDVQQGEwJVUzERMA8GA1UECgwIZGF0YXN0YXgxDzANBgNVBAsMBmZp +ZWxkczEPMA0GA1UEAwwGcm9vdENhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEApn1jNTaOMRiOEC25Tsb4ZdqoXHWxaeewItBh/fLiVKXpuHnZbK1h8183 +Cqt5sKMaYfrGt9UVRf6ACZbqK5/bJkKKo8Ts5WsTqZ9MDA0F77XQ0phQoJ5NlFkf +okLvZ/+kUc7PDsi4Hvp3IbgEsAlTBhRPUvztaGZfZNQTR38XTGGVrXRKaUeA7O3j +jM+WKAYY5AFNoz3TUczdHnkR6srbkiC/t4NwOJBlM6zNPl1zank1LGz4EEdxasfj +JOK8VqoX7CRUsVl/Km1pFPqKq4Za79VEIAZwn8ZtGUkrXdYaFTOy1bzMAQHLy4cn +lrikWegc5Ctqcie39rhrd1Cokuj85wIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBu +NFqRArFrtK0arHBDDfRPour4W+3ncvuMRrR7noVUa4ymN1ExtCj+VarhIzccouOt +0ZGyPBma6g7wEy2fwW+L4ojg9WixolI3gibfX1ol9eKPfSQpjp/rBFwzR/myPVfj +AJDknN4x3HUMvVnz1lOKEaKSNOst9tu1WGqxn2HI+rwLqUfiP3ucV6828fSdvOMu +SbZCbsNIqJgTIN8tJZgL7iMi4u26q4FZZyrRElq2iQzkBPRycxwYsnMDt0MpQshN +Weyy0IhrVQFgDGr7YMt18nuEMLy1AgSPklkAtqhuYEZPe3RHFoqmObY5b9YeGdeh +LhV8l6WhvgYkpiQXmL0u +-----END CERTIFICATE----- diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index d7788f0f8a..f0e3d8bf52 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -29,18 +29,18 @@ log = logging.getLogger(__name__) -DEFAULT_PASSWORD = "pythondriver" +DEFAULT_PASSWORD = "cassandra" # Server keystore trust store locations -SERVER_KEYSTORE_PATH = os.path.abspath("tests/integration/long/ssl/.keystore") -SERVER_TRUSTSTORE_PATH = os.path.abspath("tests/integration/long/ssl/.truststore") +SERVER_KEYSTORE_PATH = os.path.abspath("tests/integration/long/ssl/127.0.0.1.keystore") +SERVER_TRUSTSTORE_PATH = os.path.abspath("tests/integration/long/ssl/cassandra.truststore") # Client specific keys/certs -CLIENT_CA_CERTS = os.path.abspath("tests/integration/long/ssl/cassandra.pem") -DRIVER_KEYFILE = os.path.abspath("tests/integration/long/ssl/driver.key") -DRIVER_KEYFILE_ENCRYPTED = os.path.abspath("tests/integration/long/ssl/driver_encrypted.key") -DRIVER_CERTFILE = os.path.abspath("tests/integration/long/ssl/driver.pem") -DRIVER_CERTFILE_BAD = os.path.abspath("tests/integration/long/ssl/python_driver_bad.pem") +CLIENT_CA_CERTS = os.path.abspath("tests/integration/long/ssl/rootCa.crt") +DRIVER_KEYFILE = os.path.abspath("tests/integration/long/ssl/client.key") +DRIVER_KEYFILE_ENCRYPTED = os.path.abspath("tests/integration/long/ssl/client_encrypted.key") +DRIVER_CERTFILE = os.path.abspath("tests/integration/long/ssl/client.crt_signed") +DRIVER_CERTFILE_BAD = os.path.abspath("tests/integration/long/ssl/client_bad.key") USES_PYOPENSSL = "twisted" in EVENT_LOOP_MANAGER or "eventlet" in EVENT_LOOP_MANAGER if "twisted" in EVENT_LOOP_MANAGER: @@ -284,19 +284,14 @@ def test_cannot_connect_without_client_auth(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options={'ca_certs': CLIENT_CA_CERTS, 'ssl_version': ssl_version}) - # attempt to connect and expect an exception - if "twisted" in EVENT_LOOP_MANAGER: - exc = SSL.Error - else: - exc = NoHostAvailable - with self.assertRaises(NoHostAvailable) as context: + with self.assertRaises(NoHostAvailable) as _: cluster.connect() cluster.shutdown() def test_cannot_connect_with_bad_client_auth(self): """ - Test to validate that we cannot connect with invalid client auth. + Test to validate that we cannot connect with invalid client auth. This test will use bad keys/certs to preform client authentication. It will then attempt to connect to a server that has client authentication enabled. @@ -308,11 +303,20 @@ def test_cannot_connect_with_bad_client_auth(self): @test_category connection:ssl """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options={'ca_certs': CLIENT_CA_CERTS, - 'ssl_version': ssl_version, - 'keyfile': DRIVER_KEYFILE, - 'certfile': DRIVER_CERTFILE_BAD}) - with self.assertRaises(NoHostAvailable) as context: + ssl_options = {'ca_certs': CLIENT_CA_CERTS, + 'ssl_version': ssl_version, + 'keyfile': DRIVER_KEYFILE} + + if not USES_PYOPENSSL: + # I don't set the bad certfile for pyopenssl because it hangs + ssl_options['certfile'] = DRIVER_CERTFILE_BAD + + cluster = Cluster(protocol_version=PROTOCOL_VERSION, + ssl_options={'ca_certs': CLIENT_CA_CERTS, + 'ssl_version': ssl_version, + 'keyfile': DRIVER_KEYFILE}) + + with self.assertRaises(NoHostAvailable) as _: cluster.connect() cluster.shutdown() @@ -429,7 +433,7 @@ def test_can_connect_with_ssl_client_auth_password_private_key(self): ssl_context.verify_mode = ssl.CERT_NONE validate_ssl_options(ssl_context=ssl_context, ssl_options=ssl_options) - def test_can_connect_with_ssl_conext_ca_host_match(self): + def test_can_connect_with_ssl_context_ca_host_match(self): """ Test to validate that we are able to connect to a cluster using a SSLContext using client auth, an encrypted keyfile, and host matching @@ -479,3 +483,13 @@ def test_cannot_connect_ssl_context_with_invalid_hostname(self): ssl_options["check_hostname"] = True with self.assertRaises(Exception): validate_ssl_options(ssl_context=ssl_context, ssl_options=ssl_options, hostname="localhost") + + @unittest.skipIf(USES_PYOPENSSL, "This test is for the built-in ssl.Context") + def test_can_connect_with_sslcontext_default_context(self): + """ + Test to validate that we are able to connect to a cluster using a SSLContext created from create_default_context(). + @expected_result The client can connect via SSL and preform some basic operations + @test_category connection:ssl + """ + ssl_context = ssl.create_default_context(cafile=CLIENT_CA_CERTS) + validate_ssl_options(ssl_context=ssl_context) From bc7ee7e5c40861d28d68d39f9cb0f194d56360aa Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 2 Dec 2019 14:15:03 -0500 Subject: [PATCH 1715/2005] Fix ResponseFuture._set_result crashes on connection error when used with PrepareMessage --- CHANGELOG.rst | 3 ++- cassandra/cluster.py | 7 ++++--- tests/unit/test_response_future.py | 22 ++++++++++++++++++++++ 3 files changed, 28 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index c23cdea47e..4b2257d368 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -11,7 +11,8 @@ Features Bug Fixes --------- * re-raising the CQLEngineException will fail on Python 3 (PYTHON-1166) -* Connection fails to validate ssl certificate hostname when SSLContext.check_hostname is set +* Connection fails to validate ssl certificate hostname when SSLContext.check_hostname is set (PYTHON-1186) +* ResponseFuture._set_result crashes on connection error when used with PrepareMessage (PYTHON-1187) 3.20.2 ====== diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 56abaed8e2..c1d1c51c36 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -4059,8 +4059,9 @@ def _set_result(self, host, connection, pool, response): log.warning("Host %s error: %s.", host, response.summary) if self._metrics is not None: self._metrics.on_other_error() + cl = getattr(self.message, 'consistency_level', None) retry = retry_policy.on_request_error( - self.query, self.message.consistency_level, error=response, + self.query, cl, error=response, retry_num=self._query_retries) elif isinstance(response, PreparedQueryNotFound): if self.prepared_statement: @@ -4117,9 +4118,9 @@ def _set_result(self, host, connection, pool, response): self._metrics.on_connection_error() if not isinstance(response, ConnectionShutdown): self._connection.defunct(response) + cl = getattr(self.message, 'consistency_level', None) retry = self._retry_policy.on_request_error( - self.query, self.message.consistency_level, error=response, - retry_num=self._query_retries) + self.query, cl, error=response, retry_num=self._query_retries) self._handle_retry_decision(retry, response, host) elif isinstance(response, Exception): if hasattr(response, 'to_exception'): diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index 87f1bd683e..785f821bbd 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -200,6 +200,28 @@ def test_unavailable_error_message(self): rf._set_result(None, None, None, result) self.assertRaises(Exception, rf.result) + def test_request_error_with_prepare_message(self): + session = self.make_session() + query = SimpleStatement("SELECT * FROM foobar") + retry_policy = Mock() + retry_policy.on_request_error.return_value = (RetryPolicy.RETHROW, None) + message = PrepareMessage(query=query) + + rf = ResponseFuture(session, message, query, 1, retry_policy=retry_policy) + rf._query_retries = 1 + rf.send_request() + result = Mock(spec=OverloadedErrorMessage) + result.to_exception.return_value = result + rf._set_result(None, None, None, result) + self.assertIsInstance(rf._final_exception, OverloadedErrorMessage) + + rf = ResponseFuture(session, message, query, 1, retry_policy=retry_policy) + rf._query_retries = 1 + rf.send_request() + result = Mock(spec=ConnectionException) + rf._set_result(None, None, None, result) + self.assertIsInstance(rf._final_exception, ConnectionException) + def test_retry_policy_says_ignore(self): session = self.make_session() query = SimpleStatement("INSERT INFO foo (a, b) VALUES (1, 2)") From ad517f74db018235442410f0357bc7a48c760a5a Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 3 Dec 2019 15:05:04 -0500 Subject: [PATCH 1716/2005] Add some details and an example to enable speculative execution --- docs/getting_started.rst | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/docs/getting_started.rst b/docs/getting_started.rst index e27f72baf5..0d6b4ec055 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -434,3 +434,43 @@ level on that: user3_lookup = user_lookup_stmt.bind([user_id3]) user3_lookup.consistency_level = ConsistencyLevel.ALL user3 = session.execute(user3_lookup) + +Speculative Execution +^^^^^^^^^^^^^^^^^^^^^ + +Speculative execution is a way to minimize latency by preemptively executing several +instances of the same query against different nodes. For more details about this +technique, see `Speculative Execution with DataStax Drivers `_. + +To enable speculative execution: + +* Configure a :class:`~.policies.SpeculativeExecutionPolicy` with the ExecutionProfile +* Mark your query as idempotent, which mean it can be applied multiple + times without changing the result of the initial application. + See `Query Idempotence `_ for more details. + + +Example: + +.. code-block:: python + + from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT + from cassandra.policies import ConstantSpeculativeExecutionPolicy + from cassandra.query import SimpleStatement + + # Configure the speculative execution policy + ep = ExecutionProfile( + speculative_execution_policy=ConstantSpeculativeExecutionPolicy(delay=.5, max_attempts=10) + ) + cluster = Cluster(..., execution_profiles={EXEC_PROFILE_DEFAULT: ep}) + session = cluster.connect() + + # Mark the query idempotent + query = SimpleStatement( + "UPDATE my_table SET list_col = [1] WHERE pk = 1", + is_idempotent=True + ) + + # Execute. A new query will be sent to the server every 0.5 second + # until we receive a response, for a max number attempts of 10. + session.execute(query) From ef0831bb06dcf6056e47654f9684b3237f7784b2 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 4 Dec 2019 08:48:14 -0500 Subject: [PATCH 1717/2005] Emphasize prepared statements in getting started --- docs/getting_started.rst | 69 +++++++++++++++++++++------------------- 1 file changed, 36 insertions(+), 33 deletions(-) diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 0d6b4ec055..f7370c4918 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -110,7 +110,6 @@ by executing a ``USE `` query: # or you can do this instead session.execute('USE users') - Executing Queries ----------------- Now that we have a :class:`.Session` we can begin to execute queries. The simplest @@ -153,13 +152,45 @@ examples are equivalent: If you prefer another result format, such as a ``dict`` per row, you can change the :attr:`~.Session.row_factory` attribute. -For queries that will be run repeatedly, you should use -`Prepared statements <#prepared-statements>`_. +As mentioned in our `Drivers Best Practices Guide `_, +it is highly recommended to use `Prepared statements <#prepared-statement>`_ for your +frequently run queries. + +.. _prepared-statement: + +Prepared Statements +------------------- +Prepared statements are queries that are parsed by Cassandra and then saved +for later use. When the driver uses a prepared statement, it only needs to +send the values of parameters to bind. This lowers network traffic +and CPU utilization within Cassandra because Cassandra does not have to +re-parse the query each time. + +To prepare a query, use :meth:`.Session.prepare()`: + +.. code-block:: python + + user_lookup_stmt = session.prepare("SELECT * FROM users WHERE user_id=?") + + users = [] + for user_id in user_ids_to_query: + user = session.execute(user_lookup_stmt, [user_id]) + users.append(user) + +:meth:`~.Session.prepare()` returns a :class:`~.PreparedStatement` instance +which can be used in place of :class:`~.SimpleStatement` instances or literal +string queries. It is automatically prepared against all nodes, and the driver +handles re-preparing against new nodes and restarted nodes when necessary. + +Note that the placeholders for prepared statements are ``?`` characters. This +is different than for simple, non-prepared statements (although future versions +of the driver may use the same placeholders for both). Passing Parameters to CQL Queries ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -When executing non-prepared statements, the driver supports two forms of -parameter place-holders: positional and named. +Althought it is not recommended, you can also pass parameters to non-prepared +statements. The driver supports two forms of parameter place-holders: positional +and named. Positional parameters are used with a ``%s`` placeholder. For example, when you execute: @@ -376,34 +407,6 @@ in a :class:`~.SimpleStatement`: consistency_level=ConsistencyLevel.QUORUM) session.execute(query, ('John', 42)) -Prepared Statements -------------------- -Prepared statements are queries that are parsed by Cassandra and then saved -for later use. When the driver uses a prepared statement, it only needs to -send the values of parameters to bind. This lowers network traffic -and CPU utilization within Cassandra because Cassandra does not have to -re-parse the query each time. - -To prepare a query, use :meth:`.Session.prepare()`: - -.. code-block:: python - - user_lookup_stmt = session.prepare("SELECT * FROM users WHERE user_id=?") - - users = [] - for user_id in user_ids_to_query: - user = session.execute(user_lookup_stmt, [user_id]) - users.append(user) - -:meth:`~.Session.prepare()` returns a :class:`~.PreparedStatement` instance -which can be used in place of :class:`~.SimpleStatement` instances or literal -string queries. It is automatically prepared against all nodes, and the driver -handles re-preparing against new nodes and restarted nodes when necessary. - -Note that the placeholders for prepared statements are ``?`` characters. This -is different than for simple, non-prepared statements (although future versions -of the driver may use the same placeholders for both). - Setting a Consistency Level with Prepared Statements ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ To specify a consistency level for prepared statements, you have two options. From 4929dee21f9040054bc8d700484e282592d4250f Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 4 Dec 2019 14:03:55 -0500 Subject: [PATCH 1718/2005] docs: Slack Academy --> DataStax Community --- README.rst | 2 +- docs/index.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index 7d44102bfd..76cc5383ba 100644 --- a/README.rst +++ b/README.rst @@ -72,7 +72,7 @@ Getting Help ------------ Your best options for getting help with the driver are the `mailing list `_ -and the ``#datastax-drivers`` channel in the `DataStax Academy Slack `_. +and the `DataStax Community `_. License ------- diff --git a/docs/index.rst b/docs/index.rst index 8ea11c6088..0187b05806 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -96,7 +96,7 @@ Visit the :doc:`FAQ section ` in this documentation. Please send questions to the `mailing list `_. -Alternatively, you can use the `#datastax-drivers` channel in the DataStax Acadamy Slack to ask questions in real time. +Alternatively, you can use the `DataStax Community `_. Reporting Issues ---------------- From d24652c75faa41ecae88e13eb343ec68e158f816 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 4 Dec 2019 10:44:35 -0500 Subject: [PATCH 1719/2005] Add a UDT dict registration example --- docs/user_defined_types.rst | 36 +++++++++++++++++++++++++++++++----- 1 file changed, 31 insertions(+), 5 deletions(-) diff --git a/docs/user_defined_types.rst b/docs/user_defined_types.rst index fd95b09fc4..32c03e37e8 100644 --- a/docs/user_defined_types.rst +++ b/docs/user_defined_types.rst @@ -9,12 +9,16 @@ new type through ``CREATE TYPE`` statements in CQL:: Version 2.1 of the Python driver adds support for user-defined types. -Registering a Class to Map to a UDT ------------------------------------ +Registering a UDT +----------------- You can tell the Python driver to return columns of a specific UDT as -instances of a class by registering them with your :class:`~.Cluster` +instances of a class or a dict by registering them with your :class:`~.Cluster` instance through :meth:`.Cluster.register_user_type`: + +Map a Class to a UDT +++++++++++++++++++++ + .. code-block:: python cluster = Cluster(protocol_version=3) @@ -39,7 +43,29 @@ instance through :meth:`.Cluster.register_user_type`: # results will include Address instances results = session.execute("SELECT * FROM users") row = results[0] - print row.id, row.location.street, row.location.zipcode + print(row.id, row.location.street, row.location.zipcode) + +Map a dict to a UDT ++++++++++++++++++++ + +.. code-block:: python + + cluster = Cluster(protocol_version=3) + session = cluster.connect() + session.set_keyspace('mykeyspace') + session.execute("CREATE TYPE address (street text, zipcode int)") + session.execute("CREATE TABLE users (id int PRIMARY KEY, location frozen
)") + + cluster.register_user_type('mykeyspace', 'address', dict) + + # insert a row using a prepared statement and a tuple + insert_statement = session.prepare("INSERT INTO mykeyspace.users (id, location) VALUES (?, ?)") + session.execute(insert_statement, [0, ("123 Main St.", 78723)]) + + # results will include dict instances + results = session.execute("SELECT * FROM users") + row = results[0] + print(row.id, row.location['street'], row.location['zipcode']) Using UDTs Without Registering Them ----------------------------------- @@ -79,7 +105,7 @@ for the UDT: results = session.execute("SELECT * FROM users") first_row = results[0] address = first_row.location - print address # prints "Address(street='123 Main St.', zipcode=78723)" + print(address) # prints "Address(street='123 Main St.', zipcode=78723)" street = address.street zipcode = address.street From 6826728f2826018cf3ce2259c206e13e17bcbee8 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 6 Dec 2019 09:49:49 -0500 Subject: [PATCH 1720/2005] PYTHON-1189: Add Python 3.8 support (#37) Add Python 3.8 support --- CHANGELOG.rst | 1 + README-dev.rst | 15 +++++++++++++++ README.rst | 2 +- build.yaml | 13 +++++++------ cassandra/cluster.py | 4 ++-- docs/index.rst | 2 +- docs/installation.rst | 2 +- setup.py | 1 + tests/unit/advanced/test_insights.py | 4 ++-- 9 files changed, 31 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 1d92640625..034407d982 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -13,6 +13,7 @@ Features * Flexible version parsing (PYTHON-1174) * Support NULL in collection deserializer (PYTHON-1123) * Avoid warnings about unspecified load balancing policy when connecting to a cloud cluster (PYTHON-1177) +* Add Python 3.8 support (PYTHON-1189) * [GRAPH] Ability to execute Fluent Graph queries asynchronously (PYTHON-1129) Bug Fixes diff --git a/README-dev.rst b/README-dev.rst index 2296dcd05d..8294d4efb8 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -234,3 +234,18 @@ An EAP release is only uploaded on a private server and it is not published on p python setup.py doc * Upload the docs on the EAP download server. + +Adding a New Python Runtime Support +=================================== + +* Add the new python version to our jenkins image: + https://github.com/riptano/openstack-jenkins-drivers/ + +* Add the new python version in job-creator: + https://github.com/riptano/job-creator/ + +* Run the tests and ensure they all pass + * also test all event loops + +* Update the wheels building repo to support that version: + https://github.com/riptano/python-dse-driver-wheels diff --git a/README.rst b/README.rst index 76cc5383ba..693f8d0b71 100644 --- a/README.rst +++ b/README.rst @@ -7,7 +7,7 @@ DataStax Python Driver for Apache Cassandra and DataStax products A modern, `feature-rich `_ and highly-tunable Python client library for Apache Cassandra (2.1+) and DataStax Enterprise (4.7+) using exclusively Cassandra's binary protocol and Cassandra Query Language v3. -The driver supports Python 2.7, 3.4, 3.5, 3.6 and 3.7. +The driver supports Python 2.7, 3.4, 3.5, 3.6, 3.7 and 3.8. **Note:** DataStax products do not support big-endian systems. diff --git a/build.yaml b/build.yaml index e498b511c2..6bcd4d7f64 100644 --- a/build.yaml +++ b/build.yaml @@ -8,7 +8,7 @@ schedules: EVENT_LOOP_MANAGER='libev' matrix: exclude: - - python: [3.4, 3.6, 3.7] + - python: [3.4, 3.6, 3.7, 3.8] - cassandra: ['2.1', '3.0', 'test-dse'] commit_long_test: @@ -20,7 +20,7 @@ schedules: EVENT_LOOP_MANAGER='libev' matrix: exclude: - - python: [3.4, 3.6, 3.7] + - python: [3.4, 3.6, 3.7, 3.8] - cassandra: ['2.1', '3.0', 'test-dse'] commit_branches: @@ -33,7 +33,7 @@ schedules: EXCLUDE_LONG=1 matrix: exclude: - - python: [3.4, 3.6, 3.7] + - python: [3.4, 3.6, 3.7, 3.8] - cassandra: ['2.1', '3.0', 'test-dse'] commit_branches_dev: @@ -46,7 +46,7 @@ schedules: EXCLUDE_LONG=1 matrix: exclude: - - python: [2.7, 3.4, 3.7] + - python: [2.7, 3.4, 3.7, 3.8] - cassandra: ['2.0', '2.1', '2.2', '3.0', 'test-dse', dse-4.8', 'dse-5.0'] release_test: @@ -138,7 +138,7 @@ schedules: JUST_UPGRADE=True matrix: exclude: - - python: [3.4, 3.6, 3.7] + - python: [3.4, 3.6, 3.7, 3.8] - cassandra: ['2.0', '2.1', '2.2', '3.0', 'test-dse'] # TODO: Temporary, don't merge me @@ -152,7 +152,7 @@ schedules: EXCLUDE_LONG=1 matrix: exclude: - - python: [3.4, 3.6, 3.7] + - python: [3.4, 3.6, 3.7, 3.8] python: - 2.7 @@ -160,6 +160,7 @@ python: - 3.5 - 3.6 - 3.7 + - 3.8 os: - ubuntu/bionic64/python-driver diff --git a/cassandra/cluster.py b/cassandra/cluster.py index afac4c3b35..c453164361 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1352,7 +1352,7 @@ def _create_thread_pool_executor(self, **kwargs): Create a ThreadPoolExecutor for the cluster. In most cases, the built-in `concurrent.futures.ThreadPoolExecutor` is used. - Python 3.7 and Eventlet cause the `concurrent.futures.ThreadPoolExecutor` + Python 3.7+ and Eventlet cause the `concurrent.futures.ThreadPoolExecutor` to hang indefinitely. In that case, the user needs to have the `futurist` package so we can use the `futurist.GreenThreadPoolExecutor` class instead. @@ -1375,7 +1375,7 @@ def _create_thread_pool_executor(self, **kwargs): except ImportError: # futurist is not available raise ImportError( - ("Python 3.7 and Eventlet cause the `concurrent.futures.ThreadPoolExecutor` " + ("Python 3.7+ and Eventlet cause the `concurrent.futures.ThreadPoolExecutor` " "to hang indefinitely. If you want to use the Eventlet reactor, you " "need to install the `futurist` package to allow the driver to use " "the GreenThreadPoolExecutor. See https://github.com/eventlet/eventlet/issues/508 " diff --git a/docs/index.rst b/docs/index.rst index 0187b05806..ccc12e7bc8 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -4,7 +4,7 @@ A Python client driver for `Apache Cassandra `_ and DataStax products. This driver works exclusively with the Cassandra Query Language v3 (CQL3) and Cassandra's native protocol. Cassandra 2.1+ is supported, including DSE 4.7+. -The driver supports Python 2.7, 3.4, 3.5, 3.6 and 3.7. +The driver supports Python 2.7, 3.4, 3.5, 3.6, 3.7 and 3.8. This driver is open source under the `Apache v2 License `_. diff --git a/docs/installation.rst b/docs/installation.rst index 007f6e1743..2905611ad7 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -3,7 +3,7 @@ Installation Supported Platforms ------------------- -Python 2.7, 3.4, 3.5, 3.6 and 3.7 are supported. Both CPython (the standard Python +Python 2.7, 3.4, 3.5, 3.6, 3.7 and 3.8 are supported. Both CPython (the standard Python implementation) and `PyPy `_ are supported and tested. Linux, OSX, and Windows are supported. diff --git a/setup.py b/setup.py index c310f1fa65..dc3e4490c6 100644 --- a/setup.py +++ b/setup.py @@ -447,6 +447,7 @@ def run_setup(extensions): 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Software Development :: Libraries :: Python Modules' diff --git a/tests/unit/advanced/test_insights.py b/tests/unit/advanced/test_insights.py index b63e48ba6a..7d11a00e68 100644 --- a/tests/unit/advanced/test_insights.py +++ b/tests/unit/advanced/test_insights.py @@ -230,9 +230,9 @@ def test_token_aware_policy(self): def test_whitelist_round_robin_policy(self): self.assertEqual( - insights_registry.serialize(WhiteListRoundRobinPolicy(['123.123'])), + insights_registry.serialize(WhiteListRoundRobinPolicy(['127.0.0.3'])), {'namespace': 'cassandra.policies', - 'options': {'allowed_hosts': ('123.123',)}, + 'options': {'allowed_hosts': ('127.0.0.3',)}, 'type': 'WhiteListRoundRobinPolicy'} ) From 6c8a7703ef2961cd6d275673f2b602c7800dc16b Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 6 Dec 2019 09:46:57 -0500 Subject: [PATCH 1721/2005] Fix cloud guardrails test to use user1 instead of cassandra --- tests/integration/cloud/test_cloud.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/cloud/test_cloud.py b/tests/integration/cloud/test_cloud.py index ba6af7ae28..952a92835b 100644 --- a/tests/integration/cloud/test_cloud.py +++ b/tests/integration/cloud/test_cloud.py @@ -69,8 +69,8 @@ def test_match_system_local(self): def test_set_auth_provider(self): self.connect(self.creds) self.assertIsInstance(self.cluster.auth_provider, PlainTextAuthProvider) - self.assertEqual(self.cluster.auth_provider.username, 'cassandra') - self.assertEqual(self.cluster.auth_provider.password, 'cassandra') + self.assertEqual(self.cluster.auth_provider.username, 'user1') + self.assertEqual(self.cluster.auth_provider.password, 'user1') def test_support_leaving_the_auth_unset(self): with self.assertRaises(NoHostAvailable): From 5b15af91d6cb8c6cf52b2afbc470c1b67630634a Mon Sep 17 00:00:00 2001 From: Tim Gates Date: Sat, 7 Dec 2019 22:22:59 +1100 Subject: [PATCH 1722/2005] Fix simple typo: versioni -> version There is a small typo in cassandra/__init__.py. Should read version rather than versioni. --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 38aef2f5d8..1cdae142c3 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -181,7 +181,7 @@ class ProtocolVersion(object): MAX_SUPPORTED = max(SUPPORTED_VERSIONS) """ - Maximum protocol versioni supported by this driver. + Maximum protocol version supported by this driver. """ @classmethod From d73a61c43b74c74b3a50af8db58c235ecda5b532 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 10 Dec 2019 14:27:58 -0500 Subject: [PATCH 1723/2005] version 3.21a labs --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index c0b1b2d5a6..4fc2fb7303 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 20, 2, '20191104+labs') +__version_info__ = (3, '21a1', '20191210+labs') __version__ = '.'.join(map(str, __version_info__)) From c29ffbe262eda0fa50ad19b497e1a4a093dedb03 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 10 Dec 2019 14:37:55 -0500 Subject: [PATCH 1724/2005] version 3.21a labs --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 4fc2fb7303..be08299b62 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, '21a1', '20191210+labs') +__version_info__ = (3, '21a1+20191210') __version__ = '.'.join(map(str, __version_info__)) From 727a0519da710c03df0c6b6847a4caa2ad6c7fdb Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 12 Dec 2019 12:01:58 -0500 Subject: [PATCH 1725/2005] Fix Insights fail to serialize the startup message when the SSL Context is from PyOpenSSL --- CHANGELOG.rst | 1 + cassandra/datastax/insights/reporter.py | 25 +++++++++++++++++-------- 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 034407d982..73f1c8967a 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -23,6 +23,7 @@ Bug Fixes * Reconnect attempts persist after downed node removed from peers (PYTHON-1181) * Connection fails to validate ssl certificate hostname when SSLContext.check_hostname is set (PYTHON-1186) * ResponseFuture._set_result crashes on connection error when used with PrepareMessage (PYTHON-1187) +* Insights fail to serialize the startup message when the SSL Context is from PyOpenSSL (PYTHON-1192) Others ------ diff --git a/cassandra/datastax/insights/reporter.py b/cassandra/datastax/insights/reporter.py index 5402c6a7f6..b05a88deb0 100644 --- a/cassandra/datastax/insights/reporter.py +++ b/cassandra/datastax/insights/reporter.py @@ -135,14 +135,23 @@ def _get_startup_data(self): 'ignored': host_distances_counter[HostDistance.IGNORED] } - compression_type = cc._connection._compression_type if cc._connection else 'NONE' + try: + compression_type = cc._connection._compression_type + except AttributeError: + compression_type = 'NONE' - if self._session.cluster.ssl_context: - cert_validation = self._session.cluster.ssl_context.verify_mode == ssl.CERT_REQUIRED - elif self._session.cluster.ssl_options: - cert_validation = self._session.cluster.ssl_options.get('cert_reqs') == ssl.CERT_REQUIRED - else: - cert_validation = None + cert_validation = None + try: + if self._session.cluster.ssl_context: + if isinstance(self._session.cluster.ssl_context, ssl.SSLContext): + cert_validation = self._session.cluster.ssl_context.verify_mode == ssl.CERT_REQUIRED + else: # pyopenssl + from OpenSSL import SSL + cert_validation = self._session.cluster.ssl_context.get_verify_mode() != SSL.VERIFY_NONE + elif self._session.cluster.ssl_options: + cert_validation = self._session.cluster.ssl_options.get('cert_reqs') == ssl.CERT_REQUIRED + except Exception as e: + log.debug('Unable to get the cert validation: {}'.format(e)) uname_info = platform.uname() @@ -157,7 +166,7 @@ def _get_startup_data(self): }, }, 'data': { - 'driverName': 'DataStax Enterprise Python Driver', + 'driverName': 'DataStax Python Driver', 'driverVersion': sys.modules['cassandra'].__version__, 'clientId': str(self._session.cluster.client_id), 'sessionId': str(self._session.session_id), From ffeb4ac3408db28c16fcff53b4238a56e0f4fc75 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 12 Dec 2019 16:27:54 -0500 Subject: [PATCH 1726/2005] Split test requirements --- build.yaml | 7 ++++++- test-datastax-requirements.txt | 3 +++ test-requirements.txt | 3 --- tests/unit/advanced/test_auth.py | 3 +++ 4 files changed, 12 insertions(+), 4 deletions(-) create mode 100644 test-datastax-requirements.txt diff --git a/build.yaml b/build.yaml index 6bcd4d7f64..ce03cc4978 100644 --- a/build.yaml +++ b/build.yaml @@ -196,7 +196,12 @@ build: pip install git+ssh://git@github.com/riptano/ccm-private.git - pip install -r test-requirements.txt + if [ -n "$CCM_IS_DSE" ]; then + pip install -r test-datastax-requirements.txt + else + pip install -r test-requirements.txt + fi + pip install nose-ignore-docstring pip install nose-exclude pip install service_identity diff --git a/test-datastax-requirements.txt b/test-datastax-requirements.txt new file mode 100644 index 0000000000..69cc3a9484 --- /dev/null +++ b/test-datastax-requirements.txt @@ -0,0 +1,3 @@ +-r test-requirements.txt +kerberos +gremlinpython==3.3.4 diff --git a/test-requirements.txt b/test-requirements.txt index 87e6ec745e..d032180826 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -13,9 +13,6 @@ gevent>=1.0 eventlet cython>=0.20,<0.30 packaging -kerberos -requests backports.ssl_match_hostname; python_version < '2.7.9' futurist; python_version >= '3.7' asynctest; python_version > '3.4' -gremlinpython==3.3.4 diff --git a/tests/unit/advanced/test_auth.py b/tests/unit/advanced/test_auth.py index 0a3d644246..e4f7e4cf5e 100644 --- a/tests/unit/advanced/test_auth.py +++ b/tests/unit/advanced/test_auth.py @@ -21,7 +21,10 @@ from cassandra.auth import DSEGSSAPIAuthProvider +from tests.integration import requiredse +# This auth provider requires kerberos and puresals +@requiredse class TestGSSAPI(unittest.TestCase): def test_host_resolution(self): From 6dc81da5b3f7b9d499c8867025547d011b27caf2 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 16 Dec 2019 11:58:28 -0500 Subject: [PATCH 1727/2005] Fix multiple prime queries in tests --- tests/integration/simulacron/test_policies.py | 28 ++++++++++++------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/tests/integration/simulacron/test_policies.py b/tests/integration/simulacron/test_policies.py index 209929dfa1..855a4de3ca 100644 --- a/tests/integration/simulacron/test_policies.py +++ b/tests/integration/simulacron/test_policies.py @@ -17,9 +17,10 @@ import unittest # noqa from cassandra import OperationTimedOut, WriteTimeout -from cassandra.cluster import Cluster, ExecutionProfile, ResponseFuture, EXEC_PROFILE_DEFAULT +from cassandra.cluster import Cluster, ExecutionProfile, ResponseFuture, EXEC_PROFILE_DEFAULT, NoHostAvailable from cassandra.query import SimpleStatement from cassandra.policies import ConstantSpeculativeExecutionPolicy, RoundRobinPolicy, RetryPolicy, WriteType +from cassandra.protocol import OverloadedErrorMessage, IsBootstrappingErrorMessage, TruncateError, ServerError from tests.integration import greaterthancass21, requiressimulacron, SIMULACRON_JAR, \ CASSANDRA_VERSION @@ -242,7 +243,7 @@ def on_unavailable(self, query, consistency, required_replicas, alive_replicas, def on_request_error(self, query, consistency, error, retry_num): next(self.request_error) - return self.IGNORE, None + return self.RETHROW, None def reset_counters(self): self.write_timeout = count() @@ -429,14 +430,20 @@ def test_retry_policy_on_request_error(self): retry_policy = CounterRetryPolicy() self.set_cluster(retry_policy) - for e in [overloaded_error, bootstrapping_error, truncate_error, server_error]: + for prime_error, exc in [ + (overloaded_error, OverloadedErrorMessage), + (bootstrapping_error, IsBootstrappingErrorMessage), + (truncate_error, TruncateError), + (server_error, ServerError)]: + + clear_queries() query_to_prime = "SELECT * from simulacron_keyspace.simulacron_table;" - prime_query(query_to_prime, then=e, rows=None, column_types=None) + prime_query(query_to_prime, then=prime_error, rows=None, column_types=None) rf = self.session.execute_async(query_to_prime) - try: + + with self.assertRaises(exc): rf.result() - except: - pass + self.assertEqual(len(rf.attempted_hosts), 1) # no retry self.assertEqual(next(retry_policy.request_error), 4) @@ -446,11 +453,12 @@ def test_retry_policy_on_request_error(self): self.set_cluster(retry_policy) for e in [overloaded_error, bootstrapping_error, truncate_error, server_error]: + clear_queries() query_to_prime = "SELECT * from simulacron_keyspace.simulacron_table;" prime_query(query_to_prime, then=e, rows=None, column_types=None) rf = self.session.execute_async(query_to_prime) - try: + + with self.assertRaises(NoHostAvailable): rf.result() - except: - pass + self.assertEqual(len(rf.attempted_hosts), 3) # all 3 nodes failed From ada6e6e9fb7c1abaf9ed500e2ed4db44a8e344fb Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 20 Dec 2019 14:05:59 -0500 Subject: [PATCH 1728/2005] Add new DSE CQL keywords for 6.8 --- CHANGELOG.rst | 1 + cassandra/metadata.py | 9 ++++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 73f1c8967a..3dfd87bf19 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -14,6 +14,7 @@ Features * Support NULL in collection deserializer (PYTHON-1123) * Avoid warnings about unspecified load balancing policy when connecting to a cloud cluster (PYTHON-1177) * Add Python 3.8 support (PYTHON-1189) +* Add new DSE CQL keywords (PYTHON-1122) * [GRAPH] Ability to execute Fluent Graph queries asynchronously (PYTHON-1129) Bug Fixes diff --git a/cassandra/metadata.py b/cassandra/metadata.py index aade3996ba..f7019b7e9f 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -57,7 +57,14 @@ 'permissions', 'primary', 'rename', 'replace', 'returns', 'revoke', 'role', 'roles', 'schema', 'select', 'set', 'sfunc', 'smallint', 'static', 'storage', 'stype', 'superuser', 'table', 'text', 'time', 'timestamp', 'timeuuid', 'tinyint', 'to', 'token', 'trigger', 'truncate', 'ttl', 'tuple', 'type', 'unlogged', 'update', 'use', 'user', - 'users', 'using', 'uuid', 'values', 'varchar', 'varint', 'view', 'where', 'with', 'writetime' + 'users', 'using', 'uuid', 'values', 'varchar', 'varint', 'view', 'where', 'with', 'writetime', + + # DSE specifics + "node", "nodes", "plan", "active", "application", "applications", "java", "executor", "executors", "std_out", "std_err", + "renew", "delegation", "no", "redact", "token", "lowercasestring", "cluster", "authentication", "schemes", "scheme", + "internal", "ldap", "kerberos", "remote", "object", "method", "call", "calls", "search", "schema", "config", "rows", + "columns", "profiles", "commit", "reload", "unset", "rebuild", "field", "workpool", "any", "submission", "indices", + "restrict", "unrestrict" )) """ Set of keywords in CQL. From f492b14db2ff4fb7ade750dd437acab6f8b31260 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 8 Jan 2020 09:39:36 -0600 Subject: [PATCH 1729/2005] Review and lint comments for oss-next --- build.yaml | 2 +- cassandra/auth.py | 4 ++-- cassandra/cluster.py | 2 +- tests/integration/advanced/__init__.py | 6 +++--- tests/integration/advanced/graph/fluent/test_graph.py | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/build.yaml b/build.yaml index ce03cc4978..b3a960c935 100644 --- a/build.yaml +++ b/build.yaml @@ -179,7 +179,7 @@ cassandra: env: CYTHON: - CYTHON -# - NO_CYTHON + - NO_CYTHON build: - script: | diff --git a/cassandra/auth.py b/cassandra/auth.py index 61cc48b3a6..910592f7ac 100644 --- a/cassandra/auth.py +++ b/cassandra/auth.py @@ -150,8 +150,8 @@ class TransitionalModePlainTextAuthProvider(object): auth_provider = TransitionalModePlainTextAuthProvider() cluster = Cluster(auth_provider=auth_provider) - .. warning:: TransitionalModePlainTextAuthProvider will be removed in dse-driver - 3.0. The transitional mode will be handled internally without the need + .. warning:: TransitionalModePlainTextAuthProvider will be removed in cassandra-driver + 4.0. The transitional mode will be handled internally without the need of any auth provider. """ diff --git a/cassandra/cluster.py b/cassandra/cluster.py index c453164361..be3e570cc6 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -446,7 +446,7 @@ def __init__(self, load_balancing_policy=None, retry_policy=None, See also :class:`~.GraphExecutionPolicy`. In addition to default parameters shown in the signature, this profile also defaults ``retry_policy`` to - :class:`dse.policies.NeverRetryPolicy`, and ``load_balancing_policy`` to one that targets the current Spark + :class:`cassandra.policies.NeverRetryPolicy`, and ``load_balancing_policy`` to one that targets the current Spark master. Note: The graph_options.graph_source is set automatically to b'a' (analytics) diff --git a/tests/integration/advanced/__init__.py b/tests/integration/advanced/__init__.py index a943beac41..24b3e474ac 100644 --- a/tests/integration/advanced/__init__.py +++ b/tests/integration/advanced/__init__.py @@ -33,7 +33,7 @@ from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_DEFAULT, EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT from tests.integration import PROTOCOL_VERSION, DSE_VERSION, get_server_versions, BasicKeyspaceUnitTestCase, \ - drop_keyspace_shutdown_cluster, get_node, USE_CASS_EXTERNAL, CASSANDRA_IP + drop_keyspace_shutdown_cluster, get_node, USE_CASS_EXTERNAL, CASSANDRA_IP, set_default_cass_ip from tests.integration import use_singledc, use_single_node, wait_for_node_socket from cassandra.protocol import ServerError from cassandra.util import Point, LineString, Polygon @@ -216,7 +216,7 @@ def use_cluster_with_graph(num_nodes): when started all at once. """ if USE_CASS_EXTERNAL: - set_default_dse_ip() + set_default_cass_ip() return # Create the cluster but don't start it. @@ -705,4 +705,4 @@ def validate_path_result_type(test, path): elif isinstance(obj, Vertex): validate_classic_vertex(test, obj) else: - test.fail("Invalid object found in path " + str(object.type)) + test.fail("Invalid object found in path " + str(obj.type)) diff --git a/tests/integration/advanced/graph/fluent/test_graph.py b/tests/integration/advanced/graph/fluent/test_graph.py index 67ac17e51d..b829d60721 100644 --- a/tests/integration/advanced/graph/fluent/test_graph.py +++ b/tests/integration/advanced/graph/fluent/test_graph.py @@ -468,7 +468,7 @@ def _validate_path_result_type(self, g, objects_path): elif isinstance(obj, TravVertex): self._validate_classic_vertex(g, obj) else: - self.fail("Invalid object found in path " + str(object.type)) + self.fail("Invalid object found in path " + str(obj.type)) def _validate_meta_property(self, g, vertex): meta_props = g.V(vertex.id).properties().toList() From 2be31162abd8c8b0e77ec8bd18c4ad7044ba5d9a Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 8 Jan 2020 13:51:20 -0500 Subject: [PATCH 1730/2005] Last fixes and removals --- CHANGELOG.rst | 2 ++ README.rst | 4 ++-- build.yaml | 21 --------------------- cassandra/cluster.py | 2 +- cassandra/policies.py | 4 ++-- cassandra/protocol.py | 2 +- docs/getting_started.rst | 4 ++-- docs/index.rst | 8 ++++---- setup.py | 2 +- 9 files changed, 15 insertions(+), 34 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 3dfd87bf19..d2a338c472 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -28,6 +28,8 @@ Bug Fixes Others ------ +* The driver has a new dependency: geomet. It comes from the dse-driver unification and + is used to support DSE geo types. Deprecations ------------ diff --git a/README.rst b/README.rst index 693f8d0b71..0b6c1e206d 100644 --- a/README.rst +++ b/README.rst @@ -1,5 +1,5 @@ -DataStax Python Driver for Apache Cassandra and DataStax products -================================================================= +DataStax Driver for Apache Cassandra +==================================== .. image:: https://travis-ci.org/datastax/python-driver.png?branch=master :target: https://travis-ci.org/datastax/python-driver diff --git a/build.yaml b/build.yaml index b3a960c935..b60c0950c1 100644 --- a/build.yaml +++ b/build.yaml @@ -141,19 +141,6 @@ schedules: - python: [3.4, 3.6, 3.7, 3.8] - cassandra: ['2.0', '2.1', '2.2', '3.0', 'test-dse'] - # TODO: Temporary, don't merge me - python_oss_next_tests: - schedule: adhoc - disable_pull_requests: true - branches: - include: [/oss-next.*/] - env_vars: | - EVENT_LOOP_MANAGER='libev' - EXCLUDE_LONG=1 - matrix: - exclude: - - python: [3.4, 3.6, 3.7, 3.8] - python: - 2.7 - 3.4 @@ -268,14 +255,6 @@ build: echo "==========RUNNING INTEGRATION TESTS==========" EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" DSE_VERSION=$DSE_VERSION CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true - # TODO: Temporary, don't merge this 2.1 stuff... - # if [ "$CCM_CASSANDRA_VERSION" == "2.1.21" ]; then - # echo "Testing cassandra 2.1 with protocol V2" - # PROTOCOL_VERSION=2 EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_v2_results.xml tests/integration/standard/ || true - # echo "Testing cassandra 2.1 with protocol V1" - # PROTOCOL_VERSION=1 EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_v1_results.xml tests/integration/standard/ || true - # fi - if [ -n "$DSE_VERSION" ] && ! [[ $DSE_VERSION == "4.8"* ]]; then echo "==========RUNNING DSE INTEGRATION TESTS==========" EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_DIR=$CCM_INSTALL_DIR DSE_VERSION=$DSE_VERSION ADS_HOME=$HOME/ VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=dse_results.xml tests/integration/advanced/ || true diff --git a/cassandra/cluster.py b/cassandra/cluster.py index be3e570cc6..c9a8b6d397 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2962,7 +2962,7 @@ def prepare(self, query, custom_payload=None, keyspace=None): future = ResponseFuture(self, message, query=None, timeout=self.default_timeout) try: future.send_request() - response = future.result()[0] + response = future.result().one() except Exception: log.exception("Error preparing query:") raise diff --git a/cassandra/policies.py b/cassandra/policies.py index edd91e8120..fa1e8cf385 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -1168,8 +1168,8 @@ class DSELoadBalancingPolicy(DefaultLoadBalancingPolicy): *Deprecated:* This will be removed in the next major release, consider using :class:`.DefaultLoadBalancingPolicy`. """ - def __init__(self, *args): - super(DSELoadBalancingPolicy, self).__init__(*args) + def __init__(self, *args, **kwargs): + super(DSELoadBalancingPolicy, self).__init__(*args, **kwargs) warnings.warn("DSELoadBalancingPolicy will be removed in 4.0. Consider using " "DefaultLoadBalancingPolicy.", DeprecationWarning) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 0fe7439f2d..eac9ebb8b5 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -589,7 +589,7 @@ def _write_query_params(self, f, protocol_version): else: raise UnsupportedOperation( "Keyspaces may only be set on queries with protocol version " - "DSE_V2 or higher. Consider setting Cluster.protocol_version to ProtocolVersion.DSE_V2.") + "5 or DSE_V2 or higher. Consider setting Cluster.protocol_version.") if ProtocolVersion.uses_int_query_flags(protocol_version): write_uint(f, flags) diff --git a/docs/getting_started.rst b/docs/getting_started.rst index f7370c4918..9b948c9778 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -47,7 +47,7 @@ behavior in some other way, this is the place to do it: cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: profile}) session = cluster.connect() - print session.execute("SELECT release_version FROM system.local")[0] + print(session.execute("SELECT release_version FROM system.local").one()) Profiles are passed in by ``execution_profiles`` dict. @@ -70,7 +70,7 @@ In this case we can construct the base ``ExecutionProfile`` passing all attribut cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: profile}) session = cluster.connect() - print session.execute("SELECT release_version FROM system.local")[0] + print(session.execute("SELECT release_version FROM system.local").one()) Users are free to setup additional profiles to be used by name: diff --git a/docs/index.rst b/docs/index.rst index ccc12e7bc8..a39c229a05 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,7 +1,7 @@ -DataStax Python Driver for Apache Cassandra and DataStax products -================================================================= -A Python client driver for `Apache Cassandra `_ and -DataStax products. This driver works exclusively with the Cassandra Query Language v3 (CQL3) +DataStax Python Driver for Apache Cassandra +=========================================== +A Python client driver for `Apache Cassandra `_. +This driver works exclusively with the Cassandra Query Language v3 (CQL3) and Cassandra's native protocol. Cassandra 2.1+ is supported, including DSE 4.7+. The driver supports Python 2.7, 3.4, 3.5, 3.6, 3.7 and 3.8. diff --git a/setup.py b/setup.py index dc3e4490c6..e157228f56 100644 --- a/setup.py +++ b/setup.py @@ -416,7 +416,7 @@ def run_setup(extensions): setup( name='cassandra-driver', version=__version__, - description=' DataStax Driver for Apache Cassandra and DataStax products', + description=' DataStax Driver for Apache Cassandra', long_description=long_description, url='http://github.com/datastax/python-driver', project_urls={ From d0dbb4468e4fd4688c5df522f443fc55a22733f3 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 9 Jan 2020 11:21:16 -0500 Subject: [PATCH 1731/2005] Add 38 in tox.ini --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index ff04577dac..9f0d510045 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py{27,34,35,36,37},pypy +envlist = py{27,34,35,36,37,38},pypy [base] deps = nose From 406a64d1d3cd76ef11d2d6d5e0bafec7cb089d79 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 9 Jan 2020 13:41:18 -0500 Subject: [PATCH 1732/2005] Move some doc installation sections --- docs/installation.rst | 95 ++++++++++++++++++++++--------------------- docs/upgrading.rst | 3 ++ 2 files changed, 51 insertions(+), 47 deletions(-) diff --git a/docs/installation.rst b/docs/installation.rst index 2905611ad7..d33ce441c9 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -20,6 +20,54 @@ You can use ``pip install --pre cassandra-driver`` if you need to install a beta ***Note**: if intending to use optional extensions, install the `dependencies <#optional-non-python-dependencies>`_ first. The driver may need to be reinstalled if dependencies are added after the initial installation. +Verifying your Installation +--------------------------- +To check if the installation was successful, you can run:: + + python -c 'import cassandra; print cassandra.__version__' + +It should print something like "3.21.0". + +.. _installation-datastax-graph: + +(*Optional*) DataStax Graph +--------------------------- +The driver provides an optional fluent graph API that depends on Apache TinkerPop (gremlinpython). It is +not installed by default. To be able to build Gremlin traversals, you need to install +the `graph` requirements:: + + pip install cassandra-driver[graph] + +See :doc:`graph_fluent` for more details about this API. + +(*Optional*) Compression Support +-------------------------------- +Compression can optionally be used for communication between the driver and +Cassandra. There are currently two supported compression algorithms: +snappy (in Cassandra 1.2+) and LZ4 (only in Cassandra 2.0+). If either is +available for the driver and Cassandra also supports it, it will +be used automatically. + +For lz4 support:: + + pip install lz4 + +For snappy support:: + + pip install python-snappy + +(If using a Debian Linux derivative such as Ubuntu, it may be easier to +just run ``apt-get install python-snappy``.) + +(*Optional*) Metrics Support +---------------------------- +The driver has built-in support for capturing :attr:`.Cluster.metrics` about +the queries you run. However, the ``scales`` library is required to +support this:: + + pip install scales + + Speeding Up Installation ^^^^^^^^^^^^^^^^^^^^^^^^ @@ -80,53 +128,6 @@ Once the dependencies are installed, simply run:: python setup.py install -Verifying your Installation ---------------------------- -To check if the installation was successful, you can run:: - - python -c 'import cassandra; print cassandra.__version__' - -It should print something like "2.7.0". - -.. _installation-datastax-graph: - -(*Optional*) DataStax Graph ---------------------------- -The driver provides an optional fluent graph API that depends on Apache TinkerPop (gremlinpython). It is -not installed by default. To be able to build Gremlin traversals, you need to install -the `graph` requirements:: - - pip install cassandra-driver[graph] - -See :doc:`graph_fluent` for more details about this API. - -(*Optional*) Compression Support --------------------------------- -Compression can optionally be used for communication between the driver and -Cassandra. There are currently two supported compression algorithms: -snappy (in Cassandra 1.2+) and LZ4 (only in Cassandra 2.0+). If either is -available for the driver and Cassandra also supports it, it will -be used automatically. - -For lz4 support:: - - pip install lz4 - -For snappy support:: - - pip install python-snappy - -(If using a Debian Linux derivative such as Ubuntu, it may be easier to -just run ``apt-get install python-snappy``.) - -(*Optional*) Metrics Support ----------------------------- -The driver has built-in support for capturing :attr:`.Cluster.metrics` about -the queries you run. However, the ``scales`` library is required to -support this:: - - pip install scales - (*Optional*) Non-python Dependencies ------------------------------------ diff --git a/docs/upgrading.rst b/docs/upgrading.rst index a0342206b6..3a600e9ac0 100644 --- a/docs/upgrading.rst +++ b/docs/upgrading.rst @@ -44,6 +44,9 @@ need to change only the first module of your import statements, not the submodul from cassandra.auth import PlainTextAuthProvider from cassandra.policies import WhiteListRoundRobinPolicy +Also note that the cassandra.hosts module doesn't exist in cassandra-driver. This +module is named cassandra.pool. + dse-graph ^^^^^^^^^ From 736557e25fa6929235ed060f986ff2c85b7cf57f Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 10 Jan 2020 13:48:28 -0600 Subject: [PATCH 1733/2005] Set CN=127.0.0.1 in gen_client_cert.conf when generating certificates --- tests/integration/long/ssl/.truststore | Bin 997 -> 0 bytes tests/integration/long/ssl/127.0.0.1.keystore | Bin 4317 -> 3775 bytes .../integration/long/ssl/cassandra.truststore | Bin 1074 -> 846 bytes tests/integration/long/ssl/client.crt_signed | 32 +++++------ tests/integration/long/ssl/client.key | 52 +++++++++--------- .../integration/long/ssl/client_encrypted.key | 52 +++++++++--------- tests/integration/long/ssl/rootCa.crt | 28 +++++----- 7 files changed, 82 insertions(+), 82 deletions(-) delete mode 100644 tests/integration/long/ssl/.truststore diff --git a/tests/integration/long/ssl/.truststore b/tests/integration/long/ssl/.truststore deleted file mode 100644 index 80bbc1fd78aafafbe847717dfec764570e6eda68..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 997 zcmezO_TO6u1_mY|W(3om$%)0qiFqkSi9nGMeYxXi46G4)rUsS_49trSnwaMsG%>X= zU}j=uVq#J8I<>)omyJ`a&72>lsQK zh=atrg#`jU0u)?|GRsnn6hcyqONtGJ3 zY+@QE!Ea;;6fib2g9;416Pp;7kb{kpm4Ug5k)Oe!iIIz`iII_Ek!v)_$33i#Yx@a(BJ`ws+H2kCV>@4`@F- z-~0M^a>#4X`)Zjc!s5mgr-nq$<_g$y^swsJO<8qyYd6Su?-Q83l5ywRovBGH6Beav zWU!yU_V@EXxA%*7-Q6W#eZslG&~(PcbAsZt|J9Y>dAe+?Df?~vu=C%%Yy6J9KgYb4 ziJ6gsaj~L-yn!q*dS&@o#8^aXGA14N;uJAF_2%+{=wl)d*O@$&kpmH!_JD!N$grR! zp;@g*b9>|K+Ba!D$3NJ-pT~3H-zVmur49REI!RuVFmrqNHK;~$nNIel9HZY4mv3d& zkDM{Pc0%Tf7we_wn;HlC^&3A7p1vmO#-vBySI(|dvsAtNA|?KGv1U!P64#EU0qeCK zBi)^Jf3n)VIe9!_Z@o#jPT(g$e0_~Wio}iMO7nXD zPmz0dQhlrRU6Z3T754nOk@RWyvv`~|28tWJab_I%hgZ@^_)FhxZX3) zzrKrMn_HUO!QNN@)|j&Ilv-%D?#+_9dQUemic4^J_6=dU@?7o;+aAVQoJs!QjxCk> hXTvx($<8l6f4|}pb^%T`3pTa6Ud?YVhTBEd0syZsZd(8V diff --git a/tests/integration/long/ssl/127.0.0.1.keystore b/tests/integration/long/ssl/127.0.0.1.keystore index 208166d99a626af52fecc9275c0314103823cf31..3855f00a1ae458fb330deaaf11d623e203ef3ccc 100644 GIT binary patch literal 3775 zcmeI!X*8Sp8UXMll2{TAu|&(LeN9Php>&2SYO5-ZjwxcVQfV!%twofgvF~D6)KWw( zRa>WsYUmbJYOijJDj}Al*L3d8ojZ5V{WRb1J^%B5c%F0K^WWa*eSW)(yNdt-0QCC< z^&=7kT%7>`ATh5&Kw!!ja5WfUGa4t9J0B8RI=O8R%@0Gn2 z3=Z}1a`$ob|1H4*|F#B0XN)+CR}F)~Vlir1ELL699>uG^7h-V#4F8rnNIM0WCHS4%USpfPcdp9N0!XLS@0=JJbzSb)vL6}c_0uqI6kIwL6q6h zS?v^UMEewR!o!w*hp1%}X%^|(_$JL+gOg34Hxz|WFg)RJ=O<=tK`eW%!nZq-XURuk z5ZN;jb9(rqb$dj&gQ~oRB=b-Sg?pX`?p9kF&W^xraQ&df2IG2fm-RpCF`wj(D@f#g z*Rj7zEcEK?=T{jy=MAGT`a3={qo=<*812-&pF1;n6sWyGNLq6|I$|qR^dKQStEDe@ z%+zr+;;XjA1rP@i0Q}<#?OpEP2>}6FjGR+9Sir${!aP>mU_4>Gzsq7Yr)z-=j&mTC z>90^wFF>z(xaVN{LEyUD^LQB`oVyduH`sECzWW@;Zqzn^8Ff>Q+=lBoaPJx*E4Y2i zH_y3R5v1Fq-mfBjB=^c^pm>#LKwL@>(=~9*WW=iIv;X5+lu!d&T#598TK2ZR{-u}M zJX$MxSBg|Atv6#UU_IDXH1bk!3G=>7g%)<4Zfw%}i5AA>2>0=@PZ=g$%O#9>ysvFu z&|Zd{std--D2%3UO$W3Ud1ap}Fw;LH{(QNrWKp<#>kU3XqUqqGMvb-;)bE0Z#KS(~ z(*mS|Y1Cam9dox<{ZG!vu$_Aq0|anm)o?19y@CC;V9rnh00jHK?3!ntrf8$XQLayOfg2RKdi zNajnvS!i`2c6>lvNtR8!FjR&t1}t@M+xa!0=}fssK#h`Pwp>@VMgsZ}p}XBRQRW^G z9|U5b9nIZthN^^#>{E#rukd_Fa2dAX=8NC2{YFXVQSxW4GR8ID_v6Z%!iMy7tyi ze&J!h0cKk+&f6o6MQQJ7XDyi*7ZfZGqh8oX?2?6=ZoD-pAIM+L&@;7+u)E(Jb!WLz z`IJvSbCGCF@pjg8``I_I^JugOXw9`a?MDXEMpk!@sMs?bb%CR-a9e%EYB>Iu@Mp9~fI}=5HXiOJWc$g{b>5_ICjG7BwyWDz&~e_J=lEin zO6$7Q>!KkyC-pq#gsSXU_c~>s5U-B(ARsI18+SLypUR!Kd&uPwxgl(FIk1dSYr{CA zoHZhuM7y*0KA_ychle-@+8;|_Jrql9QIxk+1O$d7i$AdWgJM_CQNpMrbl60o>$X*} z8+D4T8@G-Zy6)dqdVQ?Eq~^gNwP`^E*er8Tf8NFKnbC!KybWa zob|;WjAU-*QL$%@kf2Ortbd~T+&=vbomnAMVUu>=S?6a5BaHF!WN)cNlUaG=S?mA{ zd!@FUkhOZ#YyQCxVo30^9;gp*Y&p3EkvMGNQwPSBNROR+A#zqMxV4mcvD#X38ETah zm#I(_xT!xgBk7$X_IXu^vH2v_|L2n1dGBA($*m+#Yqih;9}*;a*#_<}cwM$~u_e&C zmg^mFsxxxbD~vo*yxK3uE}K)$8S!4oY2%w64@tFns%eVx{aw>*FJyb)+CrG}g#9ga2zjv5buRhh(o=>c3hOuFKaQwu2ctUS(+)CLw zq3|_B6hz5${Lxi_?^Jjm>6R2gJQ_;jOYaRb9L^Gc{N-rd2%Ww5Tl-38y#a#WvsVdwN29t<60cAdw1fB z?M9r!d_{x+>v-v=oeDTJ^qVGrmjilF4hk3&h_8qP1O$NuISB(@Q3w-OR_Ab@8QU&~ z8h2IkPaWoOY3={c2izVX)UbPe(D}v(oIS?quZbe?pF|P(jVMSA-!Eq25ct=Re@_bk zT@bD6k)-jp21-3Hj_iN>a7>s0$NMSoHFj|2&J59dXK*{UCTE%+lPHpsNU&clm`Ix3 z1sF7EiQ*<2UWIs{Jc+hDdh<%y3<-k!vJnm2%e3WgmTixJ-D8!0P4~1=ecE9E z>*JG~d=yz8nUta6(#NFik4uG3a|j6@=PlgjgPQ#PWA2B@om$iM`nr1k}4j%-$xGrkF; zQ~>iU0l(u1b77!2z~Orh^G}!ljTK40Yy!T`50D<65nyy2bpdY74OFN;|7Wu z+afqcv}jchOCdCsH5aZP9z;tgcD-G#PL0Vl(Oq1sH|G;(3S9vGQ1I1j*{hyVBNyFR zTM{UiGvj$8cWB9<$%XQ;NL}+pd)Y|A?*Uv1gR^|I>alX-1p`je6fT?lhVI82o-QMO zLQ~D8)JrYH`<0D<(v>`Vnt#h;$9t&t0txoDhEjAV)$~Ej&XsZL3JwiXUCFXr>FJDP zJ&FxakLj<~-J!n<&nfrbveajsb}_hf7nV0)m2A-na(_~t|3p#qr(s>IyUi1MoOco| z;>VQ^qXDPLX97~5O=S$N9J;6Ye{laF+<#pe|LfomSh=UNY!x;XvU7j*di!{GX$p+~ E4@6Q;7ytkO literal 4317 zcmY+EWmFUno5g1sIs}nW8UbmTAqJ2J34s}q?natnkY?x(2kGwa5J_o}k`C#XkdO{R z*#GW%_uYLz-1D4s?|tsaA2^6)0RspE2a&{M<8g#3hFuc@ae%o&B*Bw-{KHwlM z#D7FsrkFukhX2^Q|40*?;9p;a_&|)@Aj~^(5auQ$ z7{vsUEg!rl?P9?I0eaOigD?jlx!eS`yPokTcN;%-DO}}1uzJ$G~rEM_WSCiBC zt?LT*-#0t!Kf{0X!AyQ^GqZMBcx*f;#K1>e3!nVPa+=U^CVJvA(l42(KxEZ1Q(NE{#a@vA zK2{5FE3S$lGkY2R2g>_R7}{MueehYgs^UOU&MRD)bS(!WfmI-eBfVZ+O<{{Q!9w*^^MHjAgW)o`rNC0*f!AtlqvP>k#;%Y8K?BA2+^1E zK1}7+n%Ue}*fNHp%mUI9N}A6xdMa^nHZDr3R_|AoGPIvk&`6|+y+O=Uesu|ddNN1( zQPqzAw`I40K+_HT3dl-w)!4t2bA`a>T}<;t+8j41bi1daByWPOn1vD-ZkAM1av#F9 z?GI=pYdnP|+4g4_=Qf5X@|nw0U&t7pFK8V|&Eq}77}KfDVPUtZVC?xPO`?V8Y2ihB z>wMm()N}HUes#JM#~;Fz3NdsaFa)lbKCmtw=#OTKH7MO0zeLma)_&h-#oFU9mGNo; zAN(eW5ikC{Tfa)KY1I!+^ENLrzAreE(D5>e% zz!F#cZ^Vp~X72WJmKbsO*EKB+-%J7X6HiWpWrrmwN+Ql|j=8+bPOw0>tNO;Fs3e1> z&7*CG<&8llQLyiyLltuPi=sFuBG@W1AcWV`(qTDGK3_4*xe)obPvJ*mM-;24s9Wi% zCZHcAC_N-6x=2Z6$Mt%A_T>)5Q$6dUg#C%ogZuu<_ehaspY|aaRy^p#I0XVQfcq1T z`=o=tAl3T>1n+qJzFNOsP2X#axmW^_IFSyq3Yk)rqK^o-zIMY#>=#igRn#8hVwU3N zzzQTiMSJV=bJa+a8)#lwR3kim;@Wc9{=DnvJZxEicT>64C1A!EhXRTIx+;*noX<<3 z{)Dt>L3D%ft8nA5_7(>7T2Ccg=Q+Rc+E1%NDK<95WTVX27JDXY|LS5LPr={A(k=qfKIvK5~Q(5I`*2wc!Y1;WbF6L%@ zRj$W1*|!#u$*X_^Tvj1I8L`k8dW$tPbQA?TwrmbW+>SbQn4U4%*gol>t~(*}V8(x= z5|0c*Mn?*O0Qdkx03HDNUxNJq3OR%flT_2z(TV{gC?p^(EC3aTLIs7vK?Iop-oe4n z4Z^h8jLdYCsQH6V)>}c8s#d#1b{uBsdya^ zhP3q4P}2&2XL>7LQOqmXMo$uDe}ahi;ZANpdhSzbWE7pMSByBY1DfwKOmg5ZJ%pUy zv!ek=D}JnygWt~>Pn>=$ccc=${rGj!@1_0KIvjndnu#hV3}u^(of&o+I4#($z-ihN1%lOVN3NP#cMDZvCn-8-XO15D z9-RHItmN^fYqM)8xkI+(fWyi;SH25XQ`_*T)8gx&U9rd^LV8wO8nKVPT=rML8t_DOynF~O+lfe)euD&{~>FV#PiK$4qmKD5qpp4 z;z4cJ@X<=ykcKKOZ*f}B!thmqZG$3@YJEa+%;jCTRqECZ;L&I!N@JPZGAj8>H^5cZ zhMzRPZ@LMm+_n6+Uo2BdG4C?z3b{1(Q}0yT?8 zQ>bKn@o>VE@eX}RBG+q@a;at%4G^Vy)17EDty`^!=MZxuCY5;?9j_L7Q>+zJxwV}_ z4#H${gTX_wM0F~BV|5bI(VttXCn0#faX3QBaZS3knMh6t3k6goebGx+xQ8}MU+cBp z$)m))b$`xl>_#1-!H>O;Xe#TT@Y<9BiFZ?dw*c3hP)m3PR-jO>) zTuu)IFLT4{K84||*df(@0lTXh_Y_=V$#hQhPi$qS4&f9zBKyrp{Uk| zOWEGo(vU6t{J0iI-&BP8HiD8_D1!{6=QYqbup^FYJCiOiNchNYpyn&Sn$ogcMe6Ov zO%A0$xsEJ|s4=3f)FF}EC0xNFqydXTgQ{M5!XGmz=L8(30Jn+AF@c-LFQ0@Tc=`L<|$>FspLV0DKNs?M+;zfbv zW=C@v^-uE0+#s+``7_5h0f7#gkOxQ==M0JYl`Bk(kxQ&041?@_063PWzP`7z$yR^* zF!i!N-+*L_fY)=tTL(Rfi$ka>h0mibz`E%m>+=eDuF-L+xR&m1rM2EC93Hyh9Cj$_mlcQhg98Ts52U`OQqXboVY5bXL${HseBCP6RQO>F)0;Hd6ep4L zd>mO9{~WA?U)uoRb~!08bH`71kYrONK{Njqe6;$HubDHcTg_}EVvtDbwJq3-U>1uB zS^4FhraL@WYR>HIsXjEyC4bty0JkMD=a7at>|LO>Hcl; z^0n;E%WUS z7*352Gi%!M+S1%Exg8HM+lHgOt(nEIztgL9Tj;M<7^ zegz>+LIM~R!aTmdyJhLNKQ9l&3{iXtuTT8?n1zT4iVk8Ys- ztl0kJ0A>@4&#uvYTT72)L{(f;Vb7D2JE>`*^*US68@)SI54pY0Orm2c`i^A=WXMVC zm1h{BnXx5es?yUX91P~xYDZL232lExoRWk?97#U;w9}l(S=g;2sNWI8@dOe-`YFU4 z^GbBO#pexns!U+fvv{}e^TiR)a;LmHyR;nm8rdu+!o6z^_b2Xkl2g5v-FHGSt_I&{ zX4ij0&SIje5;mXjeLG>PP6mxB8fjGe({eS@Mhqg2>qcl?(?gk5pzB8_q2_t1w7IYI?sCD z^erl<342ORS?ao8?*i*`92U4}`4}Z?5+R(XdQVk~4Xu!pz$l@rMvPBU%9VdCxIEz7 z_Rmm6Rm@;jQl~FANoDw3L(Oh?hz(?{)iCm$A)3wS93c zR+}-WlEQUMtIyF=Rs%AnD#Nw~wIG|mhWN;LvdKhxY9@qeo04zP=nDD1aKt+kdPCi~ zfmam7snGc!EUPIx$*X&kn|$`v%X4EvhjnX=r_P#DBU|I|vWe_uE0&tKTF3zDb z>TJAj9ZJq-oL~$0&l{XxW-10}(#@<1d*ev31eg(whmFPg1cX6_1;8ZR6cW{?;iX|^ rN5(@1Ma-siQTcwDWDadr8&zvBRKn{{%3(Y!T%W}|<}pCPu-yLw^bQOH diff --git a/tests/integration/long/ssl/cassandra.truststore b/tests/integration/long/ssl/cassandra.truststore index afea1c9431b2a6361eec3bbbb0b2679daddb0be9..4de3d319197650252f27bea858bd279736f31241 100644 GIT binary patch literal 846 zcmezO_TO6u1_mY|W(3o0Mfv$9$%#O2{zRYPAO_Y5JyQcq1_ow+gC=HfgC@q$Od|P< z1Rgwe)iLpO*4w`O`X%AZI_g^uc-c6$+C196^D;7WvoaVs8FCwNvN4CUun9AT1{(?* z@Pjy9JRB*BC5go)i4}(YKrKKaZXUL@%+#EeVweCk57-UPi3W1wyha8FhK2@4hK7d5 zrcvU&#z3xtIh0EkJDM1kke$xR%D~*j$j<=uC>K)`BO}9}U9W$wKE2oRW_9zYo7}Ia zaMXL8$tld{*Zif4$D1nV3cP2e$Sa@ zdp7@mle3^N%Sq`==HerXXG+D--np%pD}8g*zSZ%quE*Xr_@#w>Tbpkd-00PqzU9f} zL=*OJr(Ziu^W43h&hzy3-G?DepQFs?KmFSn(__ocsv5!SeY*Z_;GTxVvHI#hiZ^u^ z&E|Z4jODP=r>ip>41RFfYkgxeKe}S+$#X}%@9}od>tlbmS8}UNb?uiAx2Dd_a&9ds zjyvIXdde;N*7$9boYN|AFj~Ax=>Hn$bS*?>!GYcx)3+a=b=x!UXTyIBg)s0jX8@*OgWRPAVTkgkREDbII z(t1;<(+V3RSd$cU*5gO+mH@nUfLR3&2uK)uLWDS42vx`sGHm1QTt|qS1~Wz`UuSv~zqnZ`yt}}_9ri7IX|1-cnlXH{Z!MR|l5h=jc z9B1EXrtlYfix_OD8|ia;`RLwWMZNLaN+NJna|Qu(k@sH)S2OMfnYx~?24h3U1hy@0 z`xL09hAd~rnm~!^Wj5k)@a0HRtgzJWg>Xdr&mO(S4c?0INB)5}m_etB zlY!jH+2uymd#i2-=Aw|Vra`e^|7x_xin~I4EeE@GM!r0cQAigh5bby_1Q_(_hWcg= ztKr2?SS{?{0hYCj!(6NwXH|(7`ZuQUbZm;dd z$ojj?t>tqF9j$u>VkKKOF<8OMr@?pLZPC|$V)0hymbNV1xezPpUgg2}9&GSJoe**~ zZLZXRH5`k?8Xg0k!0iy!8o8?MprEH=w^r&*2@S8qo>#-bm~`TZ&(A&!{axFu|1SjT ztO(>iGNqov`DS`@*OK$TIVyt{X!8#DW;Tq0OvP@G4OeIi`D*7_mtV?)+5XEXh`Oy( z0^lI6J7lC4&=m?5iIzZArU|DB&)OIh(E_8>J1{;lAutIB1uG5%0vZJX1QZBfa{SH! sS@wS%XrS`CWf$9DpkV|QqkYvE6XIGn=LZsoWSKgC#5Ing0s{etpm$;JhX4Qo diff --git a/tests/integration/long/ssl/client.crt_signed b/tests/integration/long/ssl/client.crt_signed index 7338adcf69..b0da180632 100644 --- a/tests/integration/long/ssl/client.crt_signed +++ b/tests/integration/long/ssl/client.crt_signed @@ -1,19 +1,19 @@ -----BEGIN CERTIFICATE----- -MIIDCzCCAfMCFHXm/9f6KJL3965bBgZMFdd4Xs0gMA0GCSqGSIb3DQEBCwUAMEIx +MIIDDjCCAfYCFAdgzL+DD0fzl77VnEr2V4axiX0qMA0GCSqGSIb3DQEBCwUAMEIx CzAJBgNVBAYTAlVTMREwDwYDVQQKDAhkYXRhc3RheDEPMA0GA1UECwwGZmllbGRz -MQ8wDQYDVQQDDAZyb290Q2EwHhcNMTkxMTI3MTk1ODA3WhcNMjAxMTI2MTk1ODA3 -WjBCMQswCQYDVQQGEwJVUzERMA8GA1UECgwIZGF0YXN0YXgxDzANBgNVBAsMBmZp -ZWxkczEPMA0GA1UEAwwGY2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB -CgKCAQEA4SEvwK9IbwdVEnBEf6UbYyaGh0Ao4VGyIA2Z2yDTmSIqBgBZjbBdYr1w -FC0nMqKgAI0NOqZHNRiuOUojweHZLwrYhVPg2e/Va+vGslQY+cKmAYDTCsxhEwv7 -4PXiI6QhGwmfhnSnA0+nzPKZU1SFFWl9Od07X8QJWJMLHDOG14OnsWI2t4wetkIw -6yyTXfyAD9mSsQeqyu5P6E94E2VAGtQvP4yeAJX/dWPYhFEBctsQ0H+Jk2GK3kBe -9YX9Jhk4Ono8cdUy3WfUFQRXgi01YmF1vI+Z0fS0olkdqw8mDonFpMS0ly97Lnze -aOsTv3s/SONdYa3BslbEAwUT7kvE4QIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQAH -QZDTPBQgMbTCp8IJvDRbXKQAfjEE7FjYani1qWOMx5JpOJIoYHnnbLNRxCWowWRj -zsyOa5Sqs0TVOS5/4QHqu+cSdZsUNL9TvESor+BwgSf81JViD0r//xK+j58pLqB1 -F+sPIltIqclWRN/QZH1VNZ+G0WQiXoT+YGNYRzJtXNBQAi3cxZSXLaeZ6yViWzw3 -m8IL0nUTX+uNxJSaFqZw1vTHBe0hC/l+P3YfnM1k6lg4lsWDvzdwNKfmohFlo5eV -OSG89YncwigG1KUHfQ1ECX7aaI8+cTFEg5XWTMqwVko1pRVVL9mVF/YEwI/+ydaJ -whmL77CEP1AvHIjdiafh +MQ8wDQYDVQQDDAZyb290Q2EwHhcNMjAwMTEwMjExMTM3WhcNMjEwMTA5MjExMTM3 +WjBFMQswCQYDVQQGEwJVUzERMA8GA1UECgwIZGF0YXN0YXgxDzANBgNVBAsMBmZp +ZWxkczESMBAGA1UEAwwJMTI3LjAuMC4xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAvGQ6G4o1PYcDiRuZnWhSzq2Zh0i/73JPyUAauPzXFRjZmOQdUaro +inkyoSpJkARxL6SIGbUeKq6rD/2Std4b6T1lj5GOmmqetUtNf1tiXvgu0NFUuWh7 +KJllojuFvUgiWRN3oMEoYgN+EqIBS+wYx5Nh59UW2v+1D/zD3ckn3kHXX2c+fNjJ +UqYUbUBElpb7N9j6SouakW3UzTonvl1kxMO3UjiWyy03IVRS8zQo/zpHHOqsqYhr +G4jCWB+7JRHi8qxiA3sjA6mUPatgF46dJKlEXEP5OSsESC20CDhmOzFQFA4/PIc9 +srG9MSaZlENyhF/ZTW4CJeH9QmCmFnv4ewIDAQABMA0GCSqGSIb3DQEBCwUAA4IB +AQA5LFiDq/+2SpfmL6US0x93E4LNCut7qa7eQhqEdmnKqshO3HwmNmYzLWhpifHL +OFz3Tzz7JxuIyNNI2XPQvLcyncymw69PdIbaYiNhuwvV7DtZUuxY/z0dlDPNDOqU +NJCS6/ny2KcCK5d36b+ppvLB8DHdyQAn++7mqqvB1vyePtkKx801qC0VFvP/NVKf +R5gFWBwFX+wNz66oFhikPSxG96SaddE5XIpRWY2richacgn2/f+Z04P6AN7DyAWA +lc94hdua7X+AV5lKM7VMWcgSJq/xhGZb0GsJv9+yZwTHWnv4qOtT5FVGGIVXrl97 +yjeRAMOqzv8+w1nGGZ1Kv1Ym -----END CERTIFICATE----- diff --git a/tests/integration/long/ssl/client.key b/tests/integration/long/ssl/client.key index b2934eadd5..b942d628c1 100644 --- a/tests/integration/long/ssl/client.key +++ b/tests/integration/long/ssl/client.key @@ -1,28 +1,28 @@ -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDhIS/Ar0hvB1US -cER/pRtjJoaHQCjhUbIgDZnbINOZIioGAFmNsF1ivXAULScyoqAAjQ06pkc1GK45 -SiPB4dkvCtiFU+DZ79Vr68ayVBj5wqYBgNMKzGETC/vg9eIjpCEbCZ+GdKcDT6fM -8plTVIUVaX053TtfxAlYkwscM4bXg6exYja3jB62QjDrLJNd/IAP2ZKxB6rK7k/o -T3gTZUAa1C8/jJ4Alf91Y9iEUQFy2xDQf4mTYYreQF71hf0mGTg6ejxx1TLdZ9QV -BFeCLTViYXW8j5nR9LSiWR2rDyYOicWkxLSXL3sufN5o6xO/ez9I411hrcGyVsQD -BRPuS8ThAgMBAAECggEBAN6HezgvCp4U1VZgepThkuyyzA8ssDvoEPX1+7rJ+95G -EtvTxLF1Pxm2vu5yo2g9r4Jb6lOqwIAPYUdnrWib4Z3KTrObcYp6sq72Y3UqA3X9 -nTGnMPRfiSTWl6aJ5XntZnvfSzwQWnW/atH+iuf/h3zexNVJhMLod4SB9F1v4T1s -HgVjDJ/4cLomtNDkB0CwhgNr6elASL/DLnWC4Fb+C7HpJtu4A4U/08DWD6kAfs5a -zaSTywXVtxpp4NSMJUUI6KQZEfA4uLP8dLX5QMYgdwfpKXkVturTWMIUmU73IWCk -CaDMdXNeKU7d2iJIQi3DlHrcVZm+MaQKW79N5XUo3TECgYEA/u28rQM7AAWNNEJI -Gj/iGGFF6lE2V+o2uWySxEZeb0noWzuyUP2bKU+CbYhaS6YCAtEEkLCHSt6D4jZs -vUxJTz3H99F4jqLbI0iZE+TKh0ff0oVDq8odmn03QylbZ3+H0xgfumjyOtQ9/CKs -1fIA7pXXu0M2J7znYes/tUsINs8CgYEA4hNj9gk2xCrKE/onmzodWa1F4VrsrajC -aDZmm7P+gZb/7p2JRn54K0SWSJthVNu3WnnzNFjR4lsRN7jVxscWb5Uq0ppYNjj6 -5Y3IU7F8zhib1zaasqPpvZpq2rK0AwFeQN8aZrLhWEPVii30HnLc9cQRFcHiZfWj -/amGJzB2NU8CgYAs1Jf6gPfuMAu95e4SF6vmB60o4LFC0qBfCVXiCFHxFjkzWhMo -pQCVSjMwmervJLlzz1gTLcgNBYaB0Hpc6750nfO9g+vEgaUx1kF9Ox3WnnAg8GiP -HFMKaPy/5dT6JXY4TCTAPlZivBJOdbaZRR6e1mECwHYUlObH1Hv2oMRIBwKBgHzW -UkOBMQG/0xiW8RnxTFXBra3URI9cegWLzLs7+FTc6fR8f1oy6e6SkB4F1whBz4yh -fJ+yUCubN/W6FioOs5/oEd57pixC6KCr2ywD/TPdOOjtWR0+EAtH4qtjNK3YKpDN -4clGC9NumdMUrxHFQahgnUKTbo34x5aB7vdi9lNXAoGBAJrYYiVUe46FDAGZUzKG -xMuJS693kx23vC/PNaIP9wMa58aOEtTw+zLdyioiQIvxmJXTnEKWFK25z0n0gDJM -hMar/CFrERHooWRjn+a5kyKXppJ1DHtoSho67wOjejsnikjQvmppEBFr1YjvyDhD -kY44x2EM9WzqlrwHtBeblQWE +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC8ZDobijU9hwOJ +G5mdaFLOrZmHSL/vck/JQBq4/NcVGNmY5B1RquiKeTKhKkmQBHEvpIgZtR4qrqsP +/ZK13hvpPWWPkY6aap61S01/W2Je+C7Q0VS5aHsomWWiO4W9SCJZE3egwShiA34S +ogFL7BjHk2Hn1Rba/7UP/MPdySfeQddfZz582MlSphRtQESWlvs32PpKi5qRbdTN +Oie+XWTEw7dSOJbLLTchVFLzNCj/Okcc6qypiGsbiMJYH7slEeLyrGIDeyMDqZQ9 +q2AXjp0kqURcQ/k5KwRILbQIOGY7MVAUDj88hz2ysb0xJpmUQ3KEX9lNbgIl4f1C +YKYWe/h7AgMBAAECggEASoGdFY04dyfxdwUqYL2Emma/5GgaOJnOAjrPFsAwVBCq +5jO5gLYGF9XM9z5hL4sCNKRuizQ9RQYlc0KHBlRcV4dHplsbuehW8j5g3PCIXCTt +ZvqS9mzi4HCiaGIAB5cCtpXjZvldfj4BW18lAiDSwAOC4gw9aMlek38U+571nIlh +gs2rtCuWR6usJbJdFfJ4ouIPX1F4Gz5s/lYhG5jTGUAnKyE1NgqPj+PY+ZfH0qNX +AVJVNn9Ze3hT3JdEjTYFNo5c1YHui9Krlh5EMabVWgqmbI2WS8sZsRcMAluDB5lY +OaCKLFYwOIG4nGBU9TE2s7fxl3qCkBGNDhuD4n80iQKBgQDrZ233CVfSLgTyKKuV +i2niqIk1fAgLIFefHxsI1wlApLBCc58dAGhAvmHRdYSw8iBinbLXGnS9yPZvjNik +kLyWPznq1p94CZTD1KsLz5qmRlReKHJEhdajlR5LnisULvqiDc7/lk18W2fD0KeD +UnN5hUjjOeaQRjvOTghTie+1PwKBgQDM39Agd/INPr6WsYSqYPEeE9fZu27p0YUQ +4Y8zHVENeg1jUMDXgHo3weZl5n3sFMYeuYm6d9nE0k/Mjr2hL/zlnqQBG5XtJ5oW +2r7Sh1pKCxV+oYgsdFctkJKSi5Q8RyK4YrUQA55EZqDewfZlmmWW3q/fahls49Ny +uTJESV/BxQKBgDiIh10rjj64tJFfeQ2aBJzdcs44ckoRw1lAhCKUWfF/W6Ep2U2C +uobJ8f32ph5El8h3LOsBvIWTjLNvdNvYsqG2n3cpgfS3AFYjbcyRWAeUnlBakE6q +gciZWEQ6wQfA3IosnMi+1O8HmJzrMD+WforxmnaPgjKl21kJXnCJkNrLAoGAdlG3 +6Fh9UUrwVSVWgfOUrRM2sMd/yce4OsSZqCKBQfBANSBZDtxjOTphbm5MQQDKXso2 +kZtQCEyRy4iQWbvWKWKSQxWEY79gTVytofaLnYGDO2vcshfKlUUOcVXtGVbX5XcU +LJh6WfSPabbJL2qYyyX2mmezIWD+KB5uumNJyyUCgYEA6afM7Gzv/jvm8Vrqx+Gj +gDFvO0ZxkxOCtA1Qd42NYu9rgn0pMIJyukxvEXePxUu164ehE7VRN0HhofXMGbMG +R8aVqbl0w7jSN6M97vo6Xn+sRwFcgMfjo3uKgdXbdxyKnzPGxOTYUuy7Q5B0teiw +kz4fRgPkfSQ+nfVrHAgX3WA= -----END PRIVATE KEY----- diff --git a/tests/integration/long/ssl/client_encrypted.key b/tests/integration/long/ssl/client_encrypted.key index 88c8f906ba..cb787d657f 100644 --- a/tests/integration/long/ssl/client_encrypted.key +++ b/tests/integration/long/ssl/client_encrypted.key @@ -1,30 +1,30 @@ -----BEGIN RSA PRIVATE KEY----- Proc-Type: 4,ENCRYPTED -DEK-Info: AES-256-CBC,D33271042DDB319A9A83747391E3B8B0 +DEK-Info: AES-256-CBC,12FC332B1EC2742035449FF59F9C5F71 -NTq5xNw+6QBZsuXsM4ymjwtRHmEmb5T/DMUsWYZmJk8Eb12NBveWub1ltdkjOe9C -vqmYKG7ZGgEeRynia93P3+LY+K+8O7vH81jceYNaFYP7OEr8mSblrotjdQkcSEBs -mNGhZ+7XhlGGmxtMnOodYchwBsKHgBOpPzGB6iuwtM5b80wQ3jh7w6NdsPQzeSBS -B4RalAsBtU6dxfdl9NfjKisNrfGHec6uE5SfpYqUQeZ471JYl8pBkcJfATdlNFkC -uKgUJSxK5kV1KF5ClO32Ekq1IUgeNZUFHE/YA2xK/YbmDvFwOTfTpozoj7c2VRDs -2AWaYwIazz9dH+i03/bFZtZUCARVj1jig+FVVWSIF5PNUrgxTEMpG7Q8u2Tr4nCW -x0/KQKLOkXd3cgNAJDrnWrfaA3zE9VB3dL0sbFG/NFU7HUxwdt3q7k3xU1raGsPT -539ydrvAaxiZswGH/UYMAbXCEM2e1mx1M/HVgNX8/3auERxPrMBO6YEM3cNd7ktX -q/JU69XXiuncl4uQJNQfADFlK5PD9KR8PLfIg6qIRmwI/rtp4M1+tLXeNt7apU/l -6kDyI0pjPH7eouUWFr1wqToAHHi2JU1e0gN8CP59Q8EzeJUtzOxTablGLZbz4FeN -ymFgCFrd3HCVo16Svepy10Ka7ZVlludHVC+IKy47VjkevQtXW9u4iUiwL2+S4gAJ -AfylzmRQNPIzwAM4CWGdb5pgXYzCNsE/WKuV/nGjIKz/noiXoJCTnLRI+0rnUcF1 -lNjZls9XzdPX67WPQZqs/6f47YChw5+yrpZ8NRnC8aPouR1BgpLWg0u0Xqa9NfDl -opRIDjCD1dkUwA0ByotM/eeqYLGnnqaQ+qbFyf5WMaEwngRanpX1dxh/W+kaYcEm -CeLWwmyL17uGpxT1ULsx9RFjUCuPC3IbHUZc6l9UtsCShCtK27wpkccaF9OpGM4k -cokXQNV9iP+ve4ZNaEdAZck4HgR/v0dYRV+QYaNeOaozRFIqrumKNIDah6JDHcI4 -ZorCnSdlX8sdCjJ71ap9572msK4L6HT/sSop/fORw7wwIvNmSPOJGKxNCacsfB9I -jRcU0WUWiWWVhqUC+AhnImB2wLYFL0uSMo8Ww5crdC6cWbmJdMHfI5Uag7ri3SAs -Y5hPc3/DXrgpFJ3WOJf9dpoCx0s6V4YERkZZvufD1Nz2DoGrw0NUAU8G7cBqImTG -ExsU1z6DwI0fuEZYT5xYtpO8EjCbPY7Ucz4gp3PyCWDAOiulHaH9JMli8rsjglY8 -nUTfkAA1kWSJbTQQxG0flgzOdQoPoezP4JKwGEaUJ7B9MGm3cKFmhefspk/UaavO -F9+35wsPhPhiA22+pKSCmoRnanq+Pi2+kC9M0DEY9O1UDFKeDg3U6sn4tVy5H9mb -qEHRnK06q8LAbUrKopSZDnmRt6bjjANm7h6ekasWndmM8VKZ77dhCyhTTBCL2jke -OmMJLBnfmE712XwZNV6ewHv52iV/2KE7DMhhY0TtISVSPdvE4/rbkmjH73UOPwZg -ufJDT8XnwqQEEzgOZUciUw5Xjc7xRikg3MCHEy2SHvT36/6UYRlI+ZkRZ1wYyXJX +mjFChSmk+AX00EmSz22A+kv8X5XwtW8awkrcud2cH1tUopGv5B7PL6dfprrNp4fj +T9nmncH65b+GovBClEqOy6I+Tzm/WJ6aOqQnkfL48QT5KW6bM/Gzm8JSJkXSEfDZ +Cck1FvMG4ituL50Rvlw5XpFjSyFt6VEZi/s6taSLPiJmuw8cWeEkMCs3I4nZee0k +lDvsiDRcqV/+Uk7jM4umepa7fBMumI8fydtscpFxJBspBVIb4UMx3i2lFWs7NeXf +EN92S1fhq3uWBDIlQMtAAoV2qf3cPSg7+du/iLB6/Sc7mxGyQ6aVWoIyxB+dwVVX +oqmeBgyNV8xS94Qcw2aNcqyyFsU0cIitafIzBuJO9+G2/FB+UWTJwPzbp/Kk24OV +wZ1wExFcYo4UOe6FT5LAihhfkGFdpXba8XviEgQK18o8lQL1SitwGSrlBNQsohJt +3Ug9aHvTGvECBdjj7NwNP0EDs7IxMVSsOHPAJyDLdWzHe0eLqRYCWNQlWGn7LKIP +dsqJqpMJkh3G3m7Lkkb0Z4YaUoFRi5DyCB2vkBcgmvbV2QdNHwc6EWk4OnFTq4vE +kWrMGa7lQkxnXV12NUNaUoHbsX7waeO5d81IUHbfa6r3/N046QGDrkLAFnsghfiS +I76y8QklqVIp5/5t4hg96n55lSqxbDAikHMrTI5niHUajqU74xkoCgh/SpwwwndT +QwdRH8jAMLyBf4xf5PQMPJwiNltX7QE6XB9sD80f9SJdf3xHF9L3H++ANPDzhHJC +ntGd/JmsMv+G/u+VLPTF9hB83UEMuTi1OL2Fm7V41yR6ZROYDUXyf8hhUxKAGasu +Hn1Q41mVb1nFE2NaD65+6MD2UtqBBXaD8raTGzcauxgkNTg4JroMM3vc+PSanSUp +ZSN28MYp4XlGl5dDhpoY782V09YIweLKAqC/4NZK9X0r563BLj5pektuqNlNG9N3 +E+X9F4fP4tjOYGeVDhZ1sYBYsUfHrkbngacGpnD+eis+ReSaBE7T5/jfxnbW9AW7 +Cj4qRlUYvyYznvHxCrQ8Su6IleW49z0lD8jLmxUPZkzMqEX8tYe8dxz5AaG2Egb9 +bhwheQiiih1LTv9ZuLMGyARQcOZsPqsi1SRMzaln5f/PNlh4RCP+JbjvHxy5IMAN +g5ti9ejF2D7A1YOxlyqv0xvN5z4OEkXngg3HCT/FFgJWsppEG6nbpgpDlnC/xBiJ +3VgtwmU/CsJ11lkCZihl2xMxiazA0WoKMNs8N4qxjr5YEnyBwRqFWjpMgXUyjcPH +H+QBAkUL11qbr8O0Sj2cn/urVZNCQMMDpYT2VDbJK6vrj2ZyEsH3XeeQr2ohevwp +4GVyIDgzvP/+B67k65Pvj6iUXI2kXqZhhURBPKDo00XCFSz+9guDJX7HeRII1D1k +VzW3aly1SvrEUgI30FpC1L47LY/NksVybSD1kxNMMdb1g/yVJCzGeMgMyiyreaxT ++UXJ5/sZIQ+FZqzh24EShM/JmUC9epO0nl8nee8FAd9kY9kkDCjTXf3KzYTiyKN8 +ma2xnLwWWK1bqj282/dj6D/QlXYgePb90Duq5X19HOTe6wouNnr6fx8ZH/k/tAGQ -----END RSA PRIVATE KEY----- diff --git a/tests/integration/long/ssl/rootCa.crt b/tests/integration/long/ssl/rootCa.crt index 3fd24df8ac..ee7805354b 100644 --- a/tests/integration/long/ssl/rootCa.crt +++ b/tests/integration/long/ssl/rootCa.crt @@ -1,19 +1,19 @@ -----BEGIN CERTIFICATE----- -MIIDCzCCAfMCFEmF5bV72tooSzGiFznZmtdMNDpLMA0GCSqGSIb3DQEBCwUAMEIx +MIIDCzCCAfMCFG+iEODhRSw0SUMut7vX0hPTLCe1MA0GCSqGSIb3DQEBCwUAMEIx CzAJBgNVBAYTAlVTMREwDwYDVQQKDAhkYXRhc3RheDEPMA0GA1UECwwGZmllbGRz -MQ8wDQYDVQQDDAZyb290Q2EwHhcNMTkxMTI3MTk1ODA0WhcNMjkxMTI0MTk1ODA0 +MQ8wDQYDVQQDDAZyb290Q2EwHhcNMjAwMTEwMjExMTM1WhcNMzAwMTA3MjExMTM1 WjBCMQswCQYDVQQGEwJVUzERMA8GA1UECgwIZGF0YXN0YXgxDzANBgNVBAsMBmZp ZWxkczEPMA0GA1UEAwwGcm9vdENhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB -CgKCAQEApn1jNTaOMRiOEC25Tsb4ZdqoXHWxaeewItBh/fLiVKXpuHnZbK1h8183 -Cqt5sKMaYfrGt9UVRf6ACZbqK5/bJkKKo8Ts5WsTqZ9MDA0F77XQ0phQoJ5NlFkf -okLvZ/+kUc7PDsi4Hvp3IbgEsAlTBhRPUvztaGZfZNQTR38XTGGVrXRKaUeA7O3j -jM+WKAYY5AFNoz3TUczdHnkR6srbkiC/t4NwOJBlM6zNPl1zank1LGz4EEdxasfj -JOK8VqoX7CRUsVl/Km1pFPqKq4Za79VEIAZwn8ZtGUkrXdYaFTOy1bzMAQHLy4cn -lrikWegc5Ctqcie39rhrd1Cokuj85wIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBu -NFqRArFrtK0arHBDDfRPour4W+3ncvuMRrR7noVUa4ymN1ExtCj+VarhIzccouOt -0ZGyPBma6g7wEy2fwW+L4ojg9WixolI3gibfX1ol9eKPfSQpjp/rBFwzR/myPVfj -AJDknN4x3HUMvVnz1lOKEaKSNOst9tu1WGqxn2HI+rwLqUfiP3ucV6828fSdvOMu -SbZCbsNIqJgTIN8tJZgL7iMi4u26q4FZZyrRElq2iQzkBPRycxwYsnMDt0MpQshN -Weyy0IhrVQFgDGr7YMt18nuEMLy1AgSPklkAtqhuYEZPe3RHFoqmObY5b9YeGdeh -LhV8l6WhvgYkpiQXmL0u +CgKCAQEAvLrr+qvLvUHZe4Py2QvqlAh/SMxscWsPKfCvJLYS4SX4tDxyn/Xgn1+M +9XMlhtnpJHSdL6bzVm3PMDJmPe+8zKa8s/vsbKCOakIi9GmjxGHMdRfN3NsubRvZ +sr6rX4VFxu6ATmZU9q1vNlOBSoFntOSTYTQH9svrQxsM3dNnDOXr3eFUAvNaNp/l +/YFcjD0LBSVYBUvLf+1RvIDDXS8nTCHZLaKbCevGBMMy8tWYgDD4CD8q9gQ3xail +yc7ES94NiZ6OB+a9GbUce3308NqVmWpDhXBzXshKy5TaH4VfthkJlnnYATjsYI/1 +XkLWVCSgwI2Yl7fHmttJXvmA/zggbQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCX +05lSh/MBzbxg6zEjQdyN3Mm/TNiZv+wIDDddYKVA8psX0gLiCgnpC0B4BK+ualYk +AQwJvgQQzbdxy/7RC/a0OEvXg4syjLs3vRq4fgCXebzfbpxhqykCO7czyS4WPZp3 +8AEeqjV0iZHE2WN1+kfWT6Dwc8rjFxWyLB4qgtKxpeW8sNdpSussNpD+IoKpIzzh +VBNRzb+g1tc87zDnvy7GfEjLRke57sZ82QN/bGZakdVgfppg1mbnrrbsOPEMSTMU +iCQo2JP64HS7oGmYUp5KQFge0fGqou0Ww/rkVp2AtR/tNKw4XwZybTQgwsdvz54S +KEmDs3I5S2S5QO5hRDG+ -----END CERTIFICATE----- From 3aa4de6dd036e545c6aacd061aaf402aa45bca8c Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 13 Jan 2020 10:16:43 -0500 Subject: [PATCH 1734/2005] release 3.21: bump version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index a7f50da57b..ea0a9b7bdd 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 20, 2) +__version_info__ = (3, 21, 0) __version__ = '.'.join(map(str, __version_info__)) From 36f6da72ae6a65ef18c9ea8f59f3c3115bfd3b22 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 13 Jan 2020 10:17:47 -0500 Subject: [PATCH 1735/2005] add 3.21 in docs.yaml --- docs.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs.yaml b/docs.yaml index 621269987a..b68aa7a4c9 100644 --- a/docs.yaml +++ b/docs.yaml @@ -22,6 +22,8 @@ sections: # build extensions like libev CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: + - name: '3.21' + ref: 3aa4de6d - name: '3.20' ref: d30d166f - name: '3.19' From 6556dcfafb9d4102230af18f89217589faf680d1 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 13 Jan 2020 11:18:58 -0500 Subject: [PATCH 1736/2005] docs.yaml should install deps from test-datastax-requirements.txt --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index b68aa7a4c9..0f48599c7d 100644 --- a/docs.yaml +++ b/docs.yaml @@ -13,7 +13,7 @@ sections: directory: docs virtualenv_init: | set -x - CASS_DRIVER_NO_CYTHON=1 pip install -r test-requirements.txt + CASS_DRIVER_NO_CYTHON=1 pip install -r test-datastax-requirements.txt # for newer versions this is redundant, but in older versions we need to # install, e.g., the cassandra driver, and those versions don't specify # the cassandra driver version in requirements files From 2d77ee850e7096a12f10d7c35309d31b23fec76b Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 13 Jan 2020 11:19:30 -0500 Subject: [PATCH 1737/2005] update 3.21 hash in docs.yaml --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index 0f48599c7d..b846c4dd2e 100644 --- a/docs.yaml +++ b/docs.yaml @@ -23,7 +23,7 @@ sections: CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - name: '3.21' - ref: 3aa4de6d + ref: 6556dcfa - name: '3.20' ref: d30d166f - name: '3.19' From 8d64851666554cc22b5a9a23613c6e5d550d240b Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 13 Jan 2020 11:42:56 -0500 Subject: [PATCH 1738/2005] Minor documentation improvements --- docs/getting_started.rst | 58 ++++++++++++++++++---------------------- docs/security.rst | 19 ++++++++++--- 2 files changed, 41 insertions(+), 36 deletions(-) diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 9b948c9778..8cb86a5504 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -40,14 +40,35 @@ behavior in some other way, this is the place to do it: .. code-block:: python - from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT - from cassandra.query import tuple_factory + from cassandra.cluster import Cluster + cluster = Cluster(['192.168.0.1', '192.168.0.2'], port=..., ssl_context=...) - profile = ExecutionProfile(row_factory=tuple_factory) - cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: profile}) +Instantiating a :class:`~.Cluster` does not actually connect us to any nodes. +To establish connections and begin executing queries we need a +:class:`~.Session`, which is created by calling :meth:`.Cluster.connect()`: + +.. code-block:: python + + cluster = Cluster() session = cluster.connect() - print(session.execute("SELECT release_version FROM system.local").one()) +The :meth:`~.Cluster.connect()` method takes an optional ``keyspace`` argument +which sets the default keyspace for all queries made through that :class:`~.Session`: + +.. code-block:: python + + cluster = Cluster() + session = cluster.connect('mykeyspace') + + +You can always change a Session's keyspace using :meth:`~.Session.set_keyspace` or +by executing a ``USE `` query: + +.. code-block:: python + + session.set_keyspace('users') + # or you can do this instead + session.execute('USE users') Profiles are passed in by ``execution_profiles`` dict. @@ -83,33 +104,6 @@ Users are free to setup additional profiles to be used by name: Also, parameters passed to ``Session.execute`` or attached to ``Statement``\s are still honored as before. -Instantiating a :class:`~.Cluster` does not actually connect us to any nodes. -To establish connections and begin executing queries we need a -:class:`~.Session`, which is created by calling :meth:`.Cluster.connect()`: - -.. code-block:: python - - cluster = Cluster() - session = cluster.connect() - -The :meth:`~.Cluster.connect()` method takes an optional ``keyspace`` argument -which sets the default keyspace for all queries made through that :class:`~.Session`: - -.. code-block:: python - - cluster = Cluster() - session = cluster.connect('mykeyspace') - - -You can always change a Session's keyspace using :meth:`~.Session.set_keyspace` or -by executing a ``USE `` query: - -.. code-block:: python - - session.set_keyspace('users') - # or you can do this instead - session.execute('USE users') - Executing Queries ----------------- Now that we have a :class:`.Session` we can begin to execute queries. The simplest diff --git a/docs/security.rst b/docs/security.rst index 4210788b54..4cf3163fb0 100644 --- a/docs/security.rst +++ b/docs/security.rst @@ -99,7 +99,10 @@ SSL Configuration Examples ^^^^^^^^^^^^^^^^^^^^^^^^^^ Here, we'll describe the server and driver configuration necessary to set up SSL to meet various goals, such as the client verifying the server and the server verifying the client. We'll also include Python code demonstrating how to use servers and drivers configured in these ways. -**No identity verification** +.. _ssl-no-identify-verification: + +No identity verification +++++++++++++++++++++++++ No identity verification at all. Note that this is not recommended for for production deployments. @@ -123,7 +126,10 @@ The driver configuration: cluster = Cluster(['127.0.0.1'], ssl_context=ssl_context) session = cluster.connect() -**Client verifies server** +.. _ssl-client-verifies-server: + +Client verifies server +++++++++++++++++++++++ Ensure the python driver verifies the identity of the server. @@ -166,7 +172,10 @@ Additionally, you can also force the driver to verify the `hostname` of the serv cluster = Cluster(['127.0.0.1'], ssl_context=ssl_context, ssl_options=ssl_options) session = cluster.connect() -**Server verifies client** +.. _ssl-server-verifies-client: + +Server verifies client +++++++++++++++++++++++ If Cassandra is configured to verify clients (``require_client_auth``), you need to generate SSL key and certificate files. @@ -229,8 +238,10 @@ Finally, you can use that configuration with the following driver code: cluster = Cluster(['127.0.0.1'], ssl_context=ssl_context) session = cluster.connect() +.. _ssl-server-client-verification: -**Server verifies client and client verifies server** +Server verifies client and client verifies server ++++++++++++++++++++++++++++++++++++++++++++++++++ See the previous section for examples of Cassandra configuration and preparing the client certificates. From 6c55cbf37012df9efcf722a8b98de48686251791 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 13 Jan 2020 15:02:08 -0500 Subject: [PATCH 1739/2005] A few other docs fixes --- docs/graph_fluent.rst | 2 +- docs/query_paging.rst | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/graph_fluent.rst b/docs/graph_fluent.rst index ecb79ab647..d707860661 100644 --- a/docs/graph_fluent.rst +++ b/docs/graph_fluent.rst @@ -1,7 +1,7 @@ DataStax Graph Fluent API ========================= -The fluent API adds graph features to the core driver:: +The fluent API adds graph features to the core driver: * A TinkerPop GraphTraversalSource builder to execute traversals on a DSE cluster * The ability to execution traversal queries explicitly using execute_graph diff --git a/docs/query_paging.rst b/docs/query_paging.rst index 0b97de4839..2c4a4995ca 100644 --- a/docs/query_paging.rst +++ b/docs/query_paging.rst @@ -86,10 +86,10 @@ You can resume the pagination when executing a new query by using the :attr:`.Re results = session.execute(statement) # save the paging_state somewhere and return current results - session['paging_stage'] = results.paging_state + web_session['paging_stage'] = results.paging_state # resume the pagination sometime later... statement = SimpleStatement(query, fetch_size=10) - ps = session['paging_state'] + ps = web_session['paging_state'] results = session.execute(statement, paging_state=ps) From ea33f2daeab0de272ab8c8e8f1bd6819774623d7 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 14 Jan 2020 14:39:44 -0500 Subject: [PATCH 1740/2005] release 3.21: changelog --- CHANGELOG.rst | 15 ++++++++------- docs.yaml | 4 ++-- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index d2a338c472..ae1b50a589 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,24 +1,22 @@ 3.21.0 ====== -Unreleased +January 15, 2020 Features -------- +* Unified driver: merge core and DSE drivers into a single package (PYTHON-1130) +* Add Python 3.8 support (PYTHON-1189) * Allow passing ssl context for Twisted (PYTHON-1161) -* ssl context and cloud support for Eventlet (PYTHON-1162) +* Ssl context and cloud support for Eventlet (PYTHON-1162) * Cloud Twisted support (PYTHON-1163) * Add additional_write_policy and read_repair to system schema parsing (PYTHON-1048) -* Handle prepared id mismatch when repreparing on the fly (PYTHON-1124) -* Remove *read_repair_chance table options (PYTHON-1140) * Flexible version parsing (PYTHON-1174) * Support NULL in collection deserializer (PYTHON-1123) -* Avoid warnings about unspecified load balancing policy when connecting to a cloud cluster (PYTHON-1177) -* Add Python 3.8 support (PYTHON-1189) -* Add new DSE CQL keywords (PYTHON-1122) * [GRAPH] Ability to execute Fluent Graph queries asynchronously (PYTHON-1129) Bug Fixes --------- +* Handle prepared id mismatch when repreparing on the fly (PYTHON-1124) * re-raising the CQLEngineException will fail on Python 3 (PYTHON-1166) * asyncio message chunks can be processed discontinuously (PYTHON-1185) * Reconnect attempts persist after downed node removed from peers (PYTHON-1181) @@ -30,6 +28,9 @@ Others ------ * The driver has a new dependency: geomet. It comes from the dse-driver unification and is used to support DSE geo types. +* Remove *read_repair_chance table options (PYTHON-1140) +* Avoid warnings about unspecified load balancing policy when connecting to a cloud cluster (PYTHON-1177) +* Add new DSE CQL keywords (PYTHON-1122) Deprecations ------------ diff --git a/docs.yaml b/docs.yaml index b846c4dd2e..363ec2208c 100644 --- a/docs.yaml +++ b/docs.yaml @@ -1,5 +1,5 @@ -title: DataStax Python Driver for Apache Cassandra -summary: DataStax Python Driver for Apache Cassandra Documentation +title: DataStax Python Driver +summary: DataStax Python Driver for Apache Cassandra® output: docs/_build/ swiftype_drivers: pythondrivers checks: From 9297c74c9075b9eb732190f7f0bdf1b06493b808 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 14 Jan 2020 14:45:47 -0500 Subject: [PATCH 1741/2005] release 3.21: docs --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index 363ec2208c..a49c787672 100644 --- a/docs.yaml +++ b/docs.yaml @@ -23,7 +23,7 @@ sections: CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - name: '3.21' - ref: 6556dcfa + ref: ea33f2da - name: '3.20' ref: d30d166f - name: '3.19' From dc6b3ec97532191e6334a32b66d0c09847b8f5be Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 14 Jan 2020 15:46:04 -0500 Subject: [PATCH 1742/2005] use TM for docs summary, not the registered char --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index a49c787672..51c6dbef2d 100644 --- a/docs.yaml +++ b/docs.yaml @@ -1,5 +1,5 @@ title: DataStax Python Driver -summary: DataStax Python Driver for Apache Cassandra® +summary: DataStax Python Driver for Apache Cassandra™ output: docs/_build/ swiftype_drivers: pythondrivers checks: From 4b1ef75f41d4a379799807d41ca748f2f85fee6b Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 14 Jan 2020 15:52:31 -0500 Subject: [PATCH 1743/2005] release 3.21: docs --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index 51c6dbef2d..d4233b3717 100644 --- a/docs.yaml +++ b/docs.yaml @@ -23,7 +23,7 @@ sections: CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - name: '3.21' - ref: ea33f2da + ref: dc6b3ec9 - name: '3.20' ref: d30d166f - name: '3.19' From 494824629397c00e7c0eb549cfba1b807ed26bc0 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 14 Jan 2020 16:22:37 -0500 Subject: [PATCH 1744/2005] =?UTF-8?q?use=20=C2=AE=20for=20docs=20summary?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index d4233b3717..feb2b43757 100644 --- a/docs.yaml +++ b/docs.yaml @@ -1,5 +1,5 @@ title: DataStax Python Driver -summary: DataStax Python Driver for Apache Cassandra™ +summary: DataStax Python Driver for Apache Cassandra® output: docs/_build/ swiftype_drivers: pythondrivers checks: From 9d11d2075aea314bc97cd2aaf6afa1d57e59993a Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 14 Jan 2020 16:22:56 -0500 Subject: [PATCH 1745/2005] release 3.21: docs --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index feb2b43757..985e33087a 100644 --- a/docs.yaml +++ b/docs.yaml @@ -23,7 +23,7 @@ sections: CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - name: '3.21' - ref: dc6b3ec9 + ref: 49482462 - name: '3.20' ref: d30d166f - name: '3.19' From 5589d96bf23ebd2ac73a8663592c4cefb0b265af Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 14 Jan 2020 19:30:55 -0500 Subject: [PATCH 1746/2005] Last fix for the registered char for Cassandra --- docs/index.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index a39c229a05..3a752975bd 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,6 +1,6 @@ -DataStax Python Driver for Apache Cassandra -=========================================== -A Python client driver for `Apache Cassandra `_. +DataStax Python Driver for Apache Cassandra® +============================================ +A Python client driver for `Apache Cassandra® `_. This driver works exclusively with the Cassandra Query Language v3 (CQL3) and Cassandra's native protocol. Cassandra 2.1+ is supported, including DSE 4.7+. From 5d7637d1bf81522d3c0f06a68c9e92918b0baec5 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 14 Jan 2020 19:31:21 -0500 Subject: [PATCH 1747/2005] release 3.21: update docs --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index 985e33087a..e2e1231834 100644 --- a/docs.yaml +++ b/docs.yaml @@ -23,7 +23,7 @@ sections: CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - name: '3.21' - ref: 49482462 + ref: 5589d96b - name: '3.20' ref: d30d166f - name: '3.19' From 2c855944d8b1788b29492e0640739daa56df5a60 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 16 Jan 2020 08:56:00 -0500 Subject: [PATCH 1748/2005] Moved some utils module to fix import errors in unit tests --- tests/integration/__init__.py | 2 + tests/integration/cloud/test_cloud.py | 2 +- .../integration/long/test_topology_change.py | 2 +- .../integration/simulacron/test_connection.py | 3 +- .../standard/test_custom_cluster.py | 2 +- tests/integration/util.py | 58 --------------- tests/unit/advanced/test_auth.py | 7 +- tests/unit/test_connection.py | 2 +- tests/util.py | 72 +++++++++++++++++++ 9 files changed, 84 insertions(+), 66 deletions(-) create mode 100644 tests/util.py diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 7c89ebcfc6..48f5f1aa10 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -166,6 +166,8 @@ def _get_dse_version_from_cass(cass_version): cv_string = os.getenv('CASSANDRA_VERSION', None) mcv_string = os.getenv('MAPPED_CASSANDRA_VERSION', None) try: + print(cv_string) + sasa cassandra_version = Version(cv_string) # env var is set to test-dse for DDAC except: # fallback to MAPPED_CASSANDRA_VERSION diff --git a/tests/integration/cloud/test_cloud.py b/tests/integration/cloud/test_cloud.py index 952a92835b..31b5367f3c 100644 --- a/tests/integration/cloud/test_cloud.py +++ b/tests/integration/cloud/test_cloud.py @@ -31,7 +31,7 @@ from mock import patch from tests.integration import requirescloudproxy -from tests.integration.util import wait_until_not_raised +from tests.util import wait_until_not_raised from tests.integration.cloud import CloudProxyCluster, CLOUD_PROXY_SERVER DISALLOWED_CONSISTENCIES = [ diff --git a/tests/integration/long/test_topology_change.py b/tests/integration/long/test_topology_change.py index ccd20779a5..8800cd802b 100644 --- a/tests/integration/long/test_topology_change.py +++ b/tests/integration/long/test_topology_change.py @@ -4,7 +4,7 @@ from cassandra.policies import HostStateListener from tests.integration import PROTOCOL_VERSION, get_node, use_cluster, local from tests.integration.long.utils import decommission -from tests.integration.util import wait_until +from tests.util import wait_until class StateListener(HostStateListener): diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index 25824cc599..a09950dda6 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -27,8 +27,9 @@ from cassandra.policies import HostStateListener, RoundRobinPolicy from tests import connection_class, thread_pool_executor_class +from tests.util late from tests.integration import requiressimulacron, libevtest -from tests.integration.util import assert_quiescent_pool_state, late +from tests.integration.util import assert_quiescent_pool_state # important to import the patch PROTOCOL_VERSION from the simulacron module from tests.integration.simulacron import SimulacronBase, PROTOCOL_VERSION from cassandra.connection import DEFAULT_CQL_VERSION diff --git a/tests/integration/standard/test_custom_cluster.py b/tests/integration/standard/test_custom_cluster.py index 9208c35cea..1943557ee4 100644 --- a/tests/integration/standard/test_custom_cluster.py +++ b/tests/integration/standard/test_custom_cluster.py @@ -14,7 +14,7 @@ from cassandra.cluster import Cluster, NoHostAvailable from tests.integration import use_singledc, get_cluster, remove_cluster, local -from tests.integration.util import wait_until, wait_until_not_raised +from tests.util import wait_until, wait_until_not_raised try: import unittest2 as unittest diff --git a/tests/integration/util.py b/tests/integration/util.py index a2ce9d5c3f..6215449d1f 100644 --- a/tests/integration/util.py +++ b/tests/integration/util.py @@ -13,7 +13,6 @@ # limitations under the License. from tests.integration import PROTOCOL_VERSION -from functools import wraps import time @@ -50,60 +49,3 @@ def assert_quiescent_pool_state(test_case, cluster, wait=None): test_case.assertEqual(connection.highest_request_id, max(req_ids)) if PROTOCOL_VERSION < 3: test_case.assertEqual(connection.highest_request_id, connection.max_request_id) - - -def wait_until(condition, delay, max_attempts): - """ - Executes a function at regular intervals while the condition - is false and the amount of attempts < maxAttempts. - :param condition: a function - :param delay: the delay in second - :param max_attempts: the maximum number of attempts. So the timeout - of this function is delay*max_attempts - """ - attempt = 0 - while not condition() and attempt < max_attempts: - attempt += 1 - time.sleep(delay) - - if attempt >= max_attempts: - raise Exception("Condition is still False after {} attempts.".format(max_attempts)) - - -def wait_until_not_raised(condition, delay, max_attempts): - """ - Executes a function at regular intervals while the condition - doesn't raise an exception and the amount of attempts < maxAttempts. - :param condition: a function - :param delay: the delay in second - :param max_attempts: the maximum number of attemps. So the timeout - of this function will be delay*max_attempts - """ - def wrapped_condition(): - try: - condition() - except: - return False - - return True - - attempt = 0 - while attempt < (max_attempts-1): - attempt += 1 - if wrapped_condition(): - return - - time.sleep(delay) - - # last attempt, let the exception raise - condition() - - -def late(seconds=1): - def decorator(func): - @wraps(func) - def wrapper(*args, **kwargs): - time.sleep(seconds) - func(*args, **kwargs) - return wrapper - return decorator diff --git a/tests/unit/advanced/test_auth.py b/tests/unit/advanced/test_auth.py index e4f7e4cf5e..bb411afe2b 100644 --- a/tests/unit/advanced/test_auth.py +++ b/tests/unit/advanced/test_auth.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os from puresasl import QOP try: @@ -21,10 +22,10 @@ from cassandra.auth import DSEGSSAPIAuthProvider -from tests.integration import requiredse - +# Cannot import requiredse from tests.integration # This auth provider requires kerberos and puresals -@requiredse +DSE_VERSION = os.getenv('DSE_VERSION', None) +@unittest.skipUnless(DSE_VERSION, "DSE required") class TestGSSAPI(unittest.TestCase): def test_host_resolution(self): diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index be205c33a8..68577a396e 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -31,7 +31,7 @@ from cassandra.protocol import (write_stringmultimap, write_int, write_string, SupportedMessage, ProtocolHandler) -from tests.integration.util import wait_until +from tests.util import wait_until class ConnectionTest(unittest.TestCase): diff --git a/tests/util.py b/tests/util.py new file mode 100644 index 0000000000..c5dfd8a387 --- /dev/null +++ b/tests/util.py @@ -0,0 +1,72 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time +from functools import wraps + +def wait_until(condition, delay, max_attempts): + """ + Executes a function at regular intervals while the condition + is false and the amount of attempts < maxAttempts. + :param condition: a function + :param delay: the delay in second + :param max_attempts: the maximum number of attempts. So the timeout + of this function is delay*max_attempts + """ + attempt = 0 + while not condition() and attempt < max_attempts: + attempt += 1 + time.sleep(delay) + + if attempt >= max_attempts: + raise Exception("Condition is still False after {} attempts.".format(max_attempts)) + + +def wait_until_not_raised(condition, delay, max_attempts): + """ + Executes a function at regular intervals while the condition + doesn't raise an exception and the amount of attempts < maxAttempts. + :param condition: a function + :param delay: the delay in second + :param max_attempts: the maximum number of attemps. So the timeout + of this function will be delay*max_attempts + """ + def wrapped_condition(): + try: + condition() + except: + return False + + return True + + attempt = 0 + while attempt < (max_attempts-1): + attempt += 1 + if wrapped_condition(): + return + + time.sleep(delay) + + # last attempt, let the exception raise + condition() + + +def late(seconds=1): + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + time.sleep(seconds) + func(*args, **kwargs) + return wrapper + return decorator From 095f542b523e37158c881b1626a0d62a6e58a4ee Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 16 Jan 2020 08:57:26 -0500 Subject: [PATCH 1749/2005] remove test code --- tests/integration/__init__.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 48f5f1aa10..7c89ebcfc6 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -166,8 +166,6 @@ def _get_dse_version_from_cass(cass_version): cv_string = os.getenv('CASSANDRA_VERSION', None) mcv_string = os.getenv('MAPPED_CASSANDRA_VERSION', None) try: - print(cv_string) - sasa cassandra_version = Version(cv_string) # env var is set to test-dse for DDAC except: # fallback to MAPPED_CASSANDRA_VERSION From c5a374f9924ebb6b107057dafba2b6fed88a115d Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 17 Jan 2020 14:47:48 -0600 Subject: [PATCH 1750/2005] ninja fix invalid syntax in test --- tests/integration/simulacron/test_connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index a09950dda6..afe2685dbf 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -27,7 +27,7 @@ from cassandra.policies import HostStateListener, RoundRobinPolicy from tests import connection_class, thread_pool_executor_class -from tests.util late +from tests.util import late from tests.integration import requiressimulacron, libevtest from tests.integration.util import assert_quiescent_pool_state # important to import the patch PROTOCOL_VERSION from the simulacron module From d59af15e57d2639b213644b8a9f5b697bcc693af Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 17 Jan 2020 13:12:53 -0600 Subject: [PATCH 1751/2005] Changes from test failures. Move session id creation before insights reporter starts. When the insights reporter retrieves the startup data, it captures the session id. Since this runs in a separate thread, the session id is usually created by the time this capture actually runs. But it's a race, and sessionId can occassionally be captured as None. Fix tests/integration/cqlengine/model/test_model.py:TestDeprecationWarning.test_deprecation_warnings: Asyncio throws warnings on python 2.7+ that aren't relevant to the test, so ignore them. --- cassandra/cluster.py | 3 ++- tests/integration/cqlengine/model/test_model.py | 13 ++++++++----- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index c9a8b6d397..a1cbe9323f 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2538,6 +2538,8 @@ def __init__(self, cluster, hosts, keyspace=None): msg += " using keyspace '%s'" % self.keyspace raise NoHostAvailable(msg, [h.address for h in hosts]) + self.session_id = uuid.uuid4() + cc_host = self.cluster.get_control_connection_host() valid_insights_version = (cc_host and version_supports_insights(cc_host.dse_version)) if self.cluster.monitor_reporting_enabled and valid_insights_version: @@ -2551,7 +2553,6 @@ def __init__(self, cluster, hosts, keyspace=None): 'not supported by server version {v} on ' 'ControlConnection host {c}'.format(v=cc_host.release_version, c=cc_host)) - self.session_id = uuid.uuid4() log.debug('Started Session with client_id {} and session_id {}'.format(self.cluster.client_id, self.session_id)) diff --git a/tests/integration/cqlengine/model/test_model.py b/tests/integration/cqlengine/model/test_model.py index 81de0ead0c..bbd9e0cbb6 100644 --- a/tests/integration/cqlengine/model/test_model.py +++ b/tests/integration/cqlengine/model/test_model.py @@ -259,10 +259,13 @@ class SensitiveModel(Model): rows[-1] rows[-1:] - self.assertEqual(len(w), 4) - self.assertIn("__table_name_case_sensitive__ will be removed in 4.0.", str(w[0].message)) - self.assertIn("__table_name_case_sensitive__ will be removed in 4.0.", str(w[1].message)) + # Asyncio complains loudly about old syntax on python 3.7+, so get rid of all of those + relevant_warnings = [warn for warn in w if "with (yield from lock)" not in str(warn.message)] + + self.assertEqual(len(relevant_warnings), 4) + self.assertIn("__table_name_case_sensitive__ will be removed in 4.0.", str(relevant_warnings[0].message)) + self.assertIn("__table_name_case_sensitive__ will be removed in 4.0.", str(relevant_warnings[1].message)) self.assertIn("ModelQuerySet indexing with negative indices support will be removed in 4.0.", - str(w[2].message)) + str(relevant_warnings[2].message)) self.assertIn("ModelQuerySet slicing with negative indices support will be removed in 4.0.", - str(w[3].message)) + str(relevant_warnings[3].message)) From 9400af8960d8616b2ff506dc95ccabafccc86334 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 21 Jan 2020 15:11:16 -0500 Subject: [PATCH 1752/2005] Add all() function to the ResultSet API --- CHANGELOG.rst | 9 +++++++++ cassandra/cluster.py | 9 +++++++++ tests/unit/test_resultset.py | 7 +++++++ 3 files changed, 25 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index ae1b50a589..da15a1158b 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,11 @@ +3.22.0 +====== +UNRELEASED + +Features +-------- +* Add all() function to the ResultSet API (PYTHON-1203) + 3.21.0 ====== January 15, 2020 @@ -31,6 +39,7 @@ Others * Remove *read_repair_chance table options (PYTHON-1140) * Avoid warnings about unspecified load balancing policy when connecting to a cloud cluster (PYTHON-1177) * Add new DSE CQL keywords (PYTHON-1122) +* Publish binary wheel distributions (PYTHON-1013) Deprecations ------------ diff --git a/cassandra/cluster.py b/cassandra/cluster.py index a1cbe9323f..59c8b61f96 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -4934,6 +4934,15 @@ def current_rows(self): """ return self._current_rows or [] + def all(self): + """ + Returns all the remaining rows as a list. This is basically + a convenient shortcut to `list(result_set)`. + + This function is not recommended for queries that return a large number of elements. + """ + return list(self) + def one(self): """ Return a single row of the results or None if empty. This is basically diff --git a/tests/unit/test_resultset.py b/tests/unit/test_resultset.py index c1a2562360..1af3e849b6 100644 --- a/tests/unit/test_resultset.py +++ b/tests/unit/test_resultset.py @@ -195,6 +195,13 @@ def test_one(self): self.assertEqual(rs.one(), first) + def test_all(self): + first, second = Mock(), Mock() + rs1 = ResultSet(Mock(has_more_pages=False), [first, second]) + rs2 = ResultSet(Mock(has_more_pages=False), [first, second]) + + self.assertEqual(rs1.all(), list(rs2)) + @patch('cassandra.cluster.warn') def test_indexing_deprecation(self, mocked_warn): # normally we'd use catch_warnings to test this, but that doesn't work From 3a5bd85f0cccb4a74399dc5eb3b9e69e9af532c7 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 23 Jan 2020 12:55:20 -0600 Subject: [PATCH 1753/2005] Test 68 (#1066) 6.8 test changes * Materialized view tests: You now can't create a MV with clustering order by unless you specify ALL of the clustering columns. So tests are updated to do that. * DuplicateRpcTest: We used to be able to update system.peers, but that's not allowed anymore, so converted it to a simulacron test. Updated another test to use mocks for the same reason. * Updated virtual keyspace test to not verify exact structure since that will change between versions. Rather, verify a few known values that should exist in all versions to verify we parsed the structure correctly. * Tombstone settings have moved under guardrails, and delete entire row instead of just cells in tombstone tests due to DB-2426. * Added a few optional env vars to allow management of an external cluster * DSE version checks --- tests/integration/__init__.py | 10 +- .../integration/cqlengine/query/test_named.py | 2 +- tests/integration/long/test_failure_types.py | 9 +- tests/integration/simulacron/test_cluster.py | 41 +++- tests/integration/standard/test_cluster.py | 185 ++++++------------ .../standard/test_control_connection.py | 3 +- tests/integration/standard/test_metadata.py | 45 ++--- tests/integration/standard/test_query.py | 8 +- 8 files changed, 141 insertions(+), 162 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 7c89ebcfc6..0a3d6542a0 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -125,7 +125,7 @@ def _get_cass_version_from_dse(dse_version): cass_ver = '4.0.0.2349' else: cass_ver = '4.0.0.' + ''.join(dse_version.split('.')) - elif dse_version.startswith('6.7'): + elif Version(dse_version) >= Version('6.7'): if dse_version == '6.7.0': cass_ver = "4.0.0.67" else: @@ -472,7 +472,13 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, if CCM_CLUSTER: log.debug("Using external CCM cluster {0}".format(CCM_CLUSTER.name)) else: - log.debug("Using unnamed external cluster") + ccm_path = os.getenv("CCM_PATH", None) + ccm_name = os.getenv("CCM_NAME", None) + if ccm_path and ccm_name: + CCM_CLUSTER = CCMClusterFactory.load(ccm_path, ccm_name) + log.debug("Using external CCM cluster {0}".format(CCM_CLUSTER.name)) + else: + log.debug("Using unnamed external cluster") if set_keyspace and start: setup_keyspace(ipformat=ipformat, wait=False) return diff --git a/tests/integration/cqlengine/query/test_named.py b/tests/integration/cqlengine/query/test_named.py index 4907c26661..3a6f83b32e 100644 --- a/tests/integration/cqlengine/query/test_named.py +++ b/tests/integration/cqlengine/query/test_named.py @@ -335,7 +335,7 @@ def test_named_table_with_mv(self): SELECT * FROM {0}.scores WHERE game IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND day IS NOT NULL PRIMARY KEY (game, score, user, year, month, day) - WITH CLUSTERING ORDER BY (score DESC)""".format(ks) + WITH CLUSTERING ORDER BY (score DESC, user DESC, year DESC, month DESC, day DESC)""".format(ks) self.session.execute(create_mv_alltime) diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py index a63b36649b..486313e096 100644 --- a/tests/integration/long/test_failure_types.py +++ b/tests/integration/long/test_failure_types.py @@ -16,6 +16,8 @@ import sys import traceback import time + +from ccmlib.dse_cluster import DseCluster from mock import Mock from cassandra.policies import HostFilterPolicy, RoundRobinPolicy @@ -29,7 +31,7 @@ from tests.integration import ( use_singledc, PROTOCOL_VERSION, get_cluster, setup_keyspace, remove_cluster, get_node, start_cluster_wait_for_up, requiresmallclockgranularity, -) + local) try: @@ -40,6 +42,7 @@ log = logging.getLogger(__name__) +@local def setup_module(): """ We need some custom setup for this module. All unit tests in this module @@ -52,7 +55,7 @@ def setup_module(): use_singledc(start=False) ccm_cluster = get_cluster() ccm_cluster.stop() - config_options = {'tombstone_failure_threshold': 2000, 'tombstone_warn_threshold': 1000} + config_options = {'guardrails.tombstone_failure_threshold': 2000, 'guardrails.tombstone_warn_threshold': 1000} ccm_cluster.set_configuration_options(config_options) start_cluster_wait_for_up(ccm_cluster) setup_keyspace() @@ -252,7 +255,7 @@ def test_tombstone_overflow_read_failure(self): parameters = [(x,) for x in range(3000)] self.execute_concurrent_args_helper(self.session, statement, parameters) - statement = self.session.prepare("DELETE v1 FROM test3rf.test2 WHERE k = 1 AND v0 =?") + statement = self.session.prepare("DELETE FROM test3rf.test2 WHERE k = 1 AND v0 =?") parameters = [(x,) for x in range(2001)] self.execute_concurrent_args_helper(self.session, statement, parameters) diff --git a/tests/integration/simulacron/test_cluster.py b/tests/integration/simulacron/test_cluster.py index ec20c106ba..b89f564f08 100644 --- a/tests/integration/simulacron/test_cluster.py +++ b/tests/integration/simulacron/test_cluster.py @@ -16,14 +16,20 @@ except ImportError: import unittest # noqa -from tests.integration.simulacron import SimulacronCluster -from tests.integration import (requiressimulacron, PROTOCOL_VERSION) -from tests.integration.simulacron.utils import prime_query +import logging +from packaging.version import Version + +import cassandra +from tests.integration.simulacron import SimulacronCluster, SimulacronBase +from tests.integration import (requiressimulacron, PROTOCOL_VERSION, DSE_VERSION, MockLoggingHandler) +from tests.integration.simulacron.utils import prime_query, start_and_prime_singledc from cassandra import (WriteTimeout, WriteType, ConsistencyLevel, UnresolvableContactPoints) -from cassandra.cluster import Cluster +from cassandra.cluster import Cluster, ControlConnection + +PROTOCOL_VERSION = min(4, PROTOCOL_VERSION if (DSE_VERSION is None or DSE_VERSION >= Version('5.0')) else 3) @requiressimulacron class ClusterTests(SimulacronCluster): @@ -78,3 +84,30 @@ def test_connection_with_only_unresolvable_contact_points(self): self.cluster = Cluster(['dns.invalid'], protocol_version=PROTOCOL_VERSION, compression=False) + + +@requiressimulacron +class DuplicateRpcTest(SimulacronCluster): + connect = False + + def test_duplicate(self): + mock_handler = MockLoggingHandler() + logger = logging.getLogger(cassandra.cluster.__name__) + logger.addHandler(mock_handler) + address_column = "native_transport_address" if DSE_VERSION and DSE_VERSION > Version("6.0") else "rpc_address" + rows = [ + {"peer": "127.0.0.1", "data_center": "dc", "host_id": "dontcare1", "rack": "rack1", + "release_version": "3.11.4", address_column: "127.0.0.1", "schema_version": "dontcare", "tokens": "1"}, + {"peer": "127.0.0.2", "data_center": "dc", "host_id": "dontcare2", "rack": "rack1", + "release_version": "3.11.4", address_column: "127.0.0.2", "schema_version": "dontcare", "tokens": "2"}, + ] + prime_query(ControlConnection._SELECT_PEERS, rows=rows) + + cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False) + session = cluster.connect(wait_for_all_pools=True) + + warnings = mock_handler.messages.get("warning") + self.assertEqual(len(warnings), 1) + self.assertTrue('multiple hosts with the same endpoint' in warnings[0]) + logger.removeHandler(mock_handler) + cluster.shutdown() diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index eae7d64b71..2314931b7d 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -27,7 +27,7 @@ from packaging.version import Version import cassandra -from cassandra.cluster import Cluster, NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT +from cassandra.cluster import Cluster, NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT, ControlConnection from cassandra.concurrent import execute_concurrent from cassandra.policies import (RoundRobinPolicy, ExponentialReconnectionPolicy, RetryPolicy, SimpleConvictionPolicy, HostDistance, @@ -502,79 +502,70 @@ def test_refresh_schema_type(self): @local @notwindows def test_refresh_schema_no_wait(self): - contact_points = [CASSANDRA_IP] - with Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=10, - contact_points=contact_points, - execution_profiles= - {EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy= - HostFilterPolicy( - RoundRobinPolicy(), lambda host: host.address == CASSANDRA_IP - ))}) as cluster: - session = cluster.connect() - - schema_ver = session.execute("SELECT schema_version FROM system.local WHERE key='local'")[0][0] - new_schema_ver = uuid4() - session.execute("UPDATE system.local SET schema_version=%s WHERE key='local'", (new_schema_ver,)) - - try: - agreement_timeout = 1 - - # cluster agreement wait exceeded - c = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=agreement_timeout) - c.connect() - self.assertTrue(c.metadata.keyspaces) - - # cluster agreement wait used for refresh - original_meta = c.metadata.keyspaces - start_time = time.time() - self.assertRaisesRegexp(Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata) - end_time = time.time() - self.assertGreaterEqual(end_time - start_time, agreement_timeout) - self.assertIs(original_meta, c.metadata.keyspaces) - - # refresh wait overrides cluster value - original_meta = c.metadata.keyspaces - start_time = time.time() - c.refresh_schema_metadata(max_schema_agreement_wait=0) - end_time = time.time() - self.assertLess(end_time - start_time, agreement_timeout) - self.assertIsNot(original_meta, c.metadata.keyspaces) - self.assertEqual(original_meta, c.metadata.keyspaces) - - c.shutdown() - - refresh_threshold = 0.5 - # cluster agreement bypass - c = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=0) - start_time = time.time() - s = c.connect() - end_time = time.time() - self.assertLess(end_time - start_time, refresh_threshold) - self.assertTrue(c.metadata.keyspaces) - - # cluster agreement wait used for refresh - original_meta = c.metadata.keyspaces - start_time = time.time() - c.refresh_schema_metadata() - end_time = time.time() - self.assertLess(end_time - start_time, refresh_threshold) - self.assertIsNot(original_meta, c.metadata.keyspaces) - self.assertEqual(original_meta, c.metadata.keyspaces) - - # refresh wait overrides cluster value - original_meta = c.metadata.keyspaces - start_time = time.time() - self.assertRaisesRegexp(Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata, - max_schema_agreement_wait=agreement_timeout) - end_time = time.time() - self.assertGreaterEqual(end_time - start_time, agreement_timeout) - self.assertIs(original_meta, c.metadata.keyspaces) - c.shutdown() - finally: - # TODO once fixed this connect call - session = cluster.connect() - session.execute("UPDATE system.local SET schema_version=%s WHERE key='local'", (schema_ver,)) - + original_wait_for_responses = connection.Connection.wait_for_responses + + def patched_wait_for_responses(*args, **kwargs): + # When selecting schema version, replace the real schema UUID with an unexpected UUID + response = original_wait_for_responses(*args, **kwargs) + if len(args) > 2 and hasattr(args[2], "query") and args[2].query == "SELECT schema_version FROM system.local WHERE key='local'": + new_uuid = uuid4() + response[1].parsed_rows[0] = (new_uuid,) + return response + + with patch.object(connection.Connection, "wait_for_responses", patched_wait_for_responses): + agreement_timeout = 1 + + # cluster agreement wait exceeded + c = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=agreement_timeout) + c.connect() + self.assertTrue(c.metadata.keyspaces) + + # cluster agreement wait used for refresh + original_meta = c.metadata.keyspaces + start_time = time.time() + self.assertRaisesRegexp(Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata) + end_time = time.time() + self.assertGreaterEqual(end_time - start_time, agreement_timeout) + self.assertIs(original_meta, c.metadata.keyspaces) + + # refresh wait overrides cluster value + original_meta = c.metadata.keyspaces + start_time = time.time() + c.refresh_schema_metadata(max_schema_agreement_wait=0) + end_time = time.time() + self.assertLess(end_time - start_time, agreement_timeout) + self.assertIsNot(original_meta, c.metadata.keyspaces) + self.assertEqual(original_meta, c.metadata.keyspaces) + + c.shutdown() + + refresh_threshold = 0.5 + # cluster agreement bypass + c = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=0) + start_time = time.time() + s = c.connect() + end_time = time.time() + self.assertLess(end_time - start_time, refresh_threshold) + self.assertTrue(c.metadata.keyspaces) + + # cluster agreement wait used for refresh + original_meta = c.metadata.keyspaces + start_time = time.time() + c.refresh_schema_metadata() + end_time = time.time() + self.assertLess(end_time - start_time, refresh_threshold) + self.assertIsNot(original_meta, c.metadata.keyspaces) + self.assertEqual(original_meta, c.metadata.keyspaces) + + # refresh wait overrides cluster value + original_meta = c.metadata.keyspaces + start_time = time.time() + self.assertRaisesRegexp(Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata, + max_schema_agreement_wait=agreement_timeout) + end_time = time.time() + self.assertGreaterEqual(end_time - start_time, agreement_timeout) + self.assertIs(original_meta, c.metadata.keyspaces) + c.shutdown() def test_trace(self): """ @@ -1480,52 +1471,6 @@ def test_prepare_on_ignored_hosts(self): cluster.shutdown() -@local -class DuplicateRpcTest(unittest.TestCase): - - load_balancing_policy = HostFilterPolicy(RoundRobinPolicy(), - lambda host: host.address == "127.0.0.1") - - def setUp(self): - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=self.load_balancing_policy)}) - self.session = self.cluster.connect() - - self.address_column = "native_transport_address" if DSE_VERSION and DSE_VERSION >= Version("6.0") else "rpc_address" - self.session.execute("UPDATE system.peers SET {} = '127.0.0.1' WHERE peer='127.0.0.2'". - format(self.address_column)) - - def tearDown(self): - self.session.execute("UPDATE system.peers SET {} = '127.0.0.2' WHERE peer='127.0.0.2'". - format(self.address_column)) - self.cluster.shutdown() - - def test_duplicate(self): - """ - Test duplicate RPC addresses. - - Modifies the system.peers table to make hosts have the same rpc address. Ensures such hosts are filtered out and a message is logged - - @since 3.4 - @jira_ticket PYTHON-366 - @expected_result only one hosts' metadata will be populated - - @test_category metadata - """ - mock_handler = MockLoggingHandler() - logger = logging.getLogger(cassandra.cluster.__name__) - logger.addHandler(mock_handler) - test_cluster = Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=self.load_balancing_policy)}) - - test_cluster.connect() - warnings = mock_handler.messages.get("warning") - self.assertEqual(len(warnings), 1) - self.assertTrue('multiple' in warnings[0]) - logger.removeHandler(mock_handler) - test_cluster.shutdown() - - @protocolv5 class BetaProtocolTest(unittest.TestCase): diff --git a/tests/integration/standard/test_control_connection.py b/tests/integration/standard/test_control_connection.py index b928cd2b68..b91d29c4e6 100644 --- a/tests/integration/standard/test_control_connection.py +++ b/tests/integration/standard/test_control_connection.py @@ -14,6 +14,7 @@ # # # +from cassandra import InvalidRequest try: import unittest2 as unittest @@ -43,7 +44,7 @@ def setUp(self): def tearDown(self): try: self.session.execute("DROP KEYSPACE keyspacetodrop ") - except (ConfigurationException): + except (ConfigurationException, InvalidRequest): # we already removed the keyspace. pass self.cluster.shutdown() diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index a5038672d4..6169e5d951 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -723,7 +723,7 @@ def test_refresh_metadata_for_mv(self): try: self.assertNotIn("mv1", cluster2.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) - self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT b FROM {0}.{1} WHERE b IS NOT NULL PRIMARY KEY (a, b)" + self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT a, b FROM {0}.{1} WHERE b IS NOT NULL PRIMARY KEY (a, b)" .format(self.keyspace_name, self.function_table_name)) self.assertNotIn("mv1", cluster2.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) @@ -745,7 +745,7 @@ def test_refresh_metadata_for_mv(self): cluster3.connect() try: self.assertNotIn("mv2", cluster3.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) - self.session.execute("CREATE MATERIALIZED VIEW {0}.mv2 AS SELECT b FROM {0}.{1} WHERE b IS NOT NULL PRIMARY KEY (a, b)" + self.session.execute("CREATE MATERIALIZED VIEW {0}.mv2 AS SELECT a, b FROM {0}.{1} WHERE b IS NOT NULL PRIMARY KEY (a, b)" .format(self.keyspace_name, self.function_table_name)) self.assertNotIn("mv2", cluster3.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) cluster3.refresh_materialized_view_metadata(self.keyspace_name, 'mv2') @@ -2007,10 +2007,15 @@ def test_dct_alias(self): dct_table = self.cluster.metadata.keyspaces.get(self.ks_name).tables.get(self.function_table_name) # Format can very slightly between versions, strip out whitespace for consistency sake - self.assertTrue("c1'org.apache.cassandra.db.marshal.DynamicCompositeType(" - "s=>org.apache.cassandra.db.marshal.UTF8Type," - "i=>org.apache.cassandra.db.marshal.Int32Type)'" - in dct_table.as_cql_query().replace(" ", "")) + table_text = dct_table.as_cql_query().replace(" ", "") + dynamic_type_text = "c1'org.apache.cassandra.db.marshal.DynamicCompositeType(" + self.assertIn("c1'org.apache.cassandra.db.marshal.DynamicCompositeType(", table_text) + # Types within in the composite can come out in random order, so grab the type definition and find each one + type_definition_start = table_text.index("(", table_text.find(dynamic_type_text)) + type_definition_end = table_text.index(")") + type_definition_text = table_text[type_definition_start:type_definition_end] + self.assertIn("s=>org.apache.cassandra.db.marshal.UTF8Type", type_definition_text) + self.assertIn("i=>org.apache.cassandra.db.marshal.Int32Type", type_definition_text) @greaterthanorequalcass30 @@ -2018,7 +2023,7 @@ class Materia3lizedViewMetadataTestSimple(BasicSharedKeyspaceUnitTestCase): def setUp(self): self.session.execute("CREATE TABLE {0}.{1} (pk int PRIMARY KEY, c int)".format(self.keyspace_name, self.function_table_name)) - self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT c FROM {0}.{1} WHERE c IS NOT NULL PRIMARY KEY (pk, c)".format(self.keyspace_name, self.function_table_name)) + self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT pk, c FROM {0}.{1} WHERE c IS NOT NULL PRIMARY KEY (pk, c)".format(self.keyspace_name, self.function_table_name)) def tearDown(self): self.session.execute("DROP MATERIALIZED VIEW {0}.mv1".format(self.keyspace_name)) @@ -2089,7 +2094,7 @@ def test_materialized_view_metadata_drop(self): self.assertDictEqual({}, self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) self.assertDictEqual({}, self.cluster.metadata.keyspaces[self.keyspace_name].views) - self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT c FROM {0}.{1} WHERE c IS NOT NULL PRIMARY KEY (pk, c)".format(self.keyspace_name, self.function_table_name)) + self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT pk, c FROM {0}.{1} WHERE c IS NOT NULL PRIMARY KEY (pk, c)".format(self.keyspace_name, self.function_table_name)) @greaterthanorequalcass30 @@ -2232,7 +2237,7 @@ def test_base_table_column_addition_mv(self): SELECT * FROM {0}.scores WHERE game IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND day IS NOT NULL PRIMARY KEY (game, score, user, year, month, day) - WITH CLUSTERING ORDER BY (score DESC)""".format(self.keyspace_name) + WITH CLUSTERING ORDER BY (score DESC, user ASC, year ASC, month ASC, day ASC)""".format(self.keyspace_name) self.session.execute(create_mv) @@ -2462,22 +2467,6 @@ def _assert_group_keys_by_host(self, keys, table_name, stmt): class VirtualKeypaceTest(BasicSharedKeyspaceUnitTestCase): virtual_ks_names = ('system_virtual_schema', 'system_views') - virtual_ks_structure = { - # keyspaces - 'system_virtual_schema': { - # tables: columns. columns are a set because we're comparing unordered - 'keyspaces': {'keyspace_name'}, - 'tables': {'comment', 'keyspace_name', 'table_name'}, - 'columns': {'clustering_order', 'column_name', 'column_name_bytes', - 'keyspace_name', 'kind', 'position', 'table_name', - 'type'} - }, - 'system_views': { - 'sstable_tasks': {'keyspace_name', 'kind', 'progress', - 'table_name', 'task_id', 'total', 'unit'} - } - } - def test_existing_keyspaces_have_correct_virtual_tags(self): for name, ks in self.cluster.metadata.keyspaces.items(): if name in self.virtual_ks_names: @@ -2514,5 +2503,7 @@ def test_virtual_keyspaces_have_expected_schema_structure(self): tab.columns.keys() ) - self.assertDictEqual(ingested_virtual_ks_structure, - self.virtual_ks_structure) + # Identify a couple known values to verify we parsed the structure correctly + self.assertIn('table_name', ingested_virtual_ks_structure['system_virtual_schema']['tables']) + self.assertIn('type', ingested_virtual_ks_structure['system_virtual_schema']['columns']) + self.assertIn('total', ingested_virtual_ks_structure['system_views']['sstable_tasks']) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 62478974eb..63f94399a6 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -1193,25 +1193,25 @@ def test_mv_filtering(self): SELECT * FROM {0}.scores WHERE game IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND day IS NOT NULL PRIMARY KEY (game, score, user, year, month, day) - WITH CLUSTERING ORDER BY (score DESC)""".format(self.keyspace_name) + WITH CLUSTERING ORDER BY (score DESC, user ASC, year ASC, month ASC, day ASC)""".format(self.keyspace_name) create_mv_dailyhigh = """CREATE MATERIALIZED VIEW {0}.dailyhigh AS SELECT * FROM {0}.scores WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND day IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL PRIMARY KEY ((game, year, month, day), score, user) - WITH CLUSTERING ORDER BY (score DESC)""".format(self.keyspace_name) + WITH CLUSTERING ORDER BY (score DESC, user ASC)""".format(self.keyspace_name) create_mv_monthlyhigh = """CREATE MATERIALIZED VIEW {0}.monthlyhigh AS SELECT * FROM {0}.scores WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL PRIMARY KEY ((game, year, month), score, user, day) - WITH CLUSTERING ORDER BY (score DESC)""".format(self.keyspace_name) + WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)""".format(self.keyspace_name) create_mv_filtereduserhigh = """CREATE MATERIALIZED VIEW {0}.filtereduserhigh AS SELECT * FROM {0}.scores WHERE user in ('jbellis', 'pcmanus') AND game IS NOT NULL AND score IS NOT NULL AND year is NOT NULL AND day is not NULL and month IS NOT NULL PRIMARY KEY (game, score, user, year, month, day) - WITH CLUSTERING ORDER BY (score DESC)""".format(self.keyspace_name) + WITH CLUSTERING ORDER BY (score DESC, user ASC, year ASC, month ASC, day ASC)""".format(self.keyspace_name) self.session.execute(create_mv_alltime) self.session.execute(create_mv_dailyhigh) From d28014c2a08b1d1a0317bd3398db42a8a501dc6e Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 23 Jan 2020 15:48:41 -0500 Subject: [PATCH 1754/2005] Minor docs improvements --- cassandra/cluster.py | 1 + docs/api/cassandra/cluster.rst | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index d7a963791b..a33404c5fd 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2754,6 +2754,7 @@ def _resolve_execution_profile_options(self, execution_profile): Determine the GraphSON protocol and row factory for a graph query. This is useful to configure automatically the execution profile when executing a query on a core graph. + If `graph_protocol` is not explicitly specified, the following rules apply: - Default to GraphProtocol.GRAPHSON_1_0, or GRAPHSON_2_0 if the `graph_language` is not gremlin-groovy. - If `graph_options.graph_name` is specified and is a Core graph, set GraphSON_3_0. diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 71e110559e..459f287b4c 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -120,13 +120,19 @@ .. automethod:: set_meta_refresh_enabled -.. autoclass:: ExecutionProfile (load_balancing_policy=, retry_policy=None, consistency_level=LOCAL_ONE, serial_consistency_level=None, request_timeout=10.0, row_factory=, speculative_execution_policy=None) +.. autoclass:: ExecutionProfile (load_balancing_policy=, retry_policy=None, consistency_level=ConsistencyLevel.LOCAL_ONE, serial_consistency_level=None, request_timeout=10.0, row_factory=, speculative_execution_policy=None) :members: :exclude-members: consistency_level .. autoattribute:: consistency_level :annotation: = LOCAL_ONE +.. autoclass:: GraphExecutionProfile (load_balancing_policy=_NOT_SET, retry_policy=None, consistency_level=ConsistencyLevel.LOCAL_ONE, serial_consistency_level=None, request_timeout=30.0, row_factory=, graph_options=None, continuous_paging_options=_NOT_SET) + :members: + +.. autoclass:: GraphAnalyticsExecutionProfile (load_balancing_policy=None, retry_policy=None, consistency_level=ConsistencyLevel.LOCAL_ONE, serial_consistency_level=None, request_timeout=3600. * 24. * 7., row_factory=, graph_options=None) + :members: + .. autodata:: EXEC_PROFILE_DEFAULT :annotation: From e1739be48cd6b318e96277ab7fb646e7287f63a6 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 24 Jan 2020 14:27:42 -0500 Subject: [PATCH 1755/2005] Remove tmp build section --- build.yaml | 12 ------------ cassandra/__init__.py | 2 +- 2 files changed, 1 insertion(+), 13 deletions(-) diff --git a/build.yaml b/build.yaml index 40c8a15d9d..985a5d66be 100644 --- a/build.yaml +++ b/build.yaml @@ -57,18 +57,6 @@ schedules: env_vars: | EVENT_LOOP_MANAGER='libev' - tmpngdg: - schedule: adhoc - branches: - include: [merge_ossnext_ngdg] - env_vars: | - EVENT_LOOP_MANAGER='libev' - EXCLUDE_LONG=1 - matrix: - exclude: - - python: [2.7, 3.4, 3.6, 3.7] - - cassandra: ['2.1', '2.2', '3.0', '3.11', 'dse-4.8', 'dse-5.0', 'dse-5.1', 'dse-6.0', 'dse-6.7'] - weekly_master: schedule: 0 10 * * 6 disable_pull_requests: true diff --git a/cassandra/__init__.py b/cassandra/__init__.py index d5d02d29e4..76c7a649de 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 21, 0, 'post0+20200123') +__version_info__ = (3, 21, 0, 'post0+20200127') __version__ = '.'.join(map(str, __version_info__)) From 19514c629ec886e89765c79b0296f037ae83f1c6 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 28 Jan 2020 11:37:11 -0500 Subject: [PATCH 1756/2005] Fix some minor test failures: virtual tables, replaced time.sleep by wait_until.. --- build.yaml | 2 +- cassandra/metadata.py | 24 ++++++++++++------- .../advanced/graph/fluent/test_graph.py | 7 ++++-- .../advanced/graph/test_graph_query.py | 2 ++ tests/integration/simulacron/utils.py | 4 +++- 5 files changed, 27 insertions(+), 12 deletions(-) diff --git a/build.yaml b/build.yaml index 985a5d66be..460192131d 100644 --- a/build.yaml +++ b/build.yaml @@ -162,7 +162,7 @@ cassandra: - 'dse-5.1' - 'dse-6.0' - 'dse-6.7' - - 'dse-6.8' + - 'dse-6.8.0' env: CYTHON: diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 4c51392a6b..b4dc19fbf0 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1407,7 +1407,6 @@ def _make_option_strings(cls, options_map): return list(sorted(ret)) -# TODO This should inherit V4 later? class TableMetadataDSE68(TableMetadataV3): vertex = None @@ -2932,7 +2931,7 @@ def _query_all(self): QueryMessage(query=self._SELECT_VIRTUAL_KEYSPACES, consistency_level=cl), QueryMessage(query=self._SELECT_VIRTUAL_TABLES, consistency_level=cl), QueryMessage(query=self._SELECT_VIRTUAL_COLUMNS, consistency_level=cl), - # dse7.0 only + # dse6.8 only QueryMessage(query=self._SELECT_VERTICES, consistency_level=cl), QueryMessage(query=self._SELECT_EDGES, consistency_level=cl) ] @@ -2969,12 +2968,21 @@ def _query_all(self): self.indexes_result = self._handle_results(indexes_success, indexes_result) self.views_result = self._handle_results(views_success, views_result) - self.virtual_keyspaces_result = self._handle_results(virtual_ks_success, - virtual_ks_result) - self.virtual_tables_result = self._handle_results(virtual_table_success, - virtual_table_result) - self.virtual_columns_result = self._handle_results(virtual_column_success, - virtual_column_result) + # These tables don't exist in some DSE versions reporting 4.X so we can + # ignore them if we got an error + self.virtual_keyspaces_result = self._handle_results( + virtual_ks_success, virtual_ks_result, + expected_failures=(InvalidRequest,) + ) + self.virtual_tables_result = self._handle_results( + virtual_table_success, virtual_table_result, + expected_failures=(InvalidRequest,) + ) + self.virtual_columns_result = self._handle_results( + virtual_column_success, virtual_column_result, + expected_failures=(InvalidRequest,) + ) + # dse6.8-only results self.vertices_result = self._handle_results(vertices_success, vertices_result) self.edges_result = self._handle_results(edges_success, edges_result) diff --git a/tests/integration/advanced/graph/fluent/test_graph.py b/tests/integration/advanced/graph/fluent/test_graph.py index f1946a40b7..c3f9c2a43b 100644 --- a/tests/integration/advanced/graph/fluent/test_graph.py +++ b/tests/integration/advanced/graph/fluent/test_graph.py @@ -31,6 +31,7 @@ from gremlin_python.structure.graph import Edge as TravEdge from gremlin_python.structure.graph import Vertex as TravVertex, VertexProperty as TravVertexProperty +from tests.util import wait_until_not_raised from tests.integration import DSE_VERSION, greaterthanorequaldse68 from tests.integration.advanced.graph import GraphUnitTestCase, \ ClassicGraphSchema, CoreGraphSchema, \ @@ -509,8 +510,10 @@ def __test_udt(self, schema, graphson, address_class, address_with_tags_class, property('owners', frozen(listOf(tupleOf(Text, Int)))).create(); """, execution_profile=ep) - time.sleep(2) # wait the UDT to be discovered - self.session.cluster.register_user_type(self.graph_name, 'address', Address) + # wait max 10 seconds to get the UDT discovered. + wait_until_not_raised( + lambda: self.session.cluster.register_user_type(self.graph_name, 'address', Address), + 1, 10) self.session.cluster.register_user_type(self.graph_name, 'addressTags', AddressWithTags) self.session.cluster.register_user_type(self.graph_name, 'complexAddress', ComplexAddress) self.session.cluster.register_user_type(self.graph_name, 'complexAddressWithOwners', ComplexAddressWithOwners) diff --git a/tests/integration/advanced/graph/test_graph_query.py b/tests/integration/advanced/graph/test_graph_query.py index 5ef4e2c749..ff76288008 100644 --- a/tests/integration/advanced/graph/test_graph_query.py +++ b/tests/integration/advanced/graph/test_graph_query.py @@ -161,6 +161,8 @@ def test_profile_graph_options(self): s.execute_graph(statement, execution_profile=ep) except NoHostAvailable: self.assertTrue(DSE_VERSION >= Version("6.0")) + except InvalidRequest: + self.assertTrue(DSE_VERSION >= Version("5.1")) else: if DSE_VERSION < Version("6.8"): # >6.8 returns true self.fail("Should have risen ServerError or InvalidRequest") diff --git a/tests/integration/simulacron/utils.py b/tests/integration/simulacron/utils.py index 5cee5ac3f1..870b60bd46 100644 --- a/tests/integration/simulacron/utils.py +++ b/tests/integration/simulacron/utils.py @@ -358,7 +358,9 @@ def prime_driver_defaults(): # prepare empty rows for NGDG for query in [SchemaParserDSE68._SELECT_VERTICES, SchemaParserDSE68._SELECT_EDGES]: - PrimeQuery(query, result='success', then={'rows': [], 'column_types': {'row1': 'int'}}) + client_simulacron.submit_request( + PrimeQuery(query, result='success', + then={'rows': [], 'column_types': {'row1': 'int'}})) def prime_cluster(data_centers="3", version=None, cluster_name=DEFAULT_CLUSTER, dse_version=None): From d338e66f5cfd4ca25615a763485cf697072c4b0c Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 28 Jan 2020 14:19:09 -0500 Subject: [PATCH 1757/2005] anoother time related test fix --- cassandra/__init__.py | 2 +- .../integration/advanced/graph/fluent/test_graph.py | 12 +++++++++--- tests/integration/advanced/test_adv_metadata.py | 4 ---- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 76c7a649de..b86f1a8c90 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 21, 0, 'post0+20200127') +__version_info__ = (3, 21, 0, 'post0+20200128') __version__ = '.'.join(map(str, __version_info__)) diff --git a/tests/integration/advanced/graph/fluent/test_graph.py b/tests/integration/advanced/graph/fluent/test_graph.py index c3f9c2a43b..4ebb0b6109 100644 --- a/tests/integration/advanced/graph/fluent/test_graph.py +++ b/tests/integration/advanced/graph/fluent/test_graph.py @@ -514,9 +514,15 @@ def __test_udt(self, schema, graphson, address_class, address_with_tags_class, wait_until_not_raised( lambda: self.session.cluster.register_user_type(self.graph_name, 'address', Address), 1, 10) - self.session.cluster.register_user_type(self.graph_name, 'addressTags', AddressWithTags) - self.session.cluster.register_user_type(self.graph_name, 'complexAddress', ComplexAddress) - self.session.cluster.register_user_type(self.graph_name, 'complexAddressWithOwners', ComplexAddressWithOwners) + wait_until_not_raised( + lambda: self.session.cluster.register_user_type(self.graph_name, 'addressTags', AddressWithTags), + 1, 10) + wait_until_not_raised( + lambda: self.session.cluster.register_user_type(self.graph_name, 'complexAddress', ComplexAddress), + 1, 10) + wait_until_not_raised( + lambda: self.session.cluster.register_user_type(self.graph_name, 'complexAddressWithOwners', ComplexAddressWithOwners), + 1, 10) data = { "udt1": ["typeOf('address')", Address('1440 Rd Smith', 'Quebec', 'QC')], diff --git a/tests/integration/advanced/test_adv_metadata.py b/tests/integration/advanced/test_adv_metadata.py index 2c69a769a3..eaa7b7f8f6 100644 --- a/tests/integration/advanced/test_adv_metadata.py +++ b/tests/integration/advanced/test_adv_metadata.py @@ -283,10 +283,6 @@ def test_nodesync_on_table(self): self.assertIn('nodesync =', table_meta.export_as_string()) self.assertIn('nodesync', table_meta.options) - table_3rf = self.cluster.metadata.keyspaces["test3rf"].tables['test'] - self.assertNotIn('nodesync =', table_3rf.export_as_string()) - self.assertIsNone(table_3rf.options['nodesync']) - @greaterthanorequaldse68 class GraphMetadataTests(BasicExistingKeyspaceUnitTestCase): From f61ed208ac353b439124497991f5ecd7f34f2641 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 30 Jan 2020 08:14:13 -0500 Subject: [PATCH 1758/2005] PYTHON-1204: Make graph metadata handling more robust (#47) * Make graph metadata handling more robust --- cassandra/metadata.py | 61 +++++++++++-------- .../integration/advanced/test_adv_metadata.py | 35 ++++++++++- 2 files changed, 71 insertions(+), 25 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index b4dc19fbf0..6ca72440da 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2843,26 +2843,7 @@ def __init__(self, connection, timeout): def get_all_keyspaces(self): for keyspace_meta in super(SchemaParserDSE68, self).get_all_keyspaces(): - - def _build_table_graph_metadata(table_meta): - for row in self.keyspace_table_vertex_rows[keyspace_meta.name][table_meta.name]: - vertex_meta = self._build_table_vertex_metadata(row) - table_meta.vertex = vertex_meta - - for row in self.keyspace_table_edge_rows[keyspace_meta.name][table_meta.name]: - edge_meta = self._build_table_edge_metadata(keyspace_meta, row) - table_meta.edge = edge_meta - - # Make sure we process vertices before edges - for t in [t for t in six.itervalues(keyspace_meta.tables) - if t.name in self.keyspace_table_vertex_rows[keyspace_meta.name]]: - _build_table_graph_metadata(t) - - # all other tables... - for t in [t for t in six.itervalues(keyspace_meta.tables) - if t.name not in self.keyspace_table_vertex_rows[keyspace_meta.name]]: - _build_table_graph_metadata(t) - + self._build_graph_metadata(keyspace_meta) yield keyspace_meta def get_table(self, keyspaces, keyspace, table): @@ -2877,10 +2858,16 @@ def get_table(self, keyspaces, keyspace, table): vertices_result = self._handle_results(vertices_success, vertices_result) edges_result = self._handle_results(edges_success, edges_result) - if vertices_result: - table_meta.vertex = self._build_table_vertex_metadata(vertices_result[0]) - elif edges_result: - table_meta.edge = self._build_table_edge_metadata(keyspaces[keyspace], edges_result[0]) + try: + if vertices_result: + table_meta.vertex = self._build_table_vertex_metadata(vertices_result[0]) + elif edges_result: + table_meta.edge = self._build_table_edge_metadata(keyspaces[keyspace], edges_result[0]) + except Exception: + table_meta.vertex = None + table_meta.edge = None + table_meta._exc_info = sys.exc_info() + log.exception("Error while parsing graph metadata for table %s.%s.", keyspace, table) return table_meta @@ -2893,6 +2880,32 @@ def _build_keyspace_metadata_internal(row): graph_engine = row.get("graph_engine", None) return KeyspaceMetadata(name, durable_writes, replication_class, replication, graph_engine) + def _build_graph_metadata(self, keyspace_meta): + + def _build_table_graph_metadata(table_meta): + for row in self.keyspace_table_vertex_rows[keyspace_meta.name][table_meta.name]: + table_meta.vertex = self._build_table_vertex_metadata(row) + + for row in self.keyspace_table_edge_rows[keyspace_meta.name][table_meta.name]: + table_meta.egde = self._build_table_edge_metadata(keyspace_meta, row) + + try: + # Make sure we process vertices before edges + for table_meta in [t for t in six.itervalues(keyspace_meta.tables) + if t.name in self.keyspace_table_vertex_rows[keyspace_meta.name]]: + _build_table_graph_metadata(table_meta) + + # all other tables... + for table_meta in [t for t in six.itervalues(keyspace_meta.tables) + if t.name not in self.keyspace_table_vertex_rows[keyspace_meta.name]]: + _build_table_graph_metadata(table_meta) + except Exception: + # schema error, remove all graph metadata for this keyspace + for t in six.itervalues(keyspace_meta.tables): + t.edge = t.vertex = None + keyspace_meta._exc_info = sys.exc_info() + log.exception("Error while parsing graph metadata for keyspace %s", keyspace_meta.name) + @staticmethod def _build_table_vertex_metadata(row): return VertexMetadata(row.get("keyspace_name"), row.get("table_name"), diff --git a/tests/integration/advanced/test_adv_metadata.py b/tests/integration/advanced/test_adv_metadata.py index eaa7b7f8f6..52944aabdf 100644 --- a/tests/integration/advanced/test_adv_metadata.py +++ b/tests/integration/advanced/test_adv_metadata.py @@ -14,11 +14,12 @@ from packaging.version import Version +from cassandra.cluster import Cluster from tests.integration import (BasicExistingKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCaseRF1, greaterthanorequaldse51, greaterthanorequaldse60, greaterthanorequaldse68, use_single_node, - DSE_VERSION, requiredse) + DSE_VERSION, requiredse, PROTOCOL_VERSION) try: import unittest2 as unittest @@ -361,3 +362,35 @@ def test_edge_metadata(self): self.assertEqual(edge_meta.to_label, 'rocksolidsoftware') self.assertEqual(edge_meta.to_partition_key_columns, ['company_name', 'software_name']) self.assertEqual(edge_meta.to_clustering_columns, ['software_version']) + + +@greaterthanorequaldse68 +class GraphMetadataSchemaErrorTests(BasicExistingKeyspaceUnitTestCase): + """ + Test that we can connect when the graph schema is broken. + """ + + def test_connection_on_graph_schema_error(self): + self.session = self.cluster.connect() + + self.session.execute(""" + CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1} and graph_engine = 'Core'; + """ % (self.ks_name,)) + + self.session.execute(""" + CREATE TABLE %s.person (name text PRIMARY KEY) WITH VERTEX LABEL; + """ % (self.ks_name,)) + + self.session.execute(""" + CREATE TABLE %s.software(company text, name text, version int, PRIMARY KEY((company, name), version)) WITH VERTEX LABEL rocksolidsoftware; + """ % (self.ks_name,)) + + self.session.execute(""" + CREATE TABLE %s.contributors (contributor text, company_name text, software_name text, software_version int, + PRIMARY KEY (contributor, company_name, software_name, software_version) ) + WITH CLUSTERING ORDER BY (company_name ASC, software_name ASC, software_version ASC) + AND EDGE LABEL contrib FROM person(contributor) TO rocksolidsoftware((company_name, software_name), software_version); + """ % (self.ks_name,)) + + self.session.execute('TRUNCATE system_schema.vertices') + Cluster(protocol_version=PROTOCOL_VERSION).connect().shutdown() From bdb5f530950c71b637662cb1f6c8f502e3ecc3ee Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 30 Jan 2020 08:19:47 -0500 Subject: [PATCH 1759/2005] Add python-1204 changelog entry --- CHANGELOG.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index a7689ff551..f86e5048da 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -23,6 +23,7 @@ Features * Enable Paging Through DSE Driver for Gremlin Traversals (PYTHON-1045) * Expose filter predicates for cql collections (PYTHON-1019) * Add g:TraversalMetrics/Metrics deserializers (PYTHON-1057) +* Make graph metadata handling more robust (PYTHON-1204) 3.21.0 ====== From db96ffaae045523011c94ddd5e13f5b5eaa07176 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 30 Jan 2020 13:10:49 -0500 Subject: [PATCH 1760/2005] Don't try to enable insights is disabled --- cassandra/cluster.py | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index a33404c5fd..a6709ba6a5 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2546,18 +2546,19 @@ def __init__(self, cluster, hosts, keyspace=None): self.session_id = uuid.uuid4() self._graph_paging_available = self._check_graph_paging_available() - cc_host = self.cluster.get_control_connection_host() - valid_insights_version = (cc_host and version_supports_insights(cc_host.dse_version)) - if self.cluster.monitor_reporting_enabled and valid_insights_version: - self._monitor_reporter = MonitorReporter( - interval_sec=self.cluster.monitor_reporting_interval, - session=self, - ) - else: - if cc_host: - log.debug('Not starting MonitorReporter thread for Insights; ' - 'not supported by server version {v} on ' - 'ControlConnection host {c}'.format(v=cc_host.release_version, c=cc_host)) + if self.cluster.monitor_reporting_enabled: + cc_host = self.cluster.get_control_connection_host() + valid_insights_version = (cc_host and version_supports_insights(cc_host.dse_version)) + if valid_insights_version: + self._monitor_reporter = MonitorReporter( + interval_sec=self.cluster.monitor_reporting_interval, + session=self, + ) + else: + if cc_host: + log.debug('Not starting MonitorReporter thread for Insights; ' + 'not supported by server version {v} on ' + 'ControlConnection host {c}'.format(v=cc_host.release_version, c=cc_host)) log.debug('Started Session with client_id {} and session_id {}'.format(self.cluster.client_id, self.session_id)) From c9da0566331099f533cd32c0fcd24f0680548978 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 4 Feb 2020 13:39:32 -0500 Subject: [PATCH 1761/2005] Add C* 4.0 testing --- build.yaml | 5 +++-- tests/integration/__init__.py | 16 +++++----------- 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/build.yaml b/build.yaml index b60c0950c1..c4c93a803c 100644 --- a/build.yaml +++ b/build.yaml @@ -46,8 +46,8 @@ schedules: EXCLUDE_LONG=1 matrix: exclude: - - python: [2.7, 3.4, 3.7, 3.8] - - cassandra: ['2.0', '2.1', '2.2', '3.0', 'test-dse', dse-4.8', 'dse-5.0'] + - python: [2.7, 3.4, 3.7, 3.6, 3.8] + - cassandra: ['2.0', '2.1', '2.2', '3.0', '3.11', 'test-dse', 'dse-4.8', 'dse-5.0', 'dse-6.0', 'dse-6.7', 'dse-6.8'] release_test: schedule: per_commit @@ -157,6 +157,7 @@ cassandra: - '2.2' - '3.0' - '3.11' + - '4.0' - 'dse-4.8' - 'dse-5.0' - 'dse-5.1' diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 0a3d6542a0..547a3e6e32 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -172,7 +172,7 @@ def _get_dse_version_from_cass(cass_version): cassandra_version = Version(mcv_string) CASSANDRA_VERSION = Version(mcv_string) if mcv_string else cassandra_version - CCM_VERSION = cassandra_version if mcv_string else CASSANDRA_VERSION + CCM_VERSION = mcv_string or cv_string CASSANDRA_IP = os.getenv('CLUSTER_IP', '127.0.0.1') CASSANDRA_DIR = os.getenv('CASSANDRA_DIR', None) @@ -458,15 +458,9 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, elif ccm_options is None: ccm_options = CCM_KWARGS.copy() - if 'version' in ccm_options and not isinstance(ccm_options['version'], Version): - ccm_options['version'] = Version(ccm_options['version']) - cassandra_version = ccm_options.get('version', CCM_VERSION) dse_version = ccm_options.get('version', DSE_VERSION) - if 'version' in ccm_options: - ccm_options['version'] = ccm_options['version'].base_version - global CCM_CLUSTER if USE_CASS_EXTERNAL: if CCM_CLUSTER: @@ -515,12 +509,12 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, CCM_CLUSTER = DseCluster(path, cluster_name, **ccm_options) CCM_CLUSTER.set_configuration_options({'start_native_transport': True}) CCM_CLUSTER.set_configuration_options({'batch_size_warn_threshold_in_kb': 5}) - if dse_version >= Version('5.0'): + if Version(dse_version) >= Version('5.0'): CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True}) CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True}) if 'spark' in workloads: config_options = {"initial_spark_worker_resources": 0.1} - if dse_version >= Version('6.7'): + if Version(dse_version) >= Version('6.7'): log.debug("Disabling AlwaysON SQL for a DSE 6.7 Cluster") config_options['alwayson_sql_options'] = {'enabled': False} CCM_CLUSTER.set_dse_configuration_options(config_options) @@ -532,9 +526,9 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, else: CCM_CLUSTER = CCMCluster(path, cluster_name, **ccm_options) CCM_CLUSTER.set_configuration_options({'start_native_transport': True}) - if cassandra_version >= Version('2.2'): + if Version(cassandra_version) >= Version('2.2'): CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True}) - if cassandra_version >= Version('3.0'): + if Version(cassandra_version) >= Version('3.0'): CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True}) common.switch_cluster(path, cluster_name) CCM_CLUSTER.set_configuration_options(configuration_options) From 2716214599148987ebc9999f7ed1ad330f5e889e Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 11 Feb 2020 09:46:31 -0500 Subject: [PATCH 1762/2005] Fix typo when assigning a table edge --- cassandra/metadata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 6ca72440da..5cdcef807e 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -2887,7 +2887,7 @@ def _build_table_graph_metadata(table_meta): table_meta.vertex = self._build_table_vertex_metadata(row) for row in self.keyspace_table_edge_rows[keyspace_meta.name][table_meta.name]: - table_meta.egde = self._build_table_edge_metadata(keyspace_meta, row) + table_meta.edge = self._build_table_edge_metadata(keyspace_meta, row) try: # Make sure we process vertices before edges From 3a2ea456d1552b898aa962959ada5c6b5a025687 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 4 Feb 2020 14:46:23 -0500 Subject: [PATCH 1763/2005] Make sure to only query the native_transport_address column with DSE --- CHANGELOG.rst | 4 ++++ build.yaml | 12 ++++++------ cassandra/cluster.py | 17 ++++++++++------- tests/integration/__init__.py | 4 ++-- 4 files changed, 22 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index da15a1158b..694da485ad 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -6,6 +6,10 @@ Features -------- * Add all() function to the ResultSet API (PYTHON-1203) +Bug Fixes +--------- +* Make sure to only query the native_transport_address column with DSE (PYTHON-1205) + 3.21.0 ====== January 15, 2020 diff --git a/build.yaml b/build.yaml index c4c93a803c..3ae684ebb9 100644 --- a/build.yaml +++ b/build.yaml @@ -9,7 +9,7 @@ schedules: matrix: exclude: - python: [3.4, 3.6, 3.7, 3.8] - - cassandra: ['2.1', '3.0', 'test-dse'] + - cassandra: ['2.1', '3.0', '4.0', 'test-dse'] commit_long_test: schedule: per_commit @@ -21,7 +21,7 @@ schedules: matrix: exclude: - python: [3.4, 3.6, 3.7, 3.8] - - cassandra: ['2.1', '3.0', 'test-dse'] + - cassandra: ['2.1', '3.0', '4.0', 'test-dse'] commit_branches: schedule: per_commit @@ -34,7 +34,7 @@ schedules: matrix: exclude: - python: [3.4, 3.6, 3.7, 3.8] - - cassandra: ['2.1', '3.0', 'test-dse'] + - cassandra: ['2.1', '3.0', '4.0', 'test-dse'] commit_branches_dev: schedule: per_commit @@ -47,7 +47,7 @@ schedules: matrix: exclude: - python: [2.7, 3.4, 3.7, 3.6, 3.8] - - cassandra: ['2.0', '2.1', '2.2', '3.0', '3.11', 'test-dse', 'dse-4.8', 'dse-5.0', 'dse-6.0', 'dse-6.7', 'dse-6.8'] + - cassandra: ['2.0', '2.1', '2.2', '3.0', '4.0', 'test-dse', 'dse-4.8', 'dse-5.0', 'dse-6.0', 'dse-6.8'] release_test: schedule: per_commit @@ -139,7 +139,7 @@ schedules: matrix: exclude: - python: [3.4, 3.6, 3.7, 3.8] - - cassandra: ['2.0', '2.1', '2.2', '3.0', 'test-dse'] + - cassandra: ['2.0', '2.1', '2.2', '3.0', '4.0', 'test-dse'] python: - 2.7 @@ -182,7 +182,7 @@ build: pip install --upgrade pip pip install -U setuptools - pip install git+ssh://git@github.com/riptano/ccm-private.git + pip install $HOME/ccm if [ -n "$CCM_IS_DSE" ]; then pip install -r test-datastax-requirements.txt diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 59c8b61f96..0ca962c076 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -80,7 +80,7 @@ from cassandra.marshal import int64_pack from cassandra.timestamps import MonotonicTimestampGenerator from cassandra.compat import Mapping -from cassandra.util import _resolve_contact_points_to_string_map +from cassandra.util import _resolve_contact_points_to_string_map, Version from cassandra.datastax.insights.reporter import MonitorReporter from cassandra.datastax.insights.util import version_supports_insights @@ -3324,7 +3324,7 @@ class ControlConnection(object): _SELECT_SCHEMA_PEERS_TEMPLATE = "SELECT peer, host_id, {nt_col_name}, schema_version FROM system.peers" _SELECT_SCHEMA_LOCAL = "SELECT schema_version FROM system.local WHERE key='local'" - _MINIMUM_NATIVE_ADDRESS_VERSION = "4.0" + _MINIMUM_NATIVE_ADDRESS_DSE_VERSION = Version("6.0.0") _is_shutdown = False _timeout = None @@ -3884,14 +3884,17 @@ def _peers_query_for_version(self, connection, peers_query_template): field named nt_col_name. """ host_release_version = self._cluster.metadata.get_host(connection.endpoint).release_version - if host_release_version: - use_native_address_query = host_release_version >= self._MINIMUM_NATIVE_ADDRESS_VERSION - if use_native_address_query: - select_peers_query = peers_query_template.format(nt_col_name="native_transport_address") - else: + host_dse_version = self._cluster.metadata.get_host(connection.endpoint).dse_version + uses_native_address_query = ( + host_dse_version and Version(host_dse_version) >= self._MINIMUM_NATIVE_ADDRESS_DSE_VERSION) + + if uses_native_address_query: + select_peers_query = peers_query_template.format(nt_col_name="native_transport_address") + elif host_release_version: select_peers_query = peers_query_template.format(nt_col_name="rpc_address") else: select_peers_query = self._SELECT_PEERS + return select_peers_query def _signal_error(self): diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 547a3e6e32..992de80cda 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -172,7 +172,7 @@ def _get_dse_version_from_cass(cass_version): cassandra_version = Version(mcv_string) CASSANDRA_VERSION = Version(mcv_string) if mcv_string else cassandra_version - CCM_VERSION = mcv_string or cv_string + CCM_VERSION = mcv_string if mcv_string else cv_string CASSANDRA_IP = os.getenv('CLUSTER_IP', '127.0.0.1') CASSANDRA_DIR = os.getenv('CASSANDRA_DIR', None) @@ -454,7 +454,7 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, set_default_cass_ip() if ccm_options is None and DSE_VERSION: - ccm_options = {"version": DSE_VERSION} + ccm_options = {"version": CCM_VERSION} elif ccm_options is None: ccm_options = CCM_KWARGS.copy() From df458d2ebd701962e72d8ad6fa1a8c4356de53b2 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 14 Feb 2020 09:35:25 -0500 Subject: [PATCH 1764/2005] Fix test version compare --- tests/integration/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index a350bb40e3..d6f26acbcd 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -530,7 +530,7 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, if Version(dse_version) >= Version('5.0'): CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True}) CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True}) - if dse_version >= Version('5.1'): + if Version(dse_version) >= Version('5.1'): # For Inet4Address CCM_CLUSTER.set_dse_configuration_options({ 'graph': { From 28d85a99a0123508ce14e442de74b1b7a9577e98 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Sat, 15 Feb 2020 13:37:41 -0500 Subject: [PATCH 1765/2005] install pyyaml 5.2 to fix the ccm installation with py3.4 --- build.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/build.yaml b/build.yaml index 605683b085..83bed55a09 100644 --- a/build.yaml +++ b/build.yaml @@ -184,6 +184,8 @@ build: pip install --upgrade pip pip install -U setuptools + # Remove this pyyaml installation when removing Python 3.4 support + pip install PyYAML==5.2 pip install $HOME/ccm if [ -n "$CCM_IS_DSE" ]; then From 02faa6d80fc8adff10fb7a44d2a2c8b9ba3dc79e Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 4 Feb 2020 13:39:32 -0500 Subject: [PATCH 1766/2005] Add C* 4.0 testing --- build.yaml | 5 +++-- tests/integration/__init__.py | 16 +++++----------- 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/build.yaml b/build.yaml index 460192131d..dabdbc7132 100644 --- a/build.yaml +++ b/build.yaml @@ -46,8 +46,8 @@ schedules: EXCLUDE_LONG=1 matrix: exclude: - - python: [2.7, 3.4, 3.7, 3.8] - - cassandra: ['2.0', '2.1', '2.2', '3.0', 'test-dse', dse-4.8', 'dse-5.0'] + - python: [2.7, 3.4, 3.7, 3.6, 3.8] + - cassandra: ['2.0', '2.1', '2.2', '3.0', '3.11', 'test-dse', 'dse-4.8', 'dse-5.0', 'dse-6.0', 'dse-6.7', 'dse-6.8'] release_test: schedule: per_commit @@ -157,6 +157,7 @@ cassandra: - '2.2' - '3.0' - '3.11' + - '4.0' - 'dse-4.8' - 'dse-5.0' - 'dse-5.1' diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 18c9110b35..db68ae632d 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -178,7 +178,7 @@ def _get_dse_version_from_cass(cass_version): cassandra_version = Version(mcv_string) CASSANDRA_VERSION = Version(mcv_string) if mcv_string else cassandra_version - CCM_VERSION = cassandra_version if mcv_string else CASSANDRA_VERSION + CCM_VERSION = mcv_string or cv_string CASSANDRA_IP = os.getenv('CLUSTER_IP', '127.0.0.1') CASSANDRA_DIR = os.getenv('CASSANDRA_DIR', None) @@ -476,15 +476,9 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, elif ccm_options is None: ccm_options = CCM_KWARGS.copy() - if 'version' in ccm_options and not isinstance(ccm_options['version'], Version): - ccm_options['version'] = Version(ccm_options['version']) - cassandra_version = ccm_options.get('version', CCM_VERSION) dse_version = ccm_options.get('version', DSE_VERSION) - if 'version' in ccm_options: - ccm_options['version'] = ccm_options['version'].base_version - global CCM_CLUSTER if USE_CASS_EXTERNAL: if CCM_CLUSTER: @@ -533,7 +527,7 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, CCM_CLUSTER = DseCluster(path, cluster_name, **ccm_options) CCM_CLUSTER.set_configuration_options({'start_native_transport': True}) CCM_CLUSTER.set_configuration_options({'batch_size_warn_threshold_in_kb': 5}) - if dse_version >= Version('5.0'): + if Version(dse_version) >= Version('5.0'): CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True}) CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True}) if dse_version >= Version('5.1'): @@ -555,7 +549,7 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, }) if 'spark' in workloads: config_options = {"initial_spark_worker_resources": 0.1} - if dse_version >= Version('6.7'): + if Version(dse_version) >= Version('6.7'): log.debug("Disabling AlwaysON SQL for a DSE 6.7 Cluster") config_options['alwayson_sql_options'] = {'enabled': False} CCM_CLUSTER.set_dse_configuration_options(config_options) @@ -567,9 +561,9 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, else: CCM_CLUSTER = CCMCluster(path, cluster_name, **ccm_options) CCM_CLUSTER.set_configuration_options({'start_native_transport': True}) - if cassandra_version >= Version('2.2'): + if Version(cassandra_version) >= Version('2.2'): CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True}) - if cassandra_version >= Version('3.0'): + if Version(cassandra_version) >= Version('3.0'): CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True}) common.switch_cluster(path, cluster_name) CCM_CLUSTER.set_configuration_options(configuration_options) From 211bf1d7759f18c2b21412f615d950982a35bde5 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 4 Feb 2020 14:46:23 -0500 Subject: [PATCH 1767/2005] Make sure to only query the native_transport_address column with DSE --- CHANGELOG.rst | 4 ++++ build.yaml | 12 ++++++------ cassandra/cluster.py | 15 +++++++++------ tests/integration/__init__.py | 4 ++-- 4 files changed, 21 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f86e5048da..dc11a9fddd 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -25,6 +25,10 @@ Features * Add g:TraversalMetrics/Metrics deserializers (PYTHON-1057) * Make graph metadata handling more robust (PYTHON-1204) +Bug Fixes +--------- +* Make sure to only query the native_transport_address column with DSE (PYTHON-1205) + 3.21.0 ====== January 15, 2020 diff --git a/build.yaml b/build.yaml index dabdbc7132..605683b085 100644 --- a/build.yaml +++ b/build.yaml @@ -9,7 +9,7 @@ schedules: matrix: exclude: - python: [3.4, 3.6, 3.7, 3.8] - - cassandra: ['2.1', '3.0', 'test-dse'] + - cassandra: ['2.1', '3.0', '4.0', 'test-dse'] commit_long_test: schedule: per_commit @@ -21,7 +21,7 @@ schedules: matrix: exclude: - python: [3.4, 3.6, 3.7, 3.8] - - cassandra: ['2.1', '3.0', 'test-dse'] + - cassandra: ['2.1', '3.0', '4.0', 'test-dse'] commit_branches: schedule: per_commit @@ -34,7 +34,7 @@ schedules: matrix: exclude: - python: [3.4, 3.6, 3.7, 3.8] - - cassandra: ['2.1', '3.0', 'test-dse'] + - cassandra: ['2.1', '3.0', '4.0', 'test-dse'] commit_branches_dev: schedule: per_commit @@ -47,7 +47,7 @@ schedules: matrix: exclude: - python: [2.7, 3.4, 3.7, 3.6, 3.8] - - cassandra: ['2.0', '2.1', '2.2', '3.0', '3.11', 'test-dse', 'dse-4.8', 'dse-5.0', 'dse-6.0', 'dse-6.7', 'dse-6.8'] + - cassandra: ['2.0', '2.1', '2.2', '3.0', '4.0', 'test-dse', 'dse-4.8', 'dse-5.0', 'dse-6.0', 'dse-6.8'] release_test: schedule: per_commit @@ -139,7 +139,7 @@ schedules: matrix: exclude: - python: [3.4, 3.6, 3.7, 3.8] - - cassandra: ['2.0', '2.1', '2.2', '3.0', 'test-dse'] + - cassandra: ['2.0', '2.1', '2.2', '3.0', '4.0', 'test-dse'] python: - 2.7 @@ -184,7 +184,7 @@ build: pip install --upgrade pip pip install -U setuptools - pip install git+ssh://git@github.com/riptano/ccm-private.git + pip install $HOME/ccm if [ -n "$CCM_IS_DSE" ]; then pip install -r test-datastax-requirements.txt diff --git a/cassandra/cluster.py b/cassandra/cluster.py index a6709ba6a5..2263fbdb97 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3418,7 +3418,7 @@ class ControlConnection(object): _SELECT_SCHEMA_PEERS_TEMPLATE = "SELECT peer, host_id, {nt_col_name}, schema_version FROM system.peers" _SELECT_SCHEMA_LOCAL = "SELECT schema_version FROM system.local WHERE key='local'" - _MINIMUM_NATIVE_ADDRESS_VERSION = "4.0" + _MINIMUM_NATIVE_ADDRESS_DSE_VERSION = Version("6.0.0") _is_shutdown = False _timeout = None @@ -3978,14 +3978,17 @@ def _peers_query_for_version(self, connection, peers_query_template): field named nt_col_name. """ host_release_version = self._cluster.metadata.get_host(connection.endpoint).release_version - if host_release_version: - use_native_address_query = host_release_version >= self._MINIMUM_NATIVE_ADDRESS_VERSION - if use_native_address_query: - select_peers_query = peers_query_template.format(nt_col_name="native_transport_address") - else: + host_dse_version = self._cluster.metadata.get_host(connection.endpoint).dse_version + uses_native_address_query = ( + host_dse_version and Version(host_dse_version) >= self._MINIMUM_NATIVE_ADDRESS_DSE_VERSION) + + if uses_native_address_query: + select_peers_query = peers_query_template.format(nt_col_name="native_transport_address") + elif host_release_version: select_peers_query = peers_query_template.format(nt_col_name="rpc_address") else: select_peers_query = self._SELECT_PEERS + return select_peers_query def _signal_error(self): diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index db68ae632d..a350bb40e3 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -178,7 +178,7 @@ def _get_dse_version_from_cass(cass_version): cassandra_version = Version(mcv_string) CASSANDRA_VERSION = Version(mcv_string) if mcv_string else cassandra_version - CCM_VERSION = mcv_string or cv_string + CCM_VERSION = mcv_string if mcv_string else cv_string CASSANDRA_IP = os.getenv('CLUSTER_IP', '127.0.0.1') CASSANDRA_DIR = os.getenv('CASSANDRA_DIR', None) @@ -472,7 +472,7 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, set_default_cass_ip() if ccm_options is None and DSE_VERSION: - ccm_options = {"version": DSE_VERSION} + ccm_options = {"version": CCM_VERSION} elif ccm_options is None: ccm_options = CCM_KWARGS.copy() From 143d45d8ade160a4873e92020030bea061684bde Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 14 Feb 2020 09:35:25 -0500 Subject: [PATCH 1768/2005] Fix test version compare --- tests/integration/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index a350bb40e3..d6f26acbcd 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -530,7 +530,7 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, if Version(dse_version) >= Version('5.0'): CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True}) CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True}) - if dse_version >= Version('5.1'): + if Version(dse_version) >= Version('5.1'): # For Inet4Address CCM_CLUSTER.set_dse_configuration_options({ 'graph': { From 9c0b5be19dae9bfd218d59b82ba2d754a89231c2 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Sat, 15 Feb 2020 13:37:41 -0500 Subject: [PATCH 1769/2005] install pyyaml 5.2 to fix the ccm installation with py3.4 --- build.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/build.yaml b/build.yaml index 605683b085..83bed55a09 100644 --- a/build.yaml +++ b/build.yaml @@ -184,6 +184,8 @@ build: pip install --upgrade pip pip install -U setuptools + # Remove this pyyaml installation when removing Python 3.4 support + pip install PyYAML==5.2 pip install $HOME/ccm if [ -n "$CCM_IS_DSE" ]; then From 1979a481692748e661ea80c9997afc2bf0d428a7 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 18 Feb 2020 11:24:52 -0600 Subject: [PATCH 1770/2005] Test fixes: - fixing a version check in test_graph_query.py - revert guardrails prefx in test_failure_types.py - for tombstone test, delete cell in c* version < 4.0, else delete entire row --- tests/integration/advanced/graph/test_graph_query.py | 2 +- tests/integration/long/test_failure_types.py | 12 ++++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/tests/integration/advanced/graph/test_graph_query.py b/tests/integration/advanced/graph/test_graph_query.py index ff76288008..1ccfc4a90c 100644 --- a/tests/integration/advanced/graph/test_graph_query.py +++ b/tests/integration/advanced/graph/test_graph_query.py @@ -162,7 +162,7 @@ def test_profile_graph_options(self): except NoHostAvailable: self.assertTrue(DSE_VERSION >= Version("6.0")) except InvalidRequest: - self.assertTrue(DSE_VERSION >= Version("5.1")) + self.assertTrue(DSE_VERSION >= Version("5.0")) else: if DSE_VERSION < Version("6.8"): # >6.8 returns true self.fail("Should have risen ServerError or InvalidRequest") diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py index 486313e096..25854a57f7 100644 --- a/tests/integration/long/test_failure_types.py +++ b/tests/integration/long/test_failure_types.py @@ -16,8 +16,8 @@ import sys import traceback import time +from packaging.version import Version -from ccmlib.dse_cluster import DseCluster from mock import Mock from cassandra.policies import HostFilterPolicy, RoundRobinPolicy @@ -31,7 +31,7 @@ from tests.integration import ( use_singledc, PROTOCOL_VERSION, get_cluster, setup_keyspace, remove_cluster, get_node, start_cluster_wait_for_up, requiresmallclockgranularity, - local) + local, CASSANDRA_VERSION) try: @@ -55,7 +55,10 @@ def setup_module(): use_singledc(start=False) ccm_cluster = get_cluster() ccm_cluster.stop() - config_options = {'guardrails.tombstone_failure_threshold': 2000, 'guardrails.tombstone_warn_threshold': 1000} + config_options = { + 'tombstone_failure_threshold': 2000, + 'tombstone_warn_threshold': 1000, + } ccm_cluster.set_configuration_options(config_options) start_cluster_wait_for_up(ccm_cluster) setup_keyspace() @@ -255,7 +258,8 @@ def test_tombstone_overflow_read_failure(self): parameters = [(x,) for x in range(3000)] self.execute_concurrent_args_helper(self.session, statement, parameters) - statement = self.session.prepare("DELETE FROM test3rf.test2 WHERE k = 1 AND v0 =?") + column = 'v1' if CASSANDRA_VERSION < Version('4.0') else '' + statement = self.session.prepare("DELETE {} FROM test3rf.test2 WHERE k = 1 AND v0 =?".format(column)) parameters = [(x,) for x in range(2001)] self.execute_concurrent_args_helper(self.session, statement, parameters) From 2ccc9b6671be372d24b6208544301859acbfc2e1 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 24 Feb 2020 10:59:05 -0600 Subject: [PATCH 1771/2005] Remove platform and runtime survey from README --- README.rst | 4 ---- 1 file changed, 4 deletions(-) diff --git a/README.rst b/README.rst index 0b6c1e206d..6fabdf7531 100644 --- a/README.rst +++ b/README.rst @@ -11,10 +11,6 @@ The driver supports Python 2.7, 3.4, 3.5, 3.6, 3.7 and 3.8. **Note:** DataStax products do not support big-endian systems. -Feedback Requested ------------------- -**Help us focus our efforts!** Provide your input on the `Platform and Runtime Survey `_ (we kept it short). - Features -------- * `Synchronous `_ and `Asynchronous `_ APIs From dff53fb26fa5834ecfeb361b0e202221f918bf76 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 24 Feb 2020 16:09:03 -0600 Subject: [PATCH 1772/2005] Update docs for 3.22 release --- cassandra/cluster.py | 2 ++ docs/api/cassandra/cluster.rst | 4 ++-- .../datastax/graph/fluent/predicates.rst | 3 +++ docs/api/cassandra/datastax/graph/index.rst | 24 +++++++++++++++++++ docs/api/cassandra/metadata.rst | 3 +++ docs/index.rst | 1 - 6 files changed, 34 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 2263fbdb97..58051104a1 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -426,6 +426,8 @@ def __init__(self, load_balancing_policy=_NOT_SET, retry_policy=None, See :class:`.ExecutionProfile` for base attributes. Note that if not explicitly set, the row_factory and graph_options.graph_protocol are resolved during the query execution. + These options will resolve to graph_graphson3_row_factory and GraphProtocol.GRAPHSON_3_0 + for the core graph engine (DSE 6.8+), otherwise graph_object_row_factory and GraphProtocol.GRAPHSON_1_0 In addition to default parameters shown in the signature, this profile also defaults ``retry_policy`` to :class:`cassandra.policies.NeverRetryPolicy`. diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 459f287b4c..2b3d7828a8 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -127,10 +127,10 @@ .. autoattribute:: consistency_level :annotation: = LOCAL_ONE -.. autoclass:: GraphExecutionProfile (load_balancing_policy=_NOT_SET, retry_policy=None, consistency_level=ConsistencyLevel.LOCAL_ONE, serial_consistency_level=None, request_timeout=30.0, row_factory=, graph_options=None, continuous_paging_options=_NOT_SET) +.. autoclass:: GraphExecutionProfile (load_balancing_policy=_NOT_SET, retry_policy=None, consistency_level=ConsistencyLevel.LOCAL_ONE, serial_consistency_level=None, request_timeout=30.0, row_factory=None, graph_options=None, continuous_paging_options=_NOT_SET) :members: -.. autoclass:: GraphAnalyticsExecutionProfile (load_balancing_policy=None, retry_policy=None, consistency_level=ConsistencyLevel.LOCAL_ONE, serial_consistency_level=None, request_timeout=3600. * 24. * 7., row_factory=, graph_options=None) +.. autoclass:: GraphAnalyticsExecutionProfile (load_balancing_policy=None, retry_policy=None, consistency_level=ConsistencyLevel.LOCAL_ONE, serial_consistency_level=None, request_timeout=3600. * 24. * 7., row_factory=None, graph_options=None) :members: .. autodata:: EXEC_PROFILE_DEFAULT diff --git a/docs/api/cassandra/datastax/graph/fluent/predicates.rst b/docs/api/cassandra/datastax/graph/fluent/predicates.rst index 09f9e4c8fa..f6e86f6451 100644 --- a/docs/api/cassandra/datastax/graph/fluent/predicates.rst +++ b/docs/api/cassandra/datastax/graph/fluent/predicates.rst @@ -7,5 +7,8 @@ .. autoclass:: Search :members: +.. autoclass:: CqlCollection + :members: + .. autoclass:: Geo :members: diff --git a/docs/api/cassandra/datastax/graph/index.rst b/docs/api/cassandra/datastax/graph/index.rst index 285ca5ffc2..18a0e7c511 100644 --- a/docs/api/cassandra/datastax/graph/index.rst +++ b/docs/api/cassandra/datastax/graph/index.rst @@ -11,6 +11,30 @@ .. autofunction:: graph_object_row_factory +.. autofunction:: graph_graphson2_row_factory + +.. autofunction:: graph_graphson3_row_factory + +.. function:: to_int(value) + + Wraps a value to be explicitly serialized as a graphson Int. + +.. function:: to_bigint(value) + + Wraps a value to be explicitly serialized as a graphson Bigint. + +.. function:: to_smallint(value) + + Wraps a value to be explicitly serialized as a graphson Smallint. + +.. function:: to_float(value) + + Wraps a value to be explicitly serialized as a graphson Float. + +.. function:: to_double(value) + + Wraps a value to be explicitly serialized as a graphson Double. + .. autoclass:: GraphProtocol :members: diff --git a/docs/api/cassandra/metadata.rst b/docs/api/cassandra/metadata.rst index b5e6dae904..ca33e34739 100644 --- a/docs/api/cassandra/metadata.rst +++ b/docs/api/cassandra/metadata.rst @@ -37,6 +37,9 @@ Schemas .. autoclass:: TableMetadataV3 () :members: +.. autoclass:: TableMetadataDSE68 () + :members: + .. autoclass:: ColumnMetadata () :members: diff --git a/docs/index.rst b/docs/index.rst index fd5b4e683e..f79cc36ac8 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -89,7 +89,6 @@ Contents graph classic_graph graph_fluent - dse_auth dates_and_times cloud faq From e347d8f36720f41a568d60de3f906fb3080fd30d Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 25 Feb 2020 09:54:56 -0600 Subject: [PATCH 1773/2005] release 3.22: bump version and release date --- CHANGELOG.rst | 2 +- cassandra/__init__.py | 2 +- docs/installation.rst | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index dc11a9fddd..fe8c2c13e5 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,6 @@ 3.22.0 ====== -UNRELEASED +February 26, 2020 Features -------- diff --git a/cassandra/__init__.py b/cassandra/__init__.py index b86f1a8c90..c4479464c3 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 21, 0, 'post0+20200128') +__version_info__ = (3, 22, 0) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs/installation.rst b/docs/installation.rst index d33ce441c9..b381425302 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -26,7 +26,7 @@ To check if the installation was successful, you can run:: python -c 'import cassandra; print cassandra.__version__' -It should print something like "3.21.0". +It should print something like "3.22.0". .. _installation-datastax-graph: From c79cf6014c6f5a54c8c99df8b771199faa105137 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 25 Feb 2020 10:02:24 -0600 Subject: [PATCH 1774/2005] release 3.22: bump docs.yaml --- docs.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs.yaml b/docs.yaml index e2e1231834..e19ccd7d9e 100644 --- a/docs.yaml +++ b/docs.yaml @@ -22,6 +22,8 @@ sections: # build extensions like libev CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: + - name: '3.22' + ref: e347d8f3 - name: '3.21' ref: 5589d96b - name: '3.20' From 0205565752b4bc88a2617cb04ae3e3696d275f77 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 25 Feb 2020 20:48:01 -0500 Subject: [PATCH 1775/2005] Remove temporarily the core engine graph documentation --- docs/.nav | 1 - docs/classic_graph.rst | 299 ------------------------ docs/core_graph.rst | 434 +++++++++++++++++++++++++++++++++++ docs/graph.rst | 503 +++++++++++++++-------------------------- docs/graph_fluent.rst | 38 ---- docs/index.rst | 5 +- 6 files changed, 619 insertions(+), 661 deletions(-) delete mode 100644 docs/classic_graph.rst create mode 100644 docs/core_graph.rst diff --git a/docs/.nav b/docs/.nav index d5b54c4e13..568cd6a383 100644 --- a/docs/.nav +++ b/docs/.nav @@ -5,7 +5,6 @@ lwt object_mapper geo_types graph -classic_graph performance query_paging security diff --git a/docs/classic_graph.rst b/docs/classic_graph.rst deleted file mode 100644 index ef68c86359..0000000000 --- a/docs/classic_graph.rst +++ /dev/null @@ -1,299 +0,0 @@ -DataStax Classic Graph Queries -============================== - -Getting Started -~~~~~~~~~~~~~~~ - -First, we need to create a graph in the system. To access the system API, we -use the system execution profile :: - - from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT - - cluster = Cluster() - session = cluster.connect() - - graph_name = 'movies' - session.execute_graph("system.graph(name).ifNotExists().engine(Classic).create()", {'name': graph_name}, - execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) - - -To execute requests on our newly created graph, we need to setup an execution -profile. Additionally, we also need to set the schema_mode to `development` -for the schema creation:: - - - from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT - from cassandra.graph import GraphOptions - - graph_name = 'movies' - ep = GraphExecutionProfile(graph_options=GraphOptions(graph_name=graph_name)) - - cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) - session = cluster.connect() - - session.execute_graph("schema.config().option('graph.schema_mode').set('development')") - - -We are ready to configure our graph schema. We will create a simple one for movies:: - - # properties are used to define a vertex - properties = """ - schema.propertyKey("genreId").Text().create(); - schema.propertyKey("personId").Text().create(); - schema.propertyKey("movieId").Text().create(); - schema.propertyKey("name").Text().create(); - schema.propertyKey("title").Text().create(); - schema.propertyKey("year").Int().create(); - schema.propertyKey("country").Text().create(); - """ - - session.execute_graph(properties) # we can execute multiple statements in a single request - - # A Vertex represents a "thing" in the world. - vertices = """ - schema.vertexLabel("genre").properties("genreId","name").create(); - schema.vertexLabel("person").properties("personId","name").create(); - schema.vertexLabel("movie").properties("movieId","title","year","country").create(); - """ - - session.execute_graph(vertices) - - # An edge represents a relationship between two vertices - edges = """ - schema.edgeLabel("belongsTo").single().connection("movie","genre").create(); - schema.edgeLabel("actor").connection("movie","person").create(); - """ - - session.execute_graph(edges) - - # Indexes to execute graph requests efficiently - indexes = """ - schema.vertexLabel("genre").index("genresById").materialized().by("genreId").add(); - schema.vertexLabel("genre").index("genresByName").materialized().by("name").add(); - schema.vertexLabel("person").index("personsById").materialized().by("personId").add(); - schema.vertexLabel("person").index("personsByName").materialized().by("name").add(); - schema.vertexLabel("movie").index("moviesById").materialized().by("movieId").add(); - schema.vertexLabel("movie").index("moviesByTitle").materialized().by("title").add(); - schema.vertexLabel("movie").index("moviesByYear").secondary().by("year").add(); - """ - -Next, we'll add some data:: - - session.execute_graph(""" - g.addV('genre').property('genreId', 1).property('name', 'Action').next(); - g.addV('genre').property('genreId', 2).property('name', 'Drama').next(); - g.addV('genre').property('genreId', 3).property('name', 'Comedy').next(); - g.addV('genre').property('genreId', 4).property('name', 'Horror').next(); - """) - - session.execute_graph(""" - g.addV('person').property('personId', 1).property('name', 'Mark Wahlberg').next(); - g.addV('person').property('personId', 2).property('name', 'Leonardo DiCaprio').next(); - g.addV('person').property('personId', 3).property('name', 'Iggy Pop').next(); - """) - - session.execute_graph(""" - g.addV('movie').property('movieId', 1).property('title', 'The Happening'). - property('year', 2008).property('country', 'United States').next(); - g.addV('movie').property('movieId', 2).property('title', 'The Italian Job'). - property('year', 2003).property('country', 'United States').next(); - - g.addV('movie').property('movieId', 3).property('title', 'Revolutionary Road'). - property('year', 2008).property('country', 'United States').next(); - g.addV('movie').property('movieId', 4).property('title', 'The Man in the Iron Mask'). - property('year', 1998).property('country', 'United States').next(); - - g.addV('movie').property('movieId', 5).property('title', 'Dead Man'). - property('year', 1995).property('country', 'United States').next(); - """) - -Now that our genre, actor and movie vertices are added, we'll create the relationships (edges) between them:: - - session.execute_graph(""" - genre_horror = g.V().hasLabel('genre').has('name', 'Horror').next(); - genre_drama = g.V().hasLabel('genre').has('name', 'Drama').next(); - genre_action = g.V().hasLabel('genre').has('name', 'Action').next(); - - leo = g.V().hasLabel('person').has('name', 'Leonardo DiCaprio').next(); - mark = g.V().hasLabel('person').has('name', 'Mark Wahlberg').next(); - iggy = g.V().hasLabel('person').has('name', 'Iggy Pop').next(); - - the_happening = g.V().hasLabel('movie').has('title', 'The Happening').next(); - the_italian_job = g.V().hasLabel('movie').has('title', 'The Italian Job').next(); - rev_road = g.V().hasLabel('movie').has('title', 'Revolutionary Road').next(); - man_mask = g.V().hasLabel('movie').has('title', 'The Man in the Iron Mask').next(); - dead_man = g.V().hasLabel('movie').has('title', 'Dead Man').next(); - - the_happening.addEdge('belongsTo', genre_horror); - the_italian_job.addEdge('belongsTo', genre_action); - rev_road.addEdge('belongsTo', genre_drama); - man_mask.addEdge('belongsTo', genre_drama); - man_mask.addEdge('belongsTo', genre_action); - dead_man.addEdge('belongsTo', genre_drama); - - the_happening.addEdge('actor', mark); - the_italian_job.addEdge('actor', mark); - rev_road.addEdge('actor', leo); - man_mask.addEdge('actor', leo); - dead_man.addEdge('actor', iggy); - """) - -We are all set. You can now query your graph. Here are some examples:: - - # Find all movies of the genre Drama - for r in session.execute_graph(""" - g.V().has('genre', 'name', 'Drama').in('belongsTo').valueMap();"""): - print(r) - - # Find all movies of the same genre than the movie 'Dead Man' - for r in session.execute_graph(""" - g.V().has('movie', 'title', 'Dead Man').out('belongsTo').in('belongsTo').valueMap();"""): - print(r) - - # Find all movies of Mark Wahlberg - for r in session.execute_graph(""" - g.V().has('person', 'name', 'Mark Wahlberg').in('actor').valueMap();"""): - print(r) - -To see a more graph examples, see `DataStax Graph Examples `_. - -Graph Types -~~~~~~~~~~~ - -Here are the supported graph types with their python representations: - -========== ================ -DSE Graph Python -========== ================ -boolean bool -bigint long, int (PY3) -int int -smallint int -varint int -float float -double double -uuid uuid.UUID -Decimal Decimal -inet str -timestamp datetime.datetime -date datetime.date -time datetime.time -duration datetime.timedelta -point Point -linestring LineString -polygon Polygon -blob bytearray, buffer (PY2), memoryview (PY3), bytes (PY3) -========== ================ - -Graph Row Factory -~~~~~~~~~~~~~~~~~ - -By default (with :class:`.GraphExecutionProfile.row_factory` set to :func:`.graph.graph_object_row_factory`), known graph result -types are unpacked and returned as specialized types (:class:`.Vertex`, :class:`.Edge`). If the result is not one of these -types, a :class:`.graph.Result` is returned, containing the graph result parsed from JSON and removed from its outer dict. -The class has some accessor convenience methods for accessing top-level properties by name (`type`, `properties` above), -or lists by index:: - - # dicts with `__getattr__` or `__getitem__` - result = session.execute_graph("[[key_str: 'value', key_int: 3]]", execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)[0] # Using system exec just because there is no graph defined - result # dse.graph.Result({u'key_str': u'value', u'key_int': 3}) - result.value # {u'key_int': 3, u'key_str': u'value'} (dict) - result.key_str # u'value' - result.key_int # 3 - result['key_str'] # u'value' - result['key_int'] # 3 - - # lists with `__getitem__` - result = session.execute_graph('[[0, 1, 2]]', execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)[0] - result # dse.graph.Result([0, 1, 2]) - result.value # [0, 1, 2] (list) - result[1] # 1 (list[1]) - -You can use a different row factory by setting :attr:`.Session.default_graph_row_factory` or passing it to -:meth:`.Session.execute_graph`. For example, :func:`.graph.single_object_row_factory` returns the JSON result string`, -unparsed. :func:`.graph.graph_result_row_factory` returns parsed, but unmodified results (such that all metadata is retained, -unlike :func:`.graph.graph_object_row_factory`, which sheds some as attributes and properties are unpacked). These results -also provide convenience methods for converting to known types (:meth:`~.Result.as_vertex`, :meth:`~.Result.as_edge`, :meth:`~.Result.as_path`). - -Vertex and Edge properties are never unpacked since their types are unknown. If you know your graph schema and want to -deserialize properties, use the :class:`.GraphSON1Deserializer`. It provides convenient methods to deserialize by types (e.g. -deserialize_date, deserialize_uuid, deserialize_polygon etc.) Example:: - - # ... - from cassandra.graph import GraphSON1Deserializer - - row = session.execute_graph("g.V().toList()")[0] - value = row.properties['my_property_key'][0].value # accessing the VertexProperty value - value = GraphSON1Deserializer.deserialize_timestamp(value) - - print(value) # 2017-06-26 08:27:05 - print(type(value)) # - - -Named Parameters -~~~~~~~~~~~~~~~~ - -Named parameters are passed in a dict to :meth:`.cluster.Session.execute_graph`:: - - result_set = session.execute_graph('[a, b]', {'a': 1, 'b': 2}, execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) - [r.value for r in result_set] # [1, 2] - -All python types listed in `Graph Types`_ can be passed as named parameters and will be serialized -automatically to their graph representation: - -Example:: - - session.execute_graph(""" - g.addV('person'). - property('name', text_value). - property('age', integer_value). - property('birthday', timestamp_value). - property('house_yard', polygon_value).toList() - """, { - 'text_value': 'Mike Smith', - 'integer_value': 34, - 'timestamp_value': datetime.datetime(1967, 12, 30), - 'polygon_value': Polygon(((30, 10), (40, 40), (20, 40), (10, 20), (30, 10))) - }) - - -As with all Execution Profile parameters, graph options can be set in the cluster default (as shown in the first example) -or specified per execution:: - - ep = session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, - graph_options=GraphOptions(graph_name='something-else')) - session.execute_graph(statement, execution_profile=ep) - -Using GraphSON2 Protocol -~~~~~~~~~~~~~~~~~~~~~~~~ - -The default graph protocol used is GraphSON1. However GraphSON1 may -cause problems of type conversion happening during the serialization -of the query to the DSE Graph server, or the deserialization of the -responses back from a string Gremlin query. GraphSON2 offers better -support for the complex data types handled by DSE Graph. - -DSE >=5.0.4 now offers the possibility to use the GraphSON2 protocol -for graph queries. Enabling GraphSON2 can be done by `changing the -graph protocol of the execution profile` and `setting the graphson2 row factory`:: - - from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT - from cassandra.graph import GraphOptions, GraphProtocol, graph_graphson2_row_factory - - # Create a GraphSON2 execution profile - ep = GraphExecutionProfile(graph_options=GraphOptions(graph_name='types', - graph_protocol=GraphProtocol.GRAPHSON_2_0), - row_factory=graph_graphson2_row_factory) - - cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) - session = cluster.connect() - session.execute_graph(...) - -Using GraphSON2, all properties will be automatically deserialized to -its Python representation. Note that it may bring significant -behavioral change at runtime. - -It is generally recommended to switch to GraphSON2 as it brings more -consistent support for complex data types in the Graph driver and will -be activated by default in the next major version (Python dse-driver -driver 3.0). diff --git a/docs/core_graph.rst b/docs/core_graph.rst new file mode 100644 index 0000000000..47dc53d38d --- /dev/null +++ b/docs/core_graph.rst @@ -0,0 +1,434 @@ +DataStax Graph Queries +====================== + +The driver executes graph queries over the Cassandra native protocol. Use +:meth:`.Session.execute_graph` or :meth:`.Session.execute_graph_async` for +executing gremlin queries in DataStax Graph. + +The driver defines three Execution Profiles suitable for graph execution: + +* :data:`~.cluster.EXEC_PROFILE_GRAPH_DEFAULT` +* :data:`~.cluster.EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT` +* :data:`~.cluster.EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT` + +See :doc:`getting_started` and :doc:`execution_profiles` +for more detail on working with profiles. + +In DSE 6.8.0, the Core graph engine has been introduced and is now the default. It +provides a better unified multi-model, performance and scale. This guide +is for graphs that use the core engine. If you work with previous versions of +DSE or existing graphs, see :doc:`classic_graph`. + +Getting Started with Graph and the Core Engine +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +First, we need to create a graph in the system. To access the system API, we +use the system execution profile :: + + from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT + + cluster = Cluster() + session = cluster.connect() + + graph_name = 'movies' + session.execute_graph("system.graph(name).create()", {'name': graph_name}, + execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) + + +Graphs that use the core engine only support GraphSON3. Since they are Cassandra tables under +the hood, we can automatically configure the execution profile with the proper options +(row_factory and graph_protocol) when executing queries. You only need to make sure that +the `graph_name` is set and GraphSON3 will be automatically used:: + + from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT + + graph_name = 'movies' + ep = GraphExecutionProfile(graph_options=GraphOptions(graph_name=graph_name)) + cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) + session = cluster.connect() + session.execute_graph("g.addV(...)") + + +Note that this graph engine detection is based on the metadata. You might experience +some query errors if the graph has been newly created and is not yet in the metadata. This +would result to a badly configured execution profile. If you really want to avoid that, +configure your execution profile explicitly:: + + from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT + from cassandra.graph import GraphOptions, GraphProtocol, graph_graphson3_row_factory + + graph_name = 'movies' + ep_graphson3 = GraphExecutionProfile( + row_factory=graph_graphson3_row_factory, + graph_options=GraphOptions( + graph_protocol=GraphProtocol.GRAPHSON_3_0, + graph_name=graph_name)) + + cluster = Cluster(execution_profiles={'core': ep_graphson3}) + session = cluster.connect() + session.execute_graph("g.addV(...)", execution_profile='core') + + +We are ready to configure our graph schema. We will create a simple one for movies:: + + # A Vertex represents a "thing" in the world. + # Create the genre vertex + query = """ + schema.vertexLabel('genre') + .partitionBy('genreId', Int) + .property('name', Text) + .create() + """ + session.execute_graph(query) + + # Create the person vertex + query = """ + schema.vertexLabel('person') + .partitionBy('personId', Int) + .property('name', Text) + .create() + """ + session.execute_graph(query) + + # Create the movie vertex + query = """ + schema.vertexLabel('movie') + .partitionBy('movieId', Int) + .property('title', Text) + .property('year', Int) + .property('country', Text) + .create() + """ + session.execute_graph(query) + + # An edge represents a relationship between two vertices + # Create our edges + queries = """ + schema.edgeLabel('belongsTo').from('movie').to('genre').create(); + schema.edgeLabel('actor').from('movie').to('person').create(); + """ + session.execute_graph(queries) + + # Indexes to execute graph requests efficiently + + # If you have a node with the search workload enabled (solr), use the following: + indexes = """ + schema.vertexLabel('genre').searchIndex() + .by("name") + .create(); + + schema.vertexLabel('person').searchIndex() + .by("name") + .create(); + + schema.vertexLabel('movie').searchIndex() + .by('title') + .by("year") + .create(); + """ + session.execute_graph(indexes) + + # Otherwise, use secondary indexes: + indexes = """ + schema.vertexLabel('genre') + .secondaryIndex('by_genre') + .by('name') + .create() + + schema.vertexLabel('person') + .secondaryIndex('by_name') + .by('name') + .create() + + schema.vertexLabel('movie') + .secondaryIndex('by_title') + .by('title') + .create() + """ + session.execute_graph(indexes) + +Add some edge indexes (materialized views):: + + indexes = """ + schema.edgeLabel('belongsTo') + .from('movie') + .to('genre') + .materializedView('movie__belongsTo__genre_by_in_genreId') + .ifNotExists() + .partitionBy(IN, 'genreId') + .clusterBy(OUT, 'movieId', Asc) + .create() + + schema.edgeLabel('actor') + .from('movie') + .to('person') + .materializedView('movie__actor__person_by_in_personId') + .ifNotExists() + .partitionBy(IN, 'personId') + .clusterBy(OUT, 'movieId', Asc) + .create() + """ + session.execute_graph(indexes) + +Next, we'll add some data:: + + session.execute_graph(""" + g.addV('genre').property('genreId', 1).property('name', 'Action').next(); + g.addV('genre').property('genreId', 2).property('name', 'Drama').next(); + g.addV('genre').property('genreId', 3).property('name', 'Comedy').next(); + g.addV('genre').property('genreId', 4).property('name', 'Horror').next(); + """) + + session.execute_graph(""" + g.addV('person').property('personId', 1).property('name', 'Mark Wahlberg').next(); + g.addV('person').property('personId', 2).property('name', 'Leonardo DiCaprio').next(); + g.addV('person').property('personId', 3).property('name', 'Iggy Pop').next(); + """) + + session.execute_graph(""" + g.addV('movie').property('movieId', 1).property('title', 'The Happening'). + property('year', 2008).property('country', 'United States').next(); + g.addV('movie').property('movieId', 2).property('title', 'The Italian Job'). + property('year', 2003).property('country', 'United States').next(); + + g.addV('movie').property('movieId', 3).property('title', 'Revolutionary Road'). + property('year', 2008).property('country', 'United States').next(); + g.addV('movie').property('movieId', 4).property('title', 'The Man in the Iron Mask'). + property('year', 1998).property('country', 'United States').next(); + + g.addV('movie').property('movieId', 5).property('title', 'Dead Man'). + property('year', 1995).property('country', 'United States').next(); + """) + +Now that our genre, actor and movie vertices are added, we'll create the relationships (edges) between them:: + + session.execute_graph(""" + genre_horror = g.V().hasLabel('genre').has('name', 'Horror').id().next(); + genre_drama = g.V().hasLabel('genre').has('name', 'Drama').id().next(); + genre_action = g.V().hasLabel('genre').has('name', 'Action').id().next(); + + leo = g.V().hasLabel('person').has('name', 'Leonardo DiCaprio').id().next(); + mark = g.V().hasLabel('person').has('name', 'Mark Wahlberg').id().next(); + iggy = g.V().hasLabel('person').has('name', 'Iggy Pop').id().next(); + + the_happening = g.V().hasLabel('movie').has('title', 'The Happening').id().next(); + the_italian_job = g.V().hasLabel('movie').has('title', 'The Italian Job').id().next(); + rev_road = g.V().hasLabel('movie').has('title', 'Revolutionary Road').id().next(); + man_mask = g.V().hasLabel('movie').has('title', 'The Man in the Iron Mask').id().next(); + dead_man = g.V().hasLabel('movie').has('title', 'Dead Man').id().next(); + + g.addE('belongsTo').from(__.V(the_happening)).to(__.V(genre_horror)).next(); + g.addE('belongsTo').from(__.V(the_italian_job)).to(__.V(genre_action)).next(); + g.addE('belongsTo').from(__.V(rev_road)).to(__.V(genre_drama)).next(); + g.addE('belongsTo').from(__.V(man_mask)).to(__.V(genre_drama)).next(); + g.addE('belongsTo').from(__.V(man_mask)).to(__.V(genre_action)).next(); + g.addE('belongsTo').from(__.V(dead_man)).to(__.V(genre_drama)).next(); + + g.addE('actor').from(__.V(the_happening)).to(__.V(mark)).next(); + g.addE('actor').from(__.V(the_italian_job)).to(__.V(mark)).next(); + g.addE('actor').from(__.V(rev_road)).to(__.V(leo)).next(); + g.addE('actor').from(__.V(man_mask)).to(__.V(leo)).next(); + g.addE('actor').from(__.V(dead_man)).to(__.V(iggy)).next(); + """) + +We are all set. You can now query your graph. Here are some examples:: + + # Find all movies of the genre Drama + for r in session.execute_graph(""" + g.V().has('genre', 'name', 'Drama').in('belongsTo').valueMap();"""): + print(r) + + # Find all movies of the same genre than the movie 'Dead Man' + for r in session.execute_graph(""" + g.V().has('movie', 'title', 'Dead Man').out('belongsTo').in('belongsTo').valueMap();"""): + print(r) + + # Find all movies of Mark Wahlberg + for r in session.execute_graph(""" + g.V().has('person', 'name', 'Mark Wahlberg').in('actor').valueMap();"""): + print(r) + +To see a more graph examples, see `DataStax Graph Examples `_. + +Graph Types for the Core Engine +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Here are the supported graph types with their python representations: + +============ ================= +DSE Graph Python Driver +============ ================= +text str +boolean bool +bigint long +int int +smallint int +varint long +double float +float float +uuid UUID +bigdecimal Decimal +duration Duration (cassandra.util) +inet str or IPV4Address/IPV6Address (if available) +timestamp datetime.datetime +date datetime.date +time datetime.time +polygon Polygon +point Point +linestring LineString +blob bytearray, buffer (PY2), memoryview (PY3), bytes (PY3) +list list +map dict +set set or list + (Can return a list due to numerical values returned by Java) +tuple tuple +udt class or namedtuple +============ ================= + +Named Parameters +~~~~~~~~~~~~~~~~ + +Named parameters are passed in a dict to :meth:`.cluster.Session.execute_graph`:: + + result_set = session.execute_graph('[a, b]', {'a': 1, 'b': 2}, execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) + [r.value for r in result_set] # [1, 2] + +All python types listed in `Graph Types for the Core Engine`_ can be passed as named parameters and will be serialized +automatically to their graph representation: + +Example:: + + session.execute_graph(""" + g.addV('person'). + property('name', text_value). + property('age', integer_value). + property('birthday', timestamp_value). + property('house_yard', polygon_value).next() + """, { + 'text_value': 'Mike Smith', + 'integer_value': 34, + 'timestamp_value': datetime.datetime(1967, 12, 30), + 'polygon_value': Polygon(((30, 10), (40, 40), (20, 40), (10, 20), (30, 10))) + }) + + +As with all Execution Profile parameters, graph options can be set in the cluster default (as shown in the first example) +or specified per execution:: + + ep = session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, + graph_options=GraphOptions(graph_name='something-else')) + session.execute_graph(statement, execution_profile=ep) + +CQL collections, Tuple and UDT +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This is a very interesting feature of the core engine: we can use all CQL data types, including +list, map, set, tuple and udt. Here is an example using all these types:: + + query = """ + schema.type('address') + .property('address', Text) + .property('city', Text) + .property('state', Text) + .create(); + """ + session.execute_graph(query) + + # It works the same way than normal CQL UDT, so we + # can create an udt class and register it + class Address(object): + def __init__(self, address, city, state): + self.address = address + self.city = city + self.state = state + + session.cluster.register_user_type(graph_name, 'address', Address) + + query = """ + schema.vertexLabel('person') + .partitionBy('personId', Int) + .property('address', typeOf('address')) + .property('friends', listOf(Text)) + .property('skills', setOf(Text)) + .property('scores', mapOf(Text, Int)) + .property('last_workout', tupleOf(Text, Date)) + .create() + """ + session.execute_graph(query) + + # insertion example + query = """ + g.addV('person') + .property('personId', pid) + .property('address', address) + .property('friends', friends) + .property('skills', skills) + .property('scores', scores) + .property('last_workout', last_workout) + .next() + """ + + session.execute_graph(query, { + 'pid': 3, + 'address': Address('42 Smith St', 'Quebec', 'QC'), + 'friends': ['Al', 'Mike', 'Cathy'], + 'skills': {'food', 'fight', 'chess'}, + 'scores': {'math': 98, 'french': 3}, + 'last_workout': ('CrossFit', datetime.date(2018, 11, 20)) + }) + +Limitations +----------- + +Since Python is not a strongly-typed language and the UDT/Tuple graphson representation is, you might +get schema errors when trying to write numerical data. Example:: + + session.execute_graph(""" + schema.vertexLabel('test_tuple').partitionBy('id', Int).property('t', tupleOf(Text, Bigint)).create() + """) + + session.execute_graph(""" + g.addV('test_tuple').property('id', 0).property('t', t) + """, + {'t': ('Test', 99))} + ) + + # error: [Invalid query] message="Value component 1 is of type int, not bigint" + +This is because the server requires the client to include a GraphSON schema definition +with every UDT or tuple query. In the general case, the driver can't determine what Graph type +is meant by, e.g., an int value, and so it can't serialize the value with the correct type in the schema. +The driver provides some numerical type-wrapper factories that you can use to specify types: + +* :func:`~.to_int` +* :func:`~.to_bigint` +* :func:`~.to_smallint` +* :func:`~.to_float` +* :func:`~.to_double` + +Here's the working example of the case above:: + + from cassandra.graph import to_bigint + + session.execute_graph(""" + g.addV('test_tuple').property('id', 0).property('t', t) + """, + {'t': ('Test', to_bigint(99))} + ) + +Continuous Paging +~~~~~~~~~~~~~~~~~ + +This is another nice feature that comes with the core engine: continuous paging with +graph queries. If all nodes of the cluster are >= DSE 6.8.0, it is automatically +enabled under the hood to get the best performance. If you want to explicitly +enable/disable it, you can do it through the execution profile:: + + # Disable it + ep = GraphExecutionProfile(..., continuous_paging_options=None)) + cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) + + # Enable with a custom max_pages option + ep = GraphExecutionProfile(..., + continuous_paging_options=ContinuousPagingOptions(max_pages=10))) + cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) diff --git a/docs/graph.rst b/docs/graph.rst index 47dc53d38d..49ec51e73b 100644 --- a/docs/graph.rst +++ b/docs/graph.rst @@ -1,26 +1,8 @@ DataStax Graph Queries ====================== -The driver executes graph queries over the Cassandra native protocol. Use -:meth:`.Session.execute_graph` or :meth:`.Session.execute_graph_async` for -executing gremlin queries in DataStax Graph. - -The driver defines three Execution Profiles suitable for graph execution: - -* :data:`~.cluster.EXEC_PROFILE_GRAPH_DEFAULT` -* :data:`~.cluster.EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT` -* :data:`~.cluster.EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT` - -See :doc:`getting_started` and :doc:`execution_profiles` -for more detail on working with profiles. - -In DSE 6.8.0, the Core graph engine has been introduced and is now the default. It -provides a better unified multi-model, performance and scale. This guide -is for graphs that use the core engine. If you work with previous versions of -DSE or existing graphs, see :doc:`classic_graph`. - -Getting Started with Graph and the Core Engine -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Getting Started +~~~~~~~~~~~~~~~ First, we need to create a graph in the system. To access the system API, we use the system execution profile :: @@ -31,204 +13,129 @@ use the system execution profile :: session = cluster.connect() graph_name = 'movies' - session.execute_graph("system.graph(name).create()", {'name': graph_name}, + session.execute_graph("system.graph(name).ifNotExists().create()", {'name': graph_name}, execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) -Graphs that use the core engine only support GraphSON3. Since they are Cassandra tables under -the hood, we can automatically configure the execution profile with the proper options -(row_factory and graph_protocol) when executing queries. You only need to make sure that -the `graph_name` is set and GraphSON3 will be automatically used:: - - from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT - - graph_name = 'movies' - ep = GraphExecutionProfile(graph_options=GraphOptions(graph_name=graph_name)) - cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) - session = cluster.connect() - session.execute_graph("g.addV(...)") - +To execute requests on our newly created graph, we need to setup an execution +profile. Additionally, we also need to set the schema_mode to `development` +for the schema creation:: -Note that this graph engine detection is based on the metadata. You might experience -some query errors if the graph has been newly created and is not yet in the metadata. This -would result to a badly configured execution profile. If you really want to avoid that, -configure your execution profile explicitly:: from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT - from cassandra.graph import GraphOptions, GraphProtocol, graph_graphson3_row_factory + from cassandra.graph import GraphOptions graph_name = 'movies' - ep_graphson3 = GraphExecutionProfile( - row_factory=graph_graphson3_row_factory, - graph_options=GraphOptions( - graph_protocol=GraphProtocol.GRAPHSON_3_0, - graph_name=graph_name)) + ep = GraphExecutionProfile(graph_options=GraphOptions(graph_name=graph_name)) - cluster = Cluster(execution_profiles={'core': ep_graphson3}) + cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) session = cluster.connect() - session.execute_graph("g.addV(...)", execution_profile='core') + + session.execute_graph("schema.config().option('graph.schema_mode').set('development')") We are ready to configure our graph schema. We will create a simple one for movies:: - # A Vertex represents a "thing" in the world. - # Create the genre vertex - query = """ - schema.vertexLabel('genre') - .partitionBy('genreId', Int) - .property('name', Text) - .create() + # properties are used to define a vertex + properties = """ + schema.propertyKey("genreId").Text().create(); + schema.propertyKey("personId").Text().create(); + schema.propertyKey("movieId").Text().create(); + schema.propertyKey("name").Text().create(); + schema.propertyKey("title").Text().create(); + schema.propertyKey("year").Int().create(); + schema.propertyKey("country").Text().create(); """ - session.execute_graph(query) - - # Create the person vertex - query = """ - schema.vertexLabel('person') - .partitionBy('personId', Int) - .property('name', Text) - .create() - """ - session.execute_graph(query) - - # Create the movie vertex - query = """ - schema.vertexLabel('movie') - .partitionBy('movieId', Int) - .property('title', Text) - .property('year', Int) - .property('country', Text) - .create() - """ - session.execute_graph(query) - - # An edge represents a relationship between two vertices - # Create our edges - queries = """ - schema.edgeLabel('belongsTo').from('movie').to('genre').create(); - schema.edgeLabel('actor').from('movie').to('person').create(); - """ - session.execute_graph(queries) - # Indexes to execute graph requests efficiently + session.execute_graph(properties) # we can execute multiple statements in a single request - # If you have a node with the search workload enabled (solr), use the following: - indexes = """ - schema.vertexLabel('genre').searchIndex() - .by("name") - .create(); - - schema.vertexLabel('person').searchIndex() - .by("name") - .create(); - - schema.vertexLabel('movie').searchIndex() - .by('title') - .by("year") - .create(); + # A Vertex represents a "thing" in the world. + vertices = """ + schema.vertexLabel("genre").properties("genreId","name").create(); + schema.vertexLabel("person").properties("personId","name").create(); + schema.vertexLabel("movie").properties("movieId","title","year","country").create(); """ - session.execute_graph(indexes) - # Otherwise, use secondary indexes: - indexes = """ - schema.vertexLabel('genre') - .secondaryIndex('by_genre') - .by('name') - .create() - - schema.vertexLabel('person') - .secondaryIndex('by_name') - .by('name') - .create() - - schema.vertexLabel('movie') - .secondaryIndex('by_title') - .by('title') - .create() + session.execute_graph(vertices) + + # An edge represents a relationship between two vertices + edges = """ + schema.edgeLabel("belongsTo").single().connection("movie","genre").create(); + schema.edgeLabel("actor").connection("movie","person").create(); """ - session.execute_graph(indexes) -Add some edge indexes (materialized views):: + session.execute_graph(edges) + # Indexes to execute graph requests efficiently indexes = """ - schema.edgeLabel('belongsTo') - .from('movie') - .to('genre') - .materializedView('movie__belongsTo__genre_by_in_genreId') - .ifNotExists() - .partitionBy(IN, 'genreId') - .clusterBy(OUT, 'movieId', Asc) - .create() - - schema.edgeLabel('actor') - .from('movie') - .to('person') - .materializedView('movie__actor__person_by_in_personId') - .ifNotExists() - .partitionBy(IN, 'personId') - .clusterBy(OUT, 'movieId', Asc) - .create() + schema.vertexLabel("genre").index("genresById").materialized().by("genreId").add(); + schema.vertexLabel("genre").index("genresByName").materialized().by("name").add(); + schema.vertexLabel("person").index("personsById").materialized().by("personId").add(); + schema.vertexLabel("person").index("personsByName").materialized().by("name").add(); + schema.vertexLabel("movie").index("moviesById").materialized().by("movieId").add(); + schema.vertexLabel("movie").index("moviesByTitle").materialized().by("title").add(); + schema.vertexLabel("movie").index("moviesByYear").secondary().by("year").add(); """ - session.execute_graph(indexes) Next, we'll add some data:: session.execute_graph(""" - g.addV('genre').property('genreId', 1).property('name', 'Action').next(); - g.addV('genre').property('genreId', 2).property('name', 'Drama').next(); - g.addV('genre').property('genreId', 3).property('name', 'Comedy').next(); - g.addV('genre').property('genreId', 4).property('name', 'Horror').next(); + g.addV('genre').property('genreId', 1).property('name', 'Action').next(); + g.addV('genre').property('genreId', 2).property('name', 'Drama').next(); + g.addV('genre').property('genreId', 3).property('name', 'Comedy').next(); + g.addV('genre').property('genreId', 4).property('name', 'Horror').next(); """) session.execute_graph(""" - g.addV('person').property('personId', 1).property('name', 'Mark Wahlberg').next(); - g.addV('person').property('personId', 2).property('name', 'Leonardo DiCaprio').next(); - g.addV('person').property('personId', 3).property('name', 'Iggy Pop').next(); + g.addV('person').property('personId', 1).property('name', 'Mark Wahlberg').next(); + g.addV('person').property('personId', 2).property('name', 'Leonardo DiCaprio').next(); + g.addV('person').property('personId', 3).property('name', 'Iggy Pop').next(); """) session.execute_graph(""" - g.addV('movie').property('movieId', 1).property('title', 'The Happening'). - property('year', 2008).property('country', 'United States').next(); - g.addV('movie').property('movieId', 2).property('title', 'The Italian Job'). - property('year', 2003).property('country', 'United States').next(); - - g.addV('movie').property('movieId', 3).property('title', 'Revolutionary Road'). - property('year', 2008).property('country', 'United States').next(); - g.addV('movie').property('movieId', 4).property('title', 'The Man in the Iron Mask'). - property('year', 1998).property('country', 'United States').next(); - - g.addV('movie').property('movieId', 5).property('title', 'Dead Man'). - property('year', 1995).property('country', 'United States').next(); + g.addV('movie').property('movieId', 1).property('title', 'The Happening'). + property('year', 2008).property('country', 'United States').next(); + g.addV('movie').property('movieId', 2).property('title', 'The Italian Job'). + property('year', 2003).property('country', 'United States').next(); + + g.addV('movie').property('movieId', 3).property('title', 'Revolutionary Road'). + property('year', 2008).property('country', 'United States').next(); + g.addV('movie').property('movieId', 4).property('title', 'The Man in the Iron Mask'). + property('year', 1998).property('country', 'United States').next(); + + g.addV('movie').property('movieId', 5).property('title', 'Dead Man'). + property('year', 1995).property('country', 'United States').next(); """) Now that our genre, actor and movie vertices are added, we'll create the relationships (edges) between them:: session.execute_graph(""" - genre_horror = g.V().hasLabel('genre').has('name', 'Horror').id().next(); - genre_drama = g.V().hasLabel('genre').has('name', 'Drama').id().next(); - genre_action = g.V().hasLabel('genre').has('name', 'Action').id().next(); - - leo = g.V().hasLabel('person').has('name', 'Leonardo DiCaprio').id().next(); - mark = g.V().hasLabel('person').has('name', 'Mark Wahlberg').id().next(); - iggy = g.V().hasLabel('person').has('name', 'Iggy Pop').id().next(); - - the_happening = g.V().hasLabel('movie').has('title', 'The Happening').id().next(); - the_italian_job = g.V().hasLabel('movie').has('title', 'The Italian Job').id().next(); - rev_road = g.V().hasLabel('movie').has('title', 'Revolutionary Road').id().next(); - man_mask = g.V().hasLabel('movie').has('title', 'The Man in the Iron Mask').id().next(); - dead_man = g.V().hasLabel('movie').has('title', 'Dead Man').id().next(); - - g.addE('belongsTo').from(__.V(the_happening)).to(__.V(genre_horror)).next(); - g.addE('belongsTo').from(__.V(the_italian_job)).to(__.V(genre_action)).next(); - g.addE('belongsTo').from(__.V(rev_road)).to(__.V(genre_drama)).next(); - g.addE('belongsTo').from(__.V(man_mask)).to(__.V(genre_drama)).next(); - g.addE('belongsTo').from(__.V(man_mask)).to(__.V(genre_action)).next(); - g.addE('belongsTo').from(__.V(dead_man)).to(__.V(genre_drama)).next(); - - g.addE('actor').from(__.V(the_happening)).to(__.V(mark)).next(); - g.addE('actor').from(__.V(the_italian_job)).to(__.V(mark)).next(); - g.addE('actor').from(__.V(rev_road)).to(__.V(leo)).next(); - g.addE('actor').from(__.V(man_mask)).to(__.V(leo)).next(); - g.addE('actor').from(__.V(dead_man)).to(__.V(iggy)).next(); + genre_horror = g.V().hasLabel('genre').has('name', 'Horror').next(); + genre_drama = g.V().hasLabel('genre').has('name', 'Drama').next(); + genre_action = g.V().hasLabel('genre').has('name', 'Action').next(); + + leo = g.V().hasLabel('person').has('name', 'Leonardo DiCaprio').next(); + mark = g.V().hasLabel('person').has('name', 'Mark Wahlberg').next(); + iggy = g.V().hasLabel('person').has('name', 'Iggy Pop').next(); + + the_happening = g.V().hasLabel('movie').has('title', 'The Happening').next(); + the_italian_job = g.V().hasLabel('movie').has('title', 'The Italian Job').next(); + rev_road = g.V().hasLabel('movie').has('title', 'Revolutionary Road').next(); + man_mask = g.V().hasLabel('movie').has('title', 'The Man in the Iron Mask').next(); + dead_man = g.V().hasLabel('movie').has('title', 'Dead Man').next(); + + the_happening.addEdge('belongsTo', genre_horror); + the_italian_job.addEdge('belongsTo', genre_action); + rev_road.addEdge('belongsTo', genre_drama); + man_mask.addEdge('belongsTo', genre_drama); + man_mask.addEdge('belongsTo', genre_action); + dead_man.addEdge('belongsTo', genre_drama); + + the_happening.addEdge('actor', mark); + the_italian_job.addEdge('actor', mark); + rev_road.addEdge('actor', leo); + man_mask.addEdge('actor', leo); + dead_man.addEdge('actor', iggy); """) We are all set. You can now query your graph. Here are some examples:: @@ -237,7 +144,7 @@ We are all set. You can now query your graph. Here are some examples:: for r in session.execute_graph(""" g.V().has('genre', 'name', 'Drama').in('belongsTo').valueMap();"""): print(r) - + # Find all movies of the same genre than the movie 'Dead Man' for r in session.execute_graph(""" g.V().has('movie', 'title', 'Dead Man').out('belongsTo').in('belongsTo').valueMap();"""): @@ -250,40 +157,78 @@ We are all set. You can now query your graph. Here are some examples:: To see a more graph examples, see `DataStax Graph Examples `_. -Graph Types for the Core Engine -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Graph Types +~~~~~~~~~~~ Here are the supported graph types with their python representations: -============ ================= -DSE Graph Python Driver -============ ================= -text str -boolean bool -bigint long -int int -smallint int -varint long -double float -float float -uuid UUID -bigdecimal Decimal -duration Duration (cassandra.util) -inet str or IPV4Address/IPV6Address (if available) -timestamp datetime.datetime -date datetime.date -time datetime.time -polygon Polygon -point Point -linestring LineString -blob bytearray, buffer (PY2), memoryview (PY3), bytes (PY3) -list list -map dict -set set or list - (Can return a list due to numerical values returned by Java) -tuple tuple -udt class or namedtuple -============ ================= +========== ================ +DSE Graph Python +========== ================ +boolean bool +bigint long, int (PY3) +int int +smallint int +varint int +float float +double double +uuid uuid.UUID +Decimal Decimal +inet str +timestamp datetime.datetime +date datetime.date +time datetime.time +duration datetime.timedelta +point Point +linestring LineString +polygon Polygon +blob bytearray, buffer (PY2), memoryview (PY3), bytes (PY3) +========== ================ + +Graph Row Factory +~~~~~~~~~~~~~~~~~ + +By default (with :class:`.GraphExecutionProfile.row_factory` set to :func:`.graph.graph_object_row_factory`), known graph result +types are unpacked and returned as specialized types (:class:`.Vertex`, :class:`.Edge`). If the result is not one of these +types, a :class:`.graph.Result` is returned, containing the graph result parsed from JSON and removed from its outer dict. +The class has some accessor convenience methods for accessing top-level properties by name (`type`, `properties` above), +or lists by index:: + + # dicts with `__getattr__` or `__getitem__` + result = session.execute_graph("[[key_str: 'value', key_int: 3]]", execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)[0] # Using system exec just because there is no graph defined + result # dse.graph.Result({u'key_str': u'value', u'key_int': 3}) + result.value # {u'key_int': 3, u'key_str': u'value'} (dict) + result.key_str # u'value' + result.key_int # 3 + result['key_str'] # u'value' + result['key_int'] # 3 + + # lists with `__getitem__` + result = session.execute_graph('[[0, 1, 2]]', execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)[0] + result # dse.graph.Result([0, 1, 2]) + result.value # [0, 1, 2] (list) + result[1] # 1 (list[1]) + +You can use a different row factory by setting :attr:`.Session.default_graph_row_factory` or passing it to +:meth:`.Session.execute_graph`. For example, :func:`.graph.single_object_row_factory` returns the JSON result string`, +unparsed. :func:`.graph.graph_result_row_factory` returns parsed, but unmodified results (such that all metadata is retained, +unlike :func:`.graph.graph_object_row_factory`, which sheds some as attributes and properties are unpacked). These results +also provide convenience methods for converting to known types (:meth:`~.Result.as_vertex`, :meth:`~.Result.as_edge`, :meth:`~.Result.as_path`). + +Vertex and Edge properties are never unpacked since their types are unknown. If you know your graph schema and want to +deserialize properties, use the :class:`.GraphSON1Deserializer`. It provides convenient methods to deserialize by types (e.g. +deserialize_date, deserialize_uuid, deserialize_polygon etc.) Example:: + + # ... + from cassandra.graph import GraphSON1Deserializer + + row = session.execute_graph("g.V().toList()")[0] + value = row.properties['my_property_key'][0].value # accessing the VertexProperty value + value = GraphSON1Deserializer.deserialize_timestamp(value) + + print(value) # 2017-06-26 08:27:05 + print(type(value)) # + Named Parameters ~~~~~~~~~~~~~~~~ @@ -293,7 +238,7 @@ Named parameters are passed in a dict to :meth:`.cluster.Session.execute_graph`: result_set = session.execute_graph('[a, b]', {'a': 1, 'b': 2}, execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) [r.value for r in result_set] # [1, 2] -All python types listed in `Graph Types for the Core Engine`_ can be passed as named parameters and will be serialized +All python types listed in `Graph Types`_ can be passed as named parameters and will be serialized automatically to their graph representation: Example:: @@ -303,7 +248,7 @@ Example:: property('name', text_value). property('age', integer_value). property('birthday', timestamp_value). - property('house_yard', polygon_value).next() + property('house_yard', polygon_value).toList() """, { 'text_value': 'Mike Smith', 'integer_value': 34, @@ -319,116 +264,36 @@ or specified per execution:: graph_options=GraphOptions(graph_name='something-else')) session.execute_graph(statement, execution_profile=ep) -CQL collections, Tuple and UDT -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Using GraphSON2 Protocol +~~~~~~~~~~~~~~~~~~~~~~~~ -This is a very interesting feature of the core engine: we can use all CQL data types, including -list, map, set, tuple and udt. Here is an example using all these types:: +The default graph protocol used is GraphSON1. However GraphSON1 may +cause problems of type conversion happening during the serialization +of the query to the DSE Graph server, or the deserialization of the +responses back from a string Gremlin query. GraphSON2 offers better +support for the complex data types handled by DSE Graph. - query = """ - schema.type('address') - .property('address', Text) - .property('city', Text) - .property('state', Text) - .create(); - """ - session.execute_graph(query) - - # It works the same way than normal CQL UDT, so we - # can create an udt class and register it - class Address(object): - def __init__(self, address, city, state): - self.address = address - self.city = city - self.state = state - - session.cluster.register_user_type(graph_name, 'address', Address) - - query = """ - schema.vertexLabel('person') - .partitionBy('personId', Int) - .property('address', typeOf('address')) - .property('friends', listOf(Text)) - .property('skills', setOf(Text)) - .property('scores', mapOf(Text, Int)) - .property('last_workout', tupleOf(Text, Date)) - .create() - """ - session.execute_graph(query) - - # insertion example - query = """ - g.addV('person') - .property('personId', pid) - .property('address', address) - .property('friends', friends) - .property('skills', skills) - .property('scores', scores) - .property('last_workout', last_workout) - .next() - """ - - session.execute_graph(query, { - 'pid': 3, - 'address': Address('42 Smith St', 'Quebec', 'QC'), - 'friends': ['Al', 'Mike', 'Cathy'], - 'skills': {'food', 'fight', 'chess'}, - 'scores': {'math': 98, 'french': 3}, - 'last_workout': ('CrossFit', datetime.date(2018, 11, 20)) - }) - -Limitations ------------ +DSE >=5.0.4 now offers the possibility to use the GraphSON2 protocol +for graph queries. Enabling GraphSON2 can be done by `changing the +graph protocol of the execution profile` and `setting the graphson2 row factory`:: -Since Python is not a strongly-typed language and the UDT/Tuple graphson representation is, you might -get schema errors when trying to write numerical data. Example:: - - session.execute_graph(""" - schema.vertexLabel('test_tuple').partitionBy('id', Int).property('t', tupleOf(Text, Bigint)).create() - """) - - session.execute_graph(""" - g.addV('test_tuple').property('id', 0).property('t', t) - """, - {'t': ('Test', 99))} - ) - - # error: [Invalid query] message="Value component 1 is of type int, not bigint" - -This is because the server requires the client to include a GraphSON schema definition -with every UDT or tuple query. In the general case, the driver can't determine what Graph type -is meant by, e.g., an int value, and so it can't serialize the value with the correct type in the schema. -The driver provides some numerical type-wrapper factories that you can use to specify types: - -* :func:`~.to_int` -* :func:`~.to_bigint` -* :func:`~.to_smallint` -* :func:`~.to_float` -* :func:`~.to_double` - -Here's the working example of the case above:: - - from cassandra.graph import to_bigint - - session.execute_graph(""" - g.addV('test_tuple').property('id', 0).property('t', t) - """, - {'t': ('Test', to_bigint(99))} - ) - -Continuous Paging -~~~~~~~~~~~~~~~~~ + from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT + from cassandra.graph import GraphOptions, GraphProtocol, graph_graphson2_row_factory -This is another nice feature that comes with the core engine: continuous paging with -graph queries. If all nodes of the cluster are >= DSE 6.8.0, it is automatically -enabled under the hood to get the best performance. If you want to explicitly -enable/disable it, you can do it through the execution profile:: + # Create a GraphSON2 execution profile + ep = GraphExecutionProfile(graph_options=GraphOptions(graph_name='types', + graph_protocol=GraphProtocol.GRAPHSON_2_0), + row_factory=graph_graphson2_row_factory) - # Disable it - ep = GraphExecutionProfile(..., continuous_paging_options=None)) cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) + session = cluster.connect() + session.execute_graph(...) - # Enable with a custom max_pages option - ep = GraphExecutionProfile(..., - continuous_paging_options=ContinuousPagingOptions(max_pages=10))) - cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) +Using GraphSON2, all properties will be automatically deserialized to +its Python representation. Note that it may bring significant +behavioral change at runtime. + +It is generally recommended to switch to GraphSON2 as it brings more +consistent support for complex data types in the Graph driver and will +be activated by default in the next major version (Python dse-driver +driver 3.0). diff --git a/docs/graph_fluent.rst b/docs/graph_fluent.rst index 9a039847c4..c79aa1ecf4 100644 --- a/docs/graph_fluent.rst +++ b/docs/graph_fluent.rst @@ -71,28 +71,6 @@ If you want to change execution property defaults, please see the :doc:`Executio for a more generalized discussion of the API. Graph traversal queries use the same execution profile defined for DSE graph. If you need to change the default properties, please refer to the :doc:`DSE Graph query documentation page ` -Configuring a Traversal Execution Profile for the Core graph engine -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To execute a traversal query with graphs that use the core engine, you need to configure -a graphson3 execution profile: - -.. code-block:: python - - from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_DEFAULT - from cassandra.datastax.graph import GraphProtocol - from cassandra.datastax.graph.fluent import DseGraph - - ep_graphson3 = DseGraph.create_execution_profile( - 'my_core_graph_name', - graph_protocol=GraphProtocol.GRAPHSON_3_0 - ) - cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep_graphson3}) - - g = DseGraph.traversal_source(session) - print g.V().toList() - - Explicit Graph Traversal Execution with a DSE Session ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -121,22 +99,6 @@ Below is an example of explicit execution. For this example, assume the schema h for result in session.execute_graph(v_query): pprint(result.value) -Converting a traversal to a bytecode query for core graphs require some more work, because we -need the cluster context for UDT and tuple types: - -.. code-block:: python - - g = DseGraph.traversal_source(session=session) - context = { - 'cluster': cluster, - 'graph_name': 'the_graph_for_the_query' - } - addV_query = DseGraph.query_from_traversal( - g.addV('genre').property('genreId', 1).property('name', 'Action'), - graph_protocol=GraphProtocol.GRAPHSON_3_0, - context=context - ) - Implicit Graph Traversal Execution with TinkerPop ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/docs/index.rst b/docs/index.rst index f79cc36ac8..3bd6312f7a 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -54,10 +54,7 @@ Contents Working with DSE geometry types :doc:`graph` - Graph queries with the Core engine - -:doc:`classic_graph` - Graph queries with the Classic engine + Graph queries :doc:`graph_fluent` DataStax Graph Fluent API From 72ee31bfbdf9176418c3d6e6268c7ffeee7ad003 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 25 Feb 2020 20:49:49 -0500 Subject: [PATCH 1776/2005] release 3.22: bump docs.yaml (no core graph docs) --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index e19ccd7d9e..4b34f6cb5f 100644 --- a/docs.yaml +++ b/docs.yaml @@ -23,7 +23,7 @@ sections: CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - name: '3.22' - ref: e347d8f3 + ref: 02055657 - name: '3.21' ref: 5589d96b - name: '3.20' From c0bb0427e705e04c9ea65ac31a6c69cf6f932a94 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 26 Feb 2020 13:16:35 -0600 Subject: [PATCH 1777/2005] release 3.22: bump docs.yaml again --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index 4b34f6cb5f..fe5bbf548e 100644 --- a/docs.yaml +++ b/docs.yaml @@ -23,7 +23,7 @@ sections: CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - name: '3.22' - ref: 02055657 + ref: a1f8e102 - name: '3.21' ref: 5589d96b - name: '3.20' From 75affb70779a9d8983a070564a5ec382fe4541f1 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 12 Mar 2020 15:17:56 -0500 Subject: [PATCH 1778/2005] PYTHON-1228 Provide log wrapper to asyncore so it doesnt error on shutdown --- cassandra/io/asyncorereactor.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 1a6b9fd3e9..e07aab4697 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -35,7 +35,21 @@ from cassandra.connection import Connection, ConnectionShutdown, NONBLOCKING, Timer, TimerManager -log = logging.getLogger(__name__) + +# TODO: Remove when Python 2 is removed +class LogWrapper(object): + """ PYTHON-1228. If our logger has disappeared, there's nothing we can do, so just execute nothing """ + def __init__(self): + self._log = logging.getLogger(__name__) + + def __getattr__(self, name): + try: + return getattr(self._log, name) + except: + return lambda *args, **kwargs: None + + +log = LogWrapper() _dispatcher_map = {} From cbcb2259c4adcab60d2c5844080058dd505e5e64 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 13 Mar 2020 09:38:20 -0500 Subject: [PATCH 1779/2005] Changelog entry for PYTHON-1228 --- CHANGELOG.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index fe8c2c13e5..c28d9229f9 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,11 @@ +3.23.0 +====== +Unreleased + +Bug Fixes +--------- +* Asyncore logging exception on shutdown (PYTHON-1228) + 3.22.0 ====== February 26, 2020 From 316937f105be9f9170b9bf74c20895d96f9880e7 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 16 Mar 2020 07:49:35 -0500 Subject: [PATCH 1780/2005] PYTHON-1226 Remove all user-facing references to Apollo or Constellation (#1070) --- CHANGELOG.rst | 2 +- README.rst | 2 +- cassandra/cluster.py | 2 +- cassandra/datastax/cloud/__init__.py | 6 +++--- docs/cloud.rst | 8 ++++---- docs/index.rst | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index c28d9229f9..413b0631a2 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -154,7 +154,7 @@ October 28, 2019 Features -------- -* DataStax Apollo Support (PYTHON-1074) +* DataStax Astra Support (PYTHON-1074) * Use 4.0 schema parser in 4 alpha and snapshot builds (PYTHON-1158) Bug Fixes diff --git a/README.rst b/README.rst index 6fabdf7531..6bd3e45749 100644 --- a/README.rst +++ b/README.rst @@ -22,7 +22,7 @@ Features * Configurable `load balancing `_ and `retry policies `_ * `Concurrent execution utilities `_ * `Object mapper `_ -* `Connecting to DataStax Apollo database (cloud) `_ +* `Connecting to DataStax Astra database (cloud) `_ * DSE Graph execution API * DSE Geometric type serialization * DSE PlainText and GSSAPI authentication diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 58051104a1..255d035dac 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3469,7 +3469,7 @@ def connect(self): self._protocol_version = self._cluster.protocol_version self._set_new_connection(self._reconnect_internal()) - self._cluster.metadata.dbaas = self._connection._product_type == dscloud.PRODUCT_APOLLO + self._cluster.metadata.dbaas = self._connection._product_type == dscloud.DATASTAX_CLOUD_PRODUCT_TYPE def _set_new_connection(self, conn): """ diff --git a/cassandra/datastax/cloud/__init__.py b/cassandra/datastax/cloud/__init__.py index 46fd822b87..f79d72a7a2 100644 --- a/cassandra/datastax/cloud/__init__.py +++ b/cassandra/datastax/cloud/__init__.py @@ -41,7 +41,7 @@ __all__ = ['get_cloud_config'] -PRODUCT_APOLLO = "DATASTAX_APOLLO" +DATASTAX_CLOUD_PRODUCT_TYPE = "DATASTAX_APOLLO" class CloudConfig(object): @@ -138,7 +138,7 @@ def read_metadata_info(config, cloud_config): except Exception as e: log.exception(e) raise DriverException("Unable to connect to the metadata service at %s. " - "Check the cluster status in the Constellation cloud console. " % url) + "Check the cluster status in the cloud console. " % url) if response.code != 200: raise DriverException(("Error while fetching the metadata at: %s. " @@ -183,7 +183,7 @@ def _pyopenssl_context_from_cert(ca_cert_location, cert_location, key_location): except ImportError as e: six.reraise( ImportError, - ImportError("PyOpenSSL must be installed to connect to Apollo with the Eventlet or Twisted event loops"), + ImportError("PyOpenSSL must be installed to connect to Astra with the Eventlet or Twisted event loops"), sys.exc_info()[2] ) ssl_context = SSL.Context(SSL.TLSv1_METHOD) diff --git a/docs/cloud.rst b/docs/cloud.rst index 7ddb763a42..e8cc2fa750 100644 --- a/docs/cloud.rst +++ b/docs/cloud.rst @@ -2,9 +2,9 @@ Cloud ----- Connecting ========== -To connect to a DataStax Apollo cluster: +To connect to a DataStax Astra cluster: -1. Download the secure connect bundle from your Apollo account. +1. Download the secure connect bundle from your Astra account. 2. Connect to your cluster with .. code-block:: python @@ -19,9 +19,9 @@ To connect to a DataStax Apollo cluster: cluster = Cluster(cloud=cloud_config, auth_provider=auth_provider) session = cluster.connect() -Apollo Differences +Astra Differences ================== -In most circumstances, the client code for interacting with an Apollo cluster will be the same as interacting with any other Cassandra cluster. The exceptions being: +In most circumstances, the client code for interacting with an Astra cluster will be the same as interacting with any other Cassandra cluster. The exceptions being: * A cloud configuration must be passed to a :class:`~.Cluster` instance via the `cloud` attribute (as demonstrated above). * An SSL connection will be established automatically. Manual SSL configuration is not allowed, and using `ssl_context` or `ssl_options` will result in an exception. diff --git a/docs/index.rst b/docs/index.rst index 3bd6312f7a..d18cf6acd2 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -48,7 +48,7 @@ Contents Some discussion on the driver's approach to working with timestamp, date, time types :doc:`cloud` - A guide to connecting to Datastax Apollo + A guide to connecting to Datastax Astra :doc:`geo_types` Working with DSE geometry types From 1842c2e4d2d85a46fc677e171851f3f493907b70 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 11 Mar 2020 20:55:30 -0500 Subject: [PATCH 1781/2005] PYTHON-1207 Support transient replication in metadata --- CHANGELOG.rst | 4 ++ cassandra/metadata.py | 50 ++++++++++++++++------ tests/unit/test_metadata.py | 83 +++++++++++++++++++++++++++++++++++++ 3 files changed, 125 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 413b0631a2..9cd64d8fc8 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,10 @@ ====== Unreleased +Features +-------- +Transient Replication Support (PYTHON-1207) + Bug Fixes --------- * Asyncore logging exception on shutdown (PYTHON-1228) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 5cdcef807e..a839c2206c 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -450,18 +450,37 @@ def make_token_replica_map(self, token_to_host_owner, ring): return {} +def parse_replication_factor(input_rf): + """ + Given the inputted replication factor, returns a tuple containing number of total replicas + and number of transient replicas + """ + transient_replicas = None + try: + total_replicas = int(input_rf) + except ValueError: + try: + rf = input_rf.split('/') + total_replicas, transient_replicas = int(rf[0]), int(rf[1]) + except Exception: + raise ValueError("Unable to determine replication factor from: {}".format(input_rf)) + return total_replicas, transient_replicas + + class SimpleStrategy(ReplicationStrategy): replication_factor = None """ The replication factor for this keyspace. """ + transient_replicas = None + """ + The number of transient replicas for this keyspace. + """ def __init__(self, options_map): - try: - self.replication_factor = int(options_map['replication_factor']) - except Exception: - raise ValueError("SimpleStrategy requires an integer 'replication_factor' option") + self._raw_replication_factor = options_map['replication_factor'] + self.replication_factor, self.transient_replicas = parse_replication_factor(self._raw_replication_factor) def make_token_replica_map(self, token_to_host_owner, ring): replica_map = {} @@ -482,14 +501,14 @@ def export_for_schema(self): Returns a string version of these replication options which are suitable for use in a CREATE KEYSPACE statement. """ - return "{'class': 'SimpleStrategy', 'replication_factor': '%d'}" \ - % (self.replication_factor,) + return "{'class': 'SimpleStrategy', 'replication_factor': '%s'}" \ + % (self._raw_replication_factor,) def __eq__(self, other): if not isinstance(other, SimpleStrategy): return False - return self.replication_factor == other.replication_factor + return str(self._raw_replication_factor) == str(other._raw_replication_factor) class NetworkTopologyStrategy(ReplicationStrategy): @@ -500,12 +519,19 @@ class NetworkTopologyStrategy(ReplicationStrategy): """ def __init__(self, dc_replication_factors): - self.dc_replication_factors = dict( - (str(k), int(v)) for k, v in dc_replication_factors.items()) + try: + self.dc_replication_factors = dict( + (str(k), int(v)) for k, v in dc_replication_factors.items()) + except ValueError: + self.dc_replication_factors = dict( + (str(k), str(v)) for k, v in dc_replication_factors.items()) def make_token_replica_map(self, token_to_host_owner, ring): - dc_rf_map = dict((dc, int(rf)) - for dc, rf in self.dc_replication_factors.items() if rf > 0) + dc_rf_map = {} + for dc, rf in self.dc_replication_factors.items(): + total_rf = parse_replication_factor(rf)[0] + if total_rf > 0: + dc_rf_map[dc] = total_rf # build a map of DCs to lists of indexes into `ring` for tokens that # belong to that DC @@ -586,7 +612,7 @@ def export_for_schema(self): """ ret = "{'class': 'NetworkTopologyStrategy'" for dc, repl_factor in sorted(self.dc_replication_factors.items()): - ret += ", '%s': '%d'" % (dc, repl_factor) + ret += ", '%s': '%s'" % (dc, repl_factor) return ret + "}" def __eq__(self, other): diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 0ab64a4fcc..174ac1493b 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -85,6 +85,89 @@ def test_replication_strategy(self): self.assertRaises(NotImplementedError, rs.make_token_replica_map, None, None) self.assertRaises(NotImplementedError, rs.export_for_schema) + def test_simple_replication_type_parsing(self): + """ Test equality between passing numeric and string replication factor for simple strategy """ + rs = ReplicationStrategy() + + simple_int = rs.create('SimpleStrategy', {'replication_factor': 3}) + simple_str = rs.create('SimpleStrategy', {'replication_factor': '3'}) + + self.assertEqual(simple_int.export_for_schema(), simple_str.export_for_schema()) + self.assertEqual(simple_int, simple_str) + + # make token replica map + ring = [MD5Token(0), MD5Token(1), MD5Token(2)] + hosts = [Host('dc1.{}'.format(host), SimpleConvictionPolicy) for host in range(3)] + token_to_host = dict(zip(ring, hosts)) + self.assertEqual( + simple_int.make_token_replica_map(token_to_host, ring), + simple_str.make_token_replica_map(token_to_host, ring) + ) + + def test_transient_replication_parsing(self): + """ Test that we can PARSE a transient replication factor for SimpleStrategy """ + rs = ReplicationStrategy() + + simple_transient = rs.create('SimpleStrategy', {'replication_factor': '3/1'}) + self.assertEqual(simple_transient.replication_factor, 3) + self.assertEqual(simple_transient.transient_replicas, 1) + self.assertIn("'replication_factor': '3/1'", simple_transient.export_for_schema()) + + simple_str = rs.create('SimpleStrategy', {'replication_factor': '3'}) + self.assertNotEqual(simple_transient, simple_str) + + # make token replica map + ring = [MD5Token(0), MD5Token(1), MD5Token(2)] + hosts = [Host('dc1.{}'.format(host), SimpleConvictionPolicy) for host in range(3)] + token_to_host = dict(zip(ring, hosts)) + self.assertEqual( + simple_transient.make_token_replica_map(token_to_host, ring), + simple_str.make_token_replica_map(token_to_host, ring) + ) + + def test_nts_replication_parsing(self): + """ Test equality between passing numeric and string replication factor for NTS """ + rs = ReplicationStrategy() + + nts_int = rs.create('NetworkTopologyStrategy', {'dc1': 3, 'dc2': 5}) + nts_str = rs.create('NetworkTopologyStrategy', {'dc1': '3', 'dc2': '5'}) + + self.assertEqual(nts_int.dc_replication_factors['dc1'], 3) + self.assertEqual(nts_str.dc_replication_factors['dc1'], 3) + + self.assertEqual(nts_int.export_for_schema(), nts_str.export_for_schema()) + self.assertEqual(nts_int, nts_str) + + # make token replica map + ring = [MD5Token(0), MD5Token(1), MD5Token(2)] + hosts = [Host('dc1.{}'.format(host), SimpleConvictionPolicy) for host in range(3)] + token_to_host = dict(zip(ring, hosts)) + self.assertEqual( + nts_int.make_token_replica_map(token_to_host, ring), + nts_str.make_token_replica_map(token_to_host, ring) + ) + + def test_nts_transient_parsing(self): + """ Test that we can PARSE a transient replication factor for NTS """ + rs = ReplicationStrategy() + + nts_transient = rs.create('NetworkTopologyStrategy', {'dc1': '3/1', 'dc2': '5/1'}) + self.assertEqual(nts_transient.dc_replication_factors['dc1'], '3/1') + self.assertEqual(nts_transient.dc_replication_factors['dc2'], '5/1') + self.assertIn("'dc1': '3/1', 'dc2': '5/1'", nts_transient.export_for_schema()) + + nts_str = rs.create('NetworkTopologyStrategy', {'dc1': '3', 'dc2': '5'}) + self.assertNotEqual(nts_transient, nts_str) + + # make token replica map + ring = [MD5Token(0), MD5Token(1), MD5Token(2)] + hosts = [Host('dc1.{}'.format(host), SimpleConvictionPolicy) for host in range(3)] + token_to_host = dict(zip(ring, hosts)) + self.assertEqual( + nts_transient.make_token_replica_map(token_to_host, ring), + nts_str.make_token_replica_map(token_to_host, ring) + ) + def test_nts_make_token_replica_map(self): token_to_host_owner = {} From f59be8da5b1a4c2ad1747bcf15e21eb07c44a1e9 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 26 Feb 2020 13:16:35 -0600 Subject: [PATCH 1782/2005] release 3.22: bump docs.yaml again --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index 4b34f6cb5f..fe5bbf548e 100644 --- a/docs.yaml +++ b/docs.yaml @@ -23,7 +23,7 @@ sections: CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - name: '3.22' - ref: 02055657 + ref: a1f8e102 - name: '3.21' ref: 5589d96b - name: '3.20' From 291ae2e4e16447db65c98c003163adb5f978b7e1 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 12 Mar 2020 19:26:21 -0500 Subject: [PATCH 1783/2005] 4.0 test changes: - Add TestCluster class to integration tests to allow easier setting of default options and updated all call points - Removed setting __defaults__ on stuff - Changed a bunch of '4.0' checks to be '4.0-a' - Enabled materialized views, sasi, and transient replication in standard test startup options - Updated materialized views to specify not null for primary keys as required in 4.0 - Added new protocol error message received from 4.0 in test_no_connection_refused_on_timeout --- tests/integration/__init__.py | 71 +++++---- tests/integration/advanced/__init__.py | 11 +- tests/integration/advanced/graph/__init__.py | 32 ++-- .../integration/advanced/graph/test_graph.py | 8 +- .../integration/advanced/test_adv_metadata.py | 5 +- tests/integration/advanced/test_auth.py | 14 +- .../integration/advanced/test_cont_paging.py | 6 +- .../test_cqlengine_where_operators.py | 7 +- .../advanced/test_unixsocketendpoint.py | 6 +- tests/integration/cloud/__init__.py | 3 +- tests/integration/cloud/test_cloud.py | 2 +- tests/integration/cqlengine/__init__.py | 5 +- .../cqlengine/advanced/test_cont_paging.py | 8 +- .../cqlengine/connections/test_connection.py | 16 +- .../cqlengine/query/test_queryset.py | 6 +- .../statements/test_base_statement.py | 5 +- .../integration/cqlengine/test_connections.py | 7 +- tests/integration/long/test_consistency.py | 41 ++--- tests/integration/long/test_failure_types.py | 9 +- tests/integration/long/test_ipv6.py | 15 +- tests/integration/long/test_large_data.py | 10 +- .../long/test_loadbalancingpolicies.py | 35 +++-- tests/integration/long/test_policies.py | 6 +- tests/integration/long/test_schema.py | 11 +- tests/integration/long/test_ssl.py | 24 +-- .../integration/long/test_topology_change.py | 5 +- tests/integration/long/utils.py | 2 +- .../standard/test_authentication.py | 14 +- .../test_authentication_misconfiguration.py | 5 +- .../standard/test_client_warnings.py | 5 +- tests/integration/standard/test_cluster.py | 144 +++++++++--------- tests/integration/standard/test_concurrent.py | 7 +- tests/integration/standard/test_connection.py | 36 +++-- .../standard/test_control_connection.py | 5 +- .../standard/test_custom_cluster.py | 14 +- .../standard/test_custom_payload.py | 6 +- .../standard/test_custom_protocol_handler.py | 22 +-- .../standard/test_cython_protocol_handlers.py | 28 ++-- tests/integration/standard/test_dse.py | 6 +- tests/integration/standard/test_metadata.py | 84 +++++----- tests/integration/standard/test_metrics.py | 36 +++-- tests/integration/standard/test_policies.py | 12 +- .../standard/test_prepared_statements.py | 16 +- tests/integration/standard/test_query.py | 66 ++++---- .../integration/standard/test_query_paging.py | 7 +- tests/integration/standard/test_routing.py | 6 +- .../standard/test_row_factories.py | 17 ++- tests/integration/standard/test_types.py | 33 ++-- tests/integration/standard/test_udts.py | 33 ++-- 49 files changed, 491 insertions(+), 481 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index d6f26acbcd..65cd6a2f1f 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -75,7 +75,7 @@ def get_server_versions(): if cass_version is not None: return (cass_version, cql_version) - c = Cluster() + c = TestCluster() s = c.connect() row = s.execute('SELECT cql_version, release_version FROM system.local')[0] @@ -199,33 +199,14 @@ def _get_dse_version_from_cass(cass_version): CCM_KWARGS['version'] = CCM_VERSION -#This changes the default contact_point parameter in Cluster -def set_default_cass_ip(): - if CASSANDRA_IP.startswith("127.0.0."): - return - defaults = list(Cluster.__init__.__defaults__) - defaults = [[CASSANDRA_IP]] + defaults[1:] - try: - Cluster.__init__.__defaults__ = tuple(defaults) - except: - Cluster.__init__.__func__.__defaults__ = tuple(defaults) - - -def set_default_beta_flag_true(): - defaults = list(Cluster.__init__.__defaults__) - defaults = (defaults[:28] + [True] + defaults[29:]) - try: - Cluster.__init__.__defaults__ = tuple(defaults) - except: - Cluster.__init__.__func__.__defaults__ = tuple(defaults) - - +ALLOW_BETA_PROTOCOL = False def get_default_protocol(): - if CASSANDRA_VERSION >= Version('4.0'): + if CASSANDRA_VERSION >= Version('4.0-a'): if DSE_VERSION: return ProtocolVersion.DSE_V2 else: - set_default_beta_flag_true() + global ALLOW_BETA_PROTOCOL + ALLOW_BETA_PROTOCOL = True return ProtocolVersion.V5 if CASSANDRA_VERSION >= Version('3.10'): if DSE_VERSION: @@ -254,7 +235,7 @@ def get_supported_protocol_versions(): 4.0(C*) -> 5(beta),4,3 4.0(DSE) -> DSE_v2, DSE_V1,4,3 ` """ - if CASSANDRA_VERSION >= Version('4.0'): + if CASSANDRA_VERSION >= Version('4.0-a'): if DSE_VERSION: return (3, 4, ProtocolVersion.DSE_V1, ProtocolVersion.DSE_V2) else: @@ -293,7 +274,7 @@ def get_unsupported_upper_protocol(): supported by the version of C* running """ - if CASSANDRA_VERSION >= Version('4.0'): + if CASSANDRA_VERSION >= Version('4.0-a'): if DSE_VERSION: return None else: @@ -341,9 +322,9 @@ def _id_and_mark(f): greaterthanorequalcass36 = unittest.skipUnless(CASSANDRA_VERSION >= Version('3.6'), 'Cassandra version 3.6 or greater required') greaterthanorequalcass3_10 = unittest.skipUnless(CASSANDRA_VERSION >= Version('3.10'), 'Cassandra version 3.10 or greater required') greaterthanorequalcass3_11 = unittest.skipUnless(CASSANDRA_VERSION >= Version('3.11'), 'Cassandra version 3.11 or greater required') -greaterthanorequalcass40 = unittest.skipUnless(CASSANDRA_VERSION >= Version('4.0'), 'Cassandra version 4.0 or greater required') -lessthanorequalcass40 = unittest.skipUnless(CASSANDRA_VERSION <= Version('4.0'), 'Cassandra version less or equal to 4.0 required') -lessthancass40 = unittest.skipUnless(CASSANDRA_VERSION < Version('4.0'), 'Cassandra version less than 4.0 required') +greaterthanorequalcass40 = unittest.skipUnless(CASSANDRA_VERSION >= Version('4.0-a'), 'Cassandra version 4.0 or greater required') +lessthanorequalcass40 = unittest.skipUnless(CASSANDRA_VERSION <= Version('4.0-a'), 'Cassandra version less or equal to 4.0 required') +lessthancass40 = unittest.skipUnless(CASSANDRA_VERSION < Version('4.0-a'), 'Cassandra version less than 4.0 required') lessthancass30 = unittest.skipUnless(CASSANDRA_VERSION < Version('3.0'), 'Cassandra version less then 3.0 required') greaterthanorequaldse68 = unittest.skipUnless(DSE_VERSION and DSE_VERSION >= Version('6.8'), "DSE 6.8 or greater required for this test") greaterthanorequaldse67 = unittest.skipUnless(DSE_VERSION and DSE_VERSION >= Version('6.7'), "DSE 6.7 or greater required for this test") @@ -469,7 +450,6 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, dse_cluster = True if DSE_VERSION else False if not workloads: workloads = [] - set_default_cass_ip() if ccm_options is None and DSE_VERSION: ccm_options = {"version": CCM_VERSION} @@ -527,9 +507,6 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, CCM_CLUSTER = DseCluster(path, cluster_name, **ccm_options) CCM_CLUSTER.set_configuration_options({'start_native_transport': True}) CCM_CLUSTER.set_configuration_options({'batch_size_warn_threshold_in_kb': 5}) - if Version(dse_version) >= Version('5.0'): - CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True}) - CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True}) if Version(dse_version) >= Version('5.1'): # For Inet4Address CCM_CLUSTER.set_dse_configuration_options({ @@ -565,6 +542,12 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True}) if Version(cassandra_version) >= Version('3.0'): CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True}) + if Version(cassandra_version) >= Version('4.0-a'): + CCM_CLUSTER.set_configuration_options({ + 'enable_materialized_views': True, + 'enable_sasi_indexes': True, + 'enable_transient_replication': True, + }) common.switch_cluster(path, cluster_name) CCM_CLUSTER.set_configuration_options(configuration_options) CCM_CLUSTER.populate(nodes, ipformat=ipformat) @@ -699,9 +682,9 @@ def setup_keyspace(ipformat=None, wait=True, protocol_version=None): _protocol_version = PROTOCOL_VERSION if not ipformat: - cluster = Cluster(protocol_version=_protocol_version) + cluster = TestCluster(protocol_version=_protocol_version) else: - cluster = Cluster(contact_points=["::1"], protocol_version=_protocol_version) + cluster = TestCluster(contact_points=["::1"], protocol_version=_protocol_version) session = cluster.connect() try: @@ -795,7 +778,7 @@ def create_keyspace(cls, rf): @classmethod def common_setup(cls, rf, keyspace_creation=True, create_class_table=False, **cluster_kwargs): - cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION, **cluster_kwargs) + cls.cluster = TestCluster(**cluster_kwargs) cls.session = cls.cluster.connect(wait_for_all_pools=True) cls.ks_name = cls.__name__.lower() if keyspace_creation: @@ -981,3 +964,19 @@ def assert_startswith(s, prefix): raise AssertionError( '{} does not start with {}'.format(repr(s), repr(prefix)) ) + + +class TestCluster(object): + DEFAULT_PROTOCOL_VERSION = default_protocol_version + DEFAULT_CASSANDRA_IP = CASSANDRA_IP + DEFAULT_ALLOW_BETA = ALLOW_BETA_PROTOCOL + + def __new__(cls, **kwargs): + if 'protocol_version' not in kwargs: + kwargs['protocol_version'] = cls.DEFAULT_PROTOCOL_VERSION + if 'contact_points' not in kwargs: + kwargs['contact_points'] = [cls.DEFAULT_CASSANDRA_IP] + if 'allow_beta_protocol_version' not in kwargs: + kwargs['allow_beta_protocol_version'] = cls.DEFAULT_ALLOW_BETA + return Cluster(**kwargs) + diff --git a/tests/integration/advanced/__init__.py b/tests/integration/advanced/__init__.py index c5da6c0154..b2820e037b 100644 --- a/tests/integration/advanced/__init__.py +++ b/tests/integration/advanced/__init__.py @@ -25,10 +25,8 @@ from ccmlib import common -from cassandra.cluster import Cluster - -from tests.integration import PROTOCOL_VERSION, get_server_versions, BasicKeyspaceUnitTestCase, \ - drop_keyspace_shutdown_cluster, get_node, USE_CASS_EXTERNAL, set_default_cass_ip +from tests.integration import get_server_versions, BasicKeyspaceUnitTestCase, \ + drop_keyspace_shutdown_cluster, get_node, USE_CASS_EXTERNAL, TestCluster from tests.integration import use_singledc, use_single_node, wait_for_node_socket, CASSANDRA_IP home = expanduser('~') @@ -97,7 +95,6 @@ def use_cluster_with_graph(num_nodes): when started all at once. """ if USE_CASS_EXTERNAL: - set_default_cass_ip() return # Create the cluster but don't start it. @@ -109,7 +106,7 @@ def use_cluster_with_graph(num_nodes): # Wait for spark master to start up spark_master_http = ("localhost", 7080) common.check_socket_listening(spark_master_http, timeout=60) - tmp_cluster = Cluster(protocol_version=PROTOCOL_VERSION) + tmp_cluster = TestCluster() # Start up remaining nodes. try: @@ -137,7 +134,7 @@ class BasicGeometricUnitTestCase(BasicKeyspaceUnitTestCase): @classmethod def common_dse_setup(cls, rf, keyspace_creation=True): - cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.cluster = TestCluster() cls.session = cls.cluster.connect() cls.ks_name = cls.__name__.lower() if keyspace_creation: diff --git a/tests/integration/advanced/graph/__init__.py b/tests/integration/advanced/graph/__init__.py index 6002d57f78..0573cf2557 100644 --- a/tests/integration/advanced/graph/__init__.py +++ b/tests/integration/advanced/graph/__init__.py @@ -160,14 +160,13 @@ def session_setup(self): ) ) - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={ - EXEC_PROFILE_GRAPH_DEFAULT: ep_graphson1, - EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT: ep_analytics, - "graphson1": ep_graphson1, - "graphson2": ep_graphson2, - "graphson3": ep_graphson3 - }) + self.cluster = TestCluster(execution_profiles={ + EXEC_PROFILE_GRAPH_DEFAULT: ep_graphson1, + EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT: ep_analytics, + "graphson1": ep_graphson1, + "graphson2": ep_graphson2, + "graphson3": ep_graphson3 + }) self.session = self.cluster.connect() self.ks_name = self._testMethodName.lower() @@ -276,14 +275,13 @@ def session_setup(self): ) ) - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={ - EXEC_PROFILE_GRAPH_DEFAULT: ep_graphson1, - EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT: ep_analytics, - "graphson1": ep_graphson1, - "graphson2": ep_graphson2, - "graphson3": ep_graphson3 - }) + self.cluster = TestCluster(execution_profiles={ + EXEC_PROFILE_GRAPH_DEFAULT: ep_graphson1, + EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT: ep_analytics, + "graphson1": ep_graphson1, + "graphson2": ep_graphson2, + "graphson3": ep_graphson3 + }) self.session = self.cluster.connect() self.ks_name = self._testMethodName.lower() @@ -362,7 +360,7 @@ class BasicSharedGraphUnitTestCase(BasicKeyspaceUnitTestCase): @classmethod def session_setup(cls): - cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.cluster = TestCluster() cls.session = cls.cluster.connect() cls.ks_name = cls.__name__.lower() cls.cass_version, cls.cql_version = get_server_versions() diff --git a/tests/integration/advanced/graph/test_graph.py b/tests/integration/advanced/graph/test_graph.py index 898779f789..020d631d69 100644 --- a/tests/integration/advanced/graph/test_graph.py +++ b/tests/integration/advanced/graph/test_graph.py @@ -19,12 +19,13 @@ from cassandra.protocol import SyntaxException from cassandra.policies import WhiteListRoundRobinPolicy from cassandra.cluster import NoHostAvailable -from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT, GraphExecutionProfile, Cluster +from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT, GraphExecutionProfile, from cassandra.graph import single_object_row_factory, Vertex, graph_object_row_factory, \ graph_graphson2_row_factory, graph_graphson3_row_factory from cassandra.util import SortedSet -from tests.integration import PROTOCOL_VERSION, DSE_VERSION, greaterthanorequaldse51, greaterthanorequaldse68, requiredse +from tests.integration import DSE_VERSION, greaterthanorequaldse51, greaterthanorequaldse68, \ + requiredse, TestCluster from tests.integration.advanced.graph import BasicGraphUnitTestCase, GraphUnitTestCase, \ GraphProtocol, ClassicGraphSchema, CoreGraphSchema, use_single_node_with_graph @@ -149,8 +150,7 @@ def test_graph_profile(self): exec_short_timeout.graph_options.graph_name = self.graph_name # Add a single execution policy on cluster creation - local_cluster = Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={"exec_dif_factory": exec_dif_factory}) + local_cluster = TestCluster(execution_profiles={"exec_dif_factory": exec_dif_factory}) local_session = local_cluster.connect() self.addCleanup(local_cluster.shutdown) diff --git a/tests/integration/advanced/test_adv_metadata.py b/tests/integration/advanced/test_adv_metadata.py index 52944aabdf..b3af6fa5d1 100644 --- a/tests/integration/advanced/test_adv_metadata.py +++ b/tests/integration/advanced/test_adv_metadata.py @@ -14,12 +14,11 @@ from packaging.version import Version -from cassandra.cluster import Cluster from tests.integration import (BasicExistingKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCaseRF1, greaterthanorequaldse51, greaterthanorequaldse60, greaterthanorequaldse68, use_single_node, - DSE_VERSION, requiredse, PROTOCOL_VERSION) + DSE_VERSION, requiredse, TestCluster) try: import unittest2 as unittest @@ -393,4 +392,4 @@ def test_connection_on_graph_schema_error(self): """ % (self.ks_name,)) self.session.execute('TRUNCATE system_schema.vertices') - Cluster(protocol_version=PROTOCOL_VERSION).connect().shutdown() + TestCluster().connect().shutdown() diff --git a/tests/integration/advanced/test_auth.py b/tests/integration/advanced/test_auth.py index 59bd3dec5c..748304aef4 100644 --- a/tests/integration/advanced/test_auth.py +++ b/tests/integration/advanced/test_auth.py @@ -26,11 +26,11 @@ from cassandra.auth import (DSEGSSAPIAuthProvider, DSEPlainTextAuthProvider, SaslAuthProvider, TransitionalModePlainTextAuthProvider) -from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT, Cluster, NoHostAvailable +from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT, NoHostAvailable from cassandra.protocol import Unauthorized from cassandra.query import SimpleStatement from tests.integration import (get_cluster, greaterthanorequaldse51, - remove_cluster, requiredse, DSE_VERSION) + remove_cluster, requiredse, DSE_VERSION, TestCluster) from tests.integration.advanced import ADS_HOME, use_single_node_with_graph from tests.integration.advanced.graph import reset_graph, ClassicGraphFixtures @@ -157,7 +157,7 @@ def connect_and_query(self, auth_provider, query=None): Runs a simple system query with the auth_provided specified. """ os.environ['KRB5_CONFIG'] = self.krb_conf - self.cluster = Cluster(auth_provider=auth_provider) + self.cluster = TestCluster(auth_provider=auth_provider) self.session = self.cluster.connect() query = query if query else "SELECT * FROM system.local" statement = SimpleStatement(query) @@ -320,7 +320,7 @@ def _remove_proxy_setup(self): os.environ['KRB5_CONFIG'] = self.krb_conf self.refresh_kerberos_tickets(self.cassandra_keytab, "cassandra@DATASTAX.COM", self.krb_conf) auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal='cassandra@DATASTAX.COM') - cluster = Cluster(auth_provider=auth_provider) + cluster = TestCluster(auth_provider=auth_provider) session = cluster.connect() session.execute("REVOKE PROXY.LOGIN ON ROLE '{0}' FROM '{1}'".format('charlie@DATASTAX.COM', 'bob@DATASTAX.COM')) @@ -338,7 +338,7 @@ def _setup_for_proxy(self, grant=True): os.environ['KRB5_CONFIG'] = self.krb_conf self.refresh_kerberos_tickets(self.cassandra_keytab, "cassandra@DATASTAX.COM", self.krb_conf) auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal='cassandra@DATASTAX.COM') - cluster = Cluster(auth_provider=auth_provider) + cluster = TestCluster(auth_provider=auth_provider) session = cluster.connect() stmts = [ @@ -403,7 +403,7 @@ def setUpClass(self): # Create users and test keyspace self.user_role = 'user1' self.server_role = 'server' - self.root_cluster = Cluster(auth_provider=DSEPlainTextAuthProvider('cassandra', 'cassandra')) + self.root_cluster = TestCluster(auth_provider=DSEPlainTextAuthProvider('cassandra', 'cassandra')) self.root_session = self.root_cluster.connect() stmts = [ @@ -469,7 +469,7 @@ def get_sasl_options(self, mechanism='PLAIN'): return sasl_options def connect_and_query(self, auth_provider, execute_as=None, query="SELECT * FROM testproxy.testproxy"): - self.cluster = Cluster(auth_provider=auth_provider) + self.cluster = TestCluster(auth_provider=auth_provider) self.session = self.cluster.connect() rs = self.session.execute(query, execute_as=execute_as) return rs diff --git a/tests/integration/advanced/test_cont_paging.py b/tests/integration/advanced/test_cont_paging.py index 82b3fe2960..c5f1cbfff3 100644 --- a/tests/integration/advanced/test_cont_paging.py +++ b/tests/integration/advanced/test_cont_paging.py @@ -13,7 +13,7 @@ # limitations under the License. from tests.integration import use_singledc, greaterthanorequaldse51, BasicSharedKeyspaceUnitTestCaseRF3WM, \ - DSE_VERSION, ProtocolVersion, greaterthanorequaldse60, requiredse + DSE_VERSION, ProtocolVersion, greaterthanorequaldse60, requiredse, TestCluster import logging log = logging.getLogger(__name__) @@ -28,7 +28,7 @@ from packaging.version import Version import time -from cassandra.cluster import Cluster, ExecutionProfile, ContinuousPagingOptions +from cassandra.cluster import ExecutionProfile, ContinuousPagingOptions from cassandra.concurrent import execute_concurrent from cassandra.query import SimpleStatement @@ -64,7 +64,7 @@ def tearDownClass(cls): @classmethod def create_cluster(cls): - cls.cluster_with_profiles = Cluster(protocol_version=cls.protocol_version, execution_profiles=cls.execution_profiles) + cls.cluster_with_profiles = TestCluster(protocol_version=cls.protocol_version, execution_profiles=cls.execution_profiles) cls.session_with_profiles = cls.cluster_with_profiles.connect(wait_for_all_pools=True) statements_and_params = zip( diff --git a/tests/integration/advanced/test_cqlengine_where_operators.py b/tests/integration/advanced/test_cqlengine_where_operators.py index 9497feabd7..8ade3db09d 100644 --- a/tests/integration/advanced/test_cqlengine_where_operators.py +++ b/tests/integration/advanced/test_cqlengine_where_operators.py @@ -20,13 +20,12 @@ import os import time -from cassandra.cluster import Cluster from cassandra.cqlengine import columns, connection, models from cassandra.cqlengine.management import (CQLENG_ALLOW_SCHEMA_MANAGEMENT, create_keyspace_simple, drop_table, sync_table) from cassandra.cqlengine.statements import IsNotNull -from tests.integration import DSE_VERSION, requiredse, CASSANDRA_IP, greaterthanorequaldse60 +from tests.integration import DSE_VERSION, requiredse, CASSANDRA_IP, greaterthanorequaldse60, TestCluster from tests.integration.advanced import use_single_node_with_graph_and_solr from tests.integration.cqlengine import DEFAULT_KEYSPACE @@ -65,7 +64,7 @@ class IsNotNullTests(unittest.TestCase): @classmethod def setUpClass(cls): if DSE_VERSION: - cls.cluster = Cluster() + cls.cluster = TestCluster() @greaterthanorequaldse60 def test_is_not_null_execution(self): @@ -81,7 +80,7 @@ def test_is_not_null_execution(self): @test_category cqlengine """ - cluster = Cluster() + cluster = TestCluster() self.addCleanup(cluster.shutdown) session = cluster.connect() diff --git a/tests/integration/advanced/test_unixsocketendpoint.py b/tests/integration/advanced/test_unixsocketendpoint.py index e435314637..1f6665964a 100644 --- a/tests/integration/advanced/test_unixsocketendpoint.py +++ b/tests/integration/advanced/test_unixsocketendpoint.py @@ -20,12 +20,12 @@ import subprocess import logging -from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT +from cassandra.cluster import ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.connection import UnixSocketEndPoint from cassandra.policies import WhiteListRoundRobinPolicy, RoundRobinPolicy from tests import notwindows -from tests.integration import use_single_node +from tests.integration import use_single_node, TestCluster log = logging.getLogger() log.setLevel('DEBUG') @@ -65,7 +65,7 @@ def setUpClass(cls): lbp = UnixSocketWhiteListRoundRobinPolicy([UNIX_SOCKET_PATH]) ep = ExecutionProfile(load_balancing_policy=lbp) endpoint = UnixSocketEndPoint(UNIX_SOCKET_PATH) - cls.cluster = Cluster([endpoint], execution_profiles={EXEC_PROFILE_DEFAULT: ep}) + cls.cluster = TestCluster([endpoint], execution_profiles={EXEC_PROFILE_DEFAULT: ep}) @classmethod def tearDownClass(cls): diff --git a/tests/integration/cloud/__init__.py b/tests/integration/cloud/__init__.py index 83f5e21ce3..ca05ae4ce5 100644 --- a/tests/integration/cloud/__init__.py +++ b/tests/integration/cloud/__init__.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License +from cassandra.cluster import Cluster try: import unittest2 as unittest @@ -20,8 +21,6 @@ import os import subprocess -from cassandra.cluster import Cluster - from tests.integration import CLOUD_PROXY_PATH, USE_CASS_EXTERNAL diff --git a/tests/integration/cloud/test_cloud.py b/tests/integration/cloud/test_cloud.py index 31b5367f3c..ef76b71303 100644 --- a/tests/integration/cloud/test_cloud.py +++ b/tests/integration/cloud/test_cloud.py @@ -30,7 +30,7 @@ from mock import patch -from tests.integration import requirescloudproxy +from tests.integration import requirescloudproxy, TestCluster from tests.util import wait_until_not_raised from tests.integration.cloud import CloudProxyCluster, CLOUD_PROXY_SERVER diff --git a/tests/integration/cqlengine/__init__.py b/tests/integration/cqlengine/__init__.py index d098ea7014..e68baaabf1 100644 --- a/tests/integration/cqlengine/__init__.py +++ b/tests/integration/cqlengine/__init__.py @@ -24,7 +24,8 @@ from cassandra.cqlengine.management import create_keyspace_simple, drop_keyspace, CQLENG_ALLOW_SCHEMA_MANAGEMENT import cassandra -from tests.integration import get_server_versions, use_single_node, PROTOCOL_VERSION, CASSANDRA_IP, set_default_cass_ip +from tests.integration import get_server_versions, use_single_node, PROTOCOL_VERSION, CASSANDRA_IP, ALLOW_BETA_PROTOCOL + DEFAULT_KEYSPACE = 'cqlengine_test' @@ -35,7 +36,6 @@ def setup_package(): warnings.simplefilter('always') # for testing warnings, make sure all are let through os.environ[CQLENG_ALLOW_SCHEMA_MANAGEMENT] = '1' - set_default_cass_ip() use_single_node() setup_connection(DEFAULT_KEYSPACE) @@ -55,6 +55,7 @@ def setup_connection(keyspace_name): connection.setup([CASSANDRA_IP], consistency=ConsistencyLevel.ONE, protocol_version=PROTOCOL_VERSION, + allow_beta_protocol_version=ALLOW_BETA_PROTOCOL, default_keyspace=keyspace_name) diff --git a/tests/integration/cqlengine/advanced/test_cont_paging.py b/tests/integration/cqlengine/advanced/test_cont_paging.py index ec7b196f1a..38b4355312 100644 --- a/tests/integration/cqlengine/advanced/test_cont_paging.py +++ b/tests/integration/cqlengine/advanced/test_cont_paging.py @@ -21,13 +21,13 @@ from packaging.version import Version -from cassandra.cluster import (EXEC_PROFILE_DEFAULT, Cluster, +from cassandra.cluster import (EXEC_PROFILE_DEFAULT, ContinuousPagingOptions, ExecutionProfile, ProtocolVersion) from cassandra.cqlengine import columns, connection, models from cassandra.cqlengine.management import drop_table, sync_table from tests.integration import (DSE_VERSION, greaterthanorequaldse51, - greaterthanorequaldse60, requiredse) + greaterthanorequaldse60, requiredse, TestCluster) class TestMultiKeyModel(models.Model): @@ -76,8 +76,8 @@ def tearDownClass(cls): def _create_cluster_with_cp_options(cls, name, cp_options): execution_profiles = {EXEC_PROFILE_DEFAULT: ExecutionProfile(continuous_paging_options=cp_options)} - cls.cluster_default = Cluster(protocol_version=cls.protocol_version, - execution_profiles=execution_profiles) + cls.cluster_default = TestCluster(protocol_version=cls.protocol_version, + execution_profiles=execution_profiles) cls.session_default = cls.cluster_default.connect(wait_for_all_pools=True) connection.register_connection(name, default=True, session=cls.session_default) cls.connections.add(name) diff --git a/tests/integration/cqlengine/connections/test_connection.py b/tests/integration/cqlengine/connections/test_connection.py index bbc0231565..c46df31280 100644 --- a/tests/integration/cqlengine/connections/test_connection.py +++ b/tests/integration/cqlengine/connections/test_connection.py @@ -22,11 +22,11 @@ from cassandra.cqlengine.models import Model from cassandra.cqlengine import columns, connection, models from cassandra.cqlengine.management import sync_table -from cassandra.cluster import Cluster, ExecutionProfile, _clusters_for_shutdown, _ConfigMode, EXEC_PROFILE_DEFAULT +from cassandra.cluster import ExecutionProfile, _clusters_for_shutdown, _ConfigMode, EXEC_PROFILE_DEFAULT from cassandra.policies import RoundRobinPolicy from cassandra.query import dict_factory -from tests.integration import CASSANDRA_IP, PROTOCOL_VERSION, execute_with_long_wait_retry, local +from tests.integration import CASSANDRA_IP, PROTOCOL_VERSION, execute_with_long_wait_retry, local, TestCluster from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration.cqlengine import DEFAULT_KEYSPACE, setup_connection @@ -76,7 +76,7 @@ def setUpClass(cls): cls.keyspace1 = 'ctest1' cls.keyspace2 = 'ctest2' super(SeveralConnectionsTest, cls).setUpClass() - cls.setup_cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.setup_cluster = TestCluster() cls.setup_session = cls.setup_cluster.connect() ddl = "CREATE KEYSPACE {0} WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': '{1}'}}".format(cls.keyspace1, 1) execute_with_long_wait_retry(cls.setup_session, ddl) @@ -93,7 +93,7 @@ def tearDownClass(cls): models.DEFAULT_KEYSPACE def setUp(self): - self.c = Cluster(protocol_version=PROTOCOL_VERSION) + self.c = TestCluster() self.session1 = self.c.connect(keyspace=self.keyspace1) self.session1.row_factory = dict_factory self.session2 = self.c.connect(keyspace=self.keyspace2) @@ -149,7 +149,7 @@ def test_connection_with_legacy_settings(self): self.assertEqual(conn.cluster._config_mode, _ConfigMode.LEGACY) def test_connection_from_session_with_execution_profile(self): - cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=dict_factory)}) + cluster = TestCluster(execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=dict_factory)}) session = cluster.connect() connection.default() connection.set_session(session) @@ -157,7 +157,7 @@ def test_connection_from_session_with_execution_profile(self): self.assertEqual(conn.cluster._config_mode, _ConfigMode.PROFILES) def test_connection_from_session_with_legacy_settings(self): - cluster = Cluster(load_balancing_policy=RoundRobinPolicy()) + cluster = TestCluster(load_balancing_policy=RoundRobinPolicy()) session = cluster.connect() session.row_factory = dict_factory connection.set_session(session) @@ -165,7 +165,7 @@ def test_connection_from_session_with_legacy_settings(self): self.assertEqual(conn.cluster._config_mode, _ConfigMode.LEGACY) def test_uncommitted_session_uses_legacy(self): - cluster = Cluster() + cluster = TestCluster() session = cluster.connect() session.row_factory = dict_factory connection.set_session(session) @@ -186,7 +186,7 @@ def test_legacy_insert_query(self): self.assertEqual(ConnectionModel.objects(key=0)[0].some_data, 'text0') def test_execution_profile_insert_query(self): - cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=dict_factory)}) + cluster = TestCluster(execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=dict_factory)}) session = cluster.connect() connection.default() connection.set_session(session) diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index e5a15b7c4b..6bc9d701b8 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -23,7 +23,7 @@ from packaging.version import Version import uuid -from cassandra.cluster import Cluster, Session +from cassandra.cluster import Session from cassandra import InvalidRequest from tests.integration.cqlengine.base import BaseCassEngTestCase from cassandra.cqlengine.connection import NOT_SET @@ -42,7 +42,7 @@ from cassandra.util import uuid_from_time from cassandra.cqlengine.connection import get_session from tests.integration import PROTOCOL_VERSION, CASSANDRA_VERSION, greaterthancass20, greaterthancass21, \ - greaterthanorequalcass30 + greaterthanorequalcass30, TestCluster from tests.integration.cqlengine import execute_count, DEFAULT_KEYSPACE @@ -775,7 +775,7 @@ def test_custom_indexed_field_can_be_queried(self): with self.assertRaises(InvalidRequest): list(CustomIndexedTestModel.objects.filter(description__gte='test')) - with Cluster().connect() as session: + with TestCluster().connect() as session: session.execute("CREATE INDEX custom_index_cqlengine ON {}.{} (description)". format(DEFAULT_KEYSPACE, CustomIndexedTestModel._table_name)) diff --git a/tests/integration/cqlengine/statements/test_base_statement.py b/tests/integration/cqlengine/statements/test_base_statement.py index db7d1ebd6a..474c45d02b 100644 --- a/tests/integration/cqlengine/statements/test_base_statement.py +++ b/tests/integration/cqlengine/statements/test_base_statement.py @@ -20,7 +20,6 @@ import six from cassandra.query import FETCH_SIZE_UNSET -from cassandra.cluster import Cluster, ConsistencyLevel from cassandra.cqlengine.statements import BaseCQLStatement from cassandra.cqlengine.management import sync_table, drop_table from cassandra.cqlengine.statements import InsertStatement, UpdateStatement, SelectStatement, DeleteStatement, \ @@ -30,7 +29,7 @@ from tests.integration.cqlengine.base import BaseCassEngTestCase, TestQueryUpdateModel from tests.integration.cqlengine import DEFAULT_KEYSPACE -from tests.integration import greaterthanorequalcass3_10 +from tests.integration import greaterthanorequalcass3_10, TestCluster from cassandra.cqlengine.connection import execute @@ -116,7 +115,7 @@ def test_like_operator(self): @test_category data_types:object_mapper """ - cluster = Cluster() + cluster = TestCluster() session = cluster.connect() self.addCleanup(cluster.shutdown) diff --git a/tests/integration/cqlengine/test_connections.py b/tests/integration/cqlengine/test_connections.py index 10dee66ddc..15adff3380 100644 --- a/tests/integration/cqlengine/test_connections.py +++ b/tests/integration/cqlengine/test_connections.py @@ -13,7 +13,6 @@ # limitations under the License. from cassandra import InvalidRequest -from cassandra.cluster import Cluster from cassandra.cluster import NoHostAvailable from cassandra.cqlengine import columns, CQLEngineException from cassandra.cqlengine import connection as conn @@ -23,7 +22,7 @@ from tests.integration.cqlengine import setup_connection, DEFAULT_KEYSPACE from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration.cqlengine.query import test_queryset -from tests.integration import local, CASSANDRA_IP +from tests.integration import local, CASSANDRA_IP, TestCluster class TestModel(Model): @@ -227,7 +226,7 @@ def test_connection_creation_from_session(self): @test_category object_mapper """ - cluster = Cluster([CASSANDRA_IP]) + cluster = TestCluster() session = cluster.connect() connection_name = 'from_session' conn.register_connection(connection_name, session=session) @@ -258,7 +257,7 @@ def test_connection_param_validation(self): @test_category object_mapper """ - cluster = Cluster([CASSANDRA_IP]) + cluster = TestCluster() session = cluster.connect() with self.assertRaises(CQLEngineException): conn.register_connection("bad_coonection1", session=session, consistency="not_null") diff --git a/tests/integration/long/test_consistency.py b/tests/integration/long/test_consistency.py index a4507a9bf0..bbf446861a 100644 --- a/tests/integration/long/test_consistency.py +++ b/tests/integration/long/test_consistency.py @@ -19,10 +19,10 @@ import traceback from cassandra import ConsistencyLevel, OperationTimedOut, ReadTimeout, WriteTimeout, Unavailable -from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT +from cassandra.cluster import ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.policies import TokenAwarePolicy, RoundRobinPolicy, DowngradingConsistencyRetryPolicy from cassandra.query import SimpleStatement -from tests.integration import use_singledc, PROTOCOL_VERSION, execute_until_pass +from tests.integration import use_singledc, execute_until_pass, TestCluster from tests.integration.long.utils import ( force_stop, create_schema, wait_for_down, wait_for_up, start, CoordinatorStats @@ -129,8 +129,9 @@ def _assert_reads_fail(self, session, keyspace, consistency_levels): pass def _test_tokenaware_one_node_down(self, keyspace, rf, accepted): - cluster = Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(TokenAwarePolicy(RoundRobinPolicy()))}) + cluster = TestCluster( + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(TokenAwarePolicy(RoundRobinPolicy()))} + ) session = cluster.connect(wait_for_all_pools=True) wait_for_up(cluster, 1) wait_for_up(cluster, 2) @@ -180,8 +181,9 @@ def test_rfthree_tokenaware_one_node_down(self): def test_rfthree_tokenaware_none_down(self): keyspace = 'test_rfthree_tokenaware_none_down' - cluster = Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(TokenAwarePolicy(RoundRobinPolicy()))}) + cluster = TestCluster( + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(TokenAwarePolicy(RoundRobinPolicy()))} + ) session = cluster.connect(wait_for_all_pools=True) wait_for_up(cluster, 1) wait_for_up(cluster, 2) @@ -203,9 +205,10 @@ def test_rfthree_tokenaware_none_down(self): cluster.shutdown() def _test_downgrading_cl(self, keyspace, rf, accepted): - cluster = Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(TokenAwarePolicy(RoundRobinPolicy()), - DowngradingConsistencyRetryPolicy())}) + cluster = TestCluster(execution_profiles={ + EXEC_PROFILE_DEFAULT: ExecutionProfile(TokenAwarePolicy(RoundRobinPolicy()), + DowngradingConsistencyRetryPolicy()) + }) session = cluster.connect(wait_for_all_pools=True) create_schema(cluster, session, keyspace, replication_factor=rf) @@ -246,16 +249,18 @@ def test_rftwo_downgradingcl(self): def test_rfthree_roundrobin_downgradingcl(self): keyspace = 'test_rfthree_roundrobin_downgradingcl' - with Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(RoundRobinPolicy(), - DowngradingConsistencyRetryPolicy())}) as cluster: + with TestCluster(execution_profiles={ + EXEC_PROFILE_DEFAULT: ExecutionProfile(RoundRobinPolicy(), + DowngradingConsistencyRetryPolicy()) + }) as cluster: self.rfthree_downgradingcl(cluster, keyspace, True) def test_rfthree_tokenaware_downgradingcl(self): keyspace = 'test_rfthree_tokenaware_downgradingcl' - with Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(TokenAwarePolicy(RoundRobinPolicy()), - DowngradingConsistencyRetryPolicy())}) as cluster: + with TestCluster(execution_profiles={ + EXEC_PROFILE_DEFAULT: ExecutionProfile(TokenAwarePolicy(RoundRobinPolicy()), + DowngradingConsistencyRetryPolicy()) + }) as cluster: self.rfthree_downgradingcl(cluster, keyspace, False) def rfthree_downgradingcl(self, cluster, keyspace, roundrobin): @@ -334,7 +339,7 @@ def test_pool_with_host_down(self): all_contact_points = ["127.0.0.1", "127.0.0.2", "127.0.0.3"] # Connect up and find out which host will bet queries routed to to first - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() cluster.connect(wait_for_all_pools=True) hosts = cluster.metadata.all_hosts() address = hosts[0].address @@ -344,13 +349,13 @@ def test_pool_with_host_down(self): # We now register a cluster that has it's Control Connection NOT on the node that we are shutting down. # We do this so we don't miss the event contact_point = '127.0.0.{0}'.format(self.get_node_not_x(node_to_stop)) - cluster = Cluster(contact_points=[contact_point], protocol_version=PROTOCOL_VERSION) + cluster = TestCluster(contact_points=[contact_point]) cluster.connect(wait_for_all_pools=True) try: force_stop(node_to_stop) wait_for_down(cluster, node_to_stop) # Attempt a query against that node. It should complete - cluster2 = Cluster(contact_points=all_contact_points, protocol_version=PROTOCOL_VERSION) + cluster2 = TestCluster(contact_points=all_contact_points) session2 = cluster2.connect() session2.execute("SELECT * FROM system.local") finally: diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py index 25854a57f7..6bdff8d15d 100644 --- a/tests/integration/long/test_failure_types.py +++ b/tests/integration/long/test_failure_types.py @@ -25,13 +25,13 @@ ConsistencyLevel, OperationTimedOut, ReadTimeout, WriteTimeout, ReadFailure, WriteFailure, FunctionFailure, ProtocolVersion, ) -from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT +from cassandra.cluster import ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.concurrent import execute_concurrent_with_args from cassandra.query import SimpleStatement from tests.integration import ( use_singledc, PROTOCOL_VERSION, get_cluster, setup_keyspace, remove_cluster, get_node, start_cluster_wait_for_up, requiresmallclockgranularity, - local, CASSANDRA_VERSION) + local, CASSANDRA_VERSION, TestCluster) try: @@ -83,7 +83,7 @@ def setUp(self): raise unittest.SkipTest( "Native protocol 4,0+ is required for custom payloads, currently using %r" % (PROTOCOL_VERSION,)) - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.cluster = TestCluster() self.session = self.cluster.connect() self.nodes_currently_failing = [] self.node1, self.node2, self.node3 = get_cluster().nodes.values() @@ -332,8 +332,7 @@ def setUp(self): """ Setup sessions and pause node1 """ - self.cluster = Cluster( - protocol_version=PROTOCOL_VERSION, + self.cluster = TestCluster( execution_profiles={ EXEC_PROFILE_DEFAULT: ExecutionProfile( load_balancing_policy=HostFilterPolicy( diff --git a/tests/integration/long/test_ipv6.py b/tests/integration/long/test_ipv6.py index 5f2bdbddf3..a49c1677e8 100644 --- a/tests/integration/long/test_ipv6.py +++ b/tests/integration/long/test_ipv6.py @@ -15,11 +15,11 @@ import os, socket, errno from ccmlib import common -from cassandra.cluster import Cluster, NoHostAvailable +from cassandra.cluster import NoHostAvailable from cassandra.io.asyncorereactor import AsyncoreConnection from tests import is_monkey_patched -from tests.integration import use_cluster, remove_cluster, PROTOCOL_VERSION +from tests.integration import use_cluster, remove_cluster, TestCluster if is_monkey_patched(): LibevConnection = -1 @@ -75,8 +75,7 @@ class IPV6ConnectionTest(object): connection_class = None def test_connect(self): - cluster = Cluster(connection_class=self.connection_class, contact_points=['::1'], connect_timeout=10, - protocol_version=PROTOCOL_VERSION) + cluster = TestCluster(connection_class=self.connection_class, contact_points=['::1'], connect_timeout=10) session = cluster.connect() future = session.execute_async("SELECT * FROM system.local") future.result() @@ -84,16 +83,16 @@ def test_connect(self): cluster.shutdown() def test_error(self): - cluster = Cluster(connection_class=self.connection_class, contact_points=['::1'], port=9043, - connect_timeout=10, protocol_version=PROTOCOL_VERSION) + cluster = TestCluster(connection_class=self.connection_class, contact_points=['::1'], port=9043, + connect_timeout=10) self.assertRaisesRegexp(NoHostAvailable, '\(\'Unable to connect.*%s.*::1\', 9043.*Connection refused.*' % errno.ECONNREFUSED, cluster.connect) def test_error_multiple(self): if len(socket.getaddrinfo('localhost', 9043, socket.AF_UNSPEC, socket.SOCK_STREAM)) < 2: raise unittest.SkipTest('localhost only resolves one address') - cluster = Cluster(connection_class=self.connection_class, contact_points=['localhost'], port=9043, - connect_timeout=10, protocol_version=PROTOCOL_VERSION) + cluster = TestCluster(connection_class=self.connection_class, contact_points=['localhost'], port=9043, + connect_timeout=10) self.assertRaisesRegexp(NoHostAvailable, '\(\'Unable to connect.*Tried connecting to \[\(.*\(.*\].*Last error', cluster.connect) diff --git a/tests/integration/long/test_large_data.py b/tests/integration/long/test_large_data.py index 071268d86c..ce7e4398da 100644 --- a/tests/integration/long/test_large_data.py +++ b/tests/integration/long/test_large_data.py @@ -21,10 +21,10 @@ import logging, sys, traceback, time from cassandra import ConsistencyLevel, OperationTimedOut, WriteTimeout -from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT +from cassandra.cluster import ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.query import dict_factory from cassandra.query import SimpleStatement -from tests.integration import use_singledc, PROTOCOL_VERSION +from tests.integration import use_singledc, PROTOCOL_VERSION, TestCluster from tests.integration.long.utils import create_schema try: @@ -61,9 +61,9 @@ def setUp(self): self.keyspace = 'large_data' def make_session_and_keyspace(self): - cluster = Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(request_timeout=20, - row_factory=dict_factory)}) + cluster = TestCluster(execution_profiles={ + EXEC_PROFILE_DEFAULT: ExecutionProfile(request_timeout=20, row_factory=dict_factory) + }) session = cluster.connect() create_schema(cluster, session, self.keyspace) return session diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index 8a5b7fe4c8..c3bf911ed0 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -19,7 +19,7 @@ from cassandra import ConsistencyLevel, Unavailable, OperationTimedOut, ReadTimeout, ReadFailure, \ WriteTimeout, WriteFailure -from cassandra.cluster import Cluster, NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT +from cassandra.cluster import NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.concurrent import execute_concurrent_with_args from cassandra.metadata import murmur3 from cassandra.policies import ( @@ -29,7 +29,7 @@ ) from cassandra.query import SimpleStatement -from tests.integration import use_singledc, use_multidc, remove_cluster, PROTOCOL_VERSION +from tests.integration import use_singledc, use_multidc, remove_cluster, TestCluster from tests.integration.long.utils import (wait_for_up, create_schema, CoordinatorStats, force_stop, wait_for_down, decommission, start, @@ -62,8 +62,11 @@ def teardown_class(cls): def _connect_probe_cluster(self): if not self.probe_cluster: # distinct cluster so we can see the status of nodes ignored by the LBP being tested - self.probe_cluster = Cluster(schema_metadata_enabled=False, token_metadata_enabled=False, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=RoundRobinPolicy())}) + self.probe_cluster = TestCluster( + schema_metadata_enabled=False, + token_metadata_enabled=False, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=RoundRobinPolicy())} + ) self.probe_session = self.probe_cluster.connect() def _wait_for_nodes_up(self, nodes, cluster=None): @@ -90,8 +93,8 @@ def _wait_for_nodes_down(self, nodes, cluster=None): def _cluster_session_with_lbp(self, lbp): # create a cluster with no delay on events - cluster = Cluster(protocol_version=PROTOCOL_VERSION, topology_event_refresh_window=0, status_event_refresh_window=0, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=lbp)}) + cluster = TestCluster(topology_event_refresh_window=0, status_event_refresh_window=0, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=lbp)}) session = cluster.connect() return cluster, session @@ -180,7 +183,7 @@ def test_token_aware_is_used_by_default(self): @test_category load_balancing:token_aware """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() if murmur3 is not None: self.assertTrue(isinstance(cluster.profile_manager.default.load_balancing_policy, TokenAwarePolicy)) @@ -659,11 +662,14 @@ def test_white_list(self): use_singledc() keyspace = 'test_white_list' - cluster = Cluster(('127.0.0.2',), protocol_version=PROTOCOL_VERSION, - topology_event_refresh_window=0, status_event_refresh_window=0, - execution_profiles={EXEC_PROFILE_DEFAULT: - ExecutionProfile(load_balancing_policy= - WhiteListRoundRobinPolicy((IP_FORMAT % 2,)))}) + cluster = TestCluster( + contact_points=('127.0.0.2',), topology_event_refresh_window=0, status_event_refresh_window=0, + execution_profiles={ + EXEC_PROFILE_DEFAULT: ExecutionProfile( + load_balancing_policy=WhiteListRoundRobinPolicy((IP_FORMAT % 2,)) + ) + } + ) session = cluster.connect() self._wait_for_nodes_up([1, 2, 3]) @@ -709,9 +715,8 @@ def test_black_list_with_host_filter_policy(self): child_policy=RoundRobinPolicy(), predicate=lambda host: host.address != ignored_address ) - cluster = Cluster( - (IP_FORMAT % 1,), - protocol_version=PROTOCOL_VERSION, + cluster = TestCluster( + contact_points=(IP_FORMAT % 1,), topology_event_refresh_window=0, status_event_refresh_window=0, execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=hfp)} diff --git a/tests/integration/long/test_policies.py b/tests/integration/long/test_policies.py index d694476fb5..0648e6cc93 100644 --- a/tests/integration/long/test_policies.py +++ b/tests/integration/long/test_policies.py @@ -18,9 +18,9 @@ import unittest # noqa from cassandra import ConsistencyLevel, Unavailable -from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT +from cassandra.cluster import ExecutionProfile, EXEC_PROFILE_DEFAULT -from tests.integration import use_cluster, get_cluster, get_node +from tests.integration import use_cluster, get_cluster, get_node, TestCluster def setup_module(): @@ -47,7 +47,7 @@ def test_should_rethrow_on_unvailable_with_default_policy_if_cas(self): ep = ExecutionProfile(consistency_level=ConsistencyLevel.ALL, serial_consistency_level=ConsistencyLevel.SERIAL) - cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: ep}) + cluster = TestCluster(execution_profiles={EXEC_PROFILE_DEFAULT: ep}) session = cluster.connect() session.execute("CREATE KEYSPACE test_retry_policy_cas WITH replication = {'class':'SimpleStrategy','replication_factor': 3};") diff --git a/tests/integration/long/test_schema.py b/tests/integration/long/test_schema.py index 2ad854688d..e2945a117b 100644 --- a/tests/integration/long/test_schema.py +++ b/tests/integration/long/test_schema.py @@ -15,10 +15,9 @@ import logging from cassandra import ConsistencyLevel, AlreadyExists -from cassandra.cluster import Cluster from cassandra.query import SimpleStatement -from tests.integration import use_singledc, PROTOCOL_VERSION, execute_until_pass +from tests.integration import use_singledc, execute_until_pass, TestCluster import time @@ -38,7 +37,7 @@ class SchemaTests(unittest.TestCase): @classmethod def setup_class(cls): - cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.cluster = TestCluster() cls.session = cls.cluster.connect(wait_for_all_pools=True) @classmethod @@ -99,7 +98,7 @@ def test_for_schema_disagreements_same_keyspace(self): Tests for any schema disagreements using the same keyspace multiple times """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() session = cluster.connect(wait_for_all_pools=True) for i in range(30): @@ -133,7 +132,7 @@ def test_for_schema_disagreement_attribute(self): @test_category schema """ # This should yield a schema disagreement - cluster = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=0.001) + cluster = TestCluster(max_schema_agreement_wait=0.001) session = cluster.connect(wait_for_all_pools=True) rs = session.execute("CREATE KEYSPACE test_schema_disagreement WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}") @@ -146,7 +145,7 @@ def test_for_schema_disagreement_attribute(self): cluster.shutdown() # These should have schema agreement - cluster = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=100) + cluster = TestCluster(max_schema_agreement_wait=100) session = cluster.connect() rs = session.execute("CREATE KEYSPACE test_schema_disagreement WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}") self.check_and_wait_for_agreement(session, rs, True) diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index 49cad63c68..7698849945 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -18,7 +18,7 @@ import unittest import os, sys, traceback, logging, ssl, time, math, uuid -from cassandra.cluster import Cluster, NoHostAvailable +from cassandra.cluster import NoHostAvailable from cassandra.connection import DefaultEndPoint from cassandra import ConsistencyLevel from cassandra.query import SimpleStatement @@ -26,7 +26,7 @@ from OpenSSL import SSL, crypto from tests.integration import ( - PROTOCOL_VERSION, get_cluster, remove_cluster, use_single_node, start_cluster_wait_for_up, EVENT_LOOP_MANAGER, + get_cluster, remove_cluster, use_single_node, start_cluster_wait_for_up, EVENT_LOOP_MANAGER, TestCluster ) if not hasattr(ssl, 'match_hostname'): @@ -103,9 +103,8 @@ def validate_ssl_options(**kwargs): if tries > 5: raise RuntimeError("Failed to connect to SSL cluster after 5 attempts") try: - cluster = Cluster( + cluster = TestCluster( contact_points=[DefaultEndPoint(hostname)], - protocol_version=PROTOCOL_VERSION, ssl_options=ssl_options, ssl_context=ssl_context ) @@ -185,7 +184,7 @@ def test_can_connect_with_ssl_long_running(self): if tries > 5: raise RuntimeError("Failed to connect to SSL cluster after 5 attempts") try: - cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options=ssl_options) + cluster = TestCluster(ssl_options=ssl_options) session = cluster.connect(wait_for_all_pools=True) break except Exception: @@ -291,8 +290,8 @@ def test_cannot_connect_without_client_auth(self): @test_category connection:ssl """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options={'ca_certs': CLIENT_CA_CERTS, - 'ssl_version': ssl_version}) + cluster = TestCluster(ssl_options={'ca_certs': CLIENT_CA_CERTS, + 'ssl_version': ssl_version}) with self.assertRaises(NoHostAvailable) as _: cluster.connect() @@ -320,10 +319,11 @@ def test_cannot_connect_with_bad_client_auth(self): # I don't set the bad certfile for pyopenssl because it hangs ssl_options['certfile'] = DRIVER_CERTFILE_BAD - cluster = Cluster(protocol_version=PROTOCOL_VERSION, - ssl_options={'ca_certs': CLIENT_CA_CERTS, - 'ssl_version': ssl_version, - 'keyfile': DRIVER_KEYFILE}) + cluster = TestCluster( + ssl_options={'ca_certs': CLIENT_CA_CERTS, + 'ssl_version': ssl_version, + 'keyfile': DRIVER_KEYFILE} + ) with self.assertRaises(NoHostAvailable) as _: cluster.connect() @@ -364,7 +364,7 @@ def test_ssl_want_write_errors_are_retried(self): """ ssl_options = {'ca_certs': CLIENT_CA_CERTS, 'ssl_version': ssl_version} - cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options=ssl_options) + cluster = TestCluster(ssl_options=ssl_options) session = cluster.connect(wait_for_all_pools=True) try: session.execute('drop keyspace ssl_error_test') diff --git a/tests/integration/long/test_topology_change.py b/tests/integration/long/test_topology_change.py index 8800cd802b..5b12eef28c 100644 --- a/tests/integration/long/test_topology_change.py +++ b/tests/integration/long/test_topology_change.py @@ -1,8 +1,7 @@ from unittest import TestCase -from cassandra.cluster import Cluster from cassandra.policies import HostStateListener -from tests.integration import PROTOCOL_VERSION, get_node, use_cluster, local +from tests.integration import get_node, use_cluster, local, TestCluster from tests.integration.long.utils import decommission from tests.util import wait_until @@ -32,7 +31,7 @@ def test_removed_node_stops_reconnecting(self): use_cluster("test_down_then_removed", [3], start=True) state_listener = StateListener() - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() self.addCleanup(cluster.shutdown) cluster.register_listener(state_listener) session = cluster.connect(wait_for_all_pools=True) diff --git a/tests/integration/long/utils.py b/tests/integration/long/utils.py index 421e694a9a..a5b5bdd226 100644 --- a/tests/integration/long/utils.py +++ b/tests/integration/long/utils.py @@ -93,7 +93,7 @@ def force_stop(node): def decommission(node): - if (DSE_VERSION and DSE_VERSION >= Version("5.1")) or CASSANDRA_VERSION >= Version("4.0"): + if (DSE_VERSION and DSE_VERSION >= Version("5.1")) or CASSANDRA_VERSION >= Version("4.0-a"): # CASSANDRA-12510 get_node(node).decommission(force=True) else: diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index 4c32e9de3d..9755c5098b 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -15,11 +15,11 @@ import logging import time -from cassandra.cluster import Cluster, NoHostAvailable +from cassandra.cluster import NoHostAvailable from cassandra.auth import PlainTextAuthProvider, SASLClient, SaslAuthProvider from tests.integration import use_singledc, get_cluster, remove_cluster, PROTOCOL_VERSION, CASSANDRA_IP, \ - set_default_cass_ip, USE_CASS_EXTERNAL, start_cluster_wait_for_up + USE_CASS_EXTERNAL, start_cluster_wait_for_up, TestCluster from tests.integration.util import assert_quiescent_pool_state try: @@ -44,8 +44,6 @@ def setup_module(): ccm_cluster.set_configuration_options(config_options) log.debug("Starting ccm test cluster with %s", config_options) start_cluster_wait_for_up(ccm_cluster) - else: - set_default_cass_ip() def teardown_module(): @@ -77,14 +75,12 @@ def cluster_as(self, usr, pwd): # to ensure the role manager is setup for _ in range(5): try: - cluster = Cluster( - protocol_version=PROTOCOL_VERSION, + cluster = TestCluster( idle_heartbeat_interval=0, auth_provider=self.get_authentication_provider(username='cassandra', password='cassandra')) cluster.connect(wait_for_all_pools=True) - return Cluster( - protocol_version=PROTOCOL_VERSION, + return TestCluster( idle_heartbeat_interval=0, auth_provider=self.get_authentication_provider(username=usr, password=pwd)) except Exception as e: @@ -147,7 +143,7 @@ def test_connect_empty_pwd(self): cluster.shutdown() def test_connect_no_auth_provider(self): - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() try: self.assertRaisesRegexp(NoHostAvailable, '.*AuthenticationFailed.*', diff --git a/tests/integration/standard/test_authentication_misconfiguration.py b/tests/integration/standard/test_authentication_misconfiguration.py index caac84dd25..546141d801 100644 --- a/tests/integration/standard/test_authentication_misconfiguration.py +++ b/tests/integration/standard/test_authentication_misconfiguration.py @@ -14,8 +14,7 @@ import unittest -from cassandra.cluster import Cluster -from tests.integration import CASSANDRA_IP, USE_CASS_EXTERNAL, use_cluster, PROTOCOL_VERSION +from tests.integration import USE_CASS_EXTERNAL, use_cluster, TestCluster class MisconfiguredAuthenticationTests(unittest.TestCase): @@ -34,7 +33,7 @@ def setUpClass(cls): cls.ccm_cluster = ccm_cluster def test_connect_no_auth_provider(self): - cluster = Cluster(protocol_version=PROTOCOL_VERSION, contact_points=[CASSANDRA_IP]) + cluster = TestCluster() cluster.connect() cluster.refresh_nodes() down_hosts = [host for host in cluster.metadata.all_hosts() if not host.is_up] diff --git a/tests/integration/standard/test_client_warnings.py b/tests/integration/standard/test_client_warnings.py index 1092af7776..c5ce5dc726 100644 --- a/tests/integration/standard/test_client_warnings.py +++ b/tests/integration/standard/test_client_warnings.py @@ -19,9 +19,8 @@ import unittest from cassandra.query import BatchStatement -from cassandra.cluster import Cluster -from tests.integration import use_singledc, PROTOCOL_VERSION, local +from tests.integration import use_singledc, PROTOCOL_VERSION, local, TestCluster def setup_module(): @@ -35,7 +34,7 @@ def setUpClass(cls): if PROTOCOL_VERSION < 4: return - cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.cluster = TestCluster() cls.session = cls.cluster.connect() cls.session.execute("CREATE TABLE IF NOT EXISTS test1rf.client_warning (k int, v0 int, v1 int, PRIMARY KEY (k, v0))") diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 2314931b7d..cdb6f1f3b7 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -27,7 +27,7 @@ from packaging.version import Version import cassandra -from cassandra.cluster import Cluster, NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT, ControlConnection +from cassandra.cluster import NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT, ControlConnection, Cluster from cassandra.concurrent import execute_concurrent from cassandra.policies import (RoundRobinPolicy, ExponentialReconnectionPolicy, RetryPolicy, SimpleConvictionPolicy, HostDistance, @@ -40,10 +40,10 @@ from cassandra.connection import DefaultEndPoint from tests import notwindows -from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, CASSANDRA_VERSION, \ +from tests.integration import use_singledc, get_server_versions, CASSANDRA_VERSION, \ execute_until_pass, execute_with_long_wait_retry, get_node, MockLoggingHandler, get_unsupported_lower_protocol, \ get_unsupported_upper_protocol, protocolv5, local, CASSANDRA_IP, greaterthanorequalcass30, lessthanorequalcass40, \ - DSE_VERSION + DSE_VERSION, TestCluster, PROTOCOL_VERSION from tests.integration.util import assert_quiescent_pool_state import sys @@ -81,8 +81,9 @@ def test_ignored_host_up(self): @test_category connection """ ignored_host_policy = IgnoredHostPolicy(["127.0.0.2", "127.0.0.3"]) - cluster = Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=ignored_host_policy)}) + cluster = TestCluster( + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=ignored_host_policy)} + ) cluster.connect() for host in cluster.metadata.all_hosts(): if str(host) == "127.0.0.1:9042": @@ -102,7 +103,7 @@ def test_host_resolution(self): @test_category connection """ - cluster = Cluster(contact_points=["localhost"], protocol_version=PROTOCOL_VERSION, connect_timeout=1) + cluster = TestCluster(contact_points=["localhost"], connect_timeout=1) self.assertTrue(DefaultEndPoint('127.0.0.1') in cluster.endpoints_resolved) @local @@ -116,11 +117,14 @@ def test_host_duplication(self): @test_category connection """ - cluster = Cluster(contact_points=["localhost", "127.0.0.1", "localhost", "localhost", "localhost"], protocol_version=PROTOCOL_VERSION, connect_timeout=1) + cluster = TestCluster( + contact_points=["localhost", "127.0.0.1", "localhost", "localhost", "localhost"], + connect_timeout=1 + ) cluster.connect(wait_for_all_pools=True) self.assertEqual(len(cluster.metadata.all_hosts()), 3) cluster.shutdown() - cluster = Cluster(contact_points=["127.0.0.1", "localhost"], protocol_version=PROTOCOL_VERSION, connect_timeout=1) + cluster = TestCluster(contact_points=["127.0.0.1", "localhost"], connect_timeout=1) cluster.connect(wait_for_all_pools=True) self.assertEqual(len(cluster.metadata.all_hosts()), 3) cluster.shutdown() @@ -144,7 +148,7 @@ def test_raise_error_on_control_connection_timeout(self): """ get_node(1).pause() - cluster = Cluster(contact_points=['127.0.0.1'], protocol_version=PROTOCOL_VERSION, connect_timeout=1) + cluster = TestCluster(contact_points=['127.0.0.1'], connect_timeout=1) with self.assertRaisesRegexp(NoHostAvailable, "OperationTimedOut\('errors=Timed out creating connection \(1 seconds\)"): cluster.connect() @@ -157,7 +161,7 @@ def test_basic(self): Test basic connection and usage """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() session = cluster.connect() result = execute_until_pass(session, """ @@ -213,20 +217,19 @@ def cleanup(): self.addCleanup(cleanup) # Test with empty list - self.cluster_to_shutdown = Cluster([], protocol_version=PROTOCOL_VERSION) + self.cluster_to_shutdown = TestCluster(contact_points=[]) with self.assertRaises(NoHostAvailable): self.cluster_to_shutdown.connect() self.cluster_to_shutdown.shutdown() # Test with only invalid - self.cluster_to_shutdown = Cluster(('1.2.3.4',), protocol_version=PROTOCOL_VERSION) + self.cluster_to_shutdown = TestCluster(contact_points=('1.2.3.4',)) with self.assertRaises(NoHostAvailable): self.cluster_to_shutdown.connect() self.cluster_to_shutdown.shutdown() # Test with valid and invalid hosts - self.cluster_to_shutdown = Cluster(("127.0.0.1", "127.0.0.2", "1.2.3.4"), - protocol_version=PROTOCOL_VERSION) + self.cluster_to_shutdown = TestCluster(contact_points=("127.0.0.1", "127.0.0.2", "1.2.3.4")) self.cluster_to_shutdown.connect() self.cluster_to_shutdown.shutdown() @@ -298,7 +301,7 @@ def test_invalid_protocol_negotation(self): upper_bound = get_unsupported_upper_protocol() log.debug('got upper_bound of {}'.format(upper_bound)) if upper_bound is not None: - cluster = Cluster(protocol_version=upper_bound) + cluster = TestCluster(protocol_version=upper_bound) with self.assertRaises(NoHostAvailable): cluster.connect() cluster.shutdown() @@ -306,7 +309,7 @@ def test_invalid_protocol_negotation(self): lower_bound = get_unsupported_lower_protocol() log.debug('got lower_bound of {}'.format(lower_bound)) if lower_bound is not None: - cluster = Cluster(protocol_version=lower_bound) + cluster = TestCluster(protocol_version=lower_bound) with self.assertRaises(NoHostAvailable): cluster.connect() cluster.shutdown() @@ -316,7 +319,7 @@ def test_connect_on_keyspace(self): Ensure clusters that connect on a keyspace, do """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() session = cluster.connect() result = session.execute( """ @@ -334,7 +337,7 @@ def test_connect_on_keyspace(self): cluster.shutdown() def test_set_keyspace_twice(self): - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() session = cluster.connect() session.execute("USE system") session.execute("USE system") @@ -345,7 +348,7 @@ def test_default_connections(self): Ensure errors are not thrown when using non-default policies """ - Cluster( + TestCluster( reconnection_policy=ExponentialReconnectionPolicy(1.0, 600.0), conviction_policy_factory=SimpleConvictionPolicy, protocol_version=PROTOCOL_VERSION @@ -355,7 +358,7 @@ def test_connect_to_already_shutdown_cluster(self): """ Ensure you cannot connect to a cluster that's been shutdown """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() cluster.shutdown() self.assertRaises(Exception, cluster.connect) @@ -364,7 +367,7 @@ def test_auth_provider_is_callable(self): Ensure that auth_providers are always callable """ self.assertRaises(TypeError, Cluster, auth_provider=1, protocol_version=1) - c = Cluster(protocol_version=1) + c = TestCluster(protocol_version=1) self.assertRaises(TypeError, setattr, c, 'auth_provider', 1) def test_v2_auth_provider(self): @@ -373,7 +376,7 @@ def test_v2_auth_provider(self): """ bad_auth_provider = lambda x: {'username': 'foo', 'password': 'bar'} self.assertRaises(TypeError, Cluster, auth_provider=bad_auth_provider, protocol_version=2) - c = Cluster(protocol_version=2) + c = TestCluster(protocol_version=2) self.assertRaises(TypeError, setattr, c, 'auth_provider', bad_auth_provider) def test_conviction_policy_factory_is_callable(self): @@ -389,8 +392,8 @@ def test_connect_to_bad_hosts(self): when a cluster cannot connect to given hosts """ - cluster = Cluster(['127.1.2.9', '127.1.2.10'], - protocol_version=PROTOCOL_VERSION) + cluster = TestCluster(contact_points=['127.1.2.9', '127.1.2.10'], + protocol_version=PROTOCOL_VERSION) self.assertRaises(NoHostAvailable, cluster.connect) def test_cluster_settings(self): @@ -400,7 +403,7 @@ def test_cluster_settings(self): if PROTOCOL_VERSION >= 3: raise unittest.SkipTest("min/max requests and core/max conns aren't used with v3 protocol") - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() min_requests_per_connection = cluster.get_min_requests_per_connection(HostDistance.LOCAL) self.assertEqual(cassandra.cluster.DEFAULT_MIN_REQUESTS, min_requests_per_connection) @@ -423,7 +426,7 @@ def test_cluster_settings(self): self.assertEqual(cluster.get_max_connections_per_host(HostDistance.LOCAL), max_connections_per_host + 1) def test_refresh_schema(self): - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() session = cluster.connect() original_meta = cluster.metadata.keyspaces @@ -435,7 +438,7 @@ def test_refresh_schema(self): cluster.shutdown() def test_refresh_schema_keyspace(self): - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() session = cluster.connect() original_meta = cluster.metadata.keyspaces @@ -451,7 +454,7 @@ def test_refresh_schema_keyspace(self): cluster.shutdown() def test_refresh_schema_table(self): - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() session = cluster.connect() original_meta = cluster.metadata.keyspaces @@ -477,7 +480,7 @@ def test_refresh_schema_type(self): raise unittest.SkipTest('UDTs are not specified in change events for protocol v2') # We may want to refresh types on keyspace change events in that case(?) - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() session = cluster.connect() keyspace_name = 'test1rf' @@ -516,7 +519,7 @@ def patched_wait_for_responses(*args, **kwargs): agreement_timeout = 1 # cluster agreement wait exceeded - c = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=agreement_timeout) + c = TestCluster(max_schema_agreement_wait=agreement_timeout) c.connect() self.assertTrue(c.metadata.keyspaces) @@ -541,7 +544,7 @@ def patched_wait_for_responses(*args, **kwargs): refresh_threshold = 0.5 # cluster agreement bypass - c = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=0) + c = TestCluster(max_schema_agreement_wait=0) start_time = time.time() s = c.connect() end_time = time.time() @@ -572,7 +575,7 @@ def test_trace(self): Ensure trace can be requested for async and non-async queries """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() session = cluster.connect() result = session.execute( "SELECT * FROM system.local", trace=True) @@ -618,7 +621,7 @@ def test_trace_unavailable(self): @test_category query """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() self.addCleanup(cluster.shutdown) session = cluster.connect() @@ -660,7 +663,7 @@ def test_one_returns_none(self): @test_category query """ - with Cluster() as cluster: + with TestCluster() as cluster: session = cluster.connect() self.assertIsNone(session.execute("SELECT * from system.local WHERE key='madeup_key'").one()) @@ -669,7 +672,7 @@ def test_string_coverage(self): Ensure str(future) returns without error """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() session = cluster.connect() query = "SELECT * FROM system.local" @@ -726,7 +729,7 @@ def test_can_connect_with_sslauth(self): def _warning_are_issued_when_auth(self, auth_provider): with MockLoggingHandler().set_module_name(connection.__name__) as mock_handler: - with Cluster(auth_provider=auth_provider) as cluster: + with TestCluster(auth_provider=auth_provider) as cluster: session = cluster.connect() self.assertIsNotNone(session.execute("SELECT * from system.local")) @@ -740,8 +743,8 @@ def _warning_are_issued_when_auth(self, auth_provider): def test_idle_heartbeat(self): interval = 2 - cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=interval, - monitor_reporting_enabled=False) + cluster = TestCluster(idle_heartbeat_interval=interval, + monitor_reporting_enabled=False) if PROTOCOL_VERSION < 3: cluster.set_core_connections_per_host(HostDistance.LOCAL, 1) session = cluster.connect(wait_for_all_pools=True) @@ -803,7 +806,7 @@ def test_idle_heartbeat_disabled(self): self.assertTrue(Cluster.idle_heartbeat_interval) # heartbeat disabled with '0' - cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=0) + cluster = TestCluster(idle_heartbeat_interval=0) self.assertEqual(cluster.idle_heartbeat_interval, 0) session = cluster.connect() @@ -819,7 +822,7 @@ def test_idle_heartbeat_disabled(self): def test_pool_management(self): # Ensure that in_flight and request_ids quiesce after cluster operations - cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=0) # no idle heartbeat here, pool management is tested in test_idle_heartbeat + cluster = TestCluster(idle_heartbeat_interval=0) # no idle heartbeat here, pool management is tested in test_idle_heartbeat session = cluster.connect() session2 = cluster.connect() @@ -863,7 +866,7 @@ def test_profile_load_balancing(self): RoundRobinPolicy(), lambda host: host.address == CASSANDRA_IP ) ) - with Cluster(execution_profiles={'node1': node1}, monitor_reporting_enabled=False) as cluster: + with TestCluster(execution_profiles={'node1': node1}, monitor_reporting_enabled=False) as cluster: session = cluster.connect(wait_for_all_pools=True) # default is DCA RR for all hosts @@ -904,7 +907,7 @@ def test_profile_load_balancing(self): self.assertTrue(session.execute(query, execution_profile='node1')[0].release_version) def test_setting_lbp_legacy(self): - cluster = Cluster() + cluster = TestCluster() self.addCleanup(cluster.shutdown) cluster.load_balancing_policy = RoundRobinPolicy() self.assertEqual( @@ -932,7 +935,7 @@ def test_profile_lb_swap(self): rr1 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy()) rr2 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy()) exec_profiles = {'rr1': rr1, 'rr2': rr2} - with Cluster(execution_profiles=exec_profiles) as cluster: + with TestCluster(execution_profiles=exec_profiles) as cluster: session = cluster.connect(wait_for_all_pools=True) # default is DCA RR for all hosts @@ -959,7 +962,7 @@ def test_ta_lbp(self): """ query = "select release_version from system.local" ta1 = ExecutionProfile() - with Cluster() as cluster: + with TestCluster() as cluster: session = cluster.connect() cluster.add_execution_profile("ta1", ta1) rs = session.execute(query, execution_profile='ta1') @@ -980,7 +983,7 @@ def test_clone_shared_lbp(self): query = "select release_version from system.local" rr1 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy()) exec_profiles = {'rr1': rr1} - with Cluster(execution_profiles=exec_profiles) as cluster: + with TestCluster(execution_profiles=exec_profiles) as cluster: session = cluster.connect(wait_for_all_pools=True) self.assertGreater(len(cluster.metadata.all_hosts()), 1, "We only have one host connected at this point") @@ -1008,7 +1011,7 @@ def test_missing_exec_prof(self): rr1 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy()) rr2 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy()) exec_profiles = {'rr1': rr1, 'rr2': rr2} - with Cluster(execution_profiles=exec_profiles) as cluster: + with TestCluster(execution_profiles=exec_profiles) as cluster: session = cluster.connect() with self.assertRaises(ValueError): session.execute(query, execution_profile='rr3') @@ -1035,7 +1038,7 @@ def test_profile_pool_management(self): RoundRobinPolicy(), lambda host: host.address == "127.0.0.2" ) ) - with Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: node1, 'node2': node2}) as cluster: + with TestCluster(execution_profiles={EXEC_PROFILE_DEFAULT: node1, 'node2': node2}) as cluster: session = cluster.connect(wait_for_all_pools=True) pools = session.get_pool_state() # there are more hosts, but we connected to the ones in the lbp aggregate @@ -1070,7 +1073,7 @@ def test_add_profile_timeout(self): RoundRobinPolicy(), lambda host: host.address == "127.0.0.1" ) ) - with Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: node1}) as cluster: + with TestCluster(execution_profiles={EXEC_PROFILE_DEFAULT: node1}) as cluster: session = cluster.connect(wait_for_all_pools=True) pools = session.get_pool_state() self.assertGreater(len(cluster.metadata.all_hosts()), 2) @@ -1096,7 +1099,7 @@ def test_add_profile_timeout(self): @notwindows def test_execute_query_timeout(self): - with Cluster() as cluster: + with TestCluster() as cluster: session = cluster.connect(wait_for_all_pools=True) query = "SELECT * FROM system.local" @@ -1142,8 +1145,7 @@ def test_replicas_are_queried(self): tap_profile = ExecutionProfile( load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()) ) - with Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={EXEC_PROFILE_DEFAULT: tap_profile}) as cluster: + with TestCluster(execution_profiles={EXEC_PROFILE_DEFAULT: tap_profile}) as cluster: session = cluster.connect(wait_for_all_pools=True) session.execute(''' CREATE TABLE test1rf.table_with_big_key ( @@ -1168,9 +1170,8 @@ def test_replicas_are_queried(self): log = logging.getLogger(__name__) log.info("The only replica found was: {}".format(only_replica)) available_hosts = [host for host in ["127.0.0.1", "127.0.0.2", "127.0.0.3"] if host != only_replica] - with Cluster(contact_points=available_hosts, - protocol_version=PROTOCOL_VERSION, - execution_profiles={EXEC_PROFILE_DEFAULT: hfp_profile}) as cluster: + with TestCluster(contact_points=available_hosts, + execution_profiles={EXEC_PROFILE_DEFAULT: hfp_profile}) as cluster: session = cluster.connect(wait_for_all_pools=True) prepared = session.prepare("""SELECT * from test1rf.table_with_big_key @@ -1196,10 +1197,10 @@ def test_compact_option(self): @test_category connection """ - nc_cluster = Cluster(protocol_version=PROTOCOL_VERSION, no_compact=True) + nc_cluster = TestCluster(no_compact=True) nc_session = nc_cluster.connect() - cluster = Cluster(protocol_version=PROTOCOL_VERSION, no_compact=False) + cluster = TestCluster(no_compact=False) session = cluster.connect() self.addCleanup(cluster.shutdown) @@ -1284,7 +1285,7 @@ def test_address_translator_basic(self): @test_category metadata """ lh_ad = LocalHostAdressTranslator({'127.0.0.1': '127.0.0.1', '127.0.0.2': '127.0.0.1', '127.0.0.3': '127.0.0.1'}) - c = Cluster(address_translator=lh_ad) + c = TestCluster(address_translator=lh_ad) c.connect() self.assertEqual(len(c.metadata.all_hosts()), 1) c.shutdown() @@ -1304,7 +1305,7 @@ def test_address_translator_with_mixed_nodes(self): """ adder_map = {'127.0.0.1': '127.0.0.1', '127.0.0.2': '127.0.0.3', '127.0.0.3': '127.0.0.2'} lh_ad = LocalHostAdressTranslator(adder_map) - c = Cluster(address_translator=lh_ad) + c = TestCluster(address_translator=lh_ad) c.connect() for host in c.metadata.all_hosts(): self.assertEqual(adder_map.get(host.address), host.broadcast_address) @@ -1330,7 +1331,7 @@ def test_no_connect(self): @test_category configuration """ - with Cluster() as cluster: + with TestCluster() as cluster: self.assertFalse(cluster.is_shutdown) self.assertTrue(cluster.is_shutdown) @@ -1344,7 +1345,7 @@ def test_simple_nested(self): @test_category configuration """ - with Cluster(**self.cluster_kwargs) as cluster: + with TestCluster(**self.cluster_kwargs) as cluster: with cluster.connect() as session: self.assertFalse(cluster.is_shutdown) self.assertFalse(session.is_shutdown) @@ -1362,7 +1363,7 @@ def test_cluster_no_session(self): @test_category configuration """ - with Cluster(**self.cluster_kwargs) as cluster: + with TestCluster(**self.cluster_kwargs) as cluster: session = cluster.connect() self.assertFalse(cluster.is_shutdown) self.assertFalse(session.is_shutdown) @@ -1380,7 +1381,7 @@ def test_session_no_cluster(self): @test_category configuration """ - cluster = Cluster(**self.cluster_kwargs) + cluster = TestCluster(**self.cluster_kwargs) unmanaged_session = cluster.connect() with cluster.connect() as session: self.assertFalse(cluster.is_shutdown) @@ -1411,7 +1412,7 @@ def test_down_event_with_active_connection(self): @test_category connection """ - with Cluster(protocol_version=PROTOCOL_VERSION) as cluster: + with TestCluster() as cluster: session = cluster.connect(wait_for_all_pools=True) random_host = cluster.metadata.all_hosts()[0] cluster.on_down(random_host, False) @@ -1440,8 +1441,9 @@ class DontPrepareOnIgnoredHostsTest(unittest.TestCase): def test_prepare_on_ignored_hosts(self): - cluster = Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=self.ignore_node_3_policy)}) + cluster = TestCluster( + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=self.ignore_node_3_policy)} + ) session = cluster.connect() cluster.reprepare_on_up, cluster.prepare_on_all_hosts = True, False @@ -1486,7 +1488,7 @@ def test_invalid_protocol_version_beta_option(self): @test_category connection """ - cluster = Cluster(protocol_version=cassandra.ProtocolVersion.MAX_SUPPORTED, allow_beta_protocol_version=False) + cluster = TestCluster(protocol_version=cassandra.ProtocolVersion.V5, allow_beta_protocol_version=False) try: with self.assertRaises(NoHostAvailable): cluster.connect() @@ -1504,9 +1506,9 @@ def test_valid_protocol_version_beta_options_connect(self): @test_category connection """ - cluster = Cluster(protocol_version=cassandra.ProtocolVersion.MAX_SUPPORTED, allow_beta_protocol_version=True) + cluster = Cluster(protocol_version=cassandra.ProtocolVersion.V5, allow_beta_protocol_version=True) session = cluster.connect() - self.assertEqual(cluster.protocol_version, cassandra.ProtocolVersion.MAX_SUPPORTED) + self.assertEqual(cluster.protocol_version, cassandra.ProtocolVersion.V5) self.assertTrue(session.execute("select release_version from system.local")[0]) cluster.shutdown() @@ -1524,7 +1526,7 @@ def test_deprecation_warnings_legacy_parameters(self): @test_category logs """ with warnings.catch_warnings(record=True) as w: - Cluster(load_balancing_policy=RoundRobinPolicy()) + TestCluster(load_balancing_policy=RoundRobinPolicy()) self.assertEqual(len(w), 1) self.assertIn("Legacy execution parameters will be removed in 4.0. Consider using execution profiles.", str(w[0].message)) @@ -1541,7 +1543,7 @@ def test_deprecation_warnings_meta_refreshed(self): @test_category logs """ with warnings.catch_warnings(record=True) as w: - cluster = Cluster() + cluster = TestCluster() cluster.set_meta_refresh_enabled(True) self.assertEqual(len(w), 1) self.assertIn("Cluster.set_meta_refresh_enabled is deprecated and will be removed in 4.0.", @@ -1559,7 +1561,7 @@ def test_deprecation_warning_default_consistency_level(self): @test_category logs """ with warnings.catch_warnings(record=True) as w: - cluster = Cluster() + cluster = TestCluster() session = cluster.connect() session.default_consistency_level = ConsistencyLevel.ONE self.assertEqual(len(w), 1) diff --git a/tests/integration/standard/test_concurrent.py b/tests/integration/standard/test_concurrent.py index 954e5f28f4..8bd65c7f6f 100644 --- a/tests/integration/standard/test_concurrent.py +++ b/tests/integration/standard/test_concurrent.py @@ -17,12 +17,12 @@ from cassandra import InvalidRequest, ConsistencyLevel, ReadTimeout, WriteTimeout, OperationTimedOut, \ ReadFailure, WriteFailure -from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT +from cassandra.cluster import ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.concurrent import execute_concurrent, execute_concurrent_with_args, ExecutionResult from cassandra.policies import HostDistance from cassandra.query import tuple_factory, SimpleStatement -from tests.integration import use_singledc, PROTOCOL_VERSION +from tests.integration import use_singledc, PROTOCOL_VERSION, TestCluster from six import next @@ -42,8 +42,7 @@ class ClusterTests(unittest.TestCase): @classmethod def setUpClass(cls): - cls.cluster = Cluster( - protocol_version=PROTOCOL_VERSION, + cls.cluster = TestCluster( execution_profiles = { EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=tuple_factory) } diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 4af48a562c..aaa5a27dfd 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -28,7 +28,7 @@ from unittest import SkipTest from cassandra import ConsistencyLevel, OperationTimedOut -from cassandra.cluster import NoHostAvailable, ConnectionShutdown, Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT +from cassandra.cluster import NoHostAvailable, ConnectionShutdown, ExecutionProfile, EXEC_PROFILE_DEFAULT import cassandra.io.asyncorereactor from cassandra.io.asyncorereactor import AsyncoreConnection from cassandra.protocol import QueryMessage @@ -37,8 +37,9 @@ from cassandra.pool import HostConnectionPool from tests import is_monkey_patched -from tests.integration import use_singledc, PROTOCOL_VERSION, get_node, CASSANDRA_IP, local, \ - requiresmallclockgranularity, greaterthancass20 +from tests.integration import use_singledc, get_node, CASSANDRA_IP, local, \ + requiresmallclockgranularity, greaterthancass20, TestCluster + try: from cassandra.io.libevreactor import LibevConnection import cassandra.io.libevreactor @@ -56,15 +57,13 @@ def setup_module(): class ConnectionTimeoutTest(unittest.TestCase): def setUp(self): - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles= - {EXEC_PROFILE_DEFAULT: ExecutionProfile( - load_balancing_policy=HostFilterPolicy( - RoundRobinPolicy(), predicate=lambda host: host.address == CASSANDRA_IP - ) - ) - } - ) + self.cluster = TestCluster(execution_profiles={ + EXEC_PROFILE_DEFAULT: ExecutionProfile( + load_balancing_policy=HostFilterPolicy( + RoundRobinPolicy(), predicate=lambda host: host.address == CASSANDRA_IP + ) + ) + }) self.session = self.cluster.connect() @@ -118,7 +117,7 @@ class HeartbeatTest(unittest.TestCase): """ def setUp(self): - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=1) + self.cluster = TestCluster(idle_heartbeat_interval=1) self.session = self.cluster.connect(wait_for_all_pools=True) def tearDown(self): @@ -217,7 +216,12 @@ def get_connection(self, timeout=5): for i in range(5): try: contact_point = CASSANDRA_IP - conn = self.klass.factory(endpoint=contact_point, timeout=timeout, protocol_version=PROTOCOL_VERSION) + conn = self.klass.factory( + endpoint=contact_point, + timeout=timeout, + protocol_version=TestCluster.DEFAULT_PROTOCOL_VERSION, + allow_beta_protocol_version=TestCluster.DEFAULT_ALLOW_BETA + ) break except (OperationTimedOut, NoHostAvailable, ConnectionShutdown) as e: continue @@ -412,10 +416,10 @@ class C1(self.klass): class C2(self.klass): pass - clusterC1 = Cluster(connection_class=C1) + clusterC1 = TestCluster(connection_class=C1) clusterC1.connect(wait_for_all_pools=True) - clusterC2 = Cluster(connection_class=C2) + clusterC2 = TestCluster(connection_class=C2) clusterC2.connect(wait_for_all_pools=True) self.addCleanup(clusterC1.shutdown) self.addCleanup(clusterC2.shutdown) diff --git a/tests/integration/standard/test_control_connection.py b/tests/integration/standard/test_control_connection.py index b91d29c4e6..fe02de43da 100644 --- a/tests/integration/standard/test_control_connection.py +++ b/tests/integration/standard/test_control_connection.py @@ -22,9 +22,8 @@ import unittest # noqa -from cassandra.cluster import Cluster from cassandra.protocol import ConfigurationException -from tests.integration import use_singledc, PROTOCOL_VERSION +from tests.integration import use_singledc, PROTOCOL_VERSION, TestCluster from tests.integration.datatype_utils import update_datatypes @@ -39,7 +38,7 @@ def setUp(self): raise unittest.SkipTest( "Native protocol 3,0+ is required for UDTs using %r" % (PROTOCOL_VERSION,)) - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.cluster = TestCluster() def tearDown(self): try: diff --git a/tests/integration/standard/test_custom_cluster.py b/tests/integration/standard/test_custom_cluster.py index 1943557ee4..84e0737086 100644 --- a/tests/integration/standard/test_custom_cluster.py +++ b/tests/integration/standard/test_custom_cluster.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from cassandra.cluster import Cluster, NoHostAvailable -from tests.integration import use_singledc, get_cluster, remove_cluster, local +from cassandra.cluster import NoHostAvailable +from tests.integration import use_singledc, get_cluster, remove_cluster, local, TestCluster from tests.util import wait_until, wait_until_not_raised try: @@ -31,9 +31,9 @@ def setup_module(): # can't use wait_for_binary_proto cause ccm tries on port 9042 ccm_cluster.start(wait_for_binary_proto=False) # wait until all nodes are up - wait_until_not_raised(lambda: Cluster(['127.0.0.1'], port=9046).connect().shutdown(), 1, 20) - wait_until_not_raised(lambda: Cluster(['127.0.0.2'], port=9046).connect().shutdown(), 1, 20) - wait_until_not_raised(lambda: Cluster(['127.0.0.3'], port=9046).connect().shutdown(), 1, 20) + wait_until_not_raised(lambda: TestCluster(contact_points=['127.0.0.1'], port=9046).connect().shutdown(), 1, 20) + wait_until_not_raised(lambda: TestCluster(contact_points=['127.0.0.2'], port=9046).connect().shutdown(), 1, 20) + wait_until_not_raised(lambda: TestCluster(contact_points=['127.0.0.3'], port=9046).connect().shutdown(), 1, 20) def teardown_module(): @@ -50,11 +50,11 @@ def test_connection_honor_cluster_port(self): All hosts should be marked as up and we should be able to execute queries on it. """ - cluster = Cluster() + cluster = TestCluster() with self.assertRaises(NoHostAvailable): cluster.connect() # should fail on port 9042 - cluster = Cluster(port=9046) + cluster = TestCluster(port=9046) session = cluster.connect(wait_for_all_pools=True) wait_until(lambda: len(cluster.metadata.all_hosts()) == 3, 1, 5) diff --git a/tests/integration/standard/test_custom_payload.py b/tests/integration/standard/test_custom_payload.py index c68e9ef843..9906a8243e 100644 --- a/tests/integration/standard/test_custom_payload.py +++ b/tests/integration/standard/test_custom_payload.py @@ -21,9 +21,9 @@ import six from cassandra.query import (SimpleStatement, BatchStatement, BatchType) -from cassandra.cluster import Cluster -from tests.integration import use_singledc, PROTOCOL_VERSION, local +from tests.integration import use_singledc, PROTOCOL_VERSION, local, TestCluster + def setup_module(): use_singledc() @@ -38,7 +38,7 @@ def setUp(self): raise unittest.SkipTest( "Native protocol 4,0+ is required for custom payloads, currently using %r" % (PROTOCOL_VERSION,)) - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.cluster = TestCluster() self.session = self.cluster.connect() def tearDown(self): diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index 2ab847677e..d5108ed47b 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -19,13 +19,13 @@ from cassandra.protocol import ProtocolHandler, ResultMessage, QueryMessage, UUIDType, read_int from cassandra.query import tuple_factory, SimpleStatement -from cassandra.cluster import (Cluster, ResponseFuture, ExecutionProfile, EXEC_PROFILE_DEFAULT, +from cassandra.cluster import (ResponseFuture, ExecutionProfile, EXEC_PROFILE_DEFAULT, ContinuousPagingOptions, NoHostAvailable) from cassandra import ProtocolVersion, ConsistencyLevel -from tests.integration import use_singledc, PROTOCOL_VERSION, drop_keyspace_shutdown_cluster, \ +from tests.integration import use_singledc, drop_keyspace_shutdown_cluster, \ greaterthanorequalcass30, execute_with_long_wait_retry, greaterthanorequaldse51, greaterthanorequalcass3_10, \ - greaterthanorequalcass31 + greaterthanorequalcass31, TestCluster from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES from tests.integration.standard.utils import create_table_with_all_types, get_all_primitive_params from six import binary_type @@ -43,7 +43,7 @@ class CustomProtocolHandlerTest(unittest.TestCase): @classmethod def setUpClass(cls): - cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.cluster = TestCluster() cls.session = cls.cluster.connect() cls.session.execute("CREATE KEYSPACE custserdes WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}") cls.session.set_keyspace("custserdes") @@ -68,8 +68,9 @@ def test_custom_raw_uuid_row_results(self): """ # Ensure that we get normal uuid back first - cluster = Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=tuple_factory)}) + cluster = TestCluster( + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=tuple_factory)} + ) session = cluster.connect(keyspace="custserdes") result = session.execute("SELECT schema_version FROM system.local") @@ -105,8 +106,9 @@ def test_custom_raw_row_results_all_types(self): @test_category data_types:serialization """ # Connect using a custom protocol handler that tracks the various types the result message is used with. - cluster = Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=tuple_factory)}) + cluster = TestCluster( + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=tuple_factory)} + ) session = cluster.connect(keyspace="custserdes") session.client_protocol_handler = CustomProtocolHandlerResultMessageTracked @@ -133,7 +135,7 @@ def test_protocol_divergence_v5_fail_by_continuous_paging(self): @test_category connection """ - cluster = Cluster(protocol_version=ProtocolVersion.V5, allow_beta_protocol_version=True) + cluster = TestCluster(protocol_version=ProtocolVersion.V5, allow_beta_protocol_version=True) session = cluster.connect() max_pages = 4 @@ -228,7 +230,7 @@ def _send_query_message(self, session, timeout, **kwargs): return future def _protocol_divergence_fail_by_flag_uses_int(self, version, uses_int_query_flag, int_flag = True, beta=False): - cluster = Cluster(protocol_version=version, allow_beta_protocol_version=beta) + cluster = TestCluster(protocol_version=version, allow_beta_protocol_version=beta) session = cluster.connect() query_one = SimpleStatement("INSERT INTO test3rf.test (k, v) VALUES (1, 1)") diff --git a/tests/integration/standard/test_cython_protocol_handlers.py b/tests/integration/standard/test_cython_protocol_handlers.py index c6be3760fb..4e45553be2 100644 --- a/tests/integration/standard/test_cython_protocol_handlers.py +++ b/tests/integration/standard/test_cython_protocol_handlers.py @@ -9,18 +9,17 @@ from itertools import count -from cassandra.query import tuple_factory -from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT +from cassandra.cluster import ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.concurrent import execute_concurrent_with_args -from cassandra.protocol import ProtocolHandler, LazyProtocolHandler, NumpyProtocolHandler from cassandra.cython_deps import HAVE_CYTHON, HAVE_NUMPY +from cassandra.protocol import ProtocolHandler, LazyProtocolHandler, NumpyProtocolHandler +from cassandra.query import tuple_factory from tests import VERIFY_CYTHON -from tests.integration import use_singledc, PROTOCOL_VERSION, notprotocolv1, \ - drop_keyspace_shutdown_cluster, BasicSharedKeyspaceUnitTestCase, greaterthancass21 +from tests.integration import use_singledc, notprotocolv1, \ + drop_keyspace_shutdown_cluster, BasicSharedKeyspaceUnitTestCase, greaterthancass21, TestCluster from tests.integration.datatype_utils import update_datatypes from tests.integration.standard.utils import ( create_table_with_all_types, get_all_primitive_params, get_primitive_datatypes) - from tests.unit.cython.utils import cythontest, numpytest @@ -35,7 +34,7 @@ class CythonProtocolHandlerTest(unittest.TestCase): @classmethod def setUpClass(cls): - cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.cluster = TestCluster() cls.session = cls.cluster.connect() cls.session.execute("CREATE KEYSPACE testspace WITH replication = " "{ 'class' : 'SimpleStrategy', 'replication_factor': '1'}") @@ -66,8 +65,9 @@ def test_cython_lazy_results_paged(self): Test Cython-based parser that returns an iterator, over multiple pages """ # arrays = { 'a': arr1, 'b': arr2, ... } - cluster = Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=tuple_factory)}) + cluster = TestCluster( + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=tuple_factory)} + ) session = cluster.connect(keyspace="testspace") session.client_protocol_handler = LazyProtocolHandler session.default_fetch_size = 2 @@ -99,8 +99,9 @@ def test_numpy_results_paged(self): Test Numpy-based parser that returns a NumPy array """ # arrays = { 'a': arr1, 'b': arr2, ... } - cluster = Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=tuple_factory)}) + cluster = TestCluster( + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=tuple_factory)} + ) session = cluster.connect(keyspace="testspace") session.client_protocol_handler = NumpyProtocolHandler session.default_fetch_size = 2 @@ -181,8 +182,9 @@ def get_data(protocol_handler): """ Get data from the test table. """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=tuple_factory)}) + cluster = TestCluster( + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=tuple_factory)} + ) session = cluster.connect(keyspace="testspace") # use our custom protocol handler diff --git a/tests/integration/standard/test_dse.py b/tests/integration/standard/test_dse.py index 40bcb68495..1b9b5bef84 100644 --- a/tests/integration/standard/test_dse.py +++ b/tests/integration/standard/test_dse.py @@ -16,11 +16,10 @@ from packaging.version import Version -from cassandra.cluster import Cluster from tests import notwindows from tests.unit.cython.utils import notcython from tests.integration import (execute_until_pass, - execute_with_long_wait_retry, use_cluster) + execute_with_long_wait_retry, use_cluster, TestCluster) try: import unittest2 as unittest @@ -60,8 +59,7 @@ def _test_basic(self, dse_version): ) use_cluster(cluster_name=cluster_name, nodes=[3], dse_options={}) - cluster = Cluster( - allow_beta_protocol_version=(dse_version >= Version('6.7.0'))) + cluster = TestCluster() session = cluster.connect() result = execute_until_pass( session, diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 9fae550f64..d3dea05fad 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -29,7 +29,6 @@ from cassandra import AlreadyExists, SignatureDescriptor, UserFunctionDescriptor, UserAggregateDescriptor -from cassandra.cluster import Cluster from cassandra.encoder import Encoder from cassandra.metadata import (IndexMetadata, Token, murmur3, Function, Aggregate, protect_name, protect_names, RegisteredTableExtension, _RegisteredExtensionType, get_schema_parser, @@ -42,8 +41,8 @@ greaterthanorequaldse51, greaterthanorequalcass30, lessthancass30, local, get_supported_protocol_versions, greaterthancass20, greaterthancass21, assert_startswith, greaterthanorequalcass40, - greaterthanorequaldse67, lessthancass40 -) + greaterthanorequaldse67, lessthancass40, + TestCluster) log = logging.getLogger(__name__) @@ -104,7 +103,7 @@ def test_host_release_version(self): class MetaDataRemovalTest(unittest.TestCase): def setUp(self): - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, contact_points=['127.0.0.1', '127.0.0.2', '127.0.0.3', '126.0.0.186']) + self.cluster = TestCluster(contact_points=['127.0.0.1', '127.0.0.2', '127.0.0.3', '126.0.0.186']) self.cluster.connect() def tearDown(self): @@ -138,11 +137,11 @@ def test_schema_metadata_disable(self): @test_category metadata """ # Validate metadata is missing where appropriate - no_schema = Cluster(schema_metadata_enabled=False) + no_schema = TestCluster(schema_metadata_enabled=False) no_schema_session = no_schema.connect() self.assertEqual(len(no_schema.metadata.keyspaces), 0) self.assertEqual(no_schema.metadata.export_schema_as_string(), '') - no_token = Cluster(token_metadata_enabled=False) + no_token = TestCluster(token_metadata_enabled=False) no_token_session = no_token.connect() self.assertEqual(len(no_token.metadata.token_map.token_to_host_owner), 0) @@ -548,7 +547,7 @@ def test_non_size_tiered_compaction(self): self.assertIn("'tombstone_threshold': '0.3'", cql) self.assertIn("LeveledCompactionStrategy", cql) # formerly legacy options; reintroduced in 4.0 - if CASSANDRA_VERSION < Version('4.0'): + if CASSANDRA_VERSION < Version('4.0-a'): self.assertNotIn("min_threshold", cql) self.assertNotIn("max_threshold", cql) @@ -570,7 +569,7 @@ def test_refresh_schema_metadata(self): @test_category metadata """ - cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) + cluster2 = TestCluster(schema_event_refresh_window=-1) cluster2.connect() self.assertNotIn("new_keyspace", cluster2.metadata.keyspaces) @@ -653,7 +652,7 @@ def test_refresh_keyspace_metadata(self): @test_category metadata """ - cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) + cluster2 = TestCluster(schema_event_refresh_window=-1) cluster2.connect() self.assertTrue(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes) @@ -684,7 +683,7 @@ def test_refresh_table_metadata(self): table_name = "test" self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b text)".format(self.keyspace_name, table_name)) - cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) + cluster2 = TestCluster(schema_event_refresh_window=-1) cluster2.connect() self.assertNotIn("c", cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns) @@ -718,12 +717,13 @@ def test_refresh_metadata_for_mv(self): self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b text)".format(self.keyspace_name, self.function_table_name)) - cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) + cluster2 = TestCluster(schema_event_refresh_window=-1) cluster2.connect() try: self.assertNotIn("mv1", cluster2.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) - self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT a, b FROM {0}.{1} WHERE b IS NOT NULL PRIMARY KEY (a, b)" + self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT a, b FROM {0}.{1} " + "WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (a, b)" .format(self.keyspace_name, self.function_table_name)) self.assertNotIn("mv1", cluster2.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) @@ -741,12 +741,15 @@ def test_refresh_metadata_for_mv(self): self.assertIsNot(original_meta, self.session.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views['mv1']) self.assertEqual(original_meta.as_cql_query(), current_meta.as_cql_query()) - cluster3 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) + cluster3 = TestCluster(schema_event_refresh_window=-1) cluster3.connect() try: self.assertNotIn("mv2", cluster3.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) - self.session.execute("CREATE MATERIALIZED VIEW {0}.mv2 AS SELECT a, b FROM {0}.{1} WHERE b IS NOT NULL PRIMARY KEY (a, b)" - .format(self.keyspace_name, self.function_table_name)) + self.session.execute( + "CREATE MATERIALIZED VIEW {0}.mv2 AS SELECT a, b FROM {0}.{1} " + "WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (a, b)".format( + self.keyspace_name, self.function_table_name) + ) self.assertNotIn("mv2", cluster3.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) cluster3.refresh_materialized_view_metadata(self.keyspace_name, 'mv2') self.assertIn("mv2", cluster3.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) @@ -773,7 +776,7 @@ def test_refresh_user_type_metadata(self): if PROTOCOL_VERSION < 3: raise unittest.SkipTest("Protocol 3+ is required for UDTs, currently testing against {0}".format(PROTOCOL_VERSION)) - cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) + cluster2 = TestCluster(schema_event_refresh_window=-1) cluster2.connect() self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].user_types, {}) @@ -801,7 +804,7 @@ def test_refresh_user_type_metadata_proto_2(self): raise unittest.SkipTest("Protocol versions 1 and 2 are not supported in Cassandra version ".format(CASSANDRA_VERSION)) for protocol_version in (1, 2): - cluster = Cluster(protocol_version=protocol_version) + cluster = TestCluster() session = cluster.connect() self.assertEqual(cluster.metadata.keyspaces[self.keyspace_name].user_types, {}) @@ -841,7 +844,7 @@ def test_refresh_user_function_metadata(self): if PROTOCOL_VERSION < 4: raise unittest.SkipTest("Protocol 4+ is required for UDFs, currently testing against {0}".format(PROTOCOL_VERSION)) - cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) + cluster2 = TestCluster(schema_event_refresh_window=-1) cluster2.connect() self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].functions, {}) @@ -877,7 +880,7 @@ def test_refresh_user_aggregate_metadata(self): if PROTOCOL_VERSION < 4: raise unittest.SkipTest("Protocol 4+ is required for UDAs, currently testing against {0}".format(PROTOCOL_VERSION)) - cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) + cluster2 = TestCluster(schema_event_refresh_window=-1) cluster2.connect() self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].aggregates, {}) @@ -940,7 +943,10 @@ def test_table_extensions(self): v = t + 'view' s.execute("CREATE TABLE %s.%s (k text PRIMARY KEY, v int)" % (ks, t)) - s.execute("CREATE MATERIALIZED VIEW %s.%s AS SELECT * FROM %s.%s WHERE v IS NOT NULL PRIMARY KEY (v, k)" % (ks, v, ks, t)) + s.execute( + "CREATE MATERIALIZED VIEW %s.%s AS SELECT * FROM %s.%s " + "WHERE v IS NOT NULL AND k IS NOT NULL PRIMARY KEY (v, k)" % (ks, v, ks, t) + ) table_meta = ks_meta.tables[t] view_meta = table_meta.views[v] @@ -1034,7 +1040,7 @@ def test_export_schema(self): Test export schema functionality """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() cluster.connect() self.assertIsInstance(cluster.metadata.export_schema_as_string(), six.string_types) @@ -1045,7 +1051,7 @@ def test_export_keyspace_schema(self): Test export keyspace schema functionality """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() cluster.connect() for keyspace in cluster.metadata.keyspaces: @@ -1085,7 +1091,7 @@ def test_export_keyspace_schema_udts(self): if sys.version_info[0:2] != (2, 7): raise unittest.SkipTest('This test compares static strings generated from dict items, which may change orders. Test with 2.7.') - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() session = cluster.connect() session.execute(""" @@ -1153,7 +1159,7 @@ def test_case_sensitivity(self): Test that names that need to be escaped in CREATE statements are """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() session = cluster.connect() ksname = 'AnInterestingKeyspace' @@ -1198,7 +1204,7 @@ def test_already_exists_exceptions(self): Ensure AlreadyExists exception is thrown when hit """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() session = cluster.connect() ksname = 'test3rf' @@ -1224,7 +1230,7 @@ def test_replicas(self): if murmur3 is None: raise unittest.SkipTest('the murmur3 extension is not available') - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() self.assertEqual(cluster.metadata.get_replicas('test3rf', 'key'), []) cluster.connect('test3rf') @@ -1240,7 +1246,7 @@ def test_token_map(self): Test token mappings """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() cluster.connect('test3rf') ring = cluster.metadata.token_map.ring owners = list(cluster.metadata.token_map.token_to_host_owner[token] for token in ring) @@ -1264,7 +1270,7 @@ class TokenMetadataTest(unittest.TestCase): def test_token(self): expected_node_count = len(get_cluster().nodes) - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() cluster.connect() tmap = cluster.metadata.token_map self.assertTrue(issubclass(tmap.token_class, Token)) @@ -1277,7 +1283,7 @@ class KeyspaceAlterMetadata(unittest.TestCase): Test verifies that table metadata is preserved on keyspace alter """ def setUp(self): - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.cluster = TestCluster() self.session = self.cluster.connect() name = self._testMethodName.lower() crt_ks = ''' @@ -1322,7 +1328,7 @@ def table_name(self): @classmethod def setup_class(cls): - cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.cluster = TestCluster() cls.session = cls.cluster.connect() try: if cls.keyspace_name in cls.cluster.metadata.keyspaces: @@ -1431,7 +1437,7 @@ def function_name(self): @classmethod def setup_class(cls): if PROTOCOL_VERSION >= 4: - cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.cluster = TestCluster() cls.keyspace_name = cls.__name__.lower() cls.session = cls.cluster.connect() cls.session.execute("CREATE KEYSPACE IF NOT EXISTS %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}" % cls.keyspace_name) @@ -1713,7 +1719,7 @@ def test_init_cond(self): """ # This is required until the java driver bundled with C* is updated to support v4 - c = Cluster(protocol_version=3) + c = TestCluster(protocol_version=3) s = c.connect(self.keyspace_name) encoder = Encoder() @@ -1897,7 +1903,7 @@ def function_name(self): @classmethod def setup_class(cls): - cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.cluster = TestCluster() cls.keyspace_name = cls.__name__.lower() cls.session = cls.cluster.connect() cls.session.execute("CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}" % cls.keyspace_name) @@ -2023,7 +2029,11 @@ class MaterializedViewMetadataTestSimple(BasicSharedKeyspaceUnitTestCase): def setUp(self): self.session.execute("CREATE TABLE {0}.{1} (pk int PRIMARY KEY, c int)".format(self.keyspace_name, self.function_table_name)) - self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT pk, c FROM {0}.{1} WHERE c IS NOT NULL PRIMARY KEY (pk, c)".format(self.keyspace_name, self.function_table_name)) + self.session.execute( + "CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT pk, c FROM {0}.{1} " + "WHERE pk IS NOT NULL AND c IS NOT NULL PRIMARY KEY (pk, c)".format( + self.keyspace_name, self.function_table_name) + ) def tearDown(self): self.session.execute("DROP MATERIALIZED VIEW {0}.mv1".format(self.keyspace_name)) @@ -2094,7 +2104,11 @@ def test_materialized_view_metadata_drop(self): self.assertDictEqual({}, self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views) self.assertDictEqual({}, self.cluster.metadata.keyspaces[self.keyspace_name].views) - self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT pk, c FROM {0}.{1} WHERE c IS NOT NULL PRIMARY KEY (pk, c)".format(self.keyspace_name, self.function_table_name)) + self.session.execute( + "CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT pk, c FROM {0}.{1} " + "WHERE pk IS NOT NULL AND c IS NOT NULL PRIMARY KEY (pk, c)".format( + self.keyspace_name, self.function_table_name) + ) @greaterthanorequalcass30 diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 7d3b7976e2..676a5340ef 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -26,8 +26,8 @@ from cassandra import ConsistencyLevel, WriteTimeout, Unavailable, ReadTimeout from cassandra.protocol import SyntaxException -from cassandra.cluster import Cluster, NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT -from tests.integration import get_cluster, get_node, use_singledc, PROTOCOL_VERSION, execute_until_pass +from cassandra.cluster import NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT +from tests.integration import get_cluster, get_node, use_singledc, execute_until_pass, TestCluster from greplin import scales from tests.integration import BasicSharedKeyspaceUnitTestCaseRF3WM, BasicExistingKeyspaceUnitTestCase, local @@ -42,16 +42,16 @@ class MetricsTests(unittest.TestCase): def setUp(self): contact_point = ['127.0.0.2'] - self.cluster = Cluster(contact_points=contact_point, metrics_enabled=True, protocol_version=PROTOCOL_VERSION, - execution_profiles= + self.cluster = TestCluster(contact_points=contact_point, metrics_enabled=True, + execution_profiles= {EXEC_PROFILE_DEFAULT: ExecutionProfile( load_balancing_policy=HostFilterPolicy( - RoundRobinPolicy(), lambda host: host.address in contact_point), + RoundRobinPolicy(), lambda host: host.address in contact_point), retry_policy=FallthroughRetryPolicy() ) } - ) + ) self.session = self.cluster.connect("test3rf", wait_for_all_pools=True) def tearDown(self): @@ -203,8 +203,10 @@ def test_metrics_per_cluster(self): @test_category metrics """ - cluster2 = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(retry_policy=FallthroughRetryPolicy())}) + cluster2 = TestCluster( + metrics_enabled=True, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(retry_policy=FallthroughRetryPolicy())} + ) cluster2.connect(self.ks_name, wait_for_all_pools=True) self.assertEqual(len(cluster2.metadata.all_hosts()), 3) @@ -255,13 +257,17 @@ def test_duplicate_metrics_per_cluster(self): @test_category metrics """ - cluster2 = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION, - monitor_reporting_enabled=False, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(retry_policy=FallthroughRetryPolicy())}) - - cluster3 = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION, - monitor_reporting_enabled=False, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(retry_policy=FallthroughRetryPolicy())}) + cluster2 = TestCluster( + metrics_enabled=True, + monitor_reporting_enabled=False, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(retry_policy=FallthroughRetryPolicy())} + ) + + cluster3 = TestCluster( + metrics_enabled=True, + monitor_reporting_enabled=False, + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(retry_policy=FallthroughRetryPolicy())} + ) # Ensure duplicate metric names are not allowed cluster2.metrics.set_stats_name("appcluster") diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index 53b6494437..24facf42a0 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -17,13 +17,13 @@ except ImportError: import unittest # noqa -from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT +from cassandra.cluster import ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.policies import HostFilterPolicy, RoundRobinPolicy, SimpleConvictionPolicy, \ WhiteListRoundRobinPolicy from cassandra.pool import Host from cassandra.connection import DefaultEndPoint -from tests.integration import PROTOCOL_VERSION, local, use_singledc +from tests.integration import local, use_singledc, TestCluster from concurrent.futures import wait as wait_futures @@ -55,9 +55,9 @@ def test_predicate_changes(self): hfp = ExecutionProfile( load_balancing_policy=HostFilterPolicy(RoundRobinPolicy(), predicate=predicate) ) - cluster = Cluster((contact_point,), execution_profiles={EXEC_PROFILE_DEFAULT: hfp}, - protocol_version=PROTOCOL_VERSION, topology_event_refresh_window=0, - status_event_refresh_window=0) + cluster = TestCluster(contact_points=(contact_point,), execution_profiles={EXEC_PROFILE_DEFAULT: hfp}, + topology_event_refresh_window=0, + status_event_refresh_window=0) session = cluster.connect(wait_for_all_pools=True) queried_hosts = set() @@ -84,7 +84,7 @@ class WhiteListRoundRobinPolicyTests(unittest.TestCase): def test_only_connects_to_subset(self): only_connect_hosts = {"127.0.0.1", "127.0.0.2"} white_list = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(only_connect_hosts)) - cluster = Cluster(execution_profiles={"white_list": white_list}) + cluster = TestCluster(execution_profiles={"white_list": white_list}) #cluster = Cluster(load_balancing_policy=WhiteListRoundRobinPolicy(only_connect_hosts)) session = cluster.connect(wait_for_all_pools=True) queried_hosts = set() diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index d314846e51..5c79f27346 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -13,7 +13,7 @@ # limitations under the License. -from tests.integration import use_singledc, PROTOCOL_VERSION +from tests.integration import use_singledc, PROTOCOL_VERSION, TestCluster try: import unittest2 as unittest @@ -22,7 +22,6 @@ from cassandra import InvalidRequest, DriverException from cassandra import ConsistencyLevel, ProtocolVersion -from cassandra.cluster import Cluster from cassandra.query import PreparedStatement, UNSET_VALUE from tests.integration import (get_server_versions, greaterthanorequalcass40, greaterthanorequaldse50, requirecassandra, BasicSharedKeyspaceUnitTestCase) @@ -44,8 +43,7 @@ def setUpClass(cls): cls.cass_version = get_server_versions() def setUp(self): - self.cluster = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION, - allow_beta_protocol_version=True) + self.cluster = TestCluster(metrics_enabled=True, allow_beta_protocol_version=True) self.session = self.cluster.connect() def tearDown(self): @@ -517,7 +515,7 @@ def test_prepare_id_is_updated_across_session(self): @since 3.12 @jira_ticket PYTHON-808 """ - one_cluster = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION) + one_cluster = TestCluster(metrics_enabled=True) one_session = one_cluster.connect() self.addCleanup(one_cluster.shutdown) @@ -557,7 +555,7 @@ def test_id_is_not_updated_conditional_v4(self): @since 3.13 @jira_ticket PYTHON-847 """ - cluster = Cluster(protocol_version=ProtocolVersion.V4) + cluster = TestCluster(protocol_version=ProtocolVersion.V4) session = cluster.connect() self.addCleanup(cluster.shutdown) self._test_updated_conditional(session, 9) @@ -571,7 +569,7 @@ def test_id_is_not_updated_conditional_v5(self): @since 3.13 @jira_ticket PYTHON-847 """ - cluster = Cluster(protocol_version=ProtocolVersion.V5) + cluster = TestCluster(protocol_version=ProtocolVersion.V5) session = cluster.connect() self.addCleanup(cluster.shutdown) self._test_updated_conditional(session, 10) @@ -586,7 +584,7 @@ def test_id_is_not_updated_conditional_dsev1(self): @since 3.13 @jira_ticket PYTHON-847 """ - cluster = Cluster(protocol_version=ProtocolVersion.DSE_V1) + cluster = TestCluster(protocol_version=ProtocolVersion.DSE_V1) session = cluster.connect() self.addCleanup(cluster.shutdown) self._test_updated_conditional(session, 10) @@ -601,7 +599,7 @@ def test_id_is_not_updated_conditional_dsev2(self): @since 3.13 @jira_ticket PYTHON-847 """ - cluster = Cluster(protocol_version=ProtocolVersion.DSE_V2) + cluster = TestCluster(protocol_version=ProtocolVersion.DSE_V2) session = cluster.connect() self.addCleanup(cluster.shutdown) self._test_updated_conditional(session, 10) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 63f94399a6..3e393cdb4b 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -24,11 +24,11 @@ from cassandra import ConsistencyLevel, Unavailable, InvalidRequest, cluster from cassandra.query import (PreparedStatement, BoundStatement, SimpleStatement, BatchStatement, BatchType, dict_factory, TraceUnavailable) -from cassandra.cluster import Cluster, NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT +from cassandra.cluster import NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.policies import HostDistance, RoundRobinPolicy, WhiteListRoundRobinPolicy from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, \ greaterthanprotocolv3, MockLoggingHandler, get_supported_protocol_versions, local, get_cluster, setup_keyspace, \ - USE_CASS_EXTERNAL, greaterthanorequalcass40, DSE_VERSION + USE_CASS_EXTERNAL, greaterthanorequalcass40, DSE_VERSION, TestCluster from tests import notwindows from tests.integration import greaterthanorequalcass30, get_node @@ -122,9 +122,9 @@ def test_trace_id_to_resultset(self): self.assertListEqual([rs_trace], rs.get_all_query_traces()) def test_trace_ignores_row_factory(self): - with Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=dict_factory)}) as cluster: - + with TestCluster( + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=dict_factory)} + ) as cluster: s = cluster.connect() query = "SELECT * FROM system.local" statement = SimpleStatement(query) @@ -367,7 +367,7 @@ def test_host_targeting_query(self): class PreparedStatementTests(unittest.TestCase): def setUp(self): - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.cluster = TestCluster() self.session = self.cluster.connect() def tearDown(self): @@ -488,7 +488,7 @@ def test_prepared_metadata_generation(self): base_line = None for proto_version in get_supported_protocol_versions(): beta_flag = True if proto_version in ProtocolVersion.BETA_VERSIONS else False - cluster = Cluster(protocol_version=proto_version, allow_beta_protocol_version=beta_flag) + cluster = TestCluster() session = cluster.connect() select_statement = session.prepare("SELECT * FROM system.local") @@ -523,7 +523,7 @@ def test_prepare_on_all_hosts(self): @jira_ticket PYTHON-556 @expected_result queries will have to re-prepared on hosts that aren't the control connection """ - clus = Cluster(protocol_version=PROTOCOL_VERSION, prepare_on_all_hosts=False, reprepare_on_up=False) + clus = TestCluster(prepare_on_all_hosts=False, reprepare_on_up=False) self.addCleanup(clus.shutdown) session = clus.connect(wait_for_all_pools=True) @@ -543,11 +543,10 @@ def test_prepare_batch_statement(self): and the batch statement will be sent. """ policy = ForcedHostIndexPolicy() - clus = Cluster( + clus = TestCluster( execution_profiles={ EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=policy), }, - protocol_version=PROTOCOL_VERSION, prepare_on_all_hosts=False, reprepare_on_up=False, ) @@ -588,7 +587,7 @@ def test_prepare_batch_statement_after_alter(self): @expected_result queries will have to re-prepared on hosts that aren't the control connection and the batch statement will be sent. """ - clus = Cluster(protocol_version=PROTOCOL_VERSION, prepare_on_all_hosts=False, reprepare_on_up=False) + clus = TestCluster(prepare_on_all_hosts=False, reprepare_on_up=False) self.addCleanup(clus.shutdown) table = "test3rf.%s" % self._testMethodName.lower() @@ -647,7 +646,7 @@ def test_prepared_statement(self): Highlight the difference between Prepared and Bound statements """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = TestCluster() session = cluster.connect() prepared = session.prepare('INSERT INTO test3rf.test (k, v) VALUES (?, ?)') @@ -671,7 +670,7 @@ def setUp(self): "Protocol 2.0+ is required for BATCH operations, currently testing against %r" % (PROTOCOL_VERSION,)) - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.cluster = TestCluster() if PROTOCOL_VERSION < 3: self.cluster.set_core_connections_per_host(HostDistance.LOCAL, 1) self.session = self.cluster.connect(wait_for_all_pools=True) @@ -802,7 +801,7 @@ def setUp(self): "Protocol 2.0+ is required for Serial Consistency, currently testing against %r" % (PROTOCOL_VERSION,)) - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.cluster = TestCluster() if PROTOCOL_VERSION < 3: self.cluster.set_core_connections_per_host(HostDistance.LOCAL, 1) self.session = self.cluster.connect() @@ -894,7 +893,7 @@ def setUp(self): % (PROTOCOL_VERSION,)) serial_profile = ExecutionProfile(consistency_level=ConsistencyLevel.SERIAL) - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, execution_profiles={'serial': serial_profile}) + self.cluster = TestCluster(execution_profiles={'serial': serial_profile}) self.session = self.cluster.connect() ddl = ''' @@ -944,20 +943,15 @@ def test_no_connection_refused_on_timeout(self): continue else: # In this case result is an exception - if type(result).__name__ == "NoHostAvailable": + exception_type = type(result).__name__ + if exception_type == "NoHostAvailable": self.fail("PYTHON-91: Disconnected from Cassandra: %s" % result.message) - if type(result).__name__ == "WriteTimeout": - received_timeout = True - continue - if type(result).__name__ == "WriteFailure": - received_timeout = True - continue - if type(result).__name__ == "ReadTimeout": - continue - if type(result).__name__ == "ReadFailure": + if exception_type in ["WriteTimeout", "WriteFailure", "ReadTimeout", "ReadFailure", "ErrorMessageSub"]: + if type(result).__name__ in ["WriteTimeout", "WriteFailure"]: + received_timeout = True continue - self.fail("Unexpected exception %s: %s" % (type(result).__name__, result.message)) + self.fail("Unexpected exception %s: %s" % (exception_type, result.message)) # Make sure test passed self.assertTrue(received_timeout) @@ -1084,7 +1078,7 @@ def setUp(self): raise unittest.SkipTest( "Protocol 2.0+ is required for BATCH operations, currently testing against %r" % (PROTOCOL_VERSION,)) - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.cluster = TestCluster() self.session = self.cluster.connect() query = """ INSERT INTO test3rf.test (k, v) VALUES (?, ?) @@ -1359,7 +1353,7 @@ def test_unicode(self): class BaseKeyspaceTests(): @classmethod def setUpClass(cls): - cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.cluster = TestCluster() cls.session = cls.cluster.connect(wait_for_all_pools=True) cls.ks_name = cls.__name__.lower() @@ -1425,7 +1419,7 @@ def test_setting_keyspace_and_session(self): @test_category query """ - cluster = Cluster(protocol_version=ProtocolVersion.V5, allow_beta_protocol_version=True) + cluster = TestCluster(protocol_version=ProtocolVersion.V5, allow_beta_protocol_version=True) session = cluster.connect(self.alternative_ks) self.addCleanup(cluster.shutdown) @@ -1442,8 +1436,7 @@ def test_setting_keyspace_and_session_after_created(self): @test_category query """ - pv = ProtocolVersion.DSE_V2 if DSE_VERSION else ProtocolVersion.V5 - cluster = Cluster(protocol_version=pv, allow_beta_protocol_version=True) + cluster = TestCluster() session = cluster.connect() self.addCleanup(cluster.shutdown) @@ -1461,8 +1454,7 @@ def test_setting_keyspace_and_same_session(self): @test_category query """ - pv = ProtocolVersion.DSE_V2 if DSE_VERSION else ProtocolVersion.V5 - cluster = Cluster(protocol_version=pv, allow_beta_protocol_version=True) + cluster = TestCluster() session = cluster.connect(self.ks_name) self.addCleanup(cluster.shutdown) @@ -1473,7 +1465,7 @@ def test_setting_keyspace_and_same_session(self): class SimpleWithKeyspaceTests(QueryKeyspaceTests, unittest.TestCase): @unittest.skip def test_lower_protocol(self): - cluster = Cluster(protocol_version=ProtocolVersion.V4) + cluster = TestCluster(protocol_version=ProtocolVersion.V4) session = cluster.connect(self.ks_name) self.addCleanup(cluster.shutdown) @@ -1527,7 +1519,7 @@ def confirm_results(self): class PreparedWithKeyspaceTests(BaseKeyspaceTests, unittest.TestCase): def setUp(self): - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, allow_beta_protocol_version=True) + self.cluster = TestCluster() self.session = self.cluster.connect() def tearDown(self): @@ -1603,7 +1595,7 @@ def test_prepared_not_found(self): @test_category query """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION, allow_beta_protocol_version=True) + cluster = TestCluster() session = self.cluster.connect("system") self.addCleanup(cluster.shutdown) @@ -1625,7 +1617,7 @@ def test_prepared_in_query_keyspace(self): @test_category query """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION, allow_beta_protocol_version=True) + cluster = TestCluster() session = self.cluster.connect() self.addCleanup(cluster.shutdown) diff --git a/tests/integration/standard/test_query_paging.py b/tests/integration/standard/test_query_paging.py index b1f7b39fc6..dac4ec5ce3 100644 --- a/tests/integration/standard/test_query_paging.py +++ b/tests/integration/standard/test_query_paging.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from tests.integration import use_singledc, PROTOCOL_VERSION +from tests.integration import use_singledc, PROTOCOL_VERSION, TestCluster import logging log = logging.getLogger(__name__) @@ -26,7 +26,7 @@ from threading import Event from cassandra import ConsistencyLevel -from cassandra.cluster import Cluster, EXEC_PROFILE_DEFAULT, ExecutionProfile +from cassandra.cluster import EXEC_PROFILE_DEFAULT, ExecutionProfile from cassandra.concurrent import execute_concurrent, execute_concurrent_with_args from cassandra.policies import HostDistance from cassandra.query import SimpleStatement @@ -44,8 +44,7 @@ def setUp(self): "Protocol 2.0+ is required for Paging state, currently testing against %r" % (PROTOCOL_VERSION,)) - self.cluster = Cluster( - protocol_version=PROTOCOL_VERSION, + self.cluster = TestCluster( execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(consistency_level=ConsistencyLevel.LOCAL_QUORUM)} ) if PROTOCOL_VERSION < 3: diff --git a/tests/integration/standard/test_routing.py b/tests/integration/standard/test_routing.py index bf4c7878b6..e1dabba49a 100644 --- a/tests/integration/standard/test_routing.py +++ b/tests/integration/standard/test_routing.py @@ -21,9 +21,7 @@ import logging log = logging.getLogger(__name__) -from cassandra.cluster import Cluster - -from tests.integration import use_singledc, PROTOCOL_VERSION +from tests.integration import use_singledc, TestCluster def setup_module(): @@ -38,7 +36,7 @@ def cfname(self): @classmethod def setup_class(cls): - cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.cluster = TestCluster() cls.session = cls.cluster.connect('test1rf') @classmethod diff --git a/tests/integration/standard/test_row_factories.py b/tests/integration/standard/test_row_factories.py index 48ceb1d949..93f25d9276 100644 --- a/tests/integration/standard/test_row_factories.py +++ b/tests/integration/standard/test_row_factories.py @@ -12,14 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCaseWFunctionTable, BasicSharedKeyspaceUnitTestCase, execute_until_pass +from tests.integration import get_server_versions, use_singledc, \ + BasicSharedKeyspaceUnitTestCaseWFunctionTable, BasicSharedKeyspaceUnitTestCase, execute_until_pass, TestCluster try: import unittest2 as unittest except ImportError: import unittest # noqa -from cassandra.cluster import Cluster, ResultSet, ExecutionProfile, EXEC_PROFILE_DEFAULT +from cassandra.cluster import ResultSet, ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.query import tuple_factory, named_tuple_factory, dict_factory, ordered_dict_factory from cassandra.util import OrderedDict @@ -86,8 +87,9 @@ def setUpClass(cls): cls.select = "SELECT * FROM {0}.{1}".format(cls.ks_name, cls.ks_name) def _results_from_row_factory(self, row_factory): - cluster = Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=row_factory)}) + cluster = TestCluster( + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=row_factory)} + ) with cluster: return cluster.connect().execute(self.select) @@ -174,7 +176,7 @@ class NamedTupleFactoryAndNumericColNamesTests(unittest.TestCase): """ @classmethod def setup_class(cls): - cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.cluster = TestCluster() cls.session = cls.cluster.connect() cls._cass_version, cls._cql_version = get_server_versions() ddl = ''' @@ -211,8 +213,9 @@ def test_can_select_with_dict_factory(self): """ can SELECT numeric column using dict_factory """ - with Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=dict_factory)}) as cluster: + with TestCluster( + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=dict_factory)} + ) as cluster: try: cluster.connect().execute('SELECT * FROM test1rf.table_num_col') except ValueError as e: diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index f0e56879c7..0592b7d737 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -25,16 +25,16 @@ import cassandra from cassandra import InvalidRequest from cassandra import util -from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT +from cassandra.cluster import ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.concurrent import execute_concurrent_with_args from cassandra.cqltypes import Int32Type, EMPTY from cassandra.query import dict_factory, ordered_dict_factory from cassandra.util import sortedset, Duration from tests.unit.cython.utils import cythontest -from tests.integration import use_singledc, PROTOCOL_VERSION, execute_until_pass, notprotocolv1, \ +from tests.integration import use_singledc, execute_until_pass, notprotocolv1, \ BasicSharedKeyspaceUnitTestCase, greaterthancass21, lessthancass30, greaterthanorequaldse51, \ - DSE_VERSION, greaterthanorequalcass3_10, requiredse + DSE_VERSION, greaterthanorequalcass3_10, requiredse, TestCluster from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, COLLECTION_TYPES, PRIMITIVE_DATATYPES_KEYS, \ get_sample, get_all_samples, get_collection_sample @@ -136,7 +136,7 @@ def test_can_insert_primitive_datatypes(self): """ Test insertion of all datatype primitives """ - c = Cluster(protocol_version=PROTOCOL_VERSION) + c = TestCluster() s = c.connect(self.keyspace_name) # create table @@ -217,7 +217,7 @@ def test_can_insert_collection_datatypes(self): Test insertion of all collection types """ - c = Cluster(protocol_version=PROTOCOL_VERSION) + c = TestCluster() s = c.connect(self.keyspace_name) # use tuple encoding, to convert native python tuple into raw CQL s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple @@ -449,7 +449,7 @@ def test_can_insert_tuples(self): if self.cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - c = Cluster(protocol_version=PROTOCOL_VERSION) + c = TestCluster() s = c.connect(self.keyspace_name) # use this encoder in order to insert tuples @@ -501,8 +501,9 @@ def test_can_insert_tuples_with_varying_lengths(self): if self.cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - c = Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=dict_factory)}) + c = TestCluster( + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=dict_factory)} + ) s = c.connect(self.keyspace_name) # set the encoder for tuples for the ability to write tuples @@ -539,7 +540,7 @@ def test_can_insert_tuples_all_primitive_datatypes(self): if self.cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - c = Cluster(protocol_version=PROTOCOL_VERSION) + c = TestCluster() s = c.connect(self.keyspace_name) s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple @@ -567,8 +568,9 @@ def test_can_insert_tuples_all_collection_datatypes(self): if self.cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - c = Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=dict_factory)}) + c = TestCluster( + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=dict_factory)} + ) s = c.connect(self.keyspace_name) # set the encoder for tuples for the ability to write tuples @@ -665,8 +667,9 @@ def test_can_insert_nested_tuples(self): if self.cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - c = Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=dict_factory)}) + c = TestCluster( + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=dict_factory)} + ) s = c.connect(self.keyspace_name) # set the encoder for tuples for the ability to write tuples @@ -1276,7 +1279,7 @@ def test_nested_types_with_protocol_version(self): self.read_inserts_at_level(pvr) def read_inserts_at_level(self, proto_ver): - session = Cluster(protocol_version=proto_ver).connect(self.keyspace_name) + session = TestCluster(protocol_version=proto_ver).connect(self.keyspace_name) try: results = session.execute('select * from t')[0] self.assertEqual("[SortedSet([1, 2]), SortedSet([3, 5])]", str(results.v)) @@ -1294,7 +1297,7 @@ def read_inserts_at_level(self, proto_ver): session.cluster.shutdown() def run_inserts_at_version(self, proto_ver): - session = Cluster(protocol_version=proto_ver).connect(self.keyspace_name) + session = TestCluster(protocol_version=proto_ver).connect(self.keyspace_name) try: p = session.prepare('insert into t (k, v) values (?, ?)') session.execute(p, (0, [{1, 2}, {3, 5}])) diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index 4f23f9d5a9..6d9676f25e 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -22,12 +22,12 @@ import six from cassandra import InvalidRequest -from cassandra.cluster import Cluster, UserTypeDoesNotExist, ExecutionProfile, EXEC_PROFILE_DEFAULT +from cassandra.cluster import UserTypeDoesNotExist, ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.query import dict_factory from cassandra.util import OrderedMap -from tests.integration import use_singledc, PROTOCOL_VERSION, execute_until_pass, \ - BasicSegregatedKeyspaceUnitTestCase, greaterthancass20, lessthancass30, greaterthanorequalcass36 +from tests.integration import use_singledc, execute_until_pass, \ + BasicSegregatedKeyspaceUnitTestCase, greaterthancass20, lessthancass30, greaterthanorequalcass36, TestCluster from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, PRIMITIVE_DATATYPES_KEYS, \ COLLECTION_TYPES, get_sample, get_collection_sample @@ -79,7 +79,7 @@ def test_can_insert_unprepared_registered_udts(self): Test the insertion of unprepared, registered UDTs """ - c = Cluster(protocol_version=PROTOCOL_VERSION) + c = TestCluster() s = c.connect(self.keyspace_name, wait_for_all_pools=True) s.execute("CREATE TYPE user (age int, name text)") @@ -123,7 +123,7 @@ def test_can_register_udt_before_connecting(self): Test the registration of UDTs before session creation """ - c = Cluster(protocol_version=PROTOCOL_VERSION) + c = TestCluster() s = c.connect(wait_for_all_pools=True) s.execute(""" @@ -144,7 +144,7 @@ def test_can_register_udt_before_connecting(self): # now that types are defined, shutdown and re-create Cluster c.shutdown() - c = Cluster(protocol_version=PROTOCOL_VERSION) + c = TestCluster() User1 = namedtuple('user', ('age', 'name')) User2 = namedtuple('user', ('state', 'is_cool')) @@ -181,7 +181,7 @@ def test_can_insert_prepared_unregistered_udts(self): Test the insertion of prepared, unregistered UDTs """ - c = Cluster(protocol_version=PROTOCOL_VERSION) + c = TestCluster() s = c.connect(self.keyspace_name, wait_for_all_pools=True) s.execute("CREATE TYPE user (age int, name text)") @@ -225,7 +225,7 @@ def test_can_insert_prepared_registered_udts(self): Test the insertion of prepared, registered UDTs """ - c = Cluster(protocol_version=PROTOCOL_VERSION) + c = TestCluster() s = c.connect(self.keyspace_name, wait_for_all_pools=True) s.execute("CREATE TYPE user (age int, name text)") @@ -275,7 +275,7 @@ def test_can_insert_udts_with_nulls(self): Test the insertion of UDTs with null and empty string fields """ - c = Cluster(protocol_version=PROTOCOL_VERSION) + c = TestCluster() s = c.connect(self.keyspace_name, wait_for_all_pools=True) s.execute("CREATE TYPE user (a text, b int, c uuid, d blob)") @@ -305,7 +305,7 @@ def test_can_insert_udts_with_varying_lengths(self): Test for ensuring extra-lengthy udts are properly inserted """ - c = Cluster(protocol_version=PROTOCOL_VERSION) + c = TestCluster() s = c.connect(self.keyspace_name, wait_for_all_pools=True) max_test_length = 254 @@ -385,8 +385,9 @@ def nested_udt_verification_helper(self, session, max_nesting_depth, udts): self.assertEqual(udt, result["v_{0}".format(i)]) def _cluster_default_dict_factory(self): - return Cluster(protocol_version=PROTOCOL_VERSION, - execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=dict_factory)}) + return TestCluster( + execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=dict_factory)} + ) def test_can_insert_nested_registered_udts(self): """ @@ -485,7 +486,7 @@ def test_raise_error_on_nonexisting_udts(self): Test for ensuring that an error is raised for operating on a nonexisting udt or an invalid keyspace """ - c = Cluster(protocol_version=PROTOCOL_VERSION) + c = TestCluster() s = c.connect(self.keyspace_name, wait_for_all_pools=True) User = namedtuple('user', ('age', 'name')) @@ -505,7 +506,7 @@ def test_can_insert_udt_all_datatypes(self): Test for inserting various types of PRIMITIVE_DATATYPES into UDT's """ - c = Cluster(protocol_version=PROTOCOL_VERSION) + c = TestCluster() s = c.connect(self.keyspace_name, wait_for_all_pools=True) # create UDT @@ -550,7 +551,7 @@ def test_can_insert_udt_all_collection_datatypes(self): Test for inserting various types of COLLECTION_TYPES into UDT's """ - c = Cluster(protocol_version=PROTOCOL_VERSION) + c = TestCluster() s = c.connect(self.keyspace_name, wait_for_all_pools=True) # create UDT @@ -617,7 +618,7 @@ def test_can_insert_nested_collections(self): if self.cass_version < (2, 1, 3): raise unittest.SkipTest("Support for nested collections was introduced in Cassandra 2.1.3") - c = Cluster(protocol_version=PROTOCOL_VERSION) + c = TestCluster() s = c.connect(self.keyspace_name, wait_for_all_pools=True) s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple From 8473c044c9174a47d55cc82184b79b7075a34cf0 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 26 Mar 2020 14:12:48 -0500 Subject: [PATCH 1784/2005] More test fixes --- tests/integration/__init__.py | 3 +++ tests/integration/advanced/graph/test_graph.py | 2 +- tests/integration/advanced/test_unixsocketendpoint.py | 2 +- tests/integration/standard/test_query.py | 4 ++-- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 65cd6a2f1f..32a6346c7e 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -507,6 +507,9 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, CCM_CLUSTER = DseCluster(path, cluster_name, **ccm_options) CCM_CLUSTER.set_configuration_options({'start_native_transport': True}) CCM_CLUSTER.set_configuration_options({'batch_size_warn_threshold_in_kb': 5}) + if Version(dse_version) >= Version('5.0'): + CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True}) + CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True}) if Version(dse_version) >= Version('5.1'): # For Inet4Address CCM_CLUSTER.set_dse_configuration_options({ diff --git a/tests/integration/advanced/graph/test_graph.py b/tests/integration/advanced/graph/test_graph.py index 020d631d69..a0b6534c34 100644 --- a/tests/integration/advanced/graph/test_graph.py +++ b/tests/integration/advanced/graph/test_graph.py @@ -19,7 +19,7 @@ from cassandra.protocol import SyntaxException from cassandra.policies import WhiteListRoundRobinPolicy from cassandra.cluster import NoHostAvailable -from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT, GraphExecutionProfile, +from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT, GraphExecutionProfile from cassandra.graph import single_object_row_factory, Vertex, graph_object_row_factory, \ graph_graphson2_row_factory, graph_graphson3_row_factory from cassandra.util import SortedSet diff --git a/tests/integration/advanced/test_unixsocketendpoint.py b/tests/integration/advanced/test_unixsocketendpoint.py index 1f6665964a..10cbc1b362 100644 --- a/tests/integration/advanced/test_unixsocketendpoint.py +++ b/tests/integration/advanced/test_unixsocketendpoint.py @@ -65,7 +65,7 @@ def setUpClass(cls): lbp = UnixSocketWhiteListRoundRobinPolicy([UNIX_SOCKET_PATH]) ep = ExecutionProfile(load_balancing_policy=lbp) endpoint = UnixSocketEndPoint(UNIX_SOCKET_PATH) - cls.cluster = TestCluster([endpoint], execution_profiles={EXEC_PROFILE_DEFAULT: ep}) + cls.cluster = TestCluster(contact_points=[endpoint], execution_profiles={EXEC_PROFILE_DEFAULT: ep}) @classmethod def tearDownClass(cls): diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 3e393cdb4b..d6401a987e 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -24,7 +24,7 @@ from cassandra import ConsistencyLevel, Unavailable, InvalidRequest, cluster from cassandra.query import (PreparedStatement, BoundStatement, SimpleStatement, BatchStatement, BatchType, dict_factory, TraceUnavailable) -from cassandra.cluster import NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT +from cassandra.cluster import NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT, Cluster from cassandra.policies import HostDistance, RoundRobinPolicy, WhiteListRoundRobinPolicy from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, \ greaterthanprotocolv3, MockLoggingHandler, get_supported_protocol_versions, local, get_cluster, setup_keyspace, \ @@ -488,7 +488,7 @@ def test_prepared_metadata_generation(self): base_line = None for proto_version in get_supported_protocol_versions(): beta_flag = True if proto_version in ProtocolVersion.BETA_VERSIONS else False - cluster = TestCluster() + cluster = Cluster(protocol_version=proto_version, allow_beta_protocol_version=beta_flag) session = cluster.connect() select_statement = session.prepare("SELECT * FROM system.local") From 607ff52c7521f179fc944df4dfc9ddb075fbb30d Mon Sep 17 00:00:00 2001 From: Aleksandr Sorokoumov Date: Mon, 30 Mar 2020 11:27:42 +0200 Subject: [PATCH 1785/2005] CASSANDRA-15663 Add 'default' to a list of reserved keywords --- cassandra/metadata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index a839c2206c..9f1de99615 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -49,7 +49,7 @@ cql_keywords = set(( 'add', 'aggregate', 'all', 'allow', 'alter', 'and', 'apply', 'as', 'asc', 'ascii', 'authorize', 'batch', 'begin', 'bigint', 'blob', 'boolean', 'by', 'called', 'clustering', 'columnfamily', 'compact', 'contains', 'count', - 'counter', 'create', 'custom', 'date', 'decimal', 'delete', 'desc', 'describe', 'deterministic', 'distinct', 'double', 'drop', + 'counter', 'create', 'custom', 'date', 'decimal', 'default', 'delete', 'desc', 'describe', 'deterministic', 'distinct', 'double', 'drop', 'entries', 'execute', 'exists', 'filtering', 'finalfunc', 'float', 'from', 'frozen', 'full', 'function', 'functions', 'grant', 'if', 'in', 'index', 'inet', 'infinity', 'initcond', 'input', 'insert', 'int', 'into', 'is', 'json', 'key', 'keys', 'keyspace', 'keyspaces', 'language', 'limit', 'list', 'login', 'map', 'materialized', 'modify', 'monotonic', 'nan', 'nologin', From 40fe726a358d1a844c025449a580f92f352245cf Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 11 Mar 2020 09:19:18 -0400 Subject: [PATCH 1786/2005] Support port discovery for C* 4.0 --- CHANGELOG.rst | 3 +- build.yaml | 8 +- cassandra/cluster.py | 136 ++++++++++++------ cassandra/connection.py | 19 +-- cassandra/metadata.py | 60 +++++++- cassandra/pool.py | 53 ++++++- cassandra/util.py | 10 +- tests/integration/__init__.py | 14 +- tests/integration/advanced/test_auth.py | 7 +- tests/integration/simulacron/test_endpoint.py | 16 +-- .../standard/test_control_connection.py | 28 +++- tests/integration/standard/test_metadata.py | 14 +- .../standard/test_single_interface.py | 77 ++++++++++ tests/unit/test_cluster.py | 11 ++ tests/unit/test_control_connection.py | 56 +++++++- 15 files changed, 408 insertions(+), 104 deletions(-) create mode 100644 tests/integration/standard/test_single_interface.py diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 9cd64d8fc8..9f5deaabae 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,7 +4,8 @@ Unreleased Features -------- -Transient Replication Support (PYTHON-1207) +* Transient Replication Support (PYTHON-1207) +* Support system.peers_v2 and port discovery for C* 4.0 (PYTHON-700) Bug Fixes --------- diff --git a/build.yaml b/build.yaml index 83bed55a09..bd40809ef3 100644 --- a/build.yaml +++ b/build.yaml @@ -21,7 +21,7 @@ schedules: matrix: exclude: - python: [3.4, 3.6, 3.7, 3.8] - - cassandra: ['2.1', '3.0', '4.0', 'test-dse'] + - cassandra: ['2.1', '3.0', 'test-dse'] commit_branches: schedule: per_commit @@ -34,7 +34,7 @@ schedules: matrix: exclude: - python: [3.4, 3.6, 3.7, 3.8] - - cassandra: ['2.1', '3.0', '4.0', 'test-dse'] + - cassandra: ['2.1', '3.0', 'test-dse'] commit_branches_dev: schedule: per_commit @@ -184,9 +184,11 @@ build: pip install --upgrade pip pip install -U setuptools + pip install git+ssh://git@github.com/riptano/ccm-private.git@cassandra-7544-native-ports-with-dse-fix + # Remove this pyyaml installation when removing Python 3.4 support pip install PyYAML==5.2 - pip install $HOME/ccm + #pip install $HOME/ccm if [ -n "$CCM_IS_DSE" ]; then pip install -r test-datastax-requirements.txt diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 255d035dac..7120bdb9e1 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -64,7 +64,7 @@ RESULT_KIND_SET_KEYSPACE, RESULT_KIND_ROWS, RESULT_KIND_SCHEMA_CHANGE, ProtocolHandler, RESULT_KIND_VOID) -from cassandra.metadata import Metadata, protect_name, murmur3 +from cassandra.metadata import Metadata, protect_name, murmur3, _NodeInfo from cassandra.policies import (TokenAwarePolicy, DCAwareRoundRobinPolicy, SimpleConvictionPolicy, ExponentialReconnectionPolicy, HostDistance, RetryPolicy, IdentityTranslator, NoSpeculativeExecutionPlan, @@ -581,7 +581,7 @@ class Cluster(object): contact_points = ['127.0.0.1'] """ The list of contact points to try connecting for cluster discovery. A - contact point can be a string (ip, hostname) or a + contact point can be a string (ip or hostname), a tuple (ip/hostname, port) or a :class:`.connection.EndPoint` instance. Defaults to loopback interface. @@ -1152,20 +1152,24 @@ def __init__(self, self.endpoint_factory = endpoint_factory or DefaultEndPointFactory(port=self.port) self.endpoint_factory.configure(self) - raw_contact_points = [cp for cp in self.contact_points if not isinstance(cp, EndPoint)] + raw_contact_points = [] + for cp in [cp for cp in self.contact_points if not isinstance(cp, EndPoint)]: + raw_contact_points.append(cp if isinstance(cp, tuple) else (cp, port)) + self.endpoints_resolved = [cp for cp in self.contact_points if isinstance(cp, EndPoint)] self._endpoint_map_for_insights = {repr(ep): '{ip}:{port}'.format(ip=ep.address, port=ep.port) for ep in self.endpoints_resolved} - strs_resolved_map = _resolve_contact_points_to_string_map(raw_contact_points, port) + strs_resolved_map = _resolve_contact_points_to_string_map(raw_contact_points) self.endpoints_resolved.extend(list(chain( *[ - [DefaultEndPoint(x, port) for x in xs if x is not None] + [DefaultEndPoint(ip, port) for ip, port in xs if ip is not None] for xs in strs_resolved_map.values() if xs is not None ] ))) + self._endpoint_map_for_insights.update( - {key: ['{ip}:{port}'.format(ip=ip, port=port) for ip in value] + {key: ['{ip}:{port}'.format(ip=ip, port=port) for ip, port in value] for key, value in strs_resolved_map.items() if value is not None} ) @@ -3420,8 +3424,17 @@ class ControlConnection(object): _SELECT_SCHEMA_PEERS_TEMPLATE = "SELECT peer, host_id, {nt_col_name}, schema_version FROM system.peers" _SELECT_SCHEMA_LOCAL = "SELECT schema_version FROM system.local WHERE key='local'" + _SELECT_PEERS_V2 = "SELECT * FROM system.peers_v2" + _SELECT_PEERS_NO_TOKENS_V2 = "SELECT host_id, peer, peer_port, data_center, rack, native_address, native_port, release_version, schema_version FROM system.peers_v2" + _SELECT_SCHEMA_PEERS_V2 = "SELECT host_id, peer, peer_port, native_address, native_port, schema_version FROM system.peers_v2" + _MINIMUM_NATIVE_ADDRESS_DSE_VERSION = Version("6.0.0") + class PeersQueryType(object): + """internal Enum for _peers_query""" + PEERS = 0 + PEERS_SCHEMA = 1 + _is_shutdown = False _timeout = None _protocol_version = None @@ -3433,6 +3446,8 @@ class ControlConnection(object): _schema_meta_enabled = True _token_meta_enabled = True + _uses_peers_v2 = True + # for testing purposes _time = time @@ -3547,13 +3562,25 @@ def _try_connect(self, host): "SCHEMA_CHANGE": partial(_watch_callback, self_weakref, '_handle_schema_change') }, register_timeout=self._timeout) - sel_peers = self._peers_query_for_version(connection, self._SELECT_PEERS_NO_TOKENS_TEMPLATE) + sel_peers = self._get_peers_query(self.PeersQueryType.PEERS, connection) sel_local = self._SELECT_LOCAL if self._token_meta_enabled else self._SELECT_LOCAL_NO_TOKENS peers_query = QueryMessage(query=sel_peers, consistency_level=ConsistencyLevel.ONE) local_query = QueryMessage(query=sel_local, consistency_level=ConsistencyLevel.ONE) - shared_results = connection.wait_for_responses( - peers_query, local_query, timeout=self._timeout) + (peers_success, peers_result), (local_success, local_result) = connection.wait_for_responses( + peers_query, local_query, timeout=self._timeout, fail_on_error=False) + + if not local_success: + raise local_result + if not peers_success: + # error with the peers v2 query, fallback to peers v1 + self._uses_peers_v2 = False + sel_peers = self._get_peers_query(self.PeersQueryType.PEERS, connection) + peers_query = QueryMessage(query=sel_peers, consistency_level=ConsistencyLevel.ONE) + peers_result = connection.wait_for_response( + peers_query, timeout=self._timeout) + + shared_results = (peers_result, local_result) self._refresh_node_list_and_token_map(connection, preloaded_results=shared_results) self._refresh_schema(connection, preloaded_results=shared_results, schema_agreement_wait=-1) except Exception: @@ -3675,20 +3702,18 @@ def refresh_node_list_and_token_map(self, force_token_rebuild=False): def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, force_token_rebuild=False): - if preloaded_results: log.debug("[control connection] Refreshing node list and token map using preloaded results") peers_result = preloaded_results[0] local_result = preloaded_results[1] else: cl = ConsistencyLevel.ONE + sel_peers = self._get_peers_query(self.PeersQueryType.PEERS, connection) if not self._token_meta_enabled: log.debug("[control connection] Refreshing node list without token map") - sel_peers = self._peers_query_for_version(connection, self._SELECT_PEERS_NO_TOKENS_TEMPLATE) sel_local = self._SELECT_LOCAL_NO_TOKENS else: log.debug("[control connection] Refreshing node list and token map") - sel_peers = self._SELECT_PEERS sel_local = self._SELECT_LOCAL peers_query = QueryMessage(query=sel_peers, consistency_level=cl) local_query = QueryMessage(query=sel_local, consistency_level=cl) @@ -3718,13 +3743,17 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, self._update_location_info(host, datacenter, rack) host.host_id = local_row.get("host_id") host.listen_address = local_row.get("listen_address") - host.broadcast_address = local_row.get("broadcast_address") + host.listen_port = local_row.get("listen_port") + host.broadcast_address = _NodeInfo.get_broadcast_address(local_row) + host.broadcast_port = _NodeInfo.get_broadcast_port(local_row) - host.broadcast_rpc_address = self._address_from_row(local_row) + host.broadcast_rpc_address = _NodeInfo.get_broadcast_rpc_address(local_row) + host.broadcast_rpc_port = _NodeInfo.get_broadcast_rpc_port(local_row) if host.broadcast_rpc_address is None: if self._token_meta_enabled: # local rpc_address is not available, use the connection endpoint host.broadcast_rpc_address = connection.endpoint.address + host.broadcast_rpc_port = connection.endpoint.port else: # local rpc_address has not been queried yet, try to fetch it # separately, which might fail because C* < 2.1.6 doesn't have rpc_address @@ -3737,9 +3766,11 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, row = dict_factory( local_rpc_address_result.column_names, local_rpc_address_result.parsed_rows) - host.broadcast_rpc_address = row[0]['rpc_address'] + host.broadcast_rpc_address = _NodeInfo.get_broadcast_rpc_address(row[0]) + host.broadcast_rpc_port = _NodeInfo.get_broadcast_rpc_port(row[0]) else: host.broadcast_rpc_address = connection.endpoint.address + host.broadcast_rpc_port = connection.endpoint.port host.release_version = local_row.get("release_version") host.dse_version = local_row.get("dse_version") @@ -3777,8 +3808,10 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, should_rebuild_token_map |= self._update_location_info(host, datacenter, rack) host.host_id = row.get("host_id") - host.broadcast_address = row.get("peer") - host.broadcast_rpc_address = self._address_from_row(row) + host.broadcast_address = _NodeInfo.get_broadcast_address(row) + host.broadcast_port = _NodeInfo.get_broadcast_port(row) + host.broadcast_rpc_address = _NodeInfo.get_broadcast_rpc_address(row) + host.broadcast_rpc_port = _NodeInfo.get_broadcast_rpc_port(row) host.release_version = row.get("release_version") host.dse_version = row.get("dse_version") host.dse_workload = row.get("workload") @@ -3834,7 +3867,8 @@ def _refresh_nodes_if_not_up(self, host): def _handle_topology_change(self, event): change_type = event["change_type"] - host = self._cluster.metadata.get_host(event["address"][0]) + addr, port = event["address"] + host = self._cluster.metadata.get_host(addr, port) if change_type == "NEW_NODE" or change_type == "MOVED_NODE": if self._topology_event_refresh_window >= 0: delay = self._delay_for_event_type('topology_change', self._topology_event_refresh_window) @@ -3844,7 +3878,8 @@ def _handle_topology_change(self, event): def _handle_status_change(self, event): change_type = event["change_type"] - host = self._cluster.metadata.get_host(event["address"][0]) + addr, port = event["address"] + host = self._cluster.metadata.get_host(addr, port) if change_type == "UP": delay = self._delay_for_event_type('status_change', self._status_event_refresh_window) if host is None: @@ -3898,7 +3933,7 @@ def wait_for_schema_agreement(self, connection=None, preloaded_results=None, wai elapsed = 0 cl = ConsistencyLevel.ONE schema_mismatches = None - select_peers_query = self._peers_query_for_version(connection, self._SELECT_SCHEMA_PEERS_TEMPLATE) + select_peers_query = self._get_peers_query(self.PeersQueryType.PEERS_SCHEMA, connection) while elapsed < total_timeout: peers_query = QueryMessage(query=select_peers_query, consistency_level=cl) @@ -3955,43 +3990,50 @@ def _get_schema_mismatches(self, peers_result, local_result, local_address): return dict((version, list(nodes)) for version, nodes in six.iteritems(versions)) - def _address_from_row(self, row): + def _get_peers_query(self, peers_query_type, connection=None): """ - Parse the broadcast rpc address from a row and return it untranslated. - """ - addr = None - if "rpc_address" in row: - addr = row.get("rpc_address") # peers and local - if "native_transport_address" in row: - addr = row.get("native_transport_address") - if not addr or addr in ["0.0.0.0", "::"]: - addr = row.get("peer") - return addr + Determine the peers query to use. + + :param peers_query_type: Should be one of PeersQueryType enum. + + If _uses_peers_v2 is True, return the proper peers_v2 query (no templating). + Else, apply the logic below to choose the peers v1 address column name: - def _peers_query_for_version(self, connection, peers_query_template): - """ Given a connection: - find the server product version running on the connection's host, - use that to choose the column name for the transport address (see APOLLO-1130), and - use that column name in the provided peers query template. - - The provided template should be a string with a format replacement - field named nt_col_name. """ - host_release_version = self._cluster.metadata.get_host(connection.endpoint).release_version - host_dse_version = self._cluster.metadata.get_host(connection.endpoint).dse_version - uses_native_address_query = ( - host_dse_version and Version(host_dse_version) >= self._MINIMUM_NATIVE_ADDRESS_DSE_VERSION) + if peers_query_type not in (self.PeersQueryType.PEERS, self.PeersQueryType.PEERS_SCHEMA): + raise ValueError("Invalid peers query type: %s" % peers_query_type) - if uses_native_address_query: - select_peers_query = peers_query_template.format(nt_col_name="native_transport_address") - elif host_release_version: - select_peers_query = peers_query_template.format(nt_col_name="rpc_address") + if self._uses_peers_v2: + if peers_query_type == self.PeersQueryType.PEERS: + query = self._SELECT_PEERS_V2 if self._token_meta_enabled else self._SELECT_PEERS_NO_TOKENS_V2 + else: + query = self._SELECT_SCHEMA_PEERS_V2 else: - select_peers_query = self._SELECT_PEERS + if peers_query_type == self.PeersQueryType.PEERS and self._token_meta_enabled: + query = self._SELECT_PEERS + else: + query_template = (self._SELECT_SCHEMA_PEERS_TEMPLATE + if peers_query_type == self.PeersQueryType.PEERS_SCHEMA + else self._SELECT_PEERS_NO_TOKENS_TEMPLATE) + + host_release_version = self._cluster.metadata.get_host(connection.endpoint).release_version + host_dse_version = self._cluster.metadata.get_host(connection.endpoint).dse_version + uses_native_address_query = ( + host_dse_version and Version(host_dse_version) >= self._MINIMUM_NATIVE_ADDRESS_DSE_VERSION) + + if uses_native_address_query: + query = query_template.format(nt_col_name="native_transport_address") + elif host_release_version: + query = query_template.format(nt_col_name="rpc_address") + else: + query = self._SELECT_PEERS - return select_peers_query + return query def _signal_error(self): with self._lock: @@ -4181,7 +4223,7 @@ class ResponseFuture(object): coordinator_host = None """ - The host from which we recieved a response + The host from which we received a response """ attempted_hosts = None diff --git a/cassandra/connection.py b/cassandra/connection.py index 66af1f8521..f30be682a6 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -214,25 +214,26 @@ class DefaultEndPointFactory(EndPointFactory): port = None """ - If set, force all endpoints to use this port. + If no port is discovered in the row, this is the default port + used for endpoint creation. """ def __init__(self, port=None): self.port = port def create(self, row): - addr = None - if "rpc_address" in row: - addr = row.get("rpc_address") - if "native_transport_address" in row: - addr = row.get("native_transport_address") - if not addr or addr in ["0.0.0.0", "::"]: - addr = row.get("peer") + # TODO next major... move this class so we don't need this kind of hack + from cassandra.metadata import _NodeInfo + addr = _NodeInfo.get_broadcast_rpc_address(row) + port = _NodeInfo.get_broadcast_rpc_port(row) + if port is None: + port = self.port if self.port else 9042 # create the endpoint with the translated address + # TODO next major, create a TranslatedEndPoint type return DefaultEndPoint( self.cluster.address_translator.translate(addr), - self.port if self.port is not None else 9042) + port) @total_ordering diff --git a/cassandra/metadata.py b/cassandra/metadata.py index a839c2206c..6cbc4e85cf 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -338,20 +338,23 @@ def remove_host(self, host): with self._hosts_lock: return bool(self._hosts.pop(host.endpoint, False)) - def get_host(self, endpoint_or_address): + def get_host(self, endpoint_or_address, port=None): """ - Find a host in the metadata for a specific endpoint. If a string inet address is passed, - iterate all hosts to match the :attr:`~.pool.Host.broadcast_rpc_address` attribute. + Find a host in the metadata for a specific endpoint. If a string inet address and port are passed, + iterate all hosts to match the :attr:`~.pool.Host.broadcast_rpc_address` and + :attr:`~.pool.Host.broadcast_rpc_port`attributes. """ if not isinstance(endpoint_or_address, EndPoint): - return self._get_host_by_address(endpoint_or_address) + return self._get_host_by_address(endpoint_or_address, port) return self._hosts.get(endpoint_or_address) - def _get_host_by_address(self, address): + def _get_host_by_address(self, address, port=None): for host in six.itervalues(self._hosts): - if host.broadcast_rpc_address == address: + if (host.broadcast_rpc_address == address and + (port is None or host.broadcast_rpc_port is None or host.broadcast_rpc_port == port)): return host + return None def all_hosts(self): @@ -3316,3 +3319,48 @@ def group_keys_by_replica(session, keyspace, table, keys): return dict(keys_per_host) + +# TODO next major reorg +class _NodeInfo(object): + """ + Internal utility functions to determine the different host addresses/ports + from a local or peers row. + """ + + @staticmethod + def get_broadcast_rpc_address(row): + # TODO next major, change the parsing logic to avoid any + # overriding of a non-null value + addr = row.get("rpc_address") + if "native_address" in row: + addr = row.get("native_address") + if "native_transport_address" in row: + addr = row.get("native_transport_address") + if not addr or addr in ["0.0.0.0", "::"]: + addr = row.get("peer") + + return addr + + @staticmethod + def get_broadcast_rpc_port(row): + port = row.get("rpc_port") + if port is None or port == 0: + port = row.get("native_port") + + return port if port and port > 0 else None + + @staticmethod + def get_broadcast_address(row): + addr = row.get("broadcast_address") + if addr is None: + addr = row.get("peer") + + return addr + + @staticmethod + def get_broadcast_port(row): + port = row.get("broadcast_port") + if port is None or port == 0: + port = row.get("peer_port") + + return port if port and port > 0 else None diff --git a/cassandra/pool.py b/cassandra/pool.py index a4429aeed6..87e8f03716 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -55,21 +55,60 @@ class Host(object): broadcast_address = None """ - broadcast address configured for the node, *if available* ('peer' in system.peers table). - This is not present in the ``system.local`` table for older versions of Cassandra. It is also not queried if - :attr:`~.Cluster.token_metadata_enabled` is ``False``. + broadcast address configured for the node, *if available*: + + 'system.local.broadcast_address' or 'system.peers.peer' (Cassandra 2-3) + 'system.local.broadcast_address' or 'system.peers_v2.peer' (Cassandra 4) + + This is not present in the ``system.local`` table for older versions of Cassandra. It + is also not queried if :attr:`~.Cluster.token_metadata_enabled` is ``False``. + """ + + broadcast_port = None + """ + broadcast port configured for the node, *if available*: + + 'system.local.broadcast_port' or 'system.peers_v2.peer_port' (Cassandra 4) + + It is also not queried if :attr:`~.Cluster.token_metadata_enabled` is ``False``. """ broadcast_rpc_address = None """ - The broadcast rpc address of the node (`native_address` or `rpc_address`). + The broadcast rpc address of the node: + + 'system.local.rpc_address' or 'system.peers.rpc_address' (Cassandra 3) + 'system.local.rpc_address' or 'system.peers.native_transport_address (DSE 6+)' + 'system.local.rpc_address' or 'system.peers_v2.native_address (Cassandra 4)' + """ + + broadcast_rpc_port = None + """ + The broadcast rpc port of the node, *if available*: + + 'system.local.rpc_port' or 'system.peers.native_transport_port' (DSE 6+) + 'system.local.rpc_port' or 'system.peers_v2.native_port' (Cassandra 4) """ listen_address = None """ - listen address configured for the node, *if available*. This is only available in the ``system.local`` table for newer - versions of Cassandra. It is also not queried if :attr:`~.Cluster.token_metadata_enabled` is ``False``. - Usually the same as ``broadcast_address`` unless configured differently in cassandra.yaml. + listen address configured for the node, *if available*: + + 'system.local.listen_address' + + This is only available in the ``system.local`` table for newer versions of Cassandra. It is also not + queried if :attr:`~.Cluster.token_metadata_enabled` is ``False``. Usually the same as ``broadcast_address`` + unless configured differently in cassandra.yaml. + """ + + listen_port = None + """ + listen port configured for the node, *if available*: + + 'system.local.listen_port' + + This is only available in the ``system.local`` table for newer versions of Cassandra. It is also not + queried if :attr:`~.Cluster.token_metadata_enabled` is ``False``. """ conviction_policy = None diff --git a/cassandra/util.py b/cassandra/util.py index 0651591203..0a00533b33 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -189,17 +189,17 @@ def _addrinfo_to_ip_strings(addrinfo): extracts the IP address from the sockaddr portion of the result. Since this is meant to be used in conjunction with _addrinfo_or_none, - this will pass None and EndPont instances through unaffected. + this will pass None and EndPoint instances through unaffected. """ if addrinfo is None: return None - return [entry[4][0] for entry in addrinfo] + return [(entry[4][0], entry[4][1]) for entry in addrinfo] -def _resolve_contact_points_to_string_map(contact_points, port): +def _resolve_contact_points_to_string_map(contact_points): return OrderedDict( - (cp, _addrinfo_to_ip_strings(_addrinfo_or_none(cp, port))) - for cp in contact_points + ('{cp}:{port}'.format(cp=cp, port=port), _addrinfo_to_ip_strings(_addrinfo_or_none(cp, port))) + for cp, port in contact_points ) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 32a6346c7e..52b1286ebb 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -200,6 +200,8 @@ def _get_dse_version_from_cass(cass_version): ALLOW_BETA_PROTOCOL = False + + def get_default_protocol(): if CASSANDRA_VERSION >= Version('4.0-a'): if DSE_VERSION: @@ -340,6 +342,7 @@ def _id_and_mark(f): "This test is not suitible for environments with large clock granularity") requiressimulacron = unittest.skipIf(SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"), "Simulacron jar hasn't been specified or C* version is 2.0") requirecassandra = unittest.skipIf(DSE_VERSION, "Cassandra required") +notdse = unittest.skipIf(DSE_VERSION, "DSE not supported") requiredse = unittest.skipUnless(DSE_VERSION, "DSE required") requirescloudproxy = unittest.skipIf(CLOUD_PROXY_PATH is None, "Cloud Proxy path hasn't been specified") @@ -368,6 +371,9 @@ def check_socket_listening(itf, timeout=60): return False +USE_SINGLE_INTERFACE = os.getenv('USE_SINGLE_INTERFACE', False) + + def get_cluster(): return CCM_CLUSTER @@ -380,8 +386,8 @@ def use_multidc(dc_list, workloads=[]): use_cluster(MULTIDC_CLUSTER_NAME, dc_list, start=True, workloads=workloads) -def use_singledc(start=True, workloads=[]): - use_cluster(CLUSTER_NAME, [3], start=start, workloads=workloads) +def use_singledc(start=True, workloads=[], use_single_interface=USE_SINGLE_INTERFACE): + use_cluster(CLUSTER_NAME, [3], start=start, workloads=workloads, use_single_interface=use_single_interface) def use_single_node(start=True, workloads=[], configuration_options={}, dse_options={}): @@ -446,7 +452,7 @@ def start_cluster_wait_for_up(cluster): def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, set_keyspace=True, ccm_options=None, - configuration_options={}, dse_options={}): + configuration_options={}, dse_options={}, use_single_interface=USE_SINGLE_INTERFACE): dse_cluster = True if DSE_VERSION else False if not workloads: workloads = [] @@ -553,7 +559,7 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, }) common.switch_cluster(path, cluster_name) CCM_CLUSTER.set_configuration_options(configuration_options) - CCM_CLUSTER.populate(nodes, ipformat=ipformat) + CCM_CLUSTER.populate(nodes, ipformat=ipformat, use_single_interface=use_single_interface) try: jvm_args = [] diff --git a/tests/integration/advanced/test_auth.py b/tests/integration/advanced/test_auth.py index 748304aef4..7e9aa8c23e 100644 --- a/tests/integration/advanced/test_auth.py +++ b/tests/integration/advanced/test_auth.py @@ -50,16 +50,17 @@ def teardown_module(): def wait_role_manager_setup_then_execute(session, statements): for s in statements: - e = None + exc = None for attempt in range(3): try: session.execute(s) break except Exception as e: + exc = e time.sleep(5) else: # if we didn't reach `break` - if e is not None: - raise e + if exc is not None: + raise exc @attr('long') diff --git a/tests/integration/simulacron/test_endpoint.py b/tests/integration/simulacron/test_endpoint.py index ba625765c7..691fcc8718 100644 --- a/tests/integration/simulacron/test_endpoint.py +++ b/tests/integration/simulacron/test_endpoint.py @@ -19,7 +19,8 @@ from functools import total_ordering from cassandra.cluster import Cluster -from cassandra.connection import DefaultEndPoint, EndPoint, EndPointFactory +from cassandra.connection import DefaultEndPoint, EndPoint, DefaultEndPointFactory +from cassandra.metadata import _NodeInfo from tests.integration import requiressimulacron from tests.integration.simulacron import SimulacronCluster, PROTOCOL_VERSION @@ -59,17 +60,10 @@ def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self.address) -class AddressEndPointFactory(EndPointFactory): +class AddressEndPointFactory(DefaultEndPointFactory): def create(self, row): - addr = None - if "rpc_address" in row: - addr = row.get("rpc_address") - if "native_transport_address" in row: - addr = row.get("native_transport_address") - if not addr or addr in ["0.0.0.0", "::"]: - addr = row.get("peer") - + addr = _NodeInfo.get_broadcast_rpc_address(row) return AddressEndPoint(addr) @@ -85,6 +79,7 @@ class EndPointTests(SimulacronCluster): def test_default_endpoint(self): hosts = self.cluster.metadata.all_hosts() + self.assertEqual(len(hosts), 3) for host in hosts: self.assertIsNotNone(host.endpoint) self.assertIsInstance(host.endpoint, DefaultEndPoint) @@ -106,6 +101,7 @@ def test_custom_endpoint(self): cluster.connect(wait_for_all_pools=True) hosts = cluster.metadata.all_hosts() + self.assertEqual(len(hosts), 3) for host in hosts: self.assertIsNotNone(host.endpoint) self.assertIsInstance(host.endpoint, AddressEndPoint) diff --git a/tests/integration/standard/test_control_connection.py b/tests/integration/standard/test_control_connection.py index fe02de43da..db7cff8506 100644 --- a/tests/integration/standard/test_control_connection.py +++ b/tests/integration/standard/test_control_connection.py @@ -23,7 +23,7 @@ from cassandra.protocol import ConfigurationException -from tests.integration import use_singledc, PROTOCOL_VERSION, TestCluster +from tests.integration import use_singledc, PROTOCOL_VERSION, TestCluster, greaterthanorequalcass40, notdse from tests.integration.datatype_utils import update_datatypes @@ -103,3 +103,29 @@ def test_get_control_connection_host(self): new_host = self.cluster.get_control_connection_host() self.assertNotEqual(host, new_host) + @notdse + @greaterthanorequalcass40 + def test_control_connection_port_discovery(self): + """ + Test to validate that the correct port is discovered when peersV2 is used (C* 4.0+). + + Unit tests already validate that the port can be picked up (or not) from the query. This validates + it picks up the correct port from a real server and is able to connect. + """ + self.cluster = TestCluster() + + host = self.cluster.get_control_connection_host() + self.assertEqual(host, None) + + self.session = self.cluster.connect() + cc_endpoint = self.cluster.control_connection._connection.endpoint + + host = self.cluster.get_control_connection_host() + self.assertEqual(host.endpoint, cc_endpoint) + self.assertEqual(host.is_up, True) + hosts = self.cluster.metadata.all_hosts() + self.assertEqual(3, len(hosts)) + + for host in hosts: + self.assertEqual(9042, host.broadcast_rpc_port) + self.assertEqual(7000, host.broadcast_port) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index d3dea05fad..bd556f357d 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -42,7 +42,7 @@ get_supported_protocol_versions, greaterthancass20, greaterthancass21, assert_startswith, greaterthanorequalcass40, greaterthanorequaldse67, lessthancass40, - TestCluster) + TestCluster, DSE_VERSION) log = logging.getLogger(__name__) @@ -52,11 +52,12 @@ def setup_module(): use_singledc() -class HostMetatDataTests(BasicExistingKeyspaceUnitTestCase): +class HostMetaDataTests(BasicExistingKeyspaceUnitTestCase): @local - def test_broadcast_listen_address(self): + def test_host_addresses(self): """ - Check to ensure that the broadcast, rpc_address, listen adresss and host are is populated correctly + Check to ensure that the broadcast_address, broadcast_rpc_address, + listen adresss, ports and host are is populated correctly. @since 3.3 @jira_ticket PYTHON-332 @@ -69,6 +70,11 @@ def test_broadcast_listen_address(self): self.assertIsNotNone(host.broadcast_address) self.assertIsNotNone(host.broadcast_rpc_address) self.assertIsNotNone(host.host_id) + + if not DSE_VERSION and CASSANDRA_VERSION >= Version('4-a'): + self.assertIsNotNone(host.broadcast_port) + self.assertIsNotNone(host.broadcast_rpc_port) + con = self.cluster.control_connection.get_connections()[0] local_host = con.host diff --git a/tests/integration/standard/test_single_interface.py b/tests/integration/standard/test_single_interface.py new file mode 100644 index 0000000000..91451a52a0 --- /dev/null +++ b/tests/integration/standard/test_single_interface.py @@ -0,0 +1,77 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +import six + +from cassandra import ConsistencyLevel +from cassandra.query import SimpleStatement + +from packaging.version import Version +from tests.integration import use_singledc, PROTOCOL_VERSION, \ + remove_cluster, greaterthanorequalcass40, notdse, \ + CASSANDRA_VERSION, DSE_VERSION, TestCluster + + +def setup_module(): + if not DSE_VERSION and CASSANDRA_VERSION >= Version('4-a'): + remove_cluster() + use_singledc(use_single_interface=True) + +def teardown_module(): + remove_cluster() + + +@notdse +@greaterthanorequalcass40 +class SingleInterfaceTest(unittest.TestCase): + + def setUp(self): + self.cluster = TestCluster() + self.session = self.cluster.connect() + + def tearDown(self): + if self.cluster is not None: + self.cluster.shutdown() + + def test_single_interface(self): + """ + Test that we can connect to a multiple hosts bound to a single interface. + """ + hosts = self.cluster.metadata._hosts + broadcast_rpc_ports = [] + broadcast_ports = [] + self.assertEqual(len(hosts), 3) + for endpoint, host in six.iteritems(hosts): + + self.assertEqual(endpoint.address, host.broadcast_rpc_address) + self.assertEqual(endpoint.port, host.broadcast_rpc_port) + + if host.broadcast_rpc_port in broadcast_rpc_ports: + self.fail("Duplicate broadcast_rpc_port") + broadcast_rpc_ports.append(host.broadcast_rpc_port) + if host.broadcast_port in broadcast_ports: + self.fail("Duplicate broadcast_port") + broadcast_ports.append(host.broadcast_port) + + for _ in range(1, 100): + self.session.execute(SimpleStatement("select * from system_distributed.view_build_status", + consistency_level=ConsistencyLevel.ALL)) + + for pool in self.session.get_pools(): + self.assertEquals(1, pool.get_state()['open_count']) diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 9478575cbf..249c0a17cc 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -90,6 +90,17 @@ def test_exception_types(self): class ClusterTest(unittest.TestCase): + def test_tuple_for_contact_points(self): + cluster = Cluster(contact_points=[('localhost', 9045), ('127.0.0.2', 9046), '127.0.0.3'], port=9999) + for cp in cluster.endpoints_resolved: + if cp.address in ('::1', '127.0.0.1'): + self.assertEqual(cp.port, 9045) + elif cp.address == '127.0.0.2': + self.assertEqual(cp.port, 9046) + else: + self.assertEqual(cp.address, '127.0.0.3') + self.assertEqual(cp.port, 9999) + def test_invalid_contact_point_types(self): with self.assertRaises(ValueError): Cluster(contact_points=[None], protocol_version=4, connect_timeout=1) diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index db194fe0c7..3e75a0af27 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -49,10 +49,11 @@ def __init__(self): self.partitioner = None self.token_map = {} - def get_host(self, endpoint_or_address): + def get_host(self, endpoint_or_address, port=None): if not isinstance(endpoint_or_address, EndPoint): for host in six.itervalues(self.hosts): - if host.address == endpoint_or_address: + if (host.address == endpoint_or_address and + (port is None or host.broadcast_rpc_port is None or host.broadcast_rpc_port == port)): return host else: return self.hosts.get(endpoint_or_address) @@ -87,7 +88,7 @@ def __init__(self): def add_host(self, endpoint, datacenter, rack, signal=False, refresh_nodes=True): host = Host(endpoint, SimpleConvictionPolicy, datacenter, rack) self.added_hosts.append(host) - return host + return host, True def remove_host(self, host): self.removed_hosts.append(host) @@ -130,6 +131,12 @@ def __init__(self): [["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"]], ["192.168.1.2", "10.0.0.2", "a", "dc1", "rack1", ["2", "102", "202"]]] ] + + self.peer_results_v2 = [ + ["native_address", "native_port", "peer", "peer_port", "schema_version", "data_center", "rack", "tokens"], + [["192.168.1.1", 9042, "10.0.0.1", 7042, "a", "dc1", "rack1", ["1", "101", "201"]], + ["192.168.1.2", 9042, "10.0.0.2", 7040, "a", "dc1", "rack1", ["2", "102", "202"]]] + ] self.wait_for_responses = Mock(return_value=_node_meta_results(self.local_results, self.peer_results)) @@ -347,6 +354,7 @@ def test_handle_topology_change(self): } self.cluster.scheduler.reset_mock() self.control_connection._handle_topology_change(event) + self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection._refresh_nodes_if_not_up, None) event = { @@ -377,7 +385,7 @@ def test_handle_status_change(self): # do the same with a known Host event = { 'change_type': 'UP', - 'address': ('192.168.1.0', 9000) + 'address': ('192.168.1.0', 9042) } self.cluster.scheduler.reset_mock() self.control_connection._handle_status_change(event) @@ -470,6 +478,46 @@ def test_refresh_disabled(self): call(0.0, cc_no_topo_refresh.refresh_schema, **schema_event)]) + def test_refresh_nodes_and_tokens_add_host_detects_port(self): + del self.connection.peer_results[:] + self.connection.peer_results.extend(self.connection.peer_results_v2) + self.connection.peer_results[1].append( + ["192.168.1.3", 555, "10.0.0.3", 666, "a", "dc1", "rack1", ["3", "103", "203"]] + ) + self.connection.wait_for_responses = Mock(return_value=_node_meta_results( + self.connection.local_results, self.connection.peer_results)) + self.cluster.scheduler.schedule = lambda delay, f, *args, **kwargs: f(*args, **kwargs) + self.control_connection.refresh_node_list_and_token_map() + self.assertEqual(1, len(self.cluster.added_hosts)) + self.assertEqual(self.cluster.added_hosts[0].endpoint.address, "192.168.1.3") + self.assertEqual(self.cluster.added_hosts[0].endpoint.port, 555) + self.assertEqual(self.cluster.added_hosts[0].broadcast_rpc_address, "192.168.1.3") + self.assertEqual(self.cluster.added_hosts[0].broadcast_rpc_port, 555) + self.assertEqual(self.cluster.added_hosts[0].broadcast_address, "10.0.0.3") + self.assertEquals(self.cluster.added_hosts[0].broadcast_port, 666) + self.assertEqual(self.cluster.added_hosts[0].datacenter, "dc1") + self.assertEqual(self.cluster.added_hosts[0].rack, "rack1") + + def test_refresh_nodes_and_tokens_add_host_detects_invalid_port(self): + del self.connection.peer_results[:] + self.connection.peer_results.extend(self.connection.peer_results_v2) + self.connection.peer_results[1].append( + ["192.168.1.3", -1, "10.0.0.3", 0, "a", "dc1", "rack1", ["3", "103", "203"]] + ) + self.connection.wait_for_responses = Mock(return_value=_node_meta_results( + self.connection.local_results, self.connection.peer_results)) + self.cluster.scheduler.schedule = lambda delay, f, *args, **kwargs: f(*args, **kwargs) + self.control_connection.refresh_node_list_and_token_map() + self.assertEqual(1, len(self.cluster.added_hosts)) + self.assertEqual(self.cluster.added_hosts[0].endpoint.address, "192.168.1.3") + self.assertEqual(self.cluster.added_hosts[0].endpoint.port, 9042) # fallback default + self.assertEqual(self.cluster.added_hosts[0].broadcast_rpc_address, "192.168.1.3") + self.assertEqual(self.cluster.added_hosts[0].broadcast_rpc_port, None) + self.assertEqual(self.cluster.added_hosts[0].broadcast_address, "10.0.0.3") + self.assertEquals(self.cluster.added_hosts[0].broadcast_port, None) + self.assertEqual(self.cluster.added_hosts[0].datacenter, "dc1") + self.assertEqual(self.cluster.added_hosts[0].rack, "rack1") + class EventTimingTest(unittest.TestCase): """ From 8aabd8fc4f2dfd9feba4f9af03834f622c19bf9d Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 31 Mar 2020 09:52:51 -0400 Subject: [PATCH 1787/2005] Add core graph documentation --- docs/.nav | 1 + docs/classic_graph.rst | 299 ++++++++++++++++++++++++ docs/core_graph.rst | 434 ----------------------------------- docs/graph.rst | 503 ++++++++++++++++++++++++++--------------- docs/graph_fluent.rst | 36 +++ docs/index.rst | 5 +- 6 files changed, 659 insertions(+), 619 deletions(-) create mode 100644 docs/classic_graph.rst delete mode 100644 docs/core_graph.rst diff --git a/docs/.nav b/docs/.nav index 568cd6a383..d5b54c4e13 100644 --- a/docs/.nav +++ b/docs/.nav @@ -5,6 +5,7 @@ lwt object_mapper geo_types graph +classic_graph performance query_paging security diff --git a/docs/classic_graph.rst b/docs/classic_graph.rst new file mode 100644 index 0000000000..ef68c86359 --- /dev/null +++ b/docs/classic_graph.rst @@ -0,0 +1,299 @@ +DataStax Classic Graph Queries +============================== + +Getting Started +~~~~~~~~~~~~~~~ + +First, we need to create a graph in the system. To access the system API, we +use the system execution profile :: + + from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT + + cluster = Cluster() + session = cluster.connect() + + graph_name = 'movies' + session.execute_graph("system.graph(name).ifNotExists().engine(Classic).create()", {'name': graph_name}, + execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) + + +To execute requests on our newly created graph, we need to setup an execution +profile. Additionally, we also need to set the schema_mode to `development` +for the schema creation:: + + + from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT + from cassandra.graph import GraphOptions + + graph_name = 'movies' + ep = GraphExecutionProfile(graph_options=GraphOptions(graph_name=graph_name)) + + cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) + session = cluster.connect() + + session.execute_graph("schema.config().option('graph.schema_mode').set('development')") + + +We are ready to configure our graph schema. We will create a simple one for movies:: + + # properties are used to define a vertex + properties = """ + schema.propertyKey("genreId").Text().create(); + schema.propertyKey("personId").Text().create(); + schema.propertyKey("movieId").Text().create(); + schema.propertyKey("name").Text().create(); + schema.propertyKey("title").Text().create(); + schema.propertyKey("year").Int().create(); + schema.propertyKey("country").Text().create(); + """ + + session.execute_graph(properties) # we can execute multiple statements in a single request + + # A Vertex represents a "thing" in the world. + vertices = """ + schema.vertexLabel("genre").properties("genreId","name").create(); + schema.vertexLabel("person").properties("personId","name").create(); + schema.vertexLabel("movie").properties("movieId","title","year","country").create(); + """ + + session.execute_graph(vertices) + + # An edge represents a relationship between two vertices + edges = """ + schema.edgeLabel("belongsTo").single().connection("movie","genre").create(); + schema.edgeLabel("actor").connection("movie","person").create(); + """ + + session.execute_graph(edges) + + # Indexes to execute graph requests efficiently + indexes = """ + schema.vertexLabel("genre").index("genresById").materialized().by("genreId").add(); + schema.vertexLabel("genre").index("genresByName").materialized().by("name").add(); + schema.vertexLabel("person").index("personsById").materialized().by("personId").add(); + schema.vertexLabel("person").index("personsByName").materialized().by("name").add(); + schema.vertexLabel("movie").index("moviesById").materialized().by("movieId").add(); + schema.vertexLabel("movie").index("moviesByTitle").materialized().by("title").add(); + schema.vertexLabel("movie").index("moviesByYear").secondary().by("year").add(); + """ + +Next, we'll add some data:: + + session.execute_graph(""" + g.addV('genre').property('genreId', 1).property('name', 'Action').next(); + g.addV('genre').property('genreId', 2).property('name', 'Drama').next(); + g.addV('genre').property('genreId', 3).property('name', 'Comedy').next(); + g.addV('genre').property('genreId', 4).property('name', 'Horror').next(); + """) + + session.execute_graph(""" + g.addV('person').property('personId', 1).property('name', 'Mark Wahlberg').next(); + g.addV('person').property('personId', 2).property('name', 'Leonardo DiCaprio').next(); + g.addV('person').property('personId', 3).property('name', 'Iggy Pop').next(); + """) + + session.execute_graph(""" + g.addV('movie').property('movieId', 1).property('title', 'The Happening'). + property('year', 2008).property('country', 'United States').next(); + g.addV('movie').property('movieId', 2).property('title', 'The Italian Job'). + property('year', 2003).property('country', 'United States').next(); + + g.addV('movie').property('movieId', 3).property('title', 'Revolutionary Road'). + property('year', 2008).property('country', 'United States').next(); + g.addV('movie').property('movieId', 4).property('title', 'The Man in the Iron Mask'). + property('year', 1998).property('country', 'United States').next(); + + g.addV('movie').property('movieId', 5).property('title', 'Dead Man'). + property('year', 1995).property('country', 'United States').next(); + """) + +Now that our genre, actor and movie vertices are added, we'll create the relationships (edges) between them:: + + session.execute_graph(""" + genre_horror = g.V().hasLabel('genre').has('name', 'Horror').next(); + genre_drama = g.V().hasLabel('genre').has('name', 'Drama').next(); + genre_action = g.V().hasLabel('genre').has('name', 'Action').next(); + + leo = g.V().hasLabel('person').has('name', 'Leonardo DiCaprio').next(); + mark = g.V().hasLabel('person').has('name', 'Mark Wahlberg').next(); + iggy = g.V().hasLabel('person').has('name', 'Iggy Pop').next(); + + the_happening = g.V().hasLabel('movie').has('title', 'The Happening').next(); + the_italian_job = g.V().hasLabel('movie').has('title', 'The Italian Job').next(); + rev_road = g.V().hasLabel('movie').has('title', 'Revolutionary Road').next(); + man_mask = g.V().hasLabel('movie').has('title', 'The Man in the Iron Mask').next(); + dead_man = g.V().hasLabel('movie').has('title', 'Dead Man').next(); + + the_happening.addEdge('belongsTo', genre_horror); + the_italian_job.addEdge('belongsTo', genre_action); + rev_road.addEdge('belongsTo', genre_drama); + man_mask.addEdge('belongsTo', genre_drama); + man_mask.addEdge('belongsTo', genre_action); + dead_man.addEdge('belongsTo', genre_drama); + + the_happening.addEdge('actor', mark); + the_italian_job.addEdge('actor', mark); + rev_road.addEdge('actor', leo); + man_mask.addEdge('actor', leo); + dead_man.addEdge('actor', iggy); + """) + +We are all set. You can now query your graph. Here are some examples:: + + # Find all movies of the genre Drama + for r in session.execute_graph(""" + g.V().has('genre', 'name', 'Drama').in('belongsTo').valueMap();"""): + print(r) + + # Find all movies of the same genre than the movie 'Dead Man' + for r in session.execute_graph(""" + g.V().has('movie', 'title', 'Dead Man').out('belongsTo').in('belongsTo').valueMap();"""): + print(r) + + # Find all movies of Mark Wahlberg + for r in session.execute_graph(""" + g.V().has('person', 'name', 'Mark Wahlberg').in('actor').valueMap();"""): + print(r) + +To see a more graph examples, see `DataStax Graph Examples `_. + +Graph Types +~~~~~~~~~~~ + +Here are the supported graph types with their python representations: + +========== ================ +DSE Graph Python +========== ================ +boolean bool +bigint long, int (PY3) +int int +smallint int +varint int +float float +double double +uuid uuid.UUID +Decimal Decimal +inet str +timestamp datetime.datetime +date datetime.date +time datetime.time +duration datetime.timedelta +point Point +linestring LineString +polygon Polygon +blob bytearray, buffer (PY2), memoryview (PY3), bytes (PY3) +========== ================ + +Graph Row Factory +~~~~~~~~~~~~~~~~~ + +By default (with :class:`.GraphExecutionProfile.row_factory` set to :func:`.graph.graph_object_row_factory`), known graph result +types are unpacked and returned as specialized types (:class:`.Vertex`, :class:`.Edge`). If the result is not one of these +types, a :class:`.graph.Result` is returned, containing the graph result parsed from JSON and removed from its outer dict. +The class has some accessor convenience methods for accessing top-level properties by name (`type`, `properties` above), +or lists by index:: + + # dicts with `__getattr__` or `__getitem__` + result = session.execute_graph("[[key_str: 'value', key_int: 3]]", execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)[0] # Using system exec just because there is no graph defined + result # dse.graph.Result({u'key_str': u'value', u'key_int': 3}) + result.value # {u'key_int': 3, u'key_str': u'value'} (dict) + result.key_str # u'value' + result.key_int # 3 + result['key_str'] # u'value' + result['key_int'] # 3 + + # lists with `__getitem__` + result = session.execute_graph('[[0, 1, 2]]', execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)[0] + result # dse.graph.Result([0, 1, 2]) + result.value # [0, 1, 2] (list) + result[1] # 1 (list[1]) + +You can use a different row factory by setting :attr:`.Session.default_graph_row_factory` or passing it to +:meth:`.Session.execute_graph`. For example, :func:`.graph.single_object_row_factory` returns the JSON result string`, +unparsed. :func:`.graph.graph_result_row_factory` returns parsed, but unmodified results (such that all metadata is retained, +unlike :func:`.graph.graph_object_row_factory`, which sheds some as attributes and properties are unpacked). These results +also provide convenience methods for converting to known types (:meth:`~.Result.as_vertex`, :meth:`~.Result.as_edge`, :meth:`~.Result.as_path`). + +Vertex and Edge properties are never unpacked since their types are unknown. If you know your graph schema and want to +deserialize properties, use the :class:`.GraphSON1Deserializer`. It provides convenient methods to deserialize by types (e.g. +deserialize_date, deserialize_uuid, deserialize_polygon etc.) Example:: + + # ... + from cassandra.graph import GraphSON1Deserializer + + row = session.execute_graph("g.V().toList()")[0] + value = row.properties['my_property_key'][0].value # accessing the VertexProperty value + value = GraphSON1Deserializer.deserialize_timestamp(value) + + print(value) # 2017-06-26 08:27:05 + print(type(value)) # + + +Named Parameters +~~~~~~~~~~~~~~~~ + +Named parameters are passed in a dict to :meth:`.cluster.Session.execute_graph`:: + + result_set = session.execute_graph('[a, b]', {'a': 1, 'b': 2}, execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) + [r.value for r in result_set] # [1, 2] + +All python types listed in `Graph Types`_ can be passed as named parameters and will be serialized +automatically to their graph representation: + +Example:: + + session.execute_graph(""" + g.addV('person'). + property('name', text_value). + property('age', integer_value). + property('birthday', timestamp_value). + property('house_yard', polygon_value).toList() + """, { + 'text_value': 'Mike Smith', + 'integer_value': 34, + 'timestamp_value': datetime.datetime(1967, 12, 30), + 'polygon_value': Polygon(((30, 10), (40, 40), (20, 40), (10, 20), (30, 10))) + }) + + +As with all Execution Profile parameters, graph options can be set in the cluster default (as shown in the first example) +or specified per execution:: + + ep = session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, + graph_options=GraphOptions(graph_name='something-else')) + session.execute_graph(statement, execution_profile=ep) + +Using GraphSON2 Protocol +~~~~~~~~~~~~~~~~~~~~~~~~ + +The default graph protocol used is GraphSON1. However GraphSON1 may +cause problems of type conversion happening during the serialization +of the query to the DSE Graph server, or the deserialization of the +responses back from a string Gremlin query. GraphSON2 offers better +support for the complex data types handled by DSE Graph. + +DSE >=5.0.4 now offers the possibility to use the GraphSON2 protocol +for graph queries. Enabling GraphSON2 can be done by `changing the +graph protocol of the execution profile` and `setting the graphson2 row factory`:: + + from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT + from cassandra.graph import GraphOptions, GraphProtocol, graph_graphson2_row_factory + + # Create a GraphSON2 execution profile + ep = GraphExecutionProfile(graph_options=GraphOptions(graph_name='types', + graph_protocol=GraphProtocol.GRAPHSON_2_0), + row_factory=graph_graphson2_row_factory) + + cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) + session = cluster.connect() + session.execute_graph(...) + +Using GraphSON2, all properties will be automatically deserialized to +its Python representation. Note that it may bring significant +behavioral change at runtime. + +It is generally recommended to switch to GraphSON2 as it brings more +consistent support for complex data types in the Graph driver and will +be activated by default in the next major version (Python dse-driver +driver 3.0). diff --git a/docs/core_graph.rst b/docs/core_graph.rst deleted file mode 100644 index 47dc53d38d..0000000000 --- a/docs/core_graph.rst +++ /dev/null @@ -1,434 +0,0 @@ -DataStax Graph Queries -====================== - -The driver executes graph queries over the Cassandra native protocol. Use -:meth:`.Session.execute_graph` or :meth:`.Session.execute_graph_async` for -executing gremlin queries in DataStax Graph. - -The driver defines three Execution Profiles suitable for graph execution: - -* :data:`~.cluster.EXEC_PROFILE_GRAPH_DEFAULT` -* :data:`~.cluster.EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT` -* :data:`~.cluster.EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT` - -See :doc:`getting_started` and :doc:`execution_profiles` -for more detail on working with profiles. - -In DSE 6.8.0, the Core graph engine has been introduced and is now the default. It -provides a better unified multi-model, performance and scale. This guide -is for graphs that use the core engine. If you work with previous versions of -DSE or existing graphs, see :doc:`classic_graph`. - -Getting Started with Graph and the Core Engine -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -First, we need to create a graph in the system. To access the system API, we -use the system execution profile :: - - from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT - - cluster = Cluster() - session = cluster.connect() - - graph_name = 'movies' - session.execute_graph("system.graph(name).create()", {'name': graph_name}, - execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) - - -Graphs that use the core engine only support GraphSON3. Since they are Cassandra tables under -the hood, we can automatically configure the execution profile with the proper options -(row_factory and graph_protocol) when executing queries. You only need to make sure that -the `graph_name` is set and GraphSON3 will be automatically used:: - - from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT - - graph_name = 'movies' - ep = GraphExecutionProfile(graph_options=GraphOptions(graph_name=graph_name)) - cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) - session = cluster.connect() - session.execute_graph("g.addV(...)") - - -Note that this graph engine detection is based on the metadata. You might experience -some query errors if the graph has been newly created and is not yet in the metadata. This -would result to a badly configured execution profile. If you really want to avoid that, -configure your execution profile explicitly:: - - from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT - from cassandra.graph import GraphOptions, GraphProtocol, graph_graphson3_row_factory - - graph_name = 'movies' - ep_graphson3 = GraphExecutionProfile( - row_factory=graph_graphson3_row_factory, - graph_options=GraphOptions( - graph_protocol=GraphProtocol.GRAPHSON_3_0, - graph_name=graph_name)) - - cluster = Cluster(execution_profiles={'core': ep_graphson3}) - session = cluster.connect() - session.execute_graph("g.addV(...)", execution_profile='core') - - -We are ready to configure our graph schema. We will create a simple one for movies:: - - # A Vertex represents a "thing" in the world. - # Create the genre vertex - query = """ - schema.vertexLabel('genre') - .partitionBy('genreId', Int) - .property('name', Text) - .create() - """ - session.execute_graph(query) - - # Create the person vertex - query = """ - schema.vertexLabel('person') - .partitionBy('personId', Int) - .property('name', Text) - .create() - """ - session.execute_graph(query) - - # Create the movie vertex - query = """ - schema.vertexLabel('movie') - .partitionBy('movieId', Int) - .property('title', Text) - .property('year', Int) - .property('country', Text) - .create() - """ - session.execute_graph(query) - - # An edge represents a relationship between two vertices - # Create our edges - queries = """ - schema.edgeLabel('belongsTo').from('movie').to('genre').create(); - schema.edgeLabel('actor').from('movie').to('person').create(); - """ - session.execute_graph(queries) - - # Indexes to execute graph requests efficiently - - # If you have a node with the search workload enabled (solr), use the following: - indexes = """ - schema.vertexLabel('genre').searchIndex() - .by("name") - .create(); - - schema.vertexLabel('person').searchIndex() - .by("name") - .create(); - - schema.vertexLabel('movie').searchIndex() - .by('title') - .by("year") - .create(); - """ - session.execute_graph(indexes) - - # Otherwise, use secondary indexes: - indexes = """ - schema.vertexLabel('genre') - .secondaryIndex('by_genre') - .by('name') - .create() - - schema.vertexLabel('person') - .secondaryIndex('by_name') - .by('name') - .create() - - schema.vertexLabel('movie') - .secondaryIndex('by_title') - .by('title') - .create() - """ - session.execute_graph(indexes) - -Add some edge indexes (materialized views):: - - indexes = """ - schema.edgeLabel('belongsTo') - .from('movie') - .to('genre') - .materializedView('movie__belongsTo__genre_by_in_genreId') - .ifNotExists() - .partitionBy(IN, 'genreId') - .clusterBy(OUT, 'movieId', Asc) - .create() - - schema.edgeLabel('actor') - .from('movie') - .to('person') - .materializedView('movie__actor__person_by_in_personId') - .ifNotExists() - .partitionBy(IN, 'personId') - .clusterBy(OUT, 'movieId', Asc) - .create() - """ - session.execute_graph(indexes) - -Next, we'll add some data:: - - session.execute_graph(""" - g.addV('genre').property('genreId', 1).property('name', 'Action').next(); - g.addV('genre').property('genreId', 2).property('name', 'Drama').next(); - g.addV('genre').property('genreId', 3).property('name', 'Comedy').next(); - g.addV('genre').property('genreId', 4).property('name', 'Horror').next(); - """) - - session.execute_graph(""" - g.addV('person').property('personId', 1).property('name', 'Mark Wahlberg').next(); - g.addV('person').property('personId', 2).property('name', 'Leonardo DiCaprio').next(); - g.addV('person').property('personId', 3).property('name', 'Iggy Pop').next(); - """) - - session.execute_graph(""" - g.addV('movie').property('movieId', 1).property('title', 'The Happening'). - property('year', 2008).property('country', 'United States').next(); - g.addV('movie').property('movieId', 2).property('title', 'The Italian Job'). - property('year', 2003).property('country', 'United States').next(); - - g.addV('movie').property('movieId', 3).property('title', 'Revolutionary Road'). - property('year', 2008).property('country', 'United States').next(); - g.addV('movie').property('movieId', 4).property('title', 'The Man in the Iron Mask'). - property('year', 1998).property('country', 'United States').next(); - - g.addV('movie').property('movieId', 5).property('title', 'Dead Man'). - property('year', 1995).property('country', 'United States').next(); - """) - -Now that our genre, actor and movie vertices are added, we'll create the relationships (edges) between them:: - - session.execute_graph(""" - genre_horror = g.V().hasLabel('genre').has('name', 'Horror').id().next(); - genre_drama = g.V().hasLabel('genre').has('name', 'Drama').id().next(); - genre_action = g.V().hasLabel('genre').has('name', 'Action').id().next(); - - leo = g.V().hasLabel('person').has('name', 'Leonardo DiCaprio').id().next(); - mark = g.V().hasLabel('person').has('name', 'Mark Wahlberg').id().next(); - iggy = g.V().hasLabel('person').has('name', 'Iggy Pop').id().next(); - - the_happening = g.V().hasLabel('movie').has('title', 'The Happening').id().next(); - the_italian_job = g.V().hasLabel('movie').has('title', 'The Italian Job').id().next(); - rev_road = g.V().hasLabel('movie').has('title', 'Revolutionary Road').id().next(); - man_mask = g.V().hasLabel('movie').has('title', 'The Man in the Iron Mask').id().next(); - dead_man = g.V().hasLabel('movie').has('title', 'Dead Man').id().next(); - - g.addE('belongsTo').from(__.V(the_happening)).to(__.V(genre_horror)).next(); - g.addE('belongsTo').from(__.V(the_italian_job)).to(__.V(genre_action)).next(); - g.addE('belongsTo').from(__.V(rev_road)).to(__.V(genre_drama)).next(); - g.addE('belongsTo').from(__.V(man_mask)).to(__.V(genre_drama)).next(); - g.addE('belongsTo').from(__.V(man_mask)).to(__.V(genre_action)).next(); - g.addE('belongsTo').from(__.V(dead_man)).to(__.V(genre_drama)).next(); - - g.addE('actor').from(__.V(the_happening)).to(__.V(mark)).next(); - g.addE('actor').from(__.V(the_italian_job)).to(__.V(mark)).next(); - g.addE('actor').from(__.V(rev_road)).to(__.V(leo)).next(); - g.addE('actor').from(__.V(man_mask)).to(__.V(leo)).next(); - g.addE('actor').from(__.V(dead_man)).to(__.V(iggy)).next(); - """) - -We are all set. You can now query your graph. Here are some examples:: - - # Find all movies of the genre Drama - for r in session.execute_graph(""" - g.V().has('genre', 'name', 'Drama').in('belongsTo').valueMap();"""): - print(r) - - # Find all movies of the same genre than the movie 'Dead Man' - for r in session.execute_graph(""" - g.V().has('movie', 'title', 'Dead Man').out('belongsTo').in('belongsTo').valueMap();"""): - print(r) - - # Find all movies of Mark Wahlberg - for r in session.execute_graph(""" - g.V().has('person', 'name', 'Mark Wahlberg').in('actor').valueMap();"""): - print(r) - -To see a more graph examples, see `DataStax Graph Examples `_. - -Graph Types for the Core Engine -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Here are the supported graph types with their python representations: - -============ ================= -DSE Graph Python Driver -============ ================= -text str -boolean bool -bigint long -int int -smallint int -varint long -double float -float float -uuid UUID -bigdecimal Decimal -duration Duration (cassandra.util) -inet str or IPV4Address/IPV6Address (if available) -timestamp datetime.datetime -date datetime.date -time datetime.time -polygon Polygon -point Point -linestring LineString -blob bytearray, buffer (PY2), memoryview (PY3), bytes (PY3) -list list -map dict -set set or list - (Can return a list due to numerical values returned by Java) -tuple tuple -udt class or namedtuple -============ ================= - -Named Parameters -~~~~~~~~~~~~~~~~ - -Named parameters are passed in a dict to :meth:`.cluster.Session.execute_graph`:: - - result_set = session.execute_graph('[a, b]', {'a': 1, 'b': 2}, execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) - [r.value for r in result_set] # [1, 2] - -All python types listed in `Graph Types for the Core Engine`_ can be passed as named parameters and will be serialized -automatically to their graph representation: - -Example:: - - session.execute_graph(""" - g.addV('person'). - property('name', text_value). - property('age', integer_value). - property('birthday', timestamp_value). - property('house_yard', polygon_value).next() - """, { - 'text_value': 'Mike Smith', - 'integer_value': 34, - 'timestamp_value': datetime.datetime(1967, 12, 30), - 'polygon_value': Polygon(((30, 10), (40, 40), (20, 40), (10, 20), (30, 10))) - }) - - -As with all Execution Profile parameters, graph options can be set in the cluster default (as shown in the first example) -or specified per execution:: - - ep = session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, - graph_options=GraphOptions(graph_name='something-else')) - session.execute_graph(statement, execution_profile=ep) - -CQL collections, Tuple and UDT -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This is a very interesting feature of the core engine: we can use all CQL data types, including -list, map, set, tuple and udt. Here is an example using all these types:: - - query = """ - schema.type('address') - .property('address', Text) - .property('city', Text) - .property('state', Text) - .create(); - """ - session.execute_graph(query) - - # It works the same way than normal CQL UDT, so we - # can create an udt class and register it - class Address(object): - def __init__(self, address, city, state): - self.address = address - self.city = city - self.state = state - - session.cluster.register_user_type(graph_name, 'address', Address) - - query = """ - schema.vertexLabel('person') - .partitionBy('personId', Int) - .property('address', typeOf('address')) - .property('friends', listOf(Text)) - .property('skills', setOf(Text)) - .property('scores', mapOf(Text, Int)) - .property('last_workout', tupleOf(Text, Date)) - .create() - """ - session.execute_graph(query) - - # insertion example - query = """ - g.addV('person') - .property('personId', pid) - .property('address', address) - .property('friends', friends) - .property('skills', skills) - .property('scores', scores) - .property('last_workout', last_workout) - .next() - """ - - session.execute_graph(query, { - 'pid': 3, - 'address': Address('42 Smith St', 'Quebec', 'QC'), - 'friends': ['Al', 'Mike', 'Cathy'], - 'skills': {'food', 'fight', 'chess'}, - 'scores': {'math': 98, 'french': 3}, - 'last_workout': ('CrossFit', datetime.date(2018, 11, 20)) - }) - -Limitations ------------ - -Since Python is not a strongly-typed language and the UDT/Tuple graphson representation is, you might -get schema errors when trying to write numerical data. Example:: - - session.execute_graph(""" - schema.vertexLabel('test_tuple').partitionBy('id', Int).property('t', tupleOf(Text, Bigint)).create() - """) - - session.execute_graph(""" - g.addV('test_tuple').property('id', 0).property('t', t) - """, - {'t': ('Test', 99))} - ) - - # error: [Invalid query] message="Value component 1 is of type int, not bigint" - -This is because the server requires the client to include a GraphSON schema definition -with every UDT or tuple query. In the general case, the driver can't determine what Graph type -is meant by, e.g., an int value, and so it can't serialize the value with the correct type in the schema. -The driver provides some numerical type-wrapper factories that you can use to specify types: - -* :func:`~.to_int` -* :func:`~.to_bigint` -* :func:`~.to_smallint` -* :func:`~.to_float` -* :func:`~.to_double` - -Here's the working example of the case above:: - - from cassandra.graph import to_bigint - - session.execute_graph(""" - g.addV('test_tuple').property('id', 0).property('t', t) - """, - {'t': ('Test', to_bigint(99))} - ) - -Continuous Paging -~~~~~~~~~~~~~~~~~ - -This is another nice feature that comes with the core engine: continuous paging with -graph queries. If all nodes of the cluster are >= DSE 6.8.0, it is automatically -enabled under the hood to get the best performance. If you want to explicitly -enable/disable it, you can do it through the execution profile:: - - # Disable it - ep = GraphExecutionProfile(..., continuous_paging_options=None)) - cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) - - # Enable with a custom max_pages option - ep = GraphExecutionProfile(..., - continuous_paging_options=ContinuousPagingOptions(max_pages=10))) - cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) diff --git a/docs/graph.rst b/docs/graph.rst index 49ec51e73b..47dc53d38d 100644 --- a/docs/graph.rst +++ b/docs/graph.rst @@ -1,8 +1,26 @@ DataStax Graph Queries ====================== -Getting Started -~~~~~~~~~~~~~~~ +The driver executes graph queries over the Cassandra native protocol. Use +:meth:`.Session.execute_graph` or :meth:`.Session.execute_graph_async` for +executing gremlin queries in DataStax Graph. + +The driver defines three Execution Profiles suitable for graph execution: + +* :data:`~.cluster.EXEC_PROFILE_GRAPH_DEFAULT` +* :data:`~.cluster.EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT` +* :data:`~.cluster.EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT` + +See :doc:`getting_started` and :doc:`execution_profiles` +for more detail on working with profiles. + +In DSE 6.8.0, the Core graph engine has been introduced and is now the default. It +provides a better unified multi-model, performance and scale. This guide +is for graphs that use the core engine. If you work with previous versions of +DSE or existing graphs, see :doc:`classic_graph`. + +Getting Started with Graph and the Core Engine +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ First, we need to create a graph in the system. To access the system API, we use the system execution profile :: @@ -13,129 +31,204 @@ use the system execution profile :: session = cluster.connect() graph_name = 'movies' - session.execute_graph("system.graph(name).ifNotExists().create()", {'name': graph_name}, + session.execute_graph("system.graph(name).create()", {'name': graph_name}, execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) -To execute requests on our newly created graph, we need to setup an execution -profile. Additionally, we also need to set the schema_mode to `development` -for the schema creation:: - +Graphs that use the core engine only support GraphSON3. Since they are Cassandra tables under +the hood, we can automatically configure the execution profile with the proper options +(row_factory and graph_protocol) when executing queries. You only need to make sure that +the `graph_name` is set and GraphSON3 will be automatically used:: from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT - from cassandra.graph import GraphOptions graph_name = 'movies' ep = GraphExecutionProfile(graph_options=GraphOptions(graph_name=graph_name)) - cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) session = cluster.connect() - - session.execute_graph("schema.config().option('graph.schema_mode').set('development')") + session.execute_graph("g.addV(...)") -We are ready to configure our graph schema. We will create a simple one for movies:: +Note that this graph engine detection is based on the metadata. You might experience +some query errors if the graph has been newly created and is not yet in the metadata. This +would result to a badly configured execution profile. If you really want to avoid that, +configure your execution profile explicitly:: - # properties are used to define a vertex - properties = """ - schema.propertyKey("genreId").Text().create(); - schema.propertyKey("personId").Text().create(); - schema.propertyKey("movieId").Text().create(); - schema.propertyKey("name").Text().create(); - schema.propertyKey("title").Text().create(); - schema.propertyKey("year").Int().create(); - schema.propertyKey("country").Text().create(); - """ + from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT + from cassandra.graph import GraphOptions, GraphProtocol, graph_graphson3_row_factory + + graph_name = 'movies' + ep_graphson3 = GraphExecutionProfile( + row_factory=graph_graphson3_row_factory, + graph_options=GraphOptions( + graph_protocol=GraphProtocol.GRAPHSON_3_0, + graph_name=graph_name)) - session.execute_graph(properties) # we can execute multiple statements in a single request + cluster = Cluster(execution_profiles={'core': ep_graphson3}) + session = cluster.connect() + session.execute_graph("g.addV(...)", execution_profile='core') + + +We are ready to configure our graph schema. We will create a simple one for movies:: # A Vertex represents a "thing" in the world. - vertices = """ - schema.vertexLabel("genre").properties("genreId","name").create(); - schema.vertexLabel("person").properties("personId","name").create(); - schema.vertexLabel("movie").properties("movieId","title","year","country").create(); + # Create the genre vertex + query = """ + schema.vertexLabel('genre') + .partitionBy('genreId', Int) + .property('name', Text) + .create() """ - - session.execute_graph(vertices) + session.execute_graph(query) + + # Create the person vertex + query = """ + schema.vertexLabel('person') + .partitionBy('personId', Int) + .property('name', Text) + .create() + """ + session.execute_graph(query) + + # Create the movie vertex + query = """ + schema.vertexLabel('movie') + .partitionBy('movieId', Int) + .property('title', Text) + .property('year', Int) + .property('country', Text) + .create() + """ + session.execute_graph(query) # An edge represents a relationship between two vertices - edges = """ - schema.edgeLabel("belongsTo").single().connection("movie","genre").create(); - schema.edgeLabel("actor").connection("movie","person").create(); + # Create our edges + queries = """ + schema.edgeLabel('belongsTo').from('movie').to('genre').create(); + schema.edgeLabel('actor').from('movie').to('person').create(); """ - - session.execute_graph(edges) + session.execute_graph(queries) # Indexes to execute graph requests efficiently + + # If you have a node with the search workload enabled (solr), use the following: + indexes = """ + schema.vertexLabel('genre').searchIndex() + .by("name") + .create(); + + schema.vertexLabel('person').searchIndex() + .by("name") + .create(); + + schema.vertexLabel('movie').searchIndex() + .by('title') + .by("year") + .create(); + """ + session.execute_graph(indexes) + + # Otherwise, use secondary indexes: indexes = """ - schema.vertexLabel("genre").index("genresById").materialized().by("genreId").add(); - schema.vertexLabel("genre").index("genresByName").materialized().by("name").add(); - schema.vertexLabel("person").index("personsById").materialized().by("personId").add(); - schema.vertexLabel("person").index("personsByName").materialized().by("name").add(); - schema.vertexLabel("movie").index("moviesById").materialized().by("movieId").add(); - schema.vertexLabel("movie").index("moviesByTitle").materialized().by("title").add(); - schema.vertexLabel("movie").index("moviesByYear").secondary().by("year").add(); + schema.vertexLabel('genre') + .secondaryIndex('by_genre') + .by('name') + .create() + + schema.vertexLabel('person') + .secondaryIndex('by_name') + .by('name') + .create() + + schema.vertexLabel('movie') + .secondaryIndex('by_title') + .by('title') + .create() """ + session.execute_graph(indexes) + +Add some edge indexes (materialized views):: + + indexes = """ + schema.edgeLabel('belongsTo') + .from('movie') + .to('genre') + .materializedView('movie__belongsTo__genre_by_in_genreId') + .ifNotExists() + .partitionBy(IN, 'genreId') + .clusterBy(OUT, 'movieId', Asc) + .create() + + schema.edgeLabel('actor') + .from('movie') + .to('person') + .materializedView('movie__actor__person_by_in_personId') + .ifNotExists() + .partitionBy(IN, 'personId') + .clusterBy(OUT, 'movieId', Asc) + .create() + """ + session.execute_graph(indexes) Next, we'll add some data:: session.execute_graph(""" - g.addV('genre').property('genreId', 1).property('name', 'Action').next(); - g.addV('genre').property('genreId', 2).property('name', 'Drama').next(); - g.addV('genre').property('genreId', 3).property('name', 'Comedy').next(); - g.addV('genre').property('genreId', 4).property('name', 'Horror').next(); + g.addV('genre').property('genreId', 1).property('name', 'Action').next(); + g.addV('genre').property('genreId', 2).property('name', 'Drama').next(); + g.addV('genre').property('genreId', 3).property('name', 'Comedy').next(); + g.addV('genre').property('genreId', 4).property('name', 'Horror').next(); """) session.execute_graph(""" - g.addV('person').property('personId', 1).property('name', 'Mark Wahlberg').next(); - g.addV('person').property('personId', 2).property('name', 'Leonardo DiCaprio').next(); - g.addV('person').property('personId', 3).property('name', 'Iggy Pop').next(); + g.addV('person').property('personId', 1).property('name', 'Mark Wahlberg').next(); + g.addV('person').property('personId', 2).property('name', 'Leonardo DiCaprio').next(); + g.addV('person').property('personId', 3).property('name', 'Iggy Pop').next(); """) session.execute_graph(""" - g.addV('movie').property('movieId', 1).property('title', 'The Happening'). - property('year', 2008).property('country', 'United States').next(); - g.addV('movie').property('movieId', 2).property('title', 'The Italian Job'). - property('year', 2003).property('country', 'United States').next(); - - g.addV('movie').property('movieId', 3).property('title', 'Revolutionary Road'). - property('year', 2008).property('country', 'United States').next(); - g.addV('movie').property('movieId', 4).property('title', 'The Man in the Iron Mask'). - property('year', 1998).property('country', 'United States').next(); - - g.addV('movie').property('movieId', 5).property('title', 'Dead Man'). - property('year', 1995).property('country', 'United States').next(); + g.addV('movie').property('movieId', 1).property('title', 'The Happening'). + property('year', 2008).property('country', 'United States').next(); + g.addV('movie').property('movieId', 2).property('title', 'The Italian Job'). + property('year', 2003).property('country', 'United States').next(); + + g.addV('movie').property('movieId', 3).property('title', 'Revolutionary Road'). + property('year', 2008).property('country', 'United States').next(); + g.addV('movie').property('movieId', 4).property('title', 'The Man in the Iron Mask'). + property('year', 1998).property('country', 'United States').next(); + + g.addV('movie').property('movieId', 5).property('title', 'Dead Man'). + property('year', 1995).property('country', 'United States').next(); """) Now that our genre, actor and movie vertices are added, we'll create the relationships (edges) between them:: session.execute_graph(""" - genre_horror = g.V().hasLabel('genre').has('name', 'Horror').next(); - genre_drama = g.V().hasLabel('genre').has('name', 'Drama').next(); - genre_action = g.V().hasLabel('genre').has('name', 'Action').next(); - - leo = g.V().hasLabel('person').has('name', 'Leonardo DiCaprio').next(); - mark = g.V().hasLabel('person').has('name', 'Mark Wahlberg').next(); - iggy = g.V().hasLabel('person').has('name', 'Iggy Pop').next(); - - the_happening = g.V().hasLabel('movie').has('title', 'The Happening').next(); - the_italian_job = g.V().hasLabel('movie').has('title', 'The Italian Job').next(); - rev_road = g.V().hasLabel('movie').has('title', 'Revolutionary Road').next(); - man_mask = g.V().hasLabel('movie').has('title', 'The Man in the Iron Mask').next(); - dead_man = g.V().hasLabel('movie').has('title', 'Dead Man').next(); - - the_happening.addEdge('belongsTo', genre_horror); - the_italian_job.addEdge('belongsTo', genre_action); - rev_road.addEdge('belongsTo', genre_drama); - man_mask.addEdge('belongsTo', genre_drama); - man_mask.addEdge('belongsTo', genre_action); - dead_man.addEdge('belongsTo', genre_drama); - - the_happening.addEdge('actor', mark); - the_italian_job.addEdge('actor', mark); - rev_road.addEdge('actor', leo); - man_mask.addEdge('actor', leo); - dead_man.addEdge('actor', iggy); + genre_horror = g.V().hasLabel('genre').has('name', 'Horror').id().next(); + genre_drama = g.V().hasLabel('genre').has('name', 'Drama').id().next(); + genre_action = g.V().hasLabel('genre').has('name', 'Action').id().next(); + + leo = g.V().hasLabel('person').has('name', 'Leonardo DiCaprio').id().next(); + mark = g.V().hasLabel('person').has('name', 'Mark Wahlberg').id().next(); + iggy = g.V().hasLabel('person').has('name', 'Iggy Pop').id().next(); + + the_happening = g.V().hasLabel('movie').has('title', 'The Happening').id().next(); + the_italian_job = g.V().hasLabel('movie').has('title', 'The Italian Job').id().next(); + rev_road = g.V().hasLabel('movie').has('title', 'Revolutionary Road').id().next(); + man_mask = g.V().hasLabel('movie').has('title', 'The Man in the Iron Mask').id().next(); + dead_man = g.V().hasLabel('movie').has('title', 'Dead Man').id().next(); + + g.addE('belongsTo').from(__.V(the_happening)).to(__.V(genre_horror)).next(); + g.addE('belongsTo').from(__.V(the_italian_job)).to(__.V(genre_action)).next(); + g.addE('belongsTo').from(__.V(rev_road)).to(__.V(genre_drama)).next(); + g.addE('belongsTo').from(__.V(man_mask)).to(__.V(genre_drama)).next(); + g.addE('belongsTo').from(__.V(man_mask)).to(__.V(genre_action)).next(); + g.addE('belongsTo').from(__.V(dead_man)).to(__.V(genre_drama)).next(); + + g.addE('actor').from(__.V(the_happening)).to(__.V(mark)).next(); + g.addE('actor').from(__.V(the_italian_job)).to(__.V(mark)).next(); + g.addE('actor').from(__.V(rev_road)).to(__.V(leo)).next(); + g.addE('actor').from(__.V(man_mask)).to(__.V(leo)).next(); + g.addE('actor').from(__.V(dead_man)).to(__.V(iggy)).next(); """) We are all set. You can now query your graph. Here are some examples:: @@ -144,7 +237,7 @@ We are all set. You can now query your graph. Here are some examples:: for r in session.execute_graph(""" g.V().has('genre', 'name', 'Drama').in('belongsTo').valueMap();"""): print(r) - + # Find all movies of the same genre than the movie 'Dead Man' for r in session.execute_graph(""" g.V().has('movie', 'title', 'Dead Man').out('belongsTo').in('belongsTo').valueMap();"""): @@ -157,78 +250,40 @@ We are all set. You can now query your graph. Here are some examples:: To see a more graph examples, see `DataStax Graph Examples `_. -Graph Types -~~~~~~~~~~~ +Graph Types for the Core Engine +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Here are the supported graph types with their python representations: -========== ================ -DSE Graph Python -========== ================ -boolean bool -bigint long, int (PY3) -int int -smallint int -varint int -float float -double double -uuid uuid.UUID -Decimal Decimal -inet str -timestamp datetime.datetime -date datetime.date -time datetime.time -duration datetime.timedelta -point Point -linestring LineString -polygon Polygon -blob bytearray, buffer (PY2), memoryview (PY3), bytes (PY3) -========== ================ - -Graph Row Factory -~~~~~~~~~~~~~~~~~ - -By default (with :class:`.GraphExecutionProfile.row_factory` set to :func:`.graph.graph_object_row_factory`), known graph result -types are unpacked and returned as specialized types (:class:`.Vertex`, :class:`.Edge`). If the result is not one of these -types, a :class:`.graph.Result` is returned, containing the graph result parsed from JSON and removed from its outer dict. -The class has some accessor convenience methods for accessing top-level properties by name (`type`, `properties` above), -or lists by index:: - - # dicts with `__getattr__` or `__getitem__` - result = session.execute_graph("[[key_str: 'value', key_int: 3]]", execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)[0] # Using system exec just because there is no graph defined - result # dse.graph.Result({u'key_str': u'value', u'key_int': 3}) - result.value # {u'key_int': 3, u'key_str': u'value'} (dict) - result.key_str # u'value' - result.key_int # 3 - result['key_str'] # u'value' - result['key_int'] # 3 - - # lists with `__getitem__` - result = session.execute_graph('[[0, 1, 2]]', execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)[0] - result # dse.graph.Result([0, 1, 2]) - result.value # [0, 1, 2] (list) - result[1] # 1 (list[1]) - -You can use a different row factory by setting :attr:`.Session.default_graph_row_factory` or passing it to -:meth:`.Session.execute_graph`. For example, :func:`.graph.single_object_row_factory` returns the JSON result string`, -unparsed. :func:`.graph.graph_result_row_factory` returns parsed, but unmodified results (such that all metadata is retained, -unlike :func:`.graph.graph_object_row_factory`, which sheds some as attributes and properties are unpacked). These results -also provide convenience methods for converting to known types (:meth:`~.Result.as_vertex`, :meth:`~.Result.as_edge`, :meth:`~.Result.as_path`). - -Vertex and Edge properties are never unpacked since their types are unknown. If you know your graph schema and want to -deserialize properties, use the :class:`.GraphSON1Deserializer`. It provides convenient methods to deserialize by types (e.g. -deserialize_date, deserialize_uuid, deserialize_polygon etc.) Example:: - - # ... - from cassandra.graph import GraphSON1Deserializer - - row = session.execute_graph("g.V().toList()")[0] - value = row.properties['my_property_key'][0].value # accessing the VertexProperty value - value = GraphSON1Deserializer.deserialize_timestamp(value) - - print(value) # 2017-06-26 08:27:05 - print(type(value)) # - +============ ================= +DSE Graph Python Driver +============ ================= +text str +boolean bool +bigint long +int int +smallint int +varint long +double float +float float +uuid UUID +bigdecimal Decimal +duration Duration (cassandra.util) +inet str or IPV4Address/IPV6Address (if available) +timestamp datetime.datetime +date datetime.date +time datetime.time +polygon Polygon +point Point +linestring LineString +blob bytearray, buffer (PY2), memoryview (PY3), bytes (PY3) +list list +map dict +set set or list + (Can return a list due to numerical values returned by Java) +tuple tuple +udt class or namedtuple +============ ================= Named Parameters ~~~~~~~~~~~~~~~~ @@ -238,7 +293,7 @@ Named parameters are passed in a dict to :meth:`.cluster.Session.execute_graph`: result_set = session.execute_graph('[a, b]', {'a': 1, 'b': 2}, execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) [r.value for r in result_set] # [1, 2] -All python types listed in `Graph Types`_ can be passed as named parameters and will be serialized +All python types listed in `Graph Types for the Core Engine`_ can be passed as named parameters and will be serialized automatically to their graph representation: Example:: @@ -248,7 +303,7 @@ Example:: property('name', text_value). property('age', integer_value). property('birthday', timestamp_value). - property('house_yard', polygon_value).toList() + property('house_yard', polygon_value).next() """, { 'text_value': 'Mike Smith', 'integer_value': 34, @@ -264,36 +319,116 @@ or specified per execution:: graph_options=GraphOptions(graph_name='something-else')) session.execute_graph(statement, execution_profile=ep) -Using GraphSON2 Protocol -~~~~~~~~~~~~~~~~~~~~~~~~ +CQL collections, Tuple and UDT +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The default graph protocol used is GraphSON1. However GraphSON1 may -cause problems of type conversion happening during the serialization -of the query to the DSE Graph server, or the deserialization of the -responses back from a string Gremlin query. GraphSON2 offers better -support for the complex data types handled by DSE Graph. +This is a very interesting feature of the core engine: we can use all CQL data types, including +list, map, set, tuple and udt. Here is an example using all these types:: -DSE >=5.0.4 now offers the possibility to use the GraphSON2 protocol -for graph queries. Enabling GraphSON2 can be done by `changing the -graph protocol of the execution profile` and `setting the graphson2 row factory`:: + query = """ + schema.type('address') + .property('address', Text) + .property('city', Text) + .property('state', Text) + .create(); + """ + session.execute_graph(query) + + # It works the same way than normal CQL UDT, so we + # can create an udt class and register it + class Address(object): + def __init__(self, address, city, state): + self.address = address + self.city = city + self.state = state + + session.cluster.register_user_type(graph_name, 'address', Address) + + query = """ + schema.vertexLabel('person') + .partitionBy('personId', Int) + .property('address', typeOf('address')) + .property('friends', listOf(Text)) + .property('skills', setOf(Text)) + .property('scores', mapOf(Text, Int)) + .property('last_workout', tupleOf(Text, Date)) + .create() + """ + session.execute_graph(query) + + # insertion example + query = """ + g.addV('person') + .property('personId', pid) + .property('address', address) + .property('friends', friends) + .property('skills', skills) + .property('scores', scores) + .property('last_workout', last_workout) + .next() + """ - from cassandra.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT - from cassandra.graph import GraphOptions, GraphProtocol, graph_graphson2_row_factory + session.execute_graph(query, { + 'pid': 3, + 'address': Address('42 Smith St', 'Quebec', 'QC'), + 'friends': ['Al', 'Mike', 'Cathy'], + 'skills': {'food', 'fight', 'chess'}, + 'scores': {'math': 98, 'french': 3}, + 'last_workout': ('CrossFit', datetime.date(2018, 11, 20)) + }) - # Create a GraphSON2 execution profile - ep = GraphExecutionProfile(graph_options=GraphOptions(graph_name='types', - graph_protocol=GraphProtocol.GRAPHSON_2_0), - row_factory=graph_graphson2_row_factory) +Limitations +----------- - cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) - session = cluster.connect() - session.execute_graph(...) +Since Python is not a strongly-typed language and the UDT/Tuple graphson representation is, you might +get schema errors when trying to write numerical data. Example:: -Using GraphSON2, all properties will be automatically deserialized to -its Python representation. Note that it may bring significant -behavioral change at runtime. + session.execute_graph(""" + schema.vertexLabel('test_tuple').partitionBy('id', Int).property('t', tupleOf(Text, Bigint)).create() + """) + + session.execute_graph(""" + g.addV('test_tuple').property('id', 0).property('t', t) + """, + {'t': ('Test', 99))} + ) + + # error: [Invalid query] message="Value component 1 is of type int, not bigint" + +This is because the server requires the client to include a GraphSON schema definition +with every UDT or tuple query. In the general case, the driver can't determine what Graph type +is meant by, e.g., an int value, and so it can't serialize the value with the correct type in the schema. +The driver provides some numerical type-wrapper factories that you can use to specify types: + +* :func:`~.to_int` +* :func:`~.to_bigint` +* :func:`~.to_smallint` +* :func:`~.to_float` +* :func:`~.to_double` + +Here's the working example of the case above:: + + from cassandra.graph import to_bigint -It is generally recommended to switch to GraphSON2 as it brings more -consistent support for complex data types in the Graph driver and will -be activated by default in the next major version (Python dse-driver -driver 3.0). + session.execute_graph(""" + g.addV('test_tuple').property('id', 0).property('t', t) + """, + {'t': ('Test', to_bigint(99))} + ) + +Continuous Paging +~~~~~~~~~~~~~~~~~ + +This is another nice feature that comes with the core engine: continuous paging with +graph queries. If all nodes of the cluster are >= DSE 6.8.0, it is automatically +enabled under the hood to get the best performance. If you want to explicitly +enable/disable it, you can do it through the execution profile:: + + # Disable it + ep = GraphExecutionProfile(..., continuous_paging_options=None)) + cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) + + # Enable with a custom max_pages option + ep = GraphExecutionProfile(..., + continuous_paging_options=ContinuousPagingOptions(max_pages=10))) + cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) diff --git a/docs/graph_fluent.rst b/docs/graph_fluent.rst index c79aa1ecf4..fbe0ef57df 100644 --- a/docs/graph_fluent.rst +++ b/docs/graph_fluent.rst @@ -71,6 +71,27 @@ If you want to change execution property defaults, please see the :doc:`Executio for a more generalized discussion of the API. Graph traversal queries use the same execution profile defined for DSE graph. If you need to change the default properties, please refer to the :doc:`DSE Graph query documentation page ` +Configuring a Traversal Execution Profile for the Core graph engine +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To execute a traversal query with graphs that use the core engine, you need to configure +a graphson3 execution profile: + +.. code-block:: python + from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_DEFAULT + from cassandra.datastax.graph import GraphProtocol + from cassandra.datastax.graph.fluent import DseGraph + + ep_graphson3 = DseGraph.create_execution_profile( + 'my_core_graph_name', + graph_protocol=GraphProtocol.GRAPHSON_3_0 + ) + cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep_graphson3}) + + g = DseGraph.traversal_source(session) + print g.V().toList() + + Explicit Graph Traversal Execution with a DSE Session ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -99,6 +120,21 @@ Below is an example of explicit execution. For this example, assume the schema h for result in session.execute_graph(v_query): pprint(result.value) +Converting a traversal to a bytecode query for core graphs require some more work, because we +need the cluster context for UDT and tuple types: + +.. code-block:: python + g = DseGraph.traversal_source(session=session) + context = { + 'cluster': cluster, + 'graph_name': 'the_graph_for_the_query' + } + addV_query = DseGraph.query_from_traversal( + g.addV('genre').property('genreId', 1).property('name', 'Action'), + graph_protocol=GraphProtocol.GRAPHSON_3_0, + context=context + ) + Implicit Graph Traversal Execution with TinkerPop ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/docs/index.rst b/docs/index.rst index d18cf6acd2..4cdd637e0a 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -54,7 +54,10 @@ Contents Working with DSE geometry types :doc:`graph` - Graph queries + Graph queries with the Core engine + +:doc:`classic_graph` + Graph queries with the Classic engine :doc:`graph_fluent` DataStax Graph Fluent API From c036a640ccf045a5dcb007d2a1f9582ffdd0ba73 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 31 Mar 2020 11:25:45 -0400 Subject: [PATCH 1788/2005] build 3.22 docs for core graph --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index fe5bbf548e..be1740f086 100644 --- a/docs.yaml +++ b/docs.yaml @@ -23,7 +23,7 @@ sections: CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - name: '3.22' - ref: a1f8e102 + ref: 1ccd5b99 - name: '3.21' ref: 5589d96b - name: '3.20' From df2533d0be19536819c7dc384937c9f100b4165f Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 31 Mar 2020 12:23:50 -0400 Subject: [PATCH 1789/2005] docs: better graph_fluent TOC position --- docs/.nav | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/.nav b/docs/.nav index d5b54c4e13..375f058817 100644 --- a/docs/.nav +++ b/docs/.nav @@ -5,6 +5,7 @@ lwt object_mapper geo_types graph +graph_fluent classic_graph performance query_paging From b06d524a26f586b30f7d3eea668a6486f0f778c8 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 26 Mar 2020 16:01:32 -0400 Subject: [PATCH 1790/2005] PYTHON-1207: transient replication support (phase 2) --- cassandra/metadata.py | 120 +++++++++++++----- docs/api/cassandra/metadata.rst | 4 + .../long/test_loadbalancingpolicies.py | 85 +++++++++---- tests/unit/test_metadata.py | 44 ++++++- 4 files changed, 185 insertions(+), 68 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 5fb228934d..df38fc6670 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -386,6 +386,7 @@ def __new__(metacls, name, bases, dct): return cls + @six.add_metaclass(ReplicationStrategyTypeType) class _ReplicationStrategy(object): options_map = None @@ -453,37 +454,82 @@ def make_token_replica_map(self, token_to_host_owner, ring): return {} -def parse_replication_factor(input_rf): +class ReplicationFactor(object): + """ + Represent the replication factor of a keyspace. + """ + + all_replicas = None + """ + The number of total replicas. """ - Given the inputted replication factor, returns a tuple containing number of total replicas - and number of transient replicas + + full_replicas = None """ + The number of replicas that own a full copy of the data. This is the same + than `all_replicas` when transient replication is not enabled. + """ + transient_replicas = None - try: - total_replicas = int(input_rf) - except ValueError: + """ + The number of transient replicas. + + Only set if the keyspace has transient replication enabled. + """ + + def __init__(self, all_replicas, transient_replicas=None): + self.all_replicas = all_replicas + self.transient_replicas = transient_replicas + self.full_replicas = (all_replicas - transient_replicas) if transient_replicas else all_replicas + + @staticmethod + def create(rf): + """ + Given the inputted replication factor string, parse and return the ReplicationFactor instance. + """ + transient_replicas = None try: - rf = input_rf.split('/') - total_replicas, transient_replicas = int(rf[0]), int(rf[1]) - except Exception: - raise ValueError("Unable to determine replication factor from: {}".format(input_rf)) - return total_replicas, transient_replicas + all_replicas = int(rf) + except ValueError: + try: + rf = rf.split('/') + all_replicas, transient_replicas = int(rf[0]), int(rf[1]) + except Exception: + raise ValueError("Unable to determine replication factor from: {}".format(rf)) + + return ReplicationFactor(all_replicas, transient_replicas) + + def __str__(self): + return ("%d/%d" % (self.all_replicas, self.transient_replicas) if self.transient_replicas + else "%d" % self.all_replicas) + + def __eq__(self, other): + if not isinstance(other, ReplicationFactor): + return False + + return self.all_replicas == other.all_replicas and self.full_replicas == other.full_replicas class SimpleStrategy(ReplicationStrategy): - replication_factor = None - """ - The replication factor for this keyspace. + replication_factor_info = None """ - transient_replicas = None - """ - The number of transient replicas for this keyspace. + A :class:`cassandra.metadata.ReplicationFactor` instance. """ + @property + def replication_factor(self): + """ + The replication factor for this keyspace. + + For backward compatibility, this returns the + :attr:`cassandra.metadata.ReplicationFactor.full_replicas` value of + :attr:`cassandra.metadata.SimpleStrategy.replication_factor_info`. + """ + return self.replication_factor_info.full_replicas + def __init__(self, options_map): - self._raw_replication_factor = options_map['replication_factor'] - self.replication_factor, self.transient_replicas = parse_replication_factor(self._raw_replication_factor) + self.replication_factor_info = ReplicationFactor.create(options_map['replication_factor']) def make_token_replica_map(self, token_to_host_owner, ring): replica_map = {} @@ -505,36 +551,40 @@ def export_for_schema(self): suitable for use in a CREATE KEYSPACE statement. """ return "{'class': 'SimpleStrategy', 'replication_factor': '%s'}" \ - % (self._raw_replication_factor,) + % (str(self.replication_factor_info),) def __eq__(self, other): if not isinstance(other, SimpleStrategy): return False - return str(self._raw_replication_factor) == str(other._raw_replication_factor) + return str(self.replication_factor_info) == str(other.replication_factor_info) class NetworkTopologyStrategy(ReplicationStrategy): + dc_replication_factors_info = None + """ + A map of datacenter names to the :class:`cassandra.metadata.ReplicationFactor` instance for that DC. + """ + dc_replication_factors = None """ A map of datacenter names to the replication factor for that DC. + + For backward compatibility, this maps to the :attr:`cassandra.metadata.ReplicationFactor.full_replicas` + value of the :attr:`cassandra.metadata.NetworkTopologyStrategy.dc_replication_factors_info` dict. """ def __init__(self, dc_replication_factors): - try: - self.dc_replication_factors = dict( - (str(k), int(v)) for k, v in dc_replication_factors.items()) - except ValueError: - self.dc_replication_factors = dict( - (str(k), str(v)) for k, v in dc_replication_factors.items()) + self.dc_replication_factors_info = dict( + (str(k), ReplicationFactor.create(v)) for k, v in dc_replication_factors.items()) + self.dc_replication_factors = dict( + (dc, rf.full_replicas) for dc, rf in self.dc_replication_factors_info.items()) def make_token_replica_map(self, token_to_host_owner, ring): - dc_rf_map = {} - for dc, rf in self.dc_replication_factors.items(): - total_rf = parse_replication_factor(rf)[0] - if total_rf > 0: - dc_rf_map[dc] = total_rf + dc_rf_map = dict( + (dc, full_replicas) for dc, full_replicas in self.dc_replication_factors.items() + if full_replicas > 0) # build a map of DCs to lists of indexes into `ring` for tokens that # belong to that DC @@ -614,15 +664,15 @@ def export_for_schema(self): suitable for use in a CREATE KEYSPACE statement. """ ret = "{'class': 'NetworkTopologyStrategy'" - for dc, repl_factor in sorted(self.dc_replication_factors.items()): - ret += ", '%s': '%s'" % (dc, repl_factor) + for dc, rf in sorted(self.dc_replication_factors_info.items()): + ret += ", '%s': '%s'" % (dc, str(rf)) return ret + "}" def __eq__(self, other): if not isinstance(other, NetworkTopologyStrategy): return False - return self.dc_replication_factors == other.dc_replication_factors + return self.dc_replication_factors_info == other.dc_replication_factors_info class LocalStrategy(ReplicationStrategy): diff --git a/docs/api/cassandra/metadata.rst b/docs/api/cassandra/metadata.rst index ca33e34739..91fe39fd99 100644 --- a/docs/api/cassandra/metadata.rst +++ b/docs/api/cassandra/metadata.rst @@ -76,6 +76,10 @@ Tokens and Ring Topology .. autoclass:: ReplicationStrategy :members: +.. autoclass:: ReplicationFactor + :members: + :exclude-members: create + .. autoclass:: SimpleStrategy :members: diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index c3bf911ed0..f245569a80 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -16,6 +16,7 @@ import struct import sys import traceback +from cassandra import cqltypes from cassandra import ConsistencyLevel, Unavailable, OperationTimedOut, ReadTimeout, ReadFailure, \ WriteTimeout, WriteFailure @@ -29,7 +30,7 @@ ) from cassandra.query import SimpleStatement -from tests.integration import use_singledc, use_multidc, remove_cluster, TestCluster +from tests.integration import use_singledc, use_multidc, remove_cluster, TestCluster, greaterthanorequalcass40, notdse from tests.integration.long.utils import (wait_for_up, create_schema, CoordinatorStats, force_stop, wait_for_down, decommission, start, @@ -184,18 +185,19 @@ def test_token_aware_is_used_by_default(self): """ cluster = TestCluster() + self.addCleanup(cluster.shutdown) if murmur3 is not None: self.assertTrue(isinstance(cluster.profile_manager.default.load_balancing_policy, TokenAwarePolicy)) else: self.assertTrue(isinstance(cluster.profile_manager.default.load_balancing_policy, DCAwareRoundRobinPolicy)) - cluster.shutdown() - def test_roundrobin(self): use_singledc() keyspace = 'test_roundrobin' cluster, session = self._cluster_session_with_lbp(RoundRobinPolicy()) + self.addCleanup(cluster.shutdown) + self._wait_for_nodes_up(range(1, 4), cluster) create_schema(cluster, session, keyspace, replication_factor=3) self._insert(session, keyspace) @@ -226,12 +228,12 @@ def test_roundrobin(self): self.coordinator_stats.assert_query_count_equals(self, 1, 0) self.coordinator_stats.assert_query_count_equals(self, 2, 6) self.coordinator_stats.assert_query_count_equals(self, 3, 6) - cluster.shutdown() def test_roundrobin_two_dcs(self): use_multidc([2, 2]) keyspace = 'test_roundrobin_two_dcs' cluster, session = self._cluster_session_with_lbp(RoundRobinPolicy()) + self.addCleanup(cluster.shutdown) self._wait_for_nodes_up(range(1, 5), cluster) create_schema(cluster, session, keyspace, replication_strategy=[2, 2]) @@ -260,12 +262,11 @@ def test_roundrobin_two_dcs(self): self.coordinator_stats.assert_query_count_equals(self, 4, 3) self.coordinator_stats.assert_query_count_equals(self, 5, 3) - cluster.shutdown() - def test_roundrobin_two_dcs_2(self): use_multidc([2, 2]) keyspace = 'test_roundrobin_two_dcs_2' cluster, session = self._cluster_session_with_lbp(RoundRobinPolicy()) + self.addCleanup(cluster.shutdown) self._wait_for_nodes_up(range(1, 5), cluster) create_schema(cluster, session, keyspace, replication_strategy=[2, 2]) @@ -294,12 +295,11 @@ def test_roundrobin_two_dcs_2(self): self.coordinator_stats.assert_query_count_equals(self, 4, 3) self.coordinator_stats.assert_query_count_equals(self, 5, 3) - cluster.shutdown() - def test_dc_aware_roundrobin_two_dcs(self): use_multidc([3, 2]) keyspace = 'test_dc_aware_roundrobin_two_dcs' cluster, session = self._cluster_session_with_lbp(DCAwareRoundRobinPolicy('dc1')) + self.addCleanup(cluster.shutdown) self._wait_for_nodes_up(range(1, 6)) create_schema(cluster, session, keyspace, replication_strategy=[2, 2]) @@ -312,12 +312,11 @@ def test_dc_aware_roundrobin_two_dcs(self): self.coordinator_stats.assert_query_count_equals(self, 4, 0) self.coordinator_stats.assert_query_count_equals(self, 5, 0) - cluster.shutdown() - def test_dc_aware_roundrobin_two_dcs_2(self): use_multidc([3, 2]) keyspace = 'test_dc_aware_roundrobin_two_dcs_2' cluster, session = self._cluster_session_with_lbp(DCAwareRoundRobinPolicy('dc2')) + self.addCleanup(cluster.shutdown) self._wait_for_nodes_up(range(1, 6)) create_schema(cluster, session, keyspace, replication_strategy=[2, 2]) @@ -330,12 +329,11 @@ def test_dc_aware_roundrobin_two_dcs_2(self): self.coordinator_stats.assert_query_count_equals(self, 4, 6) self.coordinator_stats.assert_query_count_equals(self, 5, 6) - cluster.shutdown() - def test_dc_aware_roundrobin_one_remote_host(self): use_multidc([2, 2]) keyspace = 'test_dc_aware_roundrobin_one_remote_host' cluster, session = self._cluster_session_with_lbp(DCAwareRoundRobinPolicy('dc2', used_hosts_per_remote_dc=1)) + self.addCleanup(cluster.shutdown) self._wait_for_nodes_up(range(1, 5)) create_schema(cluster, session, keyspace, replication_strategy=[2, 2]) @@ -408,8 +406,6 @@ def test_dc_aware_roundrobin_one_remote_host(self): except NoHostAvailable: pass - cluster.shutdown() - def test_token_aware(self): keyspace = 'test_token_aware' self.token_aware(keyspace) @@ -421,6 +417,7 @@ def test_token_aware_prepared(self): def token_aware(self, keyspace, use_prepared=False): use_singledc() cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy())) + self.addCleanup(cluster.shutdown) self._wait_for_nodes_up(range(1, 4), cluster) create_schema(cluster, session, keyspace, replication_factor=1) @@ -485,13 +482,12 @@ def token_aware(self, keyspace, use_prepared=False): self.assertEqual(results, set([0, 12])) self.coordinator_stats.assert_query_count_equals(self, 2, 0) - cluster.shutdown() - def test_token_aware_composite_key(self): use_singledc() keyspace = 'test_token_aware_composite_key' table = 'composite' cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy())) + self.addCleanup(cluster.shutdown) self._wait_for_nodes_up(range(1, 4), cluster) create_schema(cluster, session, keyspace, replication_factor=2) @@ -520,12 +516,11 @@ def test_token_aware_composite_key(self): self.assertTrue(results[0].i) - cluster.shutdown() - def test_token_aware_with_rf_2(self, use_prepared=False): use_singledc() keyspace = 'test_token_aware_with_rf_2' cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy())) + self.addCleanup(cluster.shutdown) self._wait_for_nodes_up(range(1, 4), cluster) create_schema(cluster, session, keyspace, replication_factor=2) @@ -546,11 +541,10 @@ def test_token_aware_with_rf_2(self, use_prepared=False): self.coordinator_stats.assert_query_count_equals(self, 2, 0) self.coordinator_stats.assert_query_count_equals(self, 3, 12) - cluster.shutdown() - def test_token_aware_with_local_table(self): use_singledc() cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy())) + self.addCleanup(cluster.shutdown) self._wait_for_nodes_up(range(1, 4), cluster) p = session.prepare("SELECT * FROM system.local WHERE key=?") @@ -558,8 +552,6 @@ def test_token_aware_with_local_table(self): r = session.execute(p, ('local',)) self.assertEqual(r[0].key, 'local') - cluster.shutdown() - def test_token_aware_with_shuffle_rf2(self): """ Test to validate the hosts are shuffled when the `shuffle_replicas` is truthy @@ -572,6 +564,7 @@ def test_token_aware_with_shuffle_rf2(self): """ keyspace = 'test_token_aware_with_rf_2' cluster, session = self._set_up_shuffle_test(keyspace, replication_factor=2) + self.addCleanup(cluster.shutdown) self._check_query_order_changes(session=session, keyspace=keyspace) @@ -586,8 +579,6 @@ def test_token_aware_with_shuffle_rf2(self): self.coordinator_stats.assert_query_count_equals(self, 2, 0) self.coordinator_stats.assert_query_count_equals(self, 3, 12) - cluster.shutdown() - def test_token_aware_with_shuffle_rf3(self): """ Test to validate the hosts are shuffled when the `shuffle_replicas` is truthy @@ -600,6 +591,7 @@ def test_token_aware_with_shuffle_rf3(self): """ keyspace = 'test_token_aware_with_rf_3' cluster, session = self._set_up_shuffle_test(keyspace, replication_factor=3) + self.addCleanup(cluster.shutdown) self._check_query_order_changes(session=session, keyspace=keyspace) @@ -625,7 +617,47 @@ def test_token_aware_with_shuffle_rf3(self): self.coordinator_stats.assert_query_count_equals(self, 2, 0) self.coordinator_stats.assert_query_count_equals(self, 3, 12) - cluster.shutdown() + @notdse + @greaterthanorequalcass40 + def test_token_aware_with_transient_replication(self): + """ + Test to validate that the token aware policy doesn't route any request to a transient node. + + @since 3.23 + @jira_ticket PYTHON-1207 + @expected_result the requests are spread across the 2 full replicas and + no other nodes are queried by the coordinator. + + @test_category policy + """ + # We can test this with a single dc when CASSANDRA-15670 is fixed + use_multidc([3, 3]) + + cluster, session = self._cluster_session_with_lbp( + TokenAwarePolicy(DCAwareRoundRobinPolicy(), shuffle_replicas=True) + ) + self.addCleanup(cluster.shutdown) + + session.execute("CREATE KEYSPACE test_tr WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': '3/1', 'dc2': '3/1'};") + session.execute("CREATE TABLE test_tr.users (id int PRIMARY KEY, username text) WITH read_repair ='NONE';") + for i in range(100): + session.execute("INSERT INTO test_tr.users (id, username) VALUES (%d, 'user');" % (i,)) + + query = session.prepare("SELECT * FROM test_tr.users WHERE id = ?") + for i in range(100): + f = session.execute_async(query, (i,), trace=True) + full_dc1_replicas = [h for h in cluster.metadata.get_replicas('test_tr', cqltypes.Int32Type.serialize(i, cluster.protocol_version)) + if h.datacenter == 'dc1'] + self.assertEqual(len(full_dc1_replicas), 2) + + f.result() + trace_hosts = [cluster.metadata.get_host(e.source) for e in f.get_query_trace().events] + + for h in f.attempted_hosts: + self.assertIn(h, full_dc1_replicas) + for h in trace_hosts: + self.assertIn(h, full_dc1_replicas) + def _set_up_shuffle_test(self, keyspace, replication_factor): use_singledc() @@ -670,6 +702,7 @@ def test_white_list(self): ) } ) + self.addCleanup(cluster.shutdown) session = cluster.connect() self._wait_for_nodes_up([1, 2, 3]) @@ -695,8 +728,6 @@ def test_white_list(self): self.fail() except NoHostAvailable: pass - finally: - cluster.shutdown() def test_black_list_with_host_filter_policy(self): """ diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 174ac1493b..b2143f8c20 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -34,7 +34,7 @@ UserType, KeyspaceMetadata, get_schema_parser, _UnknownStrategy, ColumnMetadata, TableMetadata, IndexMetadata, Function, Aggregate, - Metadata, TokenMap) + Metadata, TokenMap, ReplicationFactor) from cassandra.policies import SimpleConvictionPolicy from cassandra.pool import Host @@ -42,6 +42,34 @@ log = logging.getLogger(__name__) +class ReplicationFactorTest(unittest.TestCase): + + def test_replication_factor_parsing(self): + rf = ReplicationFactor.create('3') + self.assertEqual(rf.all_replicas, 3) + self.assertEqual(rf.full_replicas, 3) + self.assertEqual(rf.transient_replicas, None) + self.assertEqual(str(rf), '3') + + rf = ReplicationFactor.create('3/1') + self.assertEqual(rf.all_replicas, 3) + self.assertEqual(rf.full_replicas, 2) + self.assertEqual(rf.transient_replicas, 1) + self.assertEqual(str(rf), '3/1') + + self.assertRaises(ValueError, ReplicationFactor.create, '3/') + self.assertRaises(ValueError, ReplicationFactor.create, 'a/1') + self.assertRaises(ValueError, ReplicationFactor.create, 'a') + self.assertRaises(ValueError, ReplicationFactor.create, '3/a') + + def test_replication_factor_equality(self): + self.assertEqual(ReplicationFactor.create('3/1'), ReplicationFactor.create('3/1')) + self.assertEqual(ReplicationFactor.create('3'), ReplicationFactor.create('3')) + self.assertNotEqual(ReplicationFactor.create('3'), ReplicationFactor.create('3/1')) + self.assertNotEqual(ReplicationFactor.create('3'), ReplicationFactor.create('3/1')) + + + class StrategiesTest(unittest.TestCase): @classmethod @@ -109,11 +137,11 @@ def test_transient_replication_parsing(self): rs = ReplicationStrategy() simple_transient = rs.create('SimpleStrategy', {'replication_factor': '3/1'}) - self.assertEqual(simple_transient.replication_factor, 3) - self.assertEqual(simple_transient.transient_replicas, 1) + self.assertEqual(simple_transient.replication_factor_info, ReplicationFactor(3, 1)) + self.assertEqual(simple_transient.replication_factor, 2) self.assertIn("'replication_factor': '3/1'", simple_transient.export_for_schema()) - simple_str = rs.create('SimpleStrategy', {'replication_factor': '3'}) + simple_str = rs.create('SimpleStrategy', {'replication_factor': '2'}) self.assertNotEqual(simple_transient, simple_str) # make token replica map @@ -134,6 +162,8 @@ def test_nts_replication_parsing(self): self.assertEqual(nts_int.dc_replication_factors['dc1'], 3) self.assertEqual(nts_str.dc_replication_factors['dc1'], 3) + self.assertEqual(nts_int.dc_replication_factors_info['dc1'], ReplicationFactor(3)) + self.assertEqual(nts_str.dc_replication_factors_info['dc1'], ReplicationFactor(3)) self.assertEqual(nts_int.export_for_schema(), nts_str.export_for_schema()) self.assertEqual(nts_int, nts_str) @@ -152,8 +182,10 @@ def test_nts_transient_parsing(self): rs = ReplicationStrategy() nts_transient = rs.create('NetworkTopologyStrategy', {'dc1': '3/1', 'dc2': '5/1'}) - self.assertEqual(nts_transient.dc_replication_factors['dc1'], '3/1') - self.assertEqual(nts_transient.dc_replication_factors['dc2'], '5/1') + self.assertEqual(nts_transient.dc_replication_factors_info['dc1'], ReplicationFactor(3, 1)) + self.assertEqual(nts_transient.dc_replication_factors_info['dc2'], ReplicationFactor(5, 1)) + self.assertEqual(nts_transient.dc_replication_factors['dc1'], 2) + self.assertEqual(nts_transient.dc_replication_factors['dc2'], 4) self.assertIn("'dc1': '3/1', 'dc2': '5/1'", nts_transient.export_for_schema()) nts_str = rs.create('NetworkTopologyStrategy', {'dc1': '3', 'dc2': '5'}) From a40a2af79da1eb5f10484046de646711f8d139ba Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 3 Apr 2020 10:46:57 -0400 Subject: [PATCH 1791/2005] release 3.23: changelog & version --- CHANGELOG.rst | 2 +- cassandra/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 9f5deaabae..db2d7cb468 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,6 @@ 3.23.0 ====== -Unreleased +April 6, 2020 Features -------- diff --git a/cassandra/__init__.py b/cassandra/__init__.py index c4479464c3..161499fab2 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 22, 0) +__version_info__ = (3, 23, 0) __version__ = '.'.join(map(str, __version_info__)) From a8865a7e54b00ffa49b9c6c3492c2fdd9dced786 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 3 Apr 2020 10:50:13 -0400 Subject: [PATCH 1792/2005] release 3.23: docs --- docs.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs.yaml b/docs.yaml index be1740f086..2298db2588 100644 --- a/docs.yaml +++ b/docs.yaml @@ -22,6 +22,8 @@ sections: # build extensions like libev CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: + - name: '3.23' + ref: a40a2af7 - name: '3.22' ref: 1ccd5b99 - name: '3.21' From 94478d7083285cf18e74ed48ac70ab164fa1646e Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 3 Apr 2020 11:03:18 -0400 Subject: [PATCH 1793/2005] appveyor: use the tmp native ports branch --- appveyor.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/appveyor.yml b/appveyor.yml index d1daaa6ec6..c687ab8f73 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -16,6 +16,7 @@ build_script: - cmd: | "%VS140COMNTOOLS%\..\..\VC\vcvarsall.bat" x86_amd64 python setup.py install --no-cython + pip install git+ssh://git@github.com/riptano/ccm-private.git@cassandra-7544-native-ports-with-dse-fix test_script: - ps: .\appveyor\run_test.ps1 cache: From ff8b3b2a87e396b7035feadcd7ad87f34dc05a17 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 3 Apr 2020 11:14:04 -0400 Subject: [PATCH 1794/2005] appveyor: remove use the tmp native ports branch --- appveyor.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/appveyor.yml b/appveyor.yml index c687ab8f73..d1daaa6ec6 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -16,7 +16,6 @@ build_script: - cmd: | "%VS140COMNTOOLS%\..\..\VC\vcvarsall.bat" x86_amd64 python setup.py install --no-cython - pip install git+ssh://git@github.com/riptano/ccm-private.git@cassandra-7544-native-ports-with-dse-fix test_script: - ps: .\appveyor\run_test.ps1 cache: From 1a184b99a4fd0ed265b8037c636a8adeb5ebe046 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 6 Apr 2020 09:21:43 -0400 Subject: [PATCH 1795/2005] post version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 161499fab2..fd4e516f16 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 23, 0) +__version_info__ = (3, 23, 0, 'post0') __version__ = '.'.join(map(str, __version_info__)) From 02e56bf68033487a9f5f5c5cf10534fb70d56afa Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 16 Apr 2020 10:19:29 -0400 Subject: [PATCH 1796/2005] add initial Jenkinsfile --- Jenkinsfile | 865 +++++++++++++++++++++++++++++++++++ build.yaml => build.yaml.bak | 0 2 files changed, 865 insertions(+) create mode 100644 Jenkinsfile rename build.yaml => build.yaml.bak (100%) diff --git a/Jenkinsfile b/Jenkinsfile new file mode 100644 index 0000000000..0b26bf00d0 --- /dev/null +++ b/Jenkinsfile @@ -0,0 +1,865 @@ +#!groovy + +def initializeEnvironment() { + env.DRIVER_DISPLAY_NAME = 'Cassandra Python Driver' + env.DRIVER_METRIC_TYPE = 'oss' + if (env.GIT_URL.contains('riptano/python-driver')) { + env.DRIVER_DISPLAY_NAME = 'private ' + env.DRIVER_DISPLAY_NAME + env.DRIVER_METRIC_TYPE = 'oss-private' + } else if (env.GIT_URL.contains('python-dse-driver')) { + env.DRIVER_DISPLAY_NAME = 'DSE Python Driver' + env.DRIVER_METRIC_TYPE = 'dse' + } + + env.GIT_SHA = "${env.GIT_COMMIT.take(7)}" + env.GITHUB_PROJECT_URL = "https://${GIT_URL.replaceFirst(/(git@|http:\/\/|https:\/\/)/, '').replace(':', '/').replace('.git', '')}" + env.GITHUB_BRANCH_URL = "${GITHUB_PROJECT_URL}/tree/${env.BRANCH_NAME}" + env.GITHUB_COMMIT_URL = "${GITHUB_PROJECT_URL}/commit/${env.GIT_COMMIT}" + + sh label: 'Assign Python global environment', script: '''#!/bin/bash -lex + pyenv global ${PYTHON_VERSION} + ''' + + sh label: 'Install socat; required for unix socket tests', script: '''#!/bin/bash -lex + sudo apt-get install socat + ''' + + sh label: 'Install the latest setuptools', script: '''#!/bin/bash -lex + pip install --upgrade pip + pip install -U setuptools + ''' + + sh label: 'Install CCM', script: '''#!/bin/bash -lex + pip install ${HOME}/ccm + ''' + + // Determine if server version is Apache Cassandra� or DataStax Enterprise + if (env.CASSANDRA_VERSION.split('-')[0] == 'dse') { + sh label: 'Install DataStax Enterprise requirements', script: '''#!/bin/bash -lex + pip install -r test-datastax-requirements.txt + ''' + } else { + sh label: 'Install Apache CassandraⓇ requirements', script: '''#!/bin/bash -lex + pip install -r test-requirements.txt + ''' + } + + sh label: 'Install unit test modules', script: '''#!/bin/bash -lex + pip install nose-ignore-docstring nose-exclude service_identity + ''' + + if (params.CYTHON) { + sh label: 'Install cython modules', script: '''#!/bin/bash -lex + pip install cython numpy + ''' + } + + sh label: 'Download Apache CassandraⓇ or DataStax Enterprise', script: '''#!/bin/bash -lex + . ${CCM_ENVIRONMENT_SHELL} ${CASSANDRA_VERSION} + ''' + + sh label: 'Display Python and environment information', script: '''#!/bin/bash -le + # Load CCM environment variables + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + python --version + pip --version + printenv | sort + ''' +} + +def installDriverAndCompileExtensions() { + if (params.CYTHON) { + sh label: 'Install the driver and compile with C extensions with Cython', script: '''#!/bin/bash -lex + python setup.py build_ext --inplace + ''' + } else { + sh label: 'Install the driver and compile with C extensions without Cython', script: '''#!/bin/bash -lex + python setup.py build_ext --inplace --no-cython + ''' + } +} + +def executeStandardTests() { + /* + * Run the cython unit tests, this is not done in travis because it takes too much time for the + * whole matrix to build with cython + */ + if (params.CYTHON) { + sh label: 'Execute Cython unit tests', script: '''#!/bin/bash -lex + # Load CCM environment variables + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} VERIFY_CYTHON=True nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml tests/unit/ || true + EVENT_LOOP_MANAGER=eventlet VERIFY_CYTHON=True nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || true + EVENT_LOOP_MANAGER=gevent VERIFY_CYTHON=True nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || true + ''' + } + + sh label: 'Execute Simulacron integration tests', script: '''#!/bin/bash -lex + # Load CCM environment variables + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + SIMULACRON_JAR="${HOME}/simulacron.jar" + #SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true + ''' + + sh label: 'Execute CQL engine integration tests', script: '''#!/bin/bash -lex + # Load CCM environment variables + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + #EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true + ''' + + sh label: 'Execute Apache CassandraⓇ integration tests', script: '''#!/bin/bash -lex + # Load CCM environment variables + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + #EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/test_cluster.py || true + ''' + + if (env.CASSANDRA_VERSION.split('-')[0] == 'dse' && env.CASSANDRA_VERSION.split('-')[1] != '4.8') { + sh label: 'Execute DataStax Enterprise integration tests', script: '''#!/bin/bash -lex + # Load CCM environment variable + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + #EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} DSE_VERSION=${DSE_VERSION} ADS_HOME="${HOME}/" VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=dse_results.xml tests/integration/advanced/ || true + ''' + } + + sh label: 'Execute DataStax Constellation integration tests', script: '''#!/bin/bash -lex + # Load CCM environment variable + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + #EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CLOUD_PROXY_PATH="${HOME}/proxy/" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=advanced_results.xml tests/integration/cloud/ || true + ''' + + if (env.EXECUTE_LONG_TESTS == 'True') { + sh label: 'Execute long running integration tests', script: '''#!/bin/bash -lex + # Load CCM environment variable + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + #EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true + ''' + } +} + +def executeDseSmokeTests() { + sh label: 'Execute profile DataStax Enterprise smoke test integration tests', script: '''#!/bin/bash -lex + # Load CCM environment variable + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} DSE_VERSION=${DSE_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/test_dse.py || true + ''' +} + +def executeEventLoopTests() { + sh label: 'Execute profile event loop manager integration tests', script: '''#!/bin/bash -lex + # Load CCM environment variable + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + EVENT_LOOP_TESTS=( + "tests/integration/standard/test_cluster.py" + "tests/integration/standard/test_concurrent.py" + "tests/integration/standard/test_connection.py" + "tests/integration/standard/test_control_connection.py" + "tests/integration/standard/test_metrics.py" + "tests/integration/standard/test_query.py" + "tests/integration/simulacron/test_endpoint.py" + "tests/integration/long/test_ssl.py" + ) + EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml ${EVENT_LOOP_TESTS[@]} || true + ''' +} + +def executeUpgradeTests() { + sh label: 'Execute profile upgrade integration tests', script: '''#!/bin/bash -lex + # Load CCM environment variable + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=upgrade_results.xml tests/integration/upgrade || true + ''' +} + +def executeTests() { + switch(params.PROFILE) { + case 'DSE-SMOKE-TEST': + executeDseSmokeTests() + break + case 'EVENT-LOOP': + executeEventLoopTests() + break + case 'UPGRADE': + executeUpgradeTests() + break + default: + executeStandardTests() + break + } +} + +def notifySlack(status = 'started') { + // Set the global pipeline scoped environment (this is above each matrix) + env.BUILD_STATED_SLACK_NOTIFIED = 'true' + + def buildType = 'Commit' + if (params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION') { + buildType = "${params.CI_SCHEDULE.toLowerCase().capitalize()}" + } + + def color = 'good' // Green + if (status.equalsIgnoreCase('aborted')) { + color = '808080' // Grey + } else if (status.equalsIgnoreCase('unstable')) { + color = 'warning' // Orange + } else if (status.equalsIgnoreCase('failed')) { + color = 'danger' // Red + } + + def message = """Build ${status} for ${env.DRIVER_DISPLAY_NAME} [${buildType}] +<${env.GITHUB_BRANCH_URL}|${env.BRANCH_NAME}> - <${env.RUN_DISPLAY_URL}|#${env.BUILD_NUMBER}> - <${env.GITHUB_COMMIT_URL}|${env.GIT_SHA}>""" + if (params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION') { + message += " - ${params.CI_SCHEDULE_PYTHON_VERSION} - ${params.EVENT_LOOP_MANAGER}" + } + if (!status.equalsIgnoreCase('Started')) { + message += """ +${status} after ${currentBuild.durationString - ' and counting'}""" + } + + slackSend color: "${color}", + channel: "#python-driver-dev-bots", + message: "${message}" +} + +def submitCIMetrics(buildType) { + long durationMs = currentBuild.duration + long durationSec = durationMs / 1000 + long nowSec = (currentBuild.startTimeInMillis + durationMs) / 1000 + def branchNameNoPeriods = env.BRANCH_NAME.replaceAll('\\.', '_') + def durationMetric = "okr.ci.python.${env.DRIVER_METRIC_TYPE}.${buildType}.${branchNameNoPeriods} ${durationSec} ${nowSec}" + + timeout(time: 1, unit: 'MINUTES') { + withCredentials([string(credentialsId: 'lab-grafana-address', variable: 'LAB_GRAFANA_ADDRESS'), + string(credentialsId: 'lab-grafana-port', variable: 'LAB_GRAFANA_PORT')]) { + withEnv(["DURATION_METRIC=${durationMetric}"]) { + sh label: 'Send runtime metrics to labgrafana', script: '''#!/bin/bash -lex + echo "${DURATION_METRIC}" | nc -q 5 ${LAB_GRAFANA_ADDRESS} ${LAB_GRAFANA_PORT} + ''' + } + } + } +} + +def describePerCommitStage() { + script { + def type = 'standard' + def serverDescription = 'current Apache CassandaraⓇ and supported DataStax Enterprise versions' + if (env.BRANCH_NAME ==~ /long-python.*/) { + type = 'long' + } else if (env.BRANCH_NAME ==~ /dev-python.*/) { + type = 'dev' + } + + currentBuild.displayName = "Per-Commit (${env.EVENT_LOOP_MANAGER} | ${type.capitalize()})" + currentBuild.description = "Per-Commit build and ${type} testing of ${serverDescription} against Python v2.7.14 and v3.5.6 using ${env.EVENT_LOOP_MANAGER} event loop manager" + } +} + +def describeScheduledTestingStage() { + script { + def type = params.CI_SCHEDULE.toLowerCase().capitalize() + def displayName = "${type} schedule (${env.EVENT_LOOP_MANAGER}" + if (params.CYTHON) { + displayName += " | Cython" + } + if (params.PROFILE != 'NONE') { + displayName += " | ${params.PROFILE}" + } + displayName += ")" + currentBuild.displayName = displayName + + def serverVersionDescription = "${params.CI_SCHEDULE_SERVER_VERSION.replaceAll(' ', ', ')} server version(s) in the matrix" + def pythonVersionDescription = "${params.CI_SCHEDULE_PYTHON_VERSION.replaceAll(' ', ', ')} Python version(s) in the matrix" + def description = "${type} scheduled testing using ${env.EVENT_LOOP_MANAGER} event loop manager" + if (params.CYTHON) { + description += ", with Cython enabled" + } + if (params.PROFILE != 'NONE') { + description += ", ${params.PROFILE} profile" + } + description += ", ${serverVersionDescription}, and ${pythonVersionDescription}" + currentBuild.description = description + } +} + +def describeAdhocTestingStage() { + script { + def serverType = params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION.split('-')[0] + def serverDisplayName = 'Apache CassandaraⓇ' + def serverVersion = " v${serverType}" + if (serverType == 'ALL') { + serverDisplayName = "all ${serverDisplayName} and DataStax Enterprise server versions" + serverVersion = '' + } else { + try { + serverVersion = " v${env.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION.split('-')[1]}" + } catch (e) { + ;; // no-op + } + if (serverType == 'dse') { + serverDisplayName = 'DataStax Enterprise' + } + } + def displayName = "${params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION} for v${params.ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION} (${env.EVENT_LOOP_MANAGER}" + if (params.CYTHON) { + displayName += " | Cython" + } + if (params.PROFILE != 'NONE') { + displayName += " | ${params.PROFILE}" + } + displayName += ")" + currentBuild.displayName = displayName + + def description = "Testing ${serverDisplayName} ${serverVersion} using ${env.EVENT_LOOP_MANAGER} against Python ${params.ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION}" + if (params.CYTHON) { + description += ", with Cython" + } + if (params.PROFILE == 'NONE') { + if (params.EXECUTE_LONG_TESTS) { + description += ", with" + } else { + description += ", without" + } + description += " long tests executed" + } else { + description += ", ${params.PROFILE} profile" + } + currentBuild.description = description + } +} + +def branchPatternCron = ~"(master)" + +pipeline { + agent none + + // Global pipeline timeout + options { + timeout(time: 10, unit: 'HOURS') + buildDiscarder(logRotator(artifactNumToKeepStr: '10', // Keep only the last 10 artifacts + numToKeepStr: '50')) // Keep only the last 50 build records + } + + parameters { + choice( + name: 'ADHOC_BUILD_TYPE', + choices: ['BUILD', 'BUILD-AND-EXECUTE-TESTS'], + description: '''

Perform a adhoc build operation

+
+ + + + + + + + + + + + + + +
ChoiceDescription
BUILDPerforms a Per-Commit build
BUILD-AND-EXECUTE-TESTSPerforms a build and executes the integration and unit tests
''') + choice( + name: 'ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION', + choices: ['2.7.14', '3.4.9', '3.5.6', '3.6.6', '3.7.4', '3.8.0'], + description: 'Python version to use for adhoc BUILD-AND-EXECUTE-TESTS ONLY!') + choice( + name: 'ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION', + choices: ['2.1', // Legacy Apache CassandraⓇ + '2.2', // Legacy Apache CassandraⓇ + '3.0', // Previous Apache CassandraⓇ + '3.11', // Current Apache CassandraⓇ + '4.0', // Development Apache CassandraⓇ + 'dse-5.0', // Long Term Support DataStax Enterprise + 'dse-5.1', // Legacy DataStax Enterprise + 'dse-6.0', // Previous DataStax Enterprise + 'dse-6.7', // Previous DataStax Enterprise + 'dse-6.8.0', // Current DataStax Enterprise + 'dse-6.8', // Development DataStax Enterprise + 'ALL'], + description: '''Apache CassandraⓇ and DataStax Enterprise server version to use for adhoc BUILD-AND-EXECUTE-TESTS ONLY! + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ChoiceDescription
2.1Apache CassandaraⓇ; v2.1.x
2.2Apache CassandarⓇ; v2.2.x
3.0Apache CassandaraⓇ v3.0.x
3.11Apache CassandaraⓇ v3.11.x
4.0Apache CassandaraⓇ v4.x (CURRENTLY UNDER DEVELOPMENT)
dse-5.0DataStax Enterprise v5.0.x (Long Term Support)
dse-5.1DataStax Enterprise v5.1.x
dse-6.0DataStax Enterprise v6.0.x
dse-6.7DataStax Enterprise v6.7.x
dse-6.8.0DataStax Enterprise v6.8.0
dse-6.8DataStax Enterprise v6.8.x (CURRENTLY UNDER DEVELOPMENT)
''') + booleanParam( + name: 'CYTHON', + defaultValue: false, + description: 'Flag to determine if Cython should be enabled for scheduled or adhoc builds') + booleanParam( + name: 'EXECUTE_LONG_TESTS', + defaultValue: false, + description: 'Flag to determine if long integration tests should be executed for scheduled or adhoc builds') + choice( + name: 'EVENT_LOOP_MANAGER', + choices: ['LIBEV', 'GEVENT', 'EVENTLET', 'ASYNCIO', 'ASYNCORE', 'TWISTED'], + description: '''

Event loop manager to utilize for scheduled or adhoc builds

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ChoiceDescription
LIBEVA full-featured and high-performance event loop that is loosely modeled after libevent, but without its limitations and bugs
GEVENTA co-routine -based Python networking library that uses greenlet to provide a high-level synchronous API on top of the libev or libuv event loop
EVENTLETA concurrent networking library for Python that allows you to change how you run your code, not how you write it
ASYNCIOA library to write concurrent code using the async/await syntax
ASYNCOREA module provides the basic infrastructure for writing asynchronous socket service clients and servers
TWISTEDAn event-driven networking engine written in Python and licensed under the open source MIT license
''') + choice( + name: 'PROFILE', + choices: ['NONE', 'DSE-SMOKE-TEST', 'EVENT-LOOP', 'UPGRADE'], + description: '''

Profile to utilize for scheduled or adhoc builds

+ + + + + + + + + + + + + + + + + + + + + + + +
ChoiceDescription
NONEExecute the standard tests for the driver
DSE-SMOKE-TESTExecute only the DataStax Enterprise smoke tests
EVENT-LOOPExecute only the event loop tests for the specified event loop manager (see: EVENT_LOOP_MANAGER)
UPGRADEExecute only the upgrade tests
''') + choice( + name: 'CI_SCHEDULE', + choices: ['DO-NOT-CHANGE-THIS-SELECTION', 'WEEKNIGHTS', 'WEEKENDS'], + description: 'CI testing schedule to execute periodically scheduled builds and tests of the driver (DO NOT CHANGE THIS SELECTION)') + string( + name: 'CI_SCHEDULE_PYTHON_VERSION', + defaultValue: 'DO-NOT-CHANGE-THIS-SELECTION', + description: 'CI testing python version to utilize for scheduled test runs of the driver (DO NOT CHANGE THIS SELECTION)') + string( + name: 'CI_SCHEDULE_SERVER_VERSION', + defaultValue: 'DO-NOT-CHANGE-THIS-SELECTION', + description: 'CI testing server version to utilize for scheduled test runs of the driver (DO NOT CHANGE THIS SELECTION)') + } + + triggers { + parameterizedCron(branchPatternCron.matcher(env.BRANCH_NAME).matches() ? """ + # Every weeknight (Monday - Friday) around 4:00 AM + # These schedules will run with and without Cython enabled for Python v2.7.14 and v3.5.6 + H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.14;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 + H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.5.6;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 + + # Every Saturday around 12:00, 4:00 and 8:00 PM + # These schedules are for weekly libev event manager runs with and without Cython for most of the Python versions (excludes v3.5.6.x) + H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.14;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 + H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.4.9;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 + H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.6.6;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 + H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.7.4;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 + H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.8.0;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 + # These schedules are for weekly gevent event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.9.x) + H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.14;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.6;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.6;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.4;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.0;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + # These schedules are for weekly eventlet event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.9.x) + H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.14;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.6;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.6;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.4;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.0;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + + # Every Sunday around 12:00 and 4:00 AM + # These schedules are for weekly asyncore event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.9.x) + H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.14;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.6;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.6;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.4;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.0;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + # These schedules are for weekly twisted event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.9.x) + H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.14;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.6;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.6;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.4;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.0;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + """ : "") + } + + environment { + OS_VERSION = 'ubuntu/bionic64/python-driver' + CYTHON_ENABLED = "${params.CYTHON ? 'True' : 'False'}" + EVENT_LOOP_MANAGER = "${params.EVENT_LOOP_MANAGER.toLowerCase()}" + EXECUTE_LONG_TESTS = "${params.EXECUTE_LONG_TESTS ? 'True' : 'False'}" + CCM_ENVIRONMENT_SHELL = '/usr/local/bin/ccm_environment.sh' + } + + stages { + stage ('Per-Commit') { + options { + timeout(time: 2, unit: 'HOURS') + } + when { + beforeAgent true + branch pattern: '((dev|long)-)+python.*', comparator: 'REGEXP' + allOf { + expression { params.ADHOC_BUILD_TYPE == 'BUILD' } + expression { params.CI_SCHEDULE == 'DO-NOT-CHANGE-THIS-SELECTION' } + not { buildingTag() } + } + } + + matrix { + axes { + axis { + name 'CASSANDRA_VERSION' + values '3.11', // Current Apache Cassandra + 'dse-6.8.0' // Current DataStax Enterprise + } + axis { + name 'PYTHON_VERSION' + values '2.7.14', '3.5.6' + } + axis { + name 'CYTHON_ENABLED' + values 'False' + } + } + + agent { + label "${OS_VERSION}" + } + + stages { + stage('Initialize-Environment') { + steps { + initializeEnvironment() + script { + if (env.BUILD_STATED_SLACK_NOTIFIED != 'true') { + notifySlack() + } + } + } + } + stage('Describe-Build') { + steps { + describePerCommitStage() + } + } + stage('Install-Driver-And-Compile-Extensions') { + steps { + installDriverAndCompileExtensions() + } + } + stage('Execute-Tests') { + steps { + + script { + if (env.BRANCH_NAME ==~ /long-python.*/) { + withEnv(["EXECUTE_LONG_TESTS=True"]) { + executeTests() + } + } + else { + executeTests() + } + } + } + post { + always { + junit testResults: '*_results.xml' + } + } + } + } + } + post { + always { + node('master') { + submitCIMetrics('commit') + } + } + aborted { + notifySlack('aborted') + } + success { + notifySlack('completed') + } + unstable { + notifySlack('unstable') + } + failure { + notifySlack('FAILED') + } + } + } + + stage ('Scheduled-Testing') { + when { + beforeAgent true + allOf { + expression { params.ADHOC_BUILD_TYPE == 'BUILD' } + expression { params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION' } + not { buildingTag() } + } + } + matrix { + axes { + axis { + name 'CASSANDRA_VERSION' + values '2.1', // Legacy Apache Cassandra + '2.2', // Legacy Apache Cassandra + '3.0', // Previous Apache Cassandra + '3.11', // Current Apache Cassandra + 'dse-5.1', // Legacy DataStax Enterprise + 'dse-6.0', // Previous DataStax Enterprise + 'dse-6.7' // Current DataStax Enterprise + } + axis { + name 'CYTHON_ENABLED' + values 'True', 'False' + } + } + when { + beforeAgent true + allOf { + expression { return params.CI_SCHEDULE_SERVER_VERSION.split(' ').any { it =~ /(ALL|${env.CASSANDRA_VERSION})/ } } + } + } + + environment { + PYTHON_VERSION = "${params.CI_SCHEDULE_PYTHON_VERSION}" + } + agent { + label "${OS_VERSION}" + } + + stages { + stage('Initialize-Environment') { + steps { + initializeEnvironment() + script { + if (env.BUILD_STATED_SLACK_NOTIFIED != 'true') { + notifySlack() + } + } + } + } + stage('Describe-Build') { + steps { + describeScheduledTestingStage() + } + } + stage('Install-Driver-And-Compile-Extensions') { + steps { + installDriverAndCompileExtensions() + } + } + stage('Execute-Tests') { + steps { + executeTests() + } + post { + always { + junit testResults: '*_results.xml' + } + } + } + } + } + post { + aborted { + notifySlack('aborted') + } + success { + notifySlack('completed') + } + unstable { + notifySlack('unstable') + } + failure { + notifySlack('FAILED') + } + } + } + + + stage('Adhoc-Testing') { + when { + beforeAgent true + allOf { + expression { params.ADHOC_BUILD_TYPE == 'BUILD-AND-EXECUTE-TESTS' } + not { buildingTag() } + } + } + + environment { + CYTHON_ENABLED = "${params.CYTHON ? 'True' : 'False'}" + PYTHON_VERSION = "${params.ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION}" + } + + matrix { + axes { + axis { + name 'CASSANDRA_VERSION' + values '2.1', // Legacy Apache Cassandra + '2.2', // Legacy Apache Cassandra + '3.0', // Previous Apache Cassandra + '3.11', // Current Apache Cassandra + '4.0', // Development Apache Cassandra + 'dse-5.0', // Long Term Support DataStax Enterprise + 'dse-5.1', // Legacy DataStax Enterprise + 'dse-6.0', // Previous DataStax Enterprise + 'dse-6.7', // Current DataStax Enterprise + 'dse-6.8' // Development DataStax Enterprise + } + } + when { + beforeAgent true + allOf { + expression { params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION ==~ /(ALL|${env.CASSANDRA_VERSION})/ } + } + } + + agent { + label "${OS_VERSION}" + } + + stages { + stage('Describe-Build') { + steps { + describeAdhocTestingStage() + } + } + stage('Initialize-Environment') { + steps { + initializeEnvironment() + } + } + stage('Install-Driver-And-Compile-Extensions') { + steps { + installDriverAndCompileExtensions() + } + } + stage('Execute-Tests') { + steps { + executeTests() + } + post { + always { + junit testResults: '*_results.xml' + } + } + } + } + } + } + } +} diff --git a/build.yaml b/build.yaml.bak similarity index 100% rename from build.yaml rename to build.yaml.bak From 4704453d52d65fa4b02ddbdcffb1425695483022 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 8 May 2020 13:40:38 -0400 Subject: [PATCH 1797/2005] paging_state typo --- docs/query_paging.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/query_paging.rst b/docs/query_paging.rst index 2c4a4995ca..23ee2c1129 100644 --- a/docs/query_paging.rst +++ b/docs/query_paging.rst @@ -86,7 +86,7 @@ You can resume the pagination when executing a new query by using the :attr:`.Re results = session.execute(statement) # save the paging_state somewhere and return current results - web_session['paging_stage'] = results.paging_state + web_session['paging_state'] = results.paging_state # resume the pagination sometime later... From 448a879c883d8d0e3fab32cebd7d20a0b329ed64 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 11 May 2020 12:00:16 -0400 Subject: [PATCH 1798/2005] Do not trigger a schedule for the riptano repo --- Jenkinsfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 0b26bf00d0..a0d1730a61 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -361,6 +361,7 @@ def describeAdhocTestingStage() { } def branchPatternCron = ~"(master)" +def repoPatternCron = ~"^(riptano)" pipeline { agent none @@ -551,7 +552,7 @@ pipeline { } triggers { - parameterizedCron(branchPatternCron.matcher(env.BRANCH_NAME).matches() ? """ + parameterizedCron(branchPatternCron.matcher(env.BRANCH_NAME).matches() && repoPatternCron.matcher(env.GIT_URL).matches() ? """ # Every weeknight (Monday - Friday) around 4:00 AM # These schedules will run with and without Cython enabled for Python v2.7.14 and v3.5.6 H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.14;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 From 78c898275b4de284f9173d45dbf8c12952985099 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 12 May 2020 10:31:57 -0400 Subject: [PATCH 1799/2005] Change the riptano repo pattern match string --- Jenkinsfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index a0d1730a61..e7a53f12cb 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -361,7 +361,7 @@ def describeAdhocTestingStage() { } def branchPatternCron = ~"(master)" -def repoPatternCron = ~"^(riptano)" +def riptanoPatternCron = ~"(riptano)" pipeline { agent none @@ -552,7 +552,7 @@ pipeline { } triggers { - parameterizedCron(branchPatternCron.matcher(env.BRANCH_NAME).matches() && repoPatternCron.matcher(env.GIT_URL).matches() ? """ + parameterizedCron(branchPatternCron.matcher(env.BRANCH_NAME).matches() && !riptanoPatternCron.matcher(env.GIT_URL).find() ? """ # Every weeknight (Monday - Friday) around 4:00 AM # These schedules will run with and without Cython enabled for Python v2.7.14 and v3.5.6 H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.14;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 From f2347735aa414bcbef333fa9f7723562624b4394 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 13 May 2020 09:09:13 -0400 Subject: [PATCH 1800/2005] Refactored graph fluent tests to avoid ram issue --- .../advanced/graph/fluent/__init__.py | 709 +++++++++++++++ .../advanced/graph/fluent/test_graph.py | 860 +----------------- .../fluent/test_graph_explicit_execution.py | 96 ++ .../fluent/test_graph_implicit_execution.py | 108 +++ 4 files changed, 923 insertions(+), 850 deletions(-) create mode 100644 tests/integration/advanced/graph/fluent/test_graph_explicit_execution.py create mode 100644 tests/integration/advanced/graph/fluent/test_graph_implicit_execution.py diff --git a/tests/integration/advanced/graph/fluent/__init__.py b/tests/integration/advanced/graph/fluent/__init__.py index 2c9ca172f8..3bb81e78e3 100644 --- a/tests/integration/advanced/graph/fluent/__init__.py +++ b/tests/integration/advanced/graph/fluent/__init__.py @@ -11,3 +11,712 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +import sys +import datetime +import six +import time +from collections import namedtuple +from packaging.version import Version + +from cassandra.datastax.graph.fluent import DseGraph +from cassandra.graph import VertexProperty, GraphProtocol +from cassandra.util import Point, Polygon, LineString + +from gremlin_python.process.graph_traversal import GraphTraversal, GraphTraversalSource +from gremlin_python.process.traversal import P +from gremlin_python.structure.graph import Edge as TravEdge +from gremlin_python.structure.graph import Vertex as TravVertex, VertexProperty as TravVertexProperty + +from tests.util import wait_until_not_raised +from tests.integration import DSE_VERSION +from tests.integration.advanced.graph import ( + GraphUnitTestCase, ClassicGraphSchema, CoreGraphSchema, + VertexLabel) +from tests.integration import requiredse + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + + +import ipaddress + + +def check_equality_base(testcase, original, read_value): + if isinstance(original, float): + testcase.assertAlmostEqual(original, read_value, delta=.01) + elif isinstance(original, ipaddress.IPv4Address): + testcase.assertAlmostEqual(original, ipaddress.IPv4Address(read_value)) + elif isinstance(original, ipaddress.IPv6Address): + testcase.assertAlmostEqual(original, ipaddress.IPv6Address(read_value)) + else: + testcase.assertEqual(original, read_value) + + +def create_traversal_profiles(cluster, graph_name): + ep_graphson2 = DseGraph().create_execution_profile( + graph_name, graph_protocol=GraphProtocol.GRAPHSON_2_0) + ep_graphson3 = DseGraph().create_execution_profile( + graph_name, graph_protocol=GraphProtocol.GRAPHSON_3_0) + + cluster.add_execution_profile('traversal_graphson2', ep_graphson2) + cluster.add_execution_profile('traversal_graphson3', ep_graphson3) + + return ep_graphson2, ep_graphson3 + + +class _AbstractTraversalTest(GraphUnitTestCase): + + def setUp(self): + super(_AbstractTraversalTest, self).setUp() + self.ep_graphson2, self.ep_graphson3 = create_traversal_profiles(self.cluster, self.graph_name) + + def _test_basic_query(self, schema, graphson): + """ + Test to validate that basic graph queries works + + Creates a simple classic tinkerpot graph, and attempts to preform a basic query + using Tinkerpop's GLV with both explicit and implicit execution + ensuring that each one is correct. See reference graph here + http://www.tinkerpop.com/docs/3.0.0.M1/ + + @since 1.0.0 + @jira_ticket PYTHON-641 + @expected_result graph should generate and all vertices and edge results should be + + @test_category dse graph + """ + + g = self.fetch_traversal_source(graphson) + self.execute_graph(schema.fixtures.classic(), graphson) + traversal = g.V().has('name', 'marko').out('knows').values('name') + results_list = self.execute_traversal(traversal, graphson) + self.assertEqual(len(results_list), 2) + self.assertIn('vadas', results_list) + self.assertIn('josh', results_list) + + def _test_classic_graph(self, schema, graphson): + """ + Test to validate that basic graph generation, and vertex and edges are surfaced correctly + + Creates a simple classic tinkerpot graph, and iterates over the the vertices and edges + using Tinkerpop's GLV with both explicit and implicit execution + ensuring that each one iscorrect. See reference graph here + http://www.tinkerpop.com/docs/3.0.0.M1/ + + @since 1.0.0 + @jira_ticket PYTHON-641 + @expected_result graph should generate and all vertices and edge results should be + + @test_category dse graph + """ + + self.execute_graph(schema.fixtures.classic(), graphson) + ep = self.get_execution_profile(graphson) + g = self.fetch_traversal_source(graphson) + traversal = g.V() + vert_list = self.execute_traversal(traversal, graphson) + + for vertex in vert_list: + schema.ensure_properties(self.session, vertex, execution_profile=ep) + self._validate_classic_vertex(g, vertex) + traversal = g.E() + edge_list = self.execute_traversal(traversal, graphson) + for edge in edge_list: + schema.ensure_properties(self.session, edge, execution_profile=ep) + self._validate_classic_edge(g, edge) + + def _test_graph_classic_path(self, schema, graphson): + """ + Test to validate that the path version of the result type is generated correctly. It also + tests basic path results as that is not covered elsewhere + + @since 1.0.0 + @jira_ticket PYTHON-641 + @expected_result path object should be unpacked correctly including all nested edges and vertices + @test_category dse graph + """ + self.execute_graph(schema.fixtures.classic(), graphson) + g = self.fetch_traversal_source(graphson) + traversal = g.V().hasLabel('person').has('name', 'marko').as_('a').outE('knows').inV().as_('c', 'd').outE('created').as_('e', 'f', 'g').inV().path() + path_list = self.execute_traversal(traversal, graphson) + self.assertEqual(len(path_list), 2) + for path in path_list: + self._validate_path_result_type(g, path) + + def _test_range_query(self, schema, graphson): + """ + Test to validate range queries are handled correctly. + + Creates a very large line graph script and executes it. Then proceeds to to a range + limited query against it, and ensure that the results are formated correctly and that + the result set is properly sized. + + @since 1.0.0 + @jira_ticket PYTHON-641 + @expected_result result set should be properly formated and properly sized + + @test_category dse graph + """ + + self.execute_graph(schema.fixtures.line(150), graphson) + ep = self.get_execution_profile(graphson) + g = self.fetch_traversal_source(graphson) + + traversal = g.E().range(0, 10) + edges = self.execute_traversal(traversal, graphson) + self.assertEqual(len(edges), 10) + for edge in edges: + schema.ensure_properties(self.session, edge, execution_profile=ep) + self._validate_line_edge(g, edge) + + def _test_result_types(self, schema, graphson): + """ + Test to validate that the edge and vertex version of results are constructed correctly. + + @since 1.0.0 + @jira_ticket PYTHON-641 + @expected_result edge/vertex result types should be unpacked correctly. + @test_category dse graph + """ + self.execute_graph(schema.fixtures.line(150), graphson) + g = self.fetch_traversal_source(graphson) + traversal = g.V() + vertices = self.execute_traversal(traversal, graphson) + for vertex in vertices: + self._validate_type(g, vertex) + + def _test_large_result_set(self, schema, graphson): + """ + Test to validate that large result sets return correctly. + + Creates a very large graph. Ensures that large result sets are handled appropriately. + + @since 1.0.0 + @jira_ticket PYTHON-641 + @expected_result when limits of result sets are hit errors should be surfaced appropriately + + @test_category dse graph + """ + self.execute_graph(schema.fixtures.large(), graphson) + g = self.fetch_traversal_source(graphson) + traversal = g.V() + vertices = self.execute_traversal(traversal, graphson) + for vertex in vertices: + self._validate_generic_vertex_result_type(g, vertex) + + def _test_vertex_meta_properties(self, schema, graphson): + """ + Test verifying vertex property properties + + @since 1.0.0 + @jira_ticket PYTHON-641 + + @test_category dse graph + """ + if schema is not ClassicGraphSchema: + raise unittest.SkipTest('skipped because multiple properties are only supported with classic graphs') + + s = self.session + s.execute_graph("schema.propertyKey('k0').Text().ifNotExists().create();") + s.execute_graph("schema.propertyKey('k1').Text().ifNotExists().create();") + s.execute_graph("schema.propertyKey('key').Text().properties('k0', 'k1').ifNotExists().create();") + s.execute_graph("schema.vertexLabel('MLP').properties('key').ifNotExists().create();") + s.execute_graph("schema.config().option('graph.allow_scan').set('true');") + v = s.execute_graph('''v = graph.addVertex('MLP') + v.property('key', 'meta_prop', 'k0', 'v0', 'k1', 'v1') + v''')[0] + + g = self.fetch_traversal_source(graphson) + + traversal = g.V() + # This should contain key, and value where value is a property + # This should be a vertex property and should contain sub properties + results = self.execute_traversal(traversal, graphson) + self._validate_meta_property(g, results[0]) + + def _test_vertex_multiple_properties(self, schema, graphson): + """ + Test verifying vertex property form for various Cardinality + + All key types are encoded as a list, regardless of cardinality + + Single cardinality properties have only one value -- the last one added + + Default is single (this is config dependent) + + @since 1.0.0 + @jira_ticket PYTHON-641 + + @test_category dse graph + """ + if schema is not ClassicGraphSchema: + raise unittest.SkipTest('skipped because multiple properties are only supported with classic graphs') + + s = self.session + s.execute_graph('''Schema schema = graph.schema(); + schema.propertyKey('mult_key').Text().multiple().ifNotExists().create(); + schema.propertyKey('single_key').Text().single().ifNotExists().create(); + schema.vertexLabel('MPW1').properties('mult_key').ifNotExists().create(); + schema.vertexLabel('MPW2').properties('mult_key').ifNotExists().create(); + schema.vertexLabel('SW1').properties('single_key').ifNotExists().create();''') + + mpw1v = s.execute_graph('''v = graph.addVertex('MPW1') + v.property('mult_key', 'value') + v''')[0] + + mpw2v = s.execute_graph('''g.addV('MPW2').property('mult_key', 'value0').property('mult_key', 'value1')''')[0] + + g = self.fetch_traversal_source(graphson) + traversal = g.V(mpw1v.id).properties() + + vertex_props = self.execute_traversal(traversal, graphson) + + self.assertEqual(len(vertex_props), 1) + + self.assertEqual(self.fetch_key_from_prop(vertex_props[0]), "mult_key") + self.assertEqual(vertex_props[0].value, "value") + + # multiple_with_two_values + #v = s.execute_graph('''g.addV(label, 'MPW2', 'mult_key', 'value0', 'mult_key', 'value1')''')[0] + traversal = g.V(mpw2v.id).properties() + + vertex_props = self.execute_traversal(traversal, graphson) + + self.assertEqual(len(vertex_props), 2) + self.assertEqual(self.fetch_key_from_prop(vertex_props[0]), 'mult_key') + self.assertEqual(self.fetch_key_from_prop(vertex_props[1]), 'mult_key') + self.assertEqual(vertex_props[0].value, 'value0') + self.assertEqual(vertex_props[1].value, 'value1') + + # single_with_one_value + v = s.execute_graph('''v = graph.addVertex('SW1') + v.property('single_key', 'value') + v''')[0] + traversal = g.V(v.id).properties() + vertex_props = self.execute_traversal(traversal, graphson) + self.assertEqual(len(vertex_props), 1) + self.assertEqual(self.fetch_key_from_prop(vertex_props[0]), "single_key") + self.assertEqual(vertex_props[0].value, "value") + + def should_parse_meta_properties(self): + g = self.fetch_traversal_source() + g.addV("meta_v").property("meta_prop", "hello", "sub_prop", "hi", "sub_prop2", "hi2") + + def _test_all_graph_types_with_schema(self, schema, graphson): + """ + Exhaustively goes through each type that is supported by dse_graph. + creates a vertex for each type using a dse-tinkerpop traversal, + It then attempts to fetch it from the server and compares it to what was inserted + Prime the graph with the correct schema first + + @since 1.0.0 + @jira_ticket PYTHON-641 + @expected_result inserted objects are equivalent to those retrieved + + @test_category dse graph + """ + self._write_and_read_data_types(schema, graphson) + + def _test_all_graph_types_without_schema(self, schema, graphson): + """ + Exhaustively goes through each type that is supported by dse_graph. + creates a vertex for each type using a dse-tinkerpop traversal, + It then attempts to fetch it from the server and compares it to what was inserted + Do not prime the graph with the correct schema first + @since 1.0.0 + @jira_ticket PYTHON-641 + @expected_result inserted objects are equivalent to those retrieved + @test_category dse graph + """ + if schema is not ClassicGraphSchema: + raise unittest.SkipTest('schema-less is only for classic graphs') + self._write_and_read_data_types(schema, graphson, use_schema=False) + + def _test_dsl(self, schema, graphson): + """ + The test creates a SocialTraversal and a SocialTraversalSource as part of + a DSL. Then calls it's method and checks the results to verify + we have the expected results + + @since @since 1.1.0a1 + @jira_ticket PYTHON-790 + @expected_result only the vertex corresponding to marko is in the result + + @test_category dse graph + """ + class SocialTraversal(GraphTraversal): + def knows(self, person_name): + return self.out("knows").hasLabel("person").has("name", person_name).in_() + + class SocialTraversalSource(GraphTraversalSource): + def __init__(self, *args, **kwargs): + super(SocialTraversalSource, self).__init__(*args, **kwargs) + self.graph_traversal = SocialTraversal + + def people(self, *names): + return self.get_graph_traversal().V().has("name", P.within(*names)) + + self.execute_graph(schema.fixtures.classic(), graphson) + if schema is CoreGraphSchema: + self.execute_graph(""" + schema.edgeLabel('knows').from('person').to('person').materializedView('person__knows__person_by_in_name'). + ifNotExists().partitionBy('in_name').clusterBy('out_name', Asc).create() + """, graphson) + time.sleep(1) # give some time to the MV to be populated + g = self.fetch_traversal_source(graphson, traversal_class=SocialTraversalSource) + + traversal = g.people("marko", "albert").knows("vadas") + results = self.execute_traversal(traversal, graphson) + self.assertEqual(len(results), 1) + only_vertex = results[0] + schema.ensure_properties(self.session, only_vertex, + execution_profile=self.get_execution_profile(graphson)) + self._validate_classic_vertex(g, only_vertex) + + def _test_bulked_results(self, schema, graphson): + """ + Send a query expecting a bulked result and the driver "undoes" + the bulk and returns the expected list + + @since 1.1.0a1 + @jira_ticket PYTHON-771 + @expected_result the expanded list + + @test_category dse graph + """ + self.execute_graph(schema.fixtures.classic(), graphson) + g = self.fetch_traversal_source(graphson) + barrier_traversal = g.E().label().barrier() + results = self.execute_traversal(barrier_traversal, graphson) + self.assertEqual(sorted(["created", "created", "created", "created", "knows", "knows"]), sorted(results)) + + def _test_udt_with_classes(self, schema, graphson): + class Address(object): + + def __init__(self, address, city, state): + self.address = address + self.city = city + self.state = state + + def __eq__(self, other): + return self.address == other.address and self.city == other.city and self.state == other.state + + class AddressWithTags(object): + + def __init__(self, address, city, state, tags): + self.address = address + self.city = city + self.state = state + self.tags = tags + + def __eq__(self, other): + return (self.address == other.address and self.city == other.city + and self.state == other.state and self.tags == other.tags) + + class ComplexAddress(object): + + def __init__(self, address, address_tags, city, state, props): + self.address = address + self.address_tags = address_tags + self.city = city + self.state = state + self.props = props + + def __eq__(self, other): + return (self.address == other.address and self.address_tags == other.address_tags + and self.city == other.city and self.state == other.state + and self.props == other.props) + + class ComplexAddressWithOwners(object): + + def __init__(self, address, address_tags, city, state, props, owners): + self.address = address + self.address_tags = address_tags + self.city = city + self.state = state + self.props = props + self.owners = owners + + def __eq__(self, other): + return (self.address == other.address and self.address_tags == other.address_tags + and self.city == other.city and self.state == other.state + and self.props == other.props and self.owners == other.owners) + + self.__test_udt(schema, graphson, Address, AddressWithTags, ComplexAddress, ComplexAddressWithOwners) + + def _test_udt_with_namedtuples(self, schema, graphson): + AddressTuple = namedtuple('Address', ('address', 'city', 'state')) + AddressWithTagsTuple = namedtuple('AddressWithTags', ('address', 'city', 'state', 'tags')) + ComplexAddressTuple = namedtuple('ComplexAddress', ('address', 'address_tags', 'city', 'state', 'props')) + ComplexAddressWithOwnersTuple = namedtuple('ComplexAddressWithOwners', ('address', 'address_tags', 'city', + 'state', 'props', 'owners')) + + self.__test_udt(schema, graphson, AddressTuple, AddressWithTagsTuple, + ComplexAddressTuple, ComplexAddressWithOwnersTuple) + + def _write_and_read_data_types(self, schema, graphson, use_schema=True): + g = self.fetch_traversal_source(graphson) + ep = self.get_execution_profile(graphson) + for data in six.itervalues(schema.fixtures.datatypes()): + typ, value, deserializer = data + vertex_label = VertexLabel([typ]) + property_name = next(six.iterkeys(vertex_label.non_pk_properties)) + if use_schema or schema is CoreGraphSchema: + schema.create_vertex_label(self.session, vertex_label, execution_profile=ep) + + write_traversal = g.addV(str(vertex_label.label)).property('pkid', vertex_label.id).\ + property(property_name, value) + self.execute_traversal(write_traversal, graphson) + + read_traversal = g.V().hasLabel(str(vertex_label.label)).has(property_name).properties() + results = self.execute_traversal(read_traversal, graphson) + + for result in results: + if result.label == 'pkid': + continue + self._check_equality(g, value, result.value) + + def __test_udt(self, schema, graphson, address_class, address_with_tags_class, + complex_address_class, complex_address_with_owners_class): + if schema is not CoreGraphSchema or DSE_VERSION < Version('6.8'): + raise unittest.SkipTest("Graph UDT is only supported with DSE 6.8+ and Core graphs.") + + ep = self.get_execution_profile(graphson) + + Address = address_class + AddressWithTags = address_with_tags_class + ComplexAddress = complex_address_class + ComplexAddressWithOwners = complex_address_with_owners_class + + # setup udt + self.session.execute_graph(""" + schema.type('address').property('address', Text).property('city', Text).property('state', Text).create(); + schema.type('addressTags').property('address', Text).property('city', Text).property('state', Text). + property('tags', setOf(Text)).create(); + schema.type('complexAddress').property('address', Text).property('address_tags', frozen(typeOf('addressTags'))). + property('city', Text).property('state', Text).property('props', mapOf(Text, Int)).create(); + schema.type('complexAddressWithOwners').property('address', Text). + property('address_tags', frozen(typeOf('addressTags'))). + property('city', Text).property('state', Text).property('props', mapOf(Text, Int)). + property('owners', frozen(listOf(tupleOf(Text, Int)))).create(); + """, execution_profile=ep) + + # wait max 10 seconds to get the UDT discovered. + wait_until_not_raised( + lambda: self.session.cluster.register_user_type(self.graph_name, 'address', Address), + 1, 10) + wait_until_not_raised( + lambda: self.session.cluster.register_user_type(self.graph_name, 'addressTags', AddressWithTags), + 1, 10) + wait_until_not_raised( + lambda: self.session.cluster.register_user_type(self.graph_name, 'complexAddress', ComplexAddress), + 1, 10) + wait_until_not_raised( + lambda: self.session.cluster.register_user_type(self.graph_name, 'complexAddressWithOwners', ComplexAddressWithOwners), + 1, 10) + + data = { + "udt1": ["typeOf('address')", Address('1440 Rd Smith', 'Quebec', 'QC')], + "udt2": ["tupleOf(typeOf('address'), Text)", (Address('1440 Rd Smith', 'Quebec', 'QC'), 'hello')], + "udt3": ["tupleOf(frozen(typeOf('address')), Text)", (Address('1440 Rd Smith', 'Quebec', 'QC'), 'hello')], + "udt4": ["tupleOf(tupleOf(Int, typeOf('address')), Text)", + ((42, Address('1440 Rd Smith', 'Quebec', 'QC')), 'hello')], + "udt5": ["tupleOf(tupleOf(Int, typeOf('addressTags')), Text)", + ((42, AddressWithTags('1440 Rd Smith', 'Quebec', 'QC', {'t1', 't2'})), 'hello')], + "udt6": ["tupleOf(tupleOf(Int, typeOf('complexAddress')), Text)", + ((42, ComplexAddress('1440 Rd Smith', + AddressWithTags('1440 Rd Smith', 'Quebec', 'QC', {'t1', 't2'}), + 'Quebec', 'QC', {'p1': 42, 'p2': 33})), 'hello')], + "udt7": ["tupleOf(tupleOf(Int, frozen(typeOf('complexAddressWithOwners'))), Text)", + ((42, ComplexAddressWithOwners( + '1440 Rd Smith', + AddressWithTags('1440 CRd Smith', 'Quebec', 'QC', {'t1', 't2'}), + 'Quebec', 'QC', {'p1': 42, 'p2': 33}, [('Mike', 43), ('Gina', 39)]) + ), 'hello')] + } + + g = self.fetch_traversal_source(graphson) + for typ, value in six.itervalues(data): + vertex_label = VertexLabel([typ]) + property_name = next(six.iterkeys(vertex_label.non_pk_properties)) + schema.create_vertex_label(self.session, vertex_label, execution_profile=ep) + + write_traversal = g.addV(str(vertex_label.label)).property('pkid', vertex_label.id). \ + property(property_name, value) + self.execute_traversal(write_traversal, graphson) + + #vertex = list(schema.add_vertex(self.session, vertex_label, property_name, value, execution_profile=ep))[0] + #vertex_properties = list(schema.get_vertex_properties( + # self.session, vertex, execution_profile=ep)) + + read_traversal = g.V().hasLabel(str(vertex_label.label)).has(property_name).properties() + vertex_properties = self.execute_traversal(read_traversal, graphson) + + self.assertEqual(len(vertex_properties), 2) # include pkid + for vp in vertex_properties: + if vp.label == 'pkid': + continue + + self.assertIsInstance(vp, (VertexProperty, TravVertexProperty)) + self.assertEqual(vp.label, property_name) + self.assertEqual(vp.value, value) + + @staticmethod + def fetch_edge_props(g, edge): + edge_props = g.E(edge.id).properties().toList() + return edge_props + + @staticmethod + def fetch_vertex_props(g, vertex): + + vertex_props = g.V(vertex.id).properties().toList() + return vertex_props + + def _check_equality(self, g, original, read_value): + return check_equality_base(self, original, read_value) + + +def _validate_prop(key, value, unittest): + if key == 'index': + return + + if any(key.startswith(t) for t in ('int', 'short')): + typ = int + + elif any(key.startswith(t) for t in ('long',)): + if sys.version_info >= (3, 0): + typ = int + else: + typ = long + elif any(key.startswith(t) for t in ('float', 'double')): + typ = float + elif any(key.startswith(t) for t in ('polygon',)): + typ = Polygon + elif any(key.startswith(t) for t in ('point',)): + typ = Point + elif any(key.startswith(t) for t in ('Linestring',)): + typ = LineString + elif any(key.startswith(t) for t in ('neg',)): + typ = six.string_types + elif any(key.startswith(t) for t in ('date',)): + typ = datetime.date + elif any(key.startswith(t) for t in ('time',)): + typ = datetime.time + else: + unittest.fail("Received unexpected type: %s" % key) + + +@requiredse +class BaseImplicitExecutionTest(GraphUnitTestCase): + """ + This test class will execute all tests of the AbstractTraversalTestClass using implicit execution + This all traversal will be run directly using toList() + """ + def setUp(self): + super(BaseImplicitExecutionTest, self).setUp() + if DSE_VERSION: + self.ep = DseGraph().create_execution_profile(self.graph_name) + self.cluster.add_execution_profile(self.graph_name, self.ep) + + @staticmethod + def fetch_key_from_prop(property): + return property.key + + def fetch_traversal_source(self, graphson, **kwargs): + ep = self.get_execution_profile(graphson, traversal=True) + return DseGraph().traversal_source(self.session, self.graph_name, execution_profile=ep, **kwargs) + + def execute_traversal(self, traversal, graphson=None): + return traversal.toList() + + def _validate_classic_vertex(self, g, vertex): + # Checks the properties on a classic vertex for correctness + vertex_props = self.fetch_vertex_props(g, vertex) + vertex_prop_keys = [vp.key for vp in vertex_props] + self.assertEqual(len(vertex_prop_keys), 2) + self.assertIn('name', vertex_prop_keys) + self.assertTrue('lang' in vertex_prop_keys or 'age' in vertex_prop_keys) + + def _validate_generic_vertex_result_type(self, g, vertex): + # Checks a vertex object for it's generic properties + properties = self.fetch_vertex_props(g, vertex) + for attr in ('id', 'label'): + self.assertIsNotNone(getattr(vertex, attr)) + self.assertTrue(len(properties) > 2) + + def _validate_classic_edge_properties(self, g, edge): + # Checks the properties on a classic edge for correctness + edge_props = self.fetch_edge_props(g, edge) + edge_prop_keys = [ep.key for ep in edge_props] + self.assertEqual(len(edge_prop_keys), 1) + self.assertIn('weight', edge_prop_keys) + + def _validate_classic_edge(self, g, edge): + self._validate_generic_edge_result_type(edge) + self._validate_classic_edge_properties(g, edge) + + def _validate_line_edge(self, g, edge): + self._validate_generic_edge_result_type(edge) + edge_props = self.fetch_edge_props(g, edge) + edge_prop_keys = [ep.key for ep in edge_props] + self.assertEqual(len(edge_prop_keys), 1) + self.assertIn('distance', edge_prop_keys) + + def _validate_generic_edge_result_type(self, edge): + self.assertIsInstance(edge, TravEdge) + + for attr in ('outV', 'inV', 'label', 'id'): + self.assertIsNotNone(getattr(edge, attr)) + + def _validate_path_result_type(self, g, objects_path): + for obj in objects_path: + if isinstance(obj, TravEdge): + self._validate_classic_edge(g, obj) + elif isinstance(obj, TravVertex): + self._validate_classic_vertex(g, obj) + else: + self.fail("Invalid object found in path " + str(obj.type)) + + def _validate_meta_property(self, g, vertex): + meta_props = g.V(vertex.id).properties().toList() + self.assertEqual(len(meta_props), 1) + meta_prop = meta_props[0] + self.assertEqual(meta_prop.value, "meta_prop") + self.assertEqual(meta_prop.key, "key") + + nested_props = g.V(vertex.id).properties().properties().toList() + self.assertEqual(len(nested_props), 2) + for nested_prop in nested_props: + self.assertTrue(nested_prop.key in ['k0', 'k1']) + self.assertTrue(nested_prop.value in ['v0', 'v1']) + + def _validate_type(self, g, vertex): + props = self.fetch_vertex_props(g, vertex) + for prop in props: + value = prop.value + key = prop.key + _validate_prop(key, value, self) + + +class BaseExplicitExecutionTest(GraphUnitTestCase): + + def fetch_traversal_source(self, graphson, **kwargs): + ep = self.get_execution_profile(graphson, traversal=True) + return DseGraph().traversal_source(self.session, self.graph_name, execution_profile=ep, **kwargs) + + def execute_traversal(self, traversal, graphson): + ep = self.get_execution_profile(graphson, traversal=True) + ep = self.session.get_execution_profile(ep) + context = None + if graphson == GraphProtocol.GRAPHSON_3_0: + context = { + 'cluster': self.cluster, + 'graph_name': ep.graph_options.graph_name.decode('utf-8') if ep.graph_options.graph_name else None + } + query = DseGraph.query_from_traversal(traversal, graphson, context=context) + # Use an ep that is configured with the correct row factory, and bytecode-json language flat set + result_set = self.execute_graph(query, graphson, traversal=True) + return list(result_set) diff --git a/tests/integration/advanced/graph/fluent/test_graph.py b/tests/integration/advanced/graph/fluent/test_graph.py index 4ebb0b6109..02611c12c0 100644 --- a/tests/integration/advanced/graph/fluent/test_graph.py +++ b/tests/integration/advanced/graph/fluent/test_graph.py @@ -12,34 +12,21 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys -import datetime import six -import time -from collections import namedtuple -from concurrent.futures import Future -from packaging.version import Version from cassandra import cluster from cassandra.cluster import ContinuousPagingOptions from cassandra.datastax.graph.fluent import DseGraph -from cassandra.graph import Vertex, Edge, VertexProperty, GraphProtocol -from cassandra.util import Point, Polygon, LineString +from cassandra.graph import VertexProperty -from gremlin_python.process.graph_traversal import GraphTraversal, GraphTraversalSource -from gremlin_python.process.traversal import P -from gremlin_python.structure.graph import Edge as TravEdge -from gremlin_python.structure.graph import Vertex as TravVertex, VertexProperty as TravVertexProperty - -from tests.util import wait_until_not_raised -from tests.integration import DSE_VERSION, greaterthanorequaldse68 -from tests.integration.advanced.graph import GraphUnitTestCase, \ - ClassicGraphSchema, CoreGraphSchema, \ - validate_classic_vertex, validate_classic_edge, validate_generic_vertex_result_type, \ - validate_classic_edge_properties, validate_line_edge, \ - validate_generic_edge_result_type, validate_path_result_type, VertexLabel, \ - GraphTestConfiguration, BasicGraphUnitTestCase -from tests.integration import greaterthanorequaldse60, requiredse +from tests.integration import greaterthanorequaldse68 +from tests.integration.advanced.graph import ( + GraphUnitTestCase, ClassicGraphSchema, CoreGraphSchema, + VertexLabel, GraphTestConfiguration +) +from tests.integration import greaterthanorequaldse60 +from tests.integration.advanced.graph.fluent import ( + BaseExplicitExecutionTest, create_traversal_profiles, check_equality_base) try: import unittest2 as unittest @@ -47,836 +34,9 @@ import unittest # noqa -import ipaddress - - -def check_equality_base(testcase, original, read_value): - if isinstance(original, float): - testcase.assertAlmostEqual(original, read_value, delta=.01) - elif isinstance(original, ipaddress.IPv4Address): - testcase.assertAlmostEqual(original, ipaddress.IPv4Address(read_value)) - elif isinstance(original, ipaddress.IPv6Address): - testcase.assertAlmostEqual(original, ipaddress.IPv6Address(read_value)) - else: - testcase.assertEqual(original, read_value) - - -def create_traversal_profiles(cluster, graph_name): - ep_graphson2 = DseGraph().create_execution_profile(graph_name, - graph_protocol=GraphProtocol.GRAPHSON_2_0) - ep_graphson3 = DseGraph().create_execution_profile(graph_name, - graph_protocol=GraphProtocol.GRAPHSON_3_0) - - cluster.add_execution_profile('traversal_graphson2', ep_graphson2) - cluster.add_execution_profile('traversal_graphson3', ep_graphson3) - - return ep_graphson2, ep_graphson3 - - -class _AbstractTraversalTest(GraphUnitTestCase): - - def setUp(self): - super(_AbstractTraversalTest, self).setUp() - self.ep_graphson2, self.ep_graphson3 = create_traversal_profiles(self.cluster, self.graph_name) - - def _test_basic_query(self, schema, graphson): - """ - Test to validate that basic graph queries works - - Creates a simple classic tinkerpot graph, and attempts to preform a basic query - using Tinkerpop's GLV with both explicit and implicit execution - ensuring that each one is correct. See reference graph here - http://www.tinkerpop.com/docs/3.0.0.M1/ - - @since 1.0.0 - @jira_ticket PYTHON-641 - @expected_result graph should generate and all vertices and edge results should be - - @test_category dse graph - """ - - g = self.fetch_traversal_source(graphson) - self.execute_graph(schema.fixtures.classic(), graphson) - traversal = g.V().has('name', 'marko').out('knows').values('name') - results_list = self.execute_traversal(traversal, graphson) - self.assertEqual(len(results_list), 2) - self.assertIn('vadas', results_list) - self.assertIn('josh', results_list) - - def _test_classic_graph(self, schema, graphson): - """ - Test to validate that basic graph generation, and vertex and edges are surfaced correctly - - Creates a simple classic tinkerpot graph, and iterates over the the vertices and edges - using Tinkerpop's GLV with both explicit and implicit execution - ensuring that each one iscorrect. See reference graph here - http://www.tinkerpop.com/docs/3.0.0.M1/ - - @since 1.0.0 - @jira_ticket PYTHON-641 - @expected_result graph should generate and all vertices and edge results should be - - @test_category dse graph - """ - - self.execute_graph(schema.fixtures.classic(), graphson) - ep = self.get_execution_profile(graphson) - g = self.fetch_traversal_source(graphson) - traversal = g.V() - vert_list = self.execute_traversal(traversal, graphson) - - for vertex in vert_list: - schema.ensure_properties(self.session, vertex, execution_profile=ep) - self._validate_classic_vertex(g, vertex) - traversal = g.E() - edge_list = self.execute_traversal(traversal, graphson) - for edge in edge_list: - schema.ensure_properties(self.session, edge, execution_profile=ep) - self._validate_classic_edge(g, edge) - - def _test_graph_classic_path(self, schema, graphson): - """ - Test to validate that the path version of the result type is generated correctly. It also - tests basic path results as that is not covered elsewhere - - @since 1.0.0 - @jira_ticket PYTHON-641 - @expected_result path object should be unpacked correctly including all nested edges and vertices - @test_category dse graph - """ - self.execute_graph(schema.fixtures.classic(), graphson) - g = self.fetch_traversal_source(graphson) - traversal = g.V().hasLabel('person').has('name', 'marko').as_('a').outE('knows').inV().as_('c', 'd').outE('created').as_('e', 'f', 'g').inV().path() - path_list = self.execute_traversal(traversal, graphson) - self.assertEqual(len(path_list), 2) - for path in path_list: - self._validate_path_result_type(g, path) - - def _test_range_query(self, schema, graphson): - """ - Test to validate range queries are handled correctly. - - Creates a very large line graph script and executes it. Then proceeds to to a range - limited query against it, and ensure that the results are formated correctly and that - the result set is properly sized. - - @since 1.0.0 - @jira_ticket PYTHON-641 - @expected_result result set should be properly formated and properly sized - - @test_category dse graph - """ - - self.execute_graph(schema.fixtures.line(150), graphson) - ep = self.get_execution_profile(graphson) - g = self.fetch_traversal_source(graphson) - - traversal = g.E().range(0, 10) - edges = self.execute_traversal(traversal, graphson) - self.assertEqual(len(edges), 10) - for edge in edges: - schema.ensure_properties(self.session, edge, execution_profile=ep) - self._validate_line_edge(g, edge) - - def _test_result_types(self, schema, graphson): - """ - Test to validate that the edge and vertex version of results are constructed correctly. - - @since 1.0.0 - @jira_ticket PYTHON-641 - @expected_result edge/vertex result types should be unpacked correctly. - @test_category dse graph - """ - self.execute_graph(schema.fixtures.line(150), graphson) - g = self.fetch_traversal_source(graphson) - traversal = g.V() - vertices = self.execute_traversal(traversal, graphson) - for vertex in vertices: - self._validate_type(g, vertex) - - def _test_large_result_set(self, schema, graphson): - """ - Test to validate that large result sets return correctly. - - Creates a very large graph. Ensures that large result sets are handled appropriately. - - @since 1.0.0 - @jira_ticket PYTHON-641 - @expected_result when limits of result sets are hit errors should be surfaced appropriately - - @test_category dse graph - """ - self.execute_graph(schema.fixtures.large(), graphson) - g = self.fetch_traversal_source(graphson) - traversal = g.V() - vertices = self.execute_traversal(traversal, graphson) - for vertex in vertices: - self._validate_generic_vertex_result_type(g, vertex) - - def _test_vertex_meta_properties(self, schema, graphson): - """ - Test verifying vertex property properties - - @since 1.0.0 - @jira_ticket PYTHON-641 - - @test_category dse graph - """ - if schema is not ClassicGraphSchema: - raise unittest.SkipTest('skipped because multiple properties are only supported with classic graphs') - - s = self.session - s.execute_graph("schema.propertyKey('k0').Text().ifNotExists().create();") - s.execute_graph("schema.propertyKey('k1').Text().ifNotExists().create();") - s.execute_graph("schema.propertyKey('key').Text().properties('k0', 'k1').ifNotExists().create();") - s.execute_graph("schema.vertexLabel('MLP').properties('key').ifNotExists().create();") - s.execute_graph("schema.config().option('graph.allow_scan').set('true');") - v = s.execute_graph('''v = graph.addVertex('MLP') - v.property('key', 'meta_prop', 'k0', 'v0', 'k1', 'v1') - v''')[0] - - g = self.fetch_traversal_source(graphson) - - traversal = g.V() - # This should contain key, and value where value is a property - # This should be a vertex property and should contain sub properties - results = self.execute_traversal(traversal, graphson) - self._validate_meta_property(g, results[0]) - - def _test_vertex_multiple_properties(self, schema, graphson): - """ - Test verifying vertex property form for various Cardinality - - All key types are encoded as a list, regardless of cardinality - - Single cardinality properties have only one value -- the last one added - - Default is single (this is config dependent) - - @since 1.0.0 - @jira_ticket PYTHON-641 - - @test_category dse graph - """ - if schema is not ClassicGraphSchema: - raise unittest.SkipTest('skipped because multiple properties are only supported with classic graphs') - - s = self.session - s.execute_graph('''Schema schema = graph.schema(); - schema.propertyKey('mult_key').Text().multiple().ifNotExists().create(); - schema.propertyKey('single_key').Text().single().ifNotExists().create(); - schema.vertexLabel('MPW1').properties('mult_key').ifNotExists().create(); - schema.vertexLabel('MPW2').properties('mult_key').ifNotExists().create(); - schema.vertexLabel('SW1').properties('single_key').ifNotExists().create();''') - - mpw1v = s.execute_graph('''v = graph.addVertex('MPW1') - v.property('mult_key', 'value') - v''')[0] - - mpw2v = s.execute_graph('''g.addV('MPW2').property('mult_key', 'value0').property('mult_key', 'value1')''')[0] - - g = self.fetch_traversal_source(graphson) - traversal = g.V(mpw1v.id).properties() - - vertex_props = self.execute_traversal(traversal, graphson) - - self.assertEqual(len(vertex_props), 1) - - self.assertEqual(self.fetch_key_from_prop(vertex_props[0]), "mult_key") - self.assertEqual(vertex_props[0].value, "value") - - # multiple_with_two_values - #v = s.execute_graph('''g.addV(label, 'MPW2', 'mult_key', 'value0', 'mult_key', 'value1')''')[0] - traversal = g.V(mpw2v.id).properties() - - vertex_props = self.execute_traversal(traversal, graphson) - - self.assertEqual(len(vertex_props), 2) - self.assertEqual(self.fetch_key_from_prop(vertex_props[0]), 'mult_key') - self.assertEqual(self.fetch_key_from_prop(vertex_props[1]), 'mult_key') - self.assertEqual(vertex_props[0].value, 'value0') - self.assertEqual(vertex_props[1].value, 'value1') - - # single_with_one_value - v = s.execute_graph('''v = graph.addVertex('SW1') - v.property('single_key', 'value') - v''')[0] - traversal = g.V(v.id).properties() - vertex_props = self.execute_traversal(traversal, graphson) - self.assertEqual(len(vertex_props), 1) - self.assertEqual(self.fetch_key_from_prop(vertex_props[0]), "single_key") - self.assertEqual(vertex_props[0].value, "value") - - def should_parse_meta_properties(self): - g = self.fetch_traversal_source() - g.addV("meta_v").property("meta_prop", "hello", "sub_prop", "hi", "sub_prop2", "hi2") - - def _test_all_graph_types_with_schema(self, schema, graphson): - """ - Exhaustively goes through each type that is supported by dse_graph. - creates a vertex for each type using a dse-tinkerpop traversal, - It then attempts to fetch it from the server and compares it to what was inserted - Prime the graph with the correct schema first - - @since 1.0.0 - @jira_ticket PYTHON-641 - @expected_result inserted objects are equivalent to those retrieved - - @test_category dse graph - """ - self._write_and_read_data_types(schema, graphson) - - def _test_all_graph_types_without_schema(self, schema, graphson): - """ - Exhaustively goes through each type that is supported by dse_graph. - creates a vertex for each type using a dse-tinkerpop traversal, - It then attempts to fetch it from the server and compares it to what was inserted - Do not prime the graph with the correct schema first - @since 1.0.0 - @jira_ticket PYTHON-641 - @expected_result inserted objects are equivalent to those retrieved - @test_category dse graph - """ - if schema is not ClassicGraphSchema: - raise unittest.SkipTest('schema-less is only for classic graphs') - self._write_and_read_data_types(schema, graphson, use_schema=False) - - def _test_dsl(self, schema, graphson): - """ - The test creates a SocialTraversal and a SocialTraversalSource as part of - a DSL. Then calls it's method and checks the results to verify - we have the expected results - - @since @since 1.1.0a1 - @jira_ticket PYTHON-790 - @expected_result only the vertex corresponding to marko is in the result - - @test_category dse graph - """ - class SocialTraversal(GraphTraversal): - def knows(self, person_name): - return self.out("knows").hasLabel("person").has("name", person_name).in_() - - class SocialTraversalSource(GraphTraversalSource): - def __init__(self, *args, **kwargs): - super(SocialTraversalSource, self).__init__(*args, **kwargs) - self.graph_traversal = SocialTraversal - - def people(self, *names): - return self.get_graph_traversal().V().has("name", P.within(*names)) - - self.execute_graph(schema.fixtures.classic(), graphson) - if schema is CoreGraphSchema: - self.execute_graph(""" - schema.edgeLabel('knows').from('person').to('person').materializedView('person__knows__person_by_in_name'). - ifNotExists().partitionBy('in_name').clusterBy('out_name', Asc).create() - """, graphson) - time.sleep(1) # give some time to the MV to be populated - g = self.fetch_traversal_source(graphson, traversal_class=SocialTraversalSource) - - traversal = g.people("marko", "albert").knows("vadas") - results = self.execute_traversal(traversal, graphson) - self.assertEqual(len(results), 1) - only_vertex = results[0] - schema.ensure_properties(self.session, only_vertex, - execution_profile=self.get_execution_profile(graphson)) - self._validate_classic_vertex(g, only_vertex) - - def _test_bulked_results(self, schema, graphson): - """ - Send a query expecting a bulked result and the driver "undoes" - the bulk and returns the expected list - - @since 1.1.0a1 - @jira_ticket PYTHON-771 - @expected_result the expanded list - - @test_category dse graph - """ - self.execute_graph(schema.fixtures.classic(), graphson) - g = self.fetch_traversal_source(graphson) - barrier_traversal = g.E().label().barrier() - results = self.execute_traversal(barrier_traversal, graphson) - self.assertEqual(sorted(["created", "created", "created", "created", "knows", "knows"]), sorted(results)) - - def _test_udt_with_classes(self, schema, graphson): - class Address(object): - - def __init__(self, address, city, state): - self.address = address - self.city = city - self.state = state - - def __eq__(self, other): - return self.address == other.address and self.city == other.city and self.state == other.state - - class AddressWithTags(object): - - def __init__(self, address, city, state, tags): - self.address = address - self.city = city - self.state = state - self.tags = tags - - def __eq__(self, other): - return (self.address == other.address and self.city == other.city - and self.state == other.state and self.tags == other.tags) - - class ComplexAddress(object): - - def __init__(self, address, address_tags, city, state, props): - self.address = address - self.address_tags = address_tags - self.city = city - self.state = state - self.props = props - - def __eq__(self, other): - return (self.address == other.address and self.address_tags == other.address_tags - and self.city == other.city and self.state == other.state - and self.props == other.props) - - class ComplexAddressWithOwners(object): - - def __init__(self, address, address_tags, city, state, props, owners): - self.address = address - self.address_tags = address_tags - self.city = city - self.state = state - self.props = props - self.owners = owners - - def __eq__(self, other): - return (self.address == other.address and self.address_tags == other.address_tags - and self.city == other.city and self.state == other.state - and self.props == other.props and self.owners == other.owners) - - self.__test_udt(schema, graphson, Address, AddressWithTags, ComplexAddress, ComplexAddressWithOwners) - - def _test_udt_with_namedtuples(self, schema, graphson): - AddressTuple = namedtuple('Address', ('address', 'city', 'state')) - AddressWithTagsTuple = namedtuple('AddressWithTags', ('address', 'city', 'state', 'tags')) - ComplexAddressTuple = namedtuple('ComplexAddress', ('address', 'address_tags', 'city', 'state', 'props')) - ComplexAddressWithOwnersTuple = namedtuple('ComplexAddressWithOwners', ('address', 'address_tags', 'city', - 'state', 'props', 'owners')) - - self.__test_udt(schema, graphson, AddressTuple, AddressWithTagsTuple, - ComplexAddressTuple, ComplexAddressWithOwnersTuple) - - def _write_and_read_data_types(self, schema, graphson, use_schema=True): - g = self.fetch_traversal_source(graphson) - ep = self.get_execution_profile(graphson) - for data in six.itervalues(schema.fixtures.datatypes()): - typ, value, deserializer = data - vertex_label = VertexLabel([typ]) - property_name = next(six.iterkeys(vertex_label.non_pk_properties)) - if use_schema or schema is CoreGraphSchema: - schema.create_vertex_label(self.session, vertex_label, execution_profile=ep) - - write_traversal = g.addV(str(vertex_label.label)).property('pkid', vertex_label.id).\ - property(property_name, value) - self.execute_traversal(write_traversal, graphson) - - read_traversal = g.V().hasLabel(str(vertex_label.label)).has(property_name).properties() - results = self.execute_traversal(read_traversal, graphson) - - for result in results: - if result.label == 'pkid': - continue - self._check_equality(g, value, result.value) - - def __test_udt(self, schema, graphson, address_class, address_with_tags_class, - complex_address_class, complex_address_with_owners_class): - if schema is not CoreGraphSchema or DSE_VERSION < Version('6.8'): - raise unittest.SkipTest("Graph UDT is only supported with DSE 6.8+ and Core graphs.") - - ep = self.get_execution_profile(graphson) - - Address = address_class - AddressWithTags = address_with_tags_class - ComplexAddress = complex_address_class - ComplexAddressWithOwners = complex_address_with_owners_class - - # setup udt - self.session.execute_graph(""" - schema.type('address').property('address', Text).property('city', Text).property('state', Text).create(); - schema.type('addressTags').property('address', Text).property('city', Text).property('state', Text). - property('tags', setOf(Text)).create(); - schema.type('complexAddress').property('address', Text).property('address_tags', frozen(typeOf('addressTags'))). - property('city', Text).property('state', Text).property('props', mapOf(Text, Int)).create(); - schema.type('complexAddressWithOwners').property('address', Text). - property('address_tags', frozen(typeOf('addressTags'))). - property('city', Text).property('state', Text).property('props', mapOf(Text, Int)). - property('owners', frozen(listOf(tupleOf(Text, Int)))).create(); - """, execution_profile=ep) - - # wait max 10 seconds to get the UDT discovered. - wait_until_not_raised( - lambda: self.session.cluster.register_user_type(self.graph_name, 'address', Address), - 1, 10) - wait_until_not_raised( - lambda: self.session.cluster.register_user_type(self.graph_name, 'addressTags', AddressWithTags), - 1, 10) - wait_until_not_raised( - lambda: self.session.cluster.register_user_type(self.graph_name, 'complexAddress', ComplexAddress), - 1, 10) - wait_until_not_raised( - lambda: self.session.cluster.register_user_type(self.graph_name, 'complexAddressWithOwners', ComplexAddressWithOwners), - 1, 10) - - data = { - "udt1": ["typeOf('address')", Address('1440 Rd Smith', 'Quebec', 'QC')], - "udt2": ["tupleOf(typeOf('address'), Text)", (Address('1440 Rd Smith', 'Quebec', 'QC'), 'hello')], - "udt3": ["tupleOf(frozen(typeOf('address')), Text)", (Address('1440 Rd Smith', 'Quebec', 'QC'), 'hello')], - "udt4": ["tupleOf(tupleOf(Int, typeOf('address')), Text)", - ((42, Address('1440 Rd Smith', 'Quebec', 'QC')), 'hello')], - "udt5": ["tupleOf(tupleOf(Int, typeOf('addressTags')), Text)", - ((42, AddressWithTags('1440 Rd Smith', 'Quebec', 'QC', {'t1', 't2'})), 'hello')], - "udt6": ["tupleOf(tupleOf(Int, typeOf('complexAddress')), Text)", - ((42, ComplexAddress('1440 Rd Smith', - AddressWithTags('1440 Rd Smith', 'Quebec', 'QC', {'t1', 't2'}), - 'Quebec', 'QC', {'p1': 42, 'p2': 33})), 'hello')], - "udt7": ["tupleOf(tupleOf(Int, frozen(typeOf('complexAddressWithOwners'))), Text)", - ((42, ComplexAddressWithOwners( - '1440 Rd Smith', - AddressWithTags('1440 CRd Smith', 'Quebec', 'QC', {'t1', 't2'}), - 'Quebec', 'QC', {'p1': 42, 'p2': 33}, [('Mike', 43), ('Gina', 39)]) - ), 'hello')] - } - - g = self.fetch_traversal_source(graphson) - for typ, value in six.itervalues(data): - vertex_label = VertexLabel([typ]) - property_name = next(six.iterkeys(vertex_label.non_pk_properties)) - schema.create_vertex_label(self.session, vertex_label, execution_profile=ep) - - write_traversal = g.addV(str(vertex_label.label)).property('pkid', vertex_label.id). \ - property(property_name, value) - self.execute_traversal(write_traversal, graphson) - - #vertex = list(schema.add_vertex(self.session, vertex_label, property_name, value, execution_profile=ep))[0] - #vertex_properties = list(schema.get_vertex_properties( - # self.session, vertex, execution_profile=ep)) - - read_traversal = g.V().hasLabel(str(vertex_label.label)).has(property_name).properties() - vertex_properties = self.execute_traversal(read_traversal, graphson) - - self.assertEqual(len(vertex_properties), 2) # include pkid - for vp in vertex_properties: - if vp.label == 'pkid': - continue - - self.assertIsInstance(vp, (VertexProperty, TravVertexProperty)) - self.assertEqual(vp.label, property_name) - self.assertEqual(vp.value, value) - - @staticmethod - def fetch_edge_props(g, edge): - edge_props = g.E(edge.id).properties().toList() - return edge_props - - @staticmethod - def fetch_vertex_props(g, vertex): - - vertex_props = g.V(vertex.id).properties().toList() - return vertex_props - - def _check_equality(self, g, original, read_value): - return check_equality_base(self, original, read_value) - - -@requiredse -class BaseImplicitExecutionTest(GraphUnitTestCase): - """ - This test class will execute all tests of the AbstractTraversalTestClass using implicit execution - This all traversal will be run directly using toList() - """ - def setUp(self): - super(BaseImplicitExecutionTest, self).setUp() - if DSE_VERSION: - self.ep = DseGraph().create_execution_profile(self.graph_name) - self.cluster.add_execution_profile(self.graph_name, self.ep) - - @staticmethod - def fetch_key_from_prop(property): - return property.key - - def fetch_traversal_source(self, graphson, **kwargs): - ep = self.get_execution_profile(graphson, traversal=True) - return DseGraph().traversal_source(self.session, self.graph_name, execution_profile=ep, **kwargs) - - def execute_traversal(self, traversal, graphson=None): - return traversal.toList() - - def _validate_classic_vertex(self, g, vertex): - # Checks the properties on a classic vertex for correctness - vertex_props = self.fetch_vertex_props(g, vertex) - vertex_prop_keys = [vp.key for vp in vertex_props] - self.assertEqual(len(vertex_prop_keys), 2) - self.assertIn('name', vertex_prop_keys) - self.assertTrue('lang' in vertex_prop_keys or 'age' in vertex_prop_keys) - - def _validate_generic_vertex_result_type(self, g, vertex): - # Checks a vertex object for it's generic properties - properties = self.fetch_vertex_props(g, vertex) - for attr in ('id', 'label'): - self.assertIsNotNone(getattr(vertex, attr)) - self.assertTrue(len(properties) > 2) - - def _validate_classic_edge_properties(self, g, edge): - # Checks the properties on a classic edge for correctness - edge_props = self.fetch_edge_props(g, edge) - edge_prop_keys = [ep.key for ep in edge_props] - self.assertEqual(len(edge_prop_keys), 1) - self.assertIn('weight', edge_prop_keys) - - def _validate_classic_edge(self, g, edge): - self._validate_generic_edge_result_type(edge) - self._validate_classic_edge_properties(g, edge) - - def _validate_line_edge(self, g, edge): - self._validate_generic_edge_result_type(edge) - edge_props = self.fetch_edge_props(g, edge) - edge_prop_keys = [ep.key for ep in edge_props] - self.assertEqual(len(edge_prop_keys), 1) - self.assertIn('distance', edge_prop_keys) - - def _validate_generic_edge_result_type(self, edge): - self.assertIsInstance(edge, TravEdge) - - for attr in ('outV', 'inV', 'label', 'id'): - self.assertIsNotNone(getattr(edge, attr)) - - def _validate_path_result_type(self, g, objects_path): - for obj in objects_path: - if isinstance(obj, TravEdge): - self._validate_classic_edge(g, obj) - elif isinstance(obj, TravVertex): - self._validate_classic_vertex(g, obj) - else: - self.fail("Invalid object found in path " + str(obj.type)) - - def _validate_meta_property(self, g, vertex): - meta_props = g.V(vertex.id).properties().toList() - self.assertEqual(len(meta_props), 1) - meta_prop = meta_props[0] - self.assertEqual(meta_prop.value, "meta_prop") - self.assertEqual(meta_prop.key, "key") - - nested_props = g.V(vertex.id).properties().properties().toList() - self.assertEqual(len(nested_props), 2) - for nested_prop in nested_props: - self.assertTrue(nested_prop.key in ['k0', 'k1']) - self.assertTrue(nested_prop.value in ['v0', 'v1']) - - def _validate_type(self, g, vertex): - props = self.fetch_vertex_props(g, vertex) - for prop in props: - value = prop.value - key = prop.key - _validate_prop(key, value, self) - - -@requiredse -@GraphTestConfiguration.generate_tests(traversal=True) -class ImplicitExecutionTest(BaseImplicitExecutionTest, _AbstractTraversalTest): - def _test_iterate_step(self, schema, graphson): - """ - Test to validate that the iterate() step work on all dse versions. - @jira_ticket PYTHON-1155 - @expected_result iterate step works - @test_category dse graph - """ - - g = self.fetch_traversal_source(graphson) - self.execute_graph(schema.fixtures.classic(), graphson) - g.addV('person').property('name', 'Person1').iterate() - - -@requiredse -@GraphTestConfiguration.generate_tests(traversal=True) -class ImplicitAsyncExecutionTest(BaseImplicitExecutionTest): - """ - Test to validate that the traversal async execution works properly. - - @since 3.21.0 - @jira_ticket PYTHON-1129 - - @test_category dse graph - """ - - def setUp(self): - super(ImplicitAsyncExecutionTest, self).setUp() - self.ep_graphson2, self.ep_graphson3 = create_traversal_profiles(self.cluster, self.graph_name) - - - def _validate_results(self, results): - results = list(results) - self.assertEqual(len(results), 2) - self.assertIn('vadas', results) - self.assertIn('josh', results) - - def _test_promise(self, schema, graphson): - self.execute_graph(schema.fixtures.classic(), graphson) - g = self.fetch_traversal_source(graphson) - traversal_future = g.V().has('name', 'marko').out('knows').values('name').promise() - self._validate_results(traversal_future.result()) - - def _test_promise_error_is_propagated(self, schema, graphson): - self.execute_graph(schema.fixtures.classic(), graphson) - g = DseGraph().traversal_source(self.session, 'wrong_graph', execution_profile=self.ep) - traversal_future = g.V().has('name', 'marko').out('knows').values('name').promise() - with self.assertRaises(Exception): - traversal_future.result() - - def _test_promise_callback(self, schema, graphson): - self.execute_graph(schema.fixtures.classic(), graphson) - g = self.fetch_traversal_source(graphson) - future = Future() - - def cb(f): - future.set_result(f.result()) - - traversal_future = g.V().has('name', 'marko').out('knows').values('name').promise() - traversal_future.add_done_callback(cb) - self._validate_results(future.result()) - - def _test_promise_callback_on_error(self, schema, graphson): - self.execute_graph(schema.fixtures.classic(), graphson) - g = DseGraph().traversal_source(self.session, 'wrong_graph', execution_profile=self.ep) - future = Future() - - def cb(f): - try: - f.result() - except Exception as e: - future.set_exception(e) - - traversal_future = g.V().has('name', 'marko').out('knows').values('name').promise() - traversal_future.add_done_callback(cb) - with self.assertRaises(Exception): - future.result() - - -class ExplicitExecutionBase(GraphUnitTestCase): - - def fetch_traversal_source(self, graphson, **kwargs): - ep = self.get_execution_profile(graphson, traversal=True) - return DseGraph().traversal_source(self.session, self.graph_name, execution_profile=ep, **kwargs) - - def execute_traversal(self, traversal, graphson): - ep = self.get_execution_profile(graphson, traversal=True) - ep = self.session.get_execution_profile(ep) - context = None - if graphson == GraphProtocol.GRAPHSON_3_0: - context = { - 'cluster': self.cluster, - 'graph_name': ep.graph_options.graph_name.decode('utf-8') if ep.graph_options.graph_name else None - } - query = DseGraph.query_from_traversal(traversal, graphson, context=context) - # Use an ep that is configured with the correct row factory, and bytecode-json language flat set - result_set = self.execute_graph(query, graphson, traversal=True) - return list(result_set) - - -@requiredse -@GraphTestConfiguration.generate_tests(traversal=True) -class ExplicitExecutionTest(ExplicitExecutionBase, _AbstractTraversalTest): - """ - This test class will execute all tests of the AbstractTraversalTestClass using Explicit execution - All queries will be run by converting them to byte code, and calling execute graph explicitly with a generated ep. - """ - @staticmethod - def fetch_key_from_prop(property): - return property.label - - def _validate_classic_vertex(self, g, vertex): - validate_classic_vertex(self, vertex) - - def _validate_generic_vertex_result_type(self, g, vertex): - validate_generic_vertex_result_type(self, vertex) - - def _validate_classic_edge_properties(self, g, edge): - validate_classic_edge_properties(self, edge) - - def _validate_classic_edge(self, g, edge): - validate_classic_edge(self, edge) - - def _validate_line_edge(self, g, edge): - validate_line_edge(self, edge) - - def _validate_generic_edge_result_type(self, edge): - validate_generic_edge_result_type(self, edge) - - def _validate_type(self, g, vertex): - for key in vertex.properties: - value = vertex.properties[key][0].value - _validate_prop(key, value, self) - - def _validate_path_result_type(self, g, path_obj): - # This pre-processing is due to a change in TinkerPop - # properties are not returned automatically anymore - # with some queries. - for obj in path_obj.objects: - if not obj.properties: - props = [] - if isinstance(obj, Edge): - obj.properties = { - p.key: p.value - for p in self.fetch_edge_props(g, obj) - } - elif isinstance(obj, Vertex): - obj.properties = { - p.label: p.value - for p in self.fetch_vertex_props(g, obj) - } - - validate_path_result_type(self, path_obj) - - def _validate_meta_property(self, g, vertex): - - self.assertEqual(len(vertex.properties), 1) - self.assertEqual(len(vertex.properties['key']), 1) - p = vertex.properties['key'][0] - self.assertEqual(p.label, 'key') - self.assertEqual(p.value, 'meta_prop') - self.assertEqual(p.properties, {'k0': 'v0', 'k1': 'v1'}) - - -def _validate_prop(key, value, unittest): - if key == 'index': - return - - if any(key.startswith(t) for t in ('int', 'short')): - typ = int - - elif any(key.startswith(t) for t in ('long',)): - if sys.version_info >= (3, 0): - typ = int - else: - typ = long - elif any(key.startswith(t) for t in ('float', 'double')): - typ = float - elif any(key.startswith(t) for t in ('polygon',)): - typ = Polygon - elif any(key.startswith(t) for t in ('point',)): - typ = Point - elif any(key.startswith(t) for t in ('Linestring',)): - typ = LineString - elif any(key.startswith(t) for t in ('neg',)): - typ = six.string_types - elif any(key.startswith(t) for t in ('date',)): - typ = datetime.date - elif any(key.startswith(t) for t in ('time',)): - typ = datetime.time - else: - unittest.fail("Received unexpected type: %s" % key) - unittest.assertIsInstance(value, typ) - - @greaterthanorequaldse60 @GraphTestConfiguration.generate_tests(traversal=True) -class BatchStatementTests(ExplicitExecutionBase): +class BatchStatementTests(BaseExplicitExecutionTest): def setUp(self): super(BatchStatementTests, self).setUp() diff --git a/tests/integration/advanced/graph/fluent/test_graph_explicit_execution.py b/tests/integration/advanced/graph/fluent/test_graph_explicit_execution.py new file mode 100644 index 0000000000..1a5846203d --- /dev/null +++ b/tests/integration/advanced/graph/fluent/test_graph_explicit_execution.py @@ -0,0 +1,96 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from cassandra.graph import Vertex, Edge + +from tests.integration.advanced.graph import ( + validate_classic_vertex, validate_classic_edge, validate_generic_vertex_result_type, + validate_classic_edge_properties, validate_line_edge, + validate_generic_edge_result_type, validate_path_result_type) + +from tests.integration import requiredse, DSE_VERSION +from tests.integration.advanced import use_single_node_with_graph +from tests.integration.advanced.graph import GraphTestConfiguration +from tests.integration.advanced.graph.fluent import ( + BaseExplicitExecutionTest, _AbstractTraversalTest, _validate_prop) + + +def setup_module(): + if DSE_VERSION: + dse_options = {'graph': {'realtime_evaluation_timeout_in_seconds': 60}} + use_single_node_with_graph(dse_options=dse_options) + + +@requiredse +@GraphTestConfiguration.generate_tests(traversal=True) +class ExplicitExecutionTest(BaseExplicitExecutionTest, _AbstractTraversalTest): + """ + This test class will execute all tests of the AbstractTraversalTestClass using Explicit execution + All queries will be run by converting them to byte code, and calling execute graph explicitly with a generated ep. + """ + @staticmethod + def fetch_key_from_prop(property): + return property.label + + def _validate_classic_vertex(self, g, vertex): + validate_classic_vertex(self, vertex) + + def _validate_generic_vertex_result_type(self, g, vertex): + validate_generic_vertex_result_type(self, vertex) + + def _validate_classic_edge_properties(self, g, edge): + validate_classic_edge_properties(self, edge) + + def _validate_classic_edge(self, g, edge): + validate_classic_edge(self, edge) + + def _validate_line_edge(self, g, edge): + validate_line_edge(self, edge) + + def _validate_generic_edge_result_type(self, edge): + validate_generic_edge_result_type(self, edge) + + def _validate_type(self, g, vertex): + for key in vertex.properties: + value = vertex.properties[key][0].value + _validate_prop(key, value, self) + + def _validate_path_result_type(self, g, path_obj): + # This pre-processing is due to a change in TinkerPop + # properties are not returned automatically anymore + # with some queries. + for obj in path_obj.objects: + if not obj.properties: + props = [] + if isinstance(obj, Edge): + obj.properties = { + p.key: p.value + for p in self.fetch_edge_props(g, obj) + } + elif isinstance(obj, Vertex): + obj.properties = { + p.label: p.value + for p in self.fetch_vertex_props(g, obj) + } + + validate_path_result_type(self, path_obj) + + def _validate_meta_property(self, g, vertex): + + self.assertEqual(len(vertex.properties), 1) + self.assertEqual(len(vertex.properties['key']), 1) + p = vertex.properties['key'][0] + self.assertEqual(p.label, 'key') + self.assertEqual(p.value, 'meta_prop') + self.assertEqual(p.properties, {'k0': 'v0', 'k1': 'v1'}) diff --git a/tests/integration/advanced/graph/fluent/test_graph_implicit_execution.py b/tests/integration/advanced/graph/fluent/test_graph_implicit_execution.py new file mode 100644 index 0000000000..50e6795867 --- /dev/null +++ b/tests/integration/advanced/graph/fluent/test_graph_implicit_execution.py @@ -0,0 +1,108 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from concurrent.futures import Future +from cassandra.datastax.graph.fluent import DseGraph + +from tests.integration import requiredse, DSE_VERSION +from tests.integration.advanced import use_single_node_with_graph +from tests.integration.advanced.graph import GraphTestConfiguration +from tests.integration.advanced.graph.fluent import ( + BaseImplicitExecutionTest, create_traversal_profiles, _AbstractTraversalTest) + + +def setup_module(): + if DSE_VERSION: + dse_options = {'graph': {'realtime_evaluation_timeout_in_seconds': 60}} + use_single_node_with_graph(dse_options=dse_options) + + +@requiredse +@GraphTestConfiguration.generate_tests(traversal=True) +class ImplicitExecutionTest(BaseImplicitExecutionTest, _AbstractTraversalTest): + def _test_iterate_step(self, schema, graphson): + """ + Test to validate that the iterate() step work on all dse versions. + @jira_ticket PYTHON-1155 + @expected_result iterate step works + @test_category dse graph + """ + + g = self.fetch_traversal_source(graphson) + self.execute_graph(schema.fixtures.classic(), graphson) + g.addV('person').property('name', 'Person1').iterate() + + +@requiredse +@GraphTestConfiguration.generate_tests(traversal=True) +class ImplicitAsyncExecutionTest(BaseImplicitExecutionTest): + """ + Test to validate that the traversal async execution works properly. + + @since 3.21.0 + @jira_ticket PYTHON-1129 + + @test_category dse graph + """ + + def setUp(self): + super(ImplicitAsyncExecutionTest, self).setUp() + self.ep_graphson2, self.ep_graphson3 = create_traversal_profiles(self.cluster, self.graph_name) + + def _validate_results(self, results): + results = list(results) + self.assertEqual(len(results), 2) + self.assertIn('vadas', results) + self.assertIn('josh', results) + + def _test_promise(self, schema, graphson): + self.execute_graph(schema.fixtures.classic(), graphson) + g = self.fetch_traversal_source(graphson) + traversal_future = g.V().has('name', 'marko').out('knows').values('name').promise() + self._validate_results(traversal_future.result()) + + def _test_promise_error_is_propagated(self, schema, graphson): + self.execute_graph(schema.fixtures.classic(), graphson) + g = DseGraph().traversal_source(self.session, 'wrong_graph', execution_profile=self.ep) + traversal_future = g.V().has('name', 'marko').out('knows').values('name').promise() + with self.assertRaises(Exception): + traversal_future.result() + + def _test_promise_callback(self, schema, graphson): + self.execute_graph(schema.fixtures.classic(), graphson) + g = self.fetch_traversal_source(graphson) + future = Future() + + def cb(f): + future.set_result(f.result()) + + traversal_future = g.V().has('name', 'marko').out('knows').values('name').promise() + traversal_future.add_done_callback(cb) + self._validate_results(future.result()) + + def _test_promise_callback_on_error(self, schema, graphson): + self.execute_graph(schema.fixtures.classic(), graphson) + g = DseGraph().traversal_source(self.session, 'wrong_graph', execution_profile=self.ep) + future = Future() + + def cb(f): + try: + f.result() + except Exception as e: + future.set_exception(e) + + traversal_future = g.V().has('name', 'marko').out('knows').values('name').promise() + traversal_future.add_done_callback(cb) + with self.assertRaises(Exception): + future.result() From f39467790db9220e56656de49fcddfa3f2601f05 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 12 May 2020 11:26:32 -0400 Subject: [PATCH 1801/2005] Use PROTOCOL_TLS to select the higher protocol version --- cassandra/datastax/cloud/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/datastax/cloud/__init__.py b/cassandra/datastax/cloud/__init__.py index f79d72a7a2..1459b28d8c 100644 --- a/cassandra/datastax/cloud/__init__.py +++ b/cassandra/datastax/cloud/__init__.py @@ -23,7 +23,7 @@ _HAS_SSL = True try: - from ssl import SSLContext, PROTOCOL_TLSv1, CERT_REQUIRED + from ssl import SSLContext, PROTOCOL_TLS, CERT_REQUIRED except: _HAS_SSL = False @@ -169,7 +169,7 @@ def parse_metadata_info(config, http_data): def _ssl_context_from_cert(ca_cert_location, cert_location, key_location): - ssl_context = SSLContext(PROTOCOL_TLSv1) + ssl_context = SSLContext(PROTOCOL_TLS) ssl_context.load_verify_locations(ca_cert_location) ssl_context.verify_mode = CERT_REQUIRED ssl_context.load_cert_chain(certfile=cert_location, keyfile=key_location) From ea250fbef856d1f26b89e94d8fc3c6bed9f8386c Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 12 May 2020 16:11:37 -0400 Subject: [PATCH 1802/2005] Add python-* branches and Cython to Per Commit stage --- Jenkinsfile | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index e7a53f12cb..22c0e27de2 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -107,7 +107,7 @@ def executeStandardTests() { set +o allexport SIMULACRON_JAR="${HOME}/simulacron.jar" - #SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true ''' sh label: 'Execute CQL engine integration tests', script: '''#!/bin/bash -lex @@ -116,7 +116,7 @@ def executeStandardTests() { . ${HOME}/environment.txt set +o allexport - #EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true + EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true ''' sh label: 'Execute Apache CassandraⓇ integration tests', script: '''#!/bin/bash -lex @@ -125,7 +125,7 @@ def executeStandardTests() { . ${HOME}/environment.txt set +o allexport - #EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/test_cluster.py || true + EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true ''' if (env.CASSANDRA_VERSION.split('-')[0] == 'dse' && env.CASSANDRA_VERSION.split('-')[1] != '4.8') { @@ -135,7 +135,7 @@ def executeStandardTests() { . ${HOME}/environment.txt set +o allexport - #EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} DSE_VERSION=${DSE_VERSION} ADS_HOME="${HOME}/" VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=dse_results.xml tests/integration/advanced/ || true + EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} DSE_VERSION=${DSE_VERSION} ADS_HOME="${HOME}/" VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=dse_results.xml tests/integration/advanced/ || true ''' } @@ -145,7 +145,7 @@ def executeStandardTests() { . ${HOME}/environment.txt set +o allexport - #EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CLOUD_PROXY_PATH="${HOME}/proxy/" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=advanced_results.xml tests/integration/cloud/ || true + EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CLOUD_PROXY_PATH="${HOME}/proxy/" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=advanced_results.xml tests/integration/cloud/ || true ''' if (env.EXECUTE_LONG_TESTS == 'True') { @@ -155,7 +155,7 @@ def executeStandardTests() { . ${HOME}/environment.txt set +o allexport - #EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true + EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true ''' } } @@ -609,7 +609,7 @@ pipeline { } when { beforeAgent true - branch pattern: '((dev|long)-)+python.*', comparator: 'REGEXP' + branch pattern: '((dev|long)-)?python-.*', comparator: 'REGEXP' allOf { expression { params.ADHOC_BUILD_TYPE == 'BUILD' } expression { params.CI_SCHEDULE == 'DO-NOT-CHANGE-THIS-SELECTION' } @@ -630,7 +630,7 @@ pipeline { } axis { name 'CYTHON_ENABLED' - values 'False' + values 'False', 'True' } } From 0419d587f9c670821ab861e921ef1bb2c7dbd471 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 12 May 2020 20:30:33 -0400 Subject: [PATCH 1803/2005] Use CYTHON_ENABLED to determine the driver should cythonize modules --- Jenkinsfile | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 22c0e27de2..c168189858 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -48,7 +48,7 @@ def initializeEnvironment() { pip install nose-ignore-docstring nose-exclude service_identity ''' - if (params.CYTHON) { + if (env.CYTHON_ENABLED == 'True') { sh label: 'Install cython modules', script: '''#!/bin/bash -lex pip install cython numpy ''' @@ -71,7 +71,7 @@ def initializeEnvironment() { } def installDriverAndCompileExtensions() { - if (params.CYTHON) { + if (env.CYTHON_ENABLED == 'True') { sh label: 'Install the driver and compile with C extensions with Cython', script: '''#!/bin/bash -lex python setup.py build_ext --inplace ''' @@ -87,7 +87,7 @@ def executeStandardTests() { * Run the cython unit tests, this is not done in travis because it takes too much time for the * whole matrix to build with cython */ - if (params.CYTHON) { + if (env.CYTHON_ENABLED == 'True') { sh label: 'Execute Cython unit tests', script: '''#!/bin/bash -lex # Load CCM environment variables set -o allexport @@ -291,7 +291,7 @@ def describeScheduledTestingStage() { script { def type = params.CI_SCHEDULE.toLowerCase().capitalize() def displayName = "${type} schedule (${env.EVENT_LOOP_MANAGER}" - if (params.CYTHON) { + if (env.CYTHON_ENABLED == 'True') { displayName += " | Cython" } if (params.PROFILE != 'NONE') { @@ -303,7 +303,7 @@ def describeScheduledTestingStage() { def serverVersionDescription = "${params.CI_SCHEDULE_SERVER_VERSION.replaceAll(' ', ', ')} server version(s) in the matrix" def pythonVersionDescription = "${params.CI_SCHEDULE_PYTHON_VERSION.replaceAll(' ', ', ')} Python version(s) in the matrix" def description = "${type} scheduled testing using ${env.EVENT_LOOP_MANAGER} event loop manager" - if (params.CYTHON) { + if (env.CYTHON_ENABLED == 'True') { description += ", with Cython enabled" } if (params.PROFILE != 'NONE') { @@ -333,7 +333,7 @@ def describeAdhocTestingStage() { } } def displayName = "${params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION} for v${params.ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION} (${env.EVENT_LOOP_MANAGER}" - if (params.CYTHON) { + if (env.CYTHON_ENABLED == 'True') { displayName += " | Cython" } if (params.PROFILE != 'NONE') { @@ -343,7 +343,7 @@ def describeAdhocTestingStage() { currentBuild.displayName = displayName def description = "Testing ${serverDisplayName} ${serverVersion} using ${env.EVENT_LOOP_MANAGER} against Python ${params.ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION}" - if (params.CYTHON) { + if (env.CYTHON_ENABLED == 'True') { description += ", with Cython" } if (params.PROFILE == 'NONE') { @@ -409,8 +409,7 @@ pipeline { 'dse-5.1', // Legacy DataStax Enterprise 'dse-6.0', // Previous DataStax Enterprise 'dse-6.7', // Previous DataStax Enterprise - 'dse-6.8.0', // Current DataStax Enterprise - 'dse-6.8', // Development DataStax Enterprise + 'dse-6.8', // Current DataStax Enterprise 'ALL'], description: '''Apache CassandraⓇ and DataStax Enterprise server version to use for adhoc BUILD-AND-EXECUTE-TESTS ONLY! @@ -456,10 +455,6 @@ pipeline { - - - - @@ -600,6 +595,7 @@ pipeline { EVENT_LOOP_MANAGER = "${params.EVENT_LOOP_MANAGER.toLowerCase()}" EXECUTE_LONG_TESTS = "${params.EXECUTE_LONG_TESTS ? 'True' : 'False'}" CCM_ENVIRONMENT_SHELL = '/usr/local/bin/ccm_environment.sh' + CCM_MAX_HEAP_SIZE = '1024M' } stages { From d6a5e4dd5ee06081eb55caebbf8a61417cafce37 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 13 May 2020 14:44:22 -0400 Subject: [PATCH 1804/2005] Add python-1238 changelog entry --- CHANGELOG.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index db2d7cb468..4aab9e19e0 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,11 @@ +3.24.0 +====== +Not released + +Bug Fixes +--------- +* Unable to connect to a cloud cluster using Ubuntu 20.04 (PYTHON-1238) + 3.23.0 ====== April 6, 2020 From 0b63a242ae03b3c6700ccc9ec3b5d2b6e5cc0a3f Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 13 May 2020 15:27:03 -0400 Subject: [PATCH 1805/2005] jenkins try to fix the parameterizedCron condition --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index c168189858..1bcb78226b 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -547,7 +547,7 @@ pipeline { } triggers { - parameterizedCron(branchPatternCron.matcher(env.BRANCH_NAME).matches() && !riptanoPatternCron.matcher(env.GIT_URL).find() ? """ + parameterizedCron((branchPatternCron.matcher(env.BRANCH_NAME).matches() && !riptanoPatternCron.matcher(env.GIT_URL).find()) ? """ # Every weeknight (Monday - Friday) around 4:00 AM # These schedules will run with and without Cython enabled for Python v2.7.14 and v3.5.6 H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.14;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 From 892340fad6438f270ff8183ec72603340aacce2c Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 14 May 2020 11:41:19 -0400 Subject: [PATCH 1806/2005] Bump geomet dependency to <0.3 --- requirements.txt | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 42bc6d0e9e..f784fba1b9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -geomet>=0.1,<0.2 +geomet>=0.1,<0.3 six >=1.9 futures <=2.2.0 # Futures is not required for Python 3, but it works up through 2.2.0 (after which it introduced breaking syntax). diff --git a/setup.py b/setup.py index e157228f56..bee1052e90 100644 --- a/setup.py +++ b/setup.py @@ -404,7 +404,7 @@ def run_setup(extensions): sys.stderr.write("Bypassing Cython setup requirement\n") dependencies = ['six >=1.9', - 'geomet>=0.1,<0.2'] + 'geomet>=0.1,<0.3'] if not PY3: dependencies.append('futures') From 932b535b2ddcbe2bddbdc9456676919cfb92c57f Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 14 May 2020 11:41:44 -0400 Subject: [PATCH 1807/2005] Uninstall geomet when testing Cassandra --- Jenkinsfile | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Jenkinsfile b/Jenkinsfile index 1bcb78226b..fe5e60652a 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -42,6 +42,11 @@ def initializeEnvironment() { sh label: 'Install Apache CassandraⓇ requirements', script: '''#!/bin/bash -lex pip install -r test-requirements.txt ''' + + sh label: 'Uninstall the geomet dependency since it is not required for Cassandra', script: '''#!/bin/bash -lex + pip uninstall -y geomet + ''' + } sh label: 'Install unit test modules', script: '''#!/bin/bash -lex From ffc9e372f9a971edb68365c6ec717309995ee4fa Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 14 May 2020 15:02:39 -0400 Subject: [PATCH 1808/2005] Make geomet an optional dependency at runtime --- CHANGELOG.rst | 8 ++++++++ Jenkinsfile | 4 ++-- cassandra/util.py | 20 +++++++++++++++++++- tests/unit/advanced/test_geometry.py | 4 ++-- 4 files changed, 31 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 4aab9e19e0..0df3dd6a20 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,10 +2,18 @@ ====== Not released +Features +-------- +* Make geomet an optional dependency at runtime (PYTHON-1237) + Bug Fixes --------- * Unable to connect to a cloud cluster using Ubuntu 20.04 (PYTHON-1238) +Others +------ +* Bump geomet dependency version to 0.2 (PYTHON-1243) + 3.23.0 ====== April 6, 2020 diff --git a/Jenkinsfile b/Jenkinsfile index fe5e60652a..36151926f9 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -552,7 +552,7 @@ pipeline { } triggers { - parameterizedCron((branchPatternCron.matcher(env.BRANCH_NAME).matches() && !riptanoPatternCron.matcher(env.GIT_URL).find()) ? """ + parameterizedCron((branchPatternCron.matcher(env.BRANCH_NAME).matches() && !riptanoPatternCron.matcher(GIT_URL).find()) ? """ # Every weeknight (Monday - Friday) around 4:00 AM # These schedules will run with and without Cython enabled for Python v2.7.14 and v3.5.6 H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.14;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 @@ -600,7 +600,7 @@ pipeline { EVENT_LOOP_MANAGER = "${params.EVENT_LOOP_MANAGER.toLowerCase()}" EXECUTE_LONG_TESTS = "${params.EXECUTE_LONG_TESTS ? 'True' : 'False'}" CCM_ENVIRONMENT_SHELL = '/usr/local/bin/ccm_environment.sh' - CCM_MAX_HEAP_SIZE = '1024M' + CCM_MAX_HEAP_SIZE = '1536M' } stages { diff --git a/cassandra/util.py b/cassandra/util.py index 0a00533b33..ead58c82f6 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -17,7 +17,6 @@ import datetime from functools import total_ordering import logging -from geomet import wkt from itertools import chain import random import re @@ -25,6 +24,15 @@ import uuid import sys +_HAS_GEOMET = True +try: + from geomet import wkt +except: + _HAS_GEOMET = False + + +from cassandra import DriverException + DATETIME_EPOC = datetime.datetime(1970, 1, 1) UTC_DATETIME_EPOC = datetime.datetime.utcfromtimestamp(0) @@ -35,6 +43,7 @@ assert sys.byteorder in ('little', 'big') is_little_endian = sys.byteorder == 'little' + def datetime_from_timestamp(timestamp): """ Creates a timezone-agnostic datetime from timestamp (in seconds) in a consistent manner. @@ -1308,6 +1317,9 @@ def from_wkt(s): """ Parse a Point geometry from a wkt string and return a new Point object. """ + if not _HAS_GEOMET: + raise DriverException("Geomet is required to deserialize a wkt geometry.") + try: geom = wkt.loads(s) except ValueError: @@ -1363,6 +1375,9 @@ def from_wkt(s): """ Parse a LineString geometry from a wkt string and return a new LineString object. """ + if not _HAS_GEOMET: + raise DriverException("Geomet is required to deserialize a wkt geometry.") + try: geom = wkt.loads(s) except ValueError: @@ -1444,6 +1459,9 @@ def from_wkt(s): """ Parse a Polygon geometry from a wkt string and return a new Polygon object. """ + if not _HAS_GEOMET: + raise DriverException("Geomet is required to deserialize a wkt geometry.") + try: geom = wkt.loads(s) except ValueError: diff --git a/tests/unit/advanced/test_geometry.py b/tests/unit/advanced/test_geometry.py index 7cd8f666e6..4fa2644ff2 100644 --- a/tests/unit/advanced/test_geometry.py +++ b/tests/unit/advanced/test_geometry.py @@ -22,7 +22,7 @@ from cassandra.cqltypes import lookup_casstype from cassandra.protocol import ProtocolVersion from cassandra.cqltypes import PointType, LineStringType, PolygonType, WKBGeometryType -from cassandra.util import Point, LineString, Polygon, _LinearRing, Distance +from cassandra.util import Point, LineString, Polygon, _LinearRing, Distance, _HAS_GEOMET wkb_be = 0 wkb_le = 1 @@ -104,7 +104,7 @@ def test_eq(self): # specifically use assertFalse(eq) to make sure we're using the geo __eq__ operator self.assertFalse(geo == object()) - +@unittest.skipUnless(_HAS_GEOMET, "Skip wkt geometry tests when geomet is not installed") class WKTTest(unittest.TestCase): def test_line_parse(self): From 696b047e90cd3ffc3ddceb9fb5ee84c11e5c9174 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 19 May 2020 13:53:24 -0400 Subject: [PATCH 1809/2005] Fix writing data in a Boolean field using the Fluent API --- CHANGELOG.rst | 1 + cassandra/datastax/graph/graphson.py | 4 +-- tests/integration/advanced/graph/__init__.py | 2 ++ .../advanced/graph/test_graph_datatype.py | 26 +++++++++++++------ 4 files changed, 23 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 0df3dd6a20..6c40a5b8a0 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -9,6 +9,7 @@ Features Bug Fixes --------- * Unable to connect to a cloud cluster using Ubuntu 20.04 (PYTHON-1238) +* [GRAPH] Can't write data in a Boolean field using the Fluent API (PYTHON-1239) Others ------ diff --git a/cassandra/datastax/graph/graphson.py b/cassandra/datastax/graph/graphson.py index 8419c7992b..956d7d7f18 100644 --- a/cassandra/datastax/graph/graphson.py +++ b/cassandra/datastax/graph/graphson.py @@ -52,7 +52,7 @@ DSE Graph GraphSON 2.0 GraphSON 3.0 | Python Driver ------------ | -------------- | -------------- | ------------ text | string | string | str -boolean | g:Boolean | g:Boolean | bool +boolean | | | bool bigint | g:Int64 | g:Int64 | long int | g:Int32 | g:Int32 | int double | g:Double | g:Double | float @@ -125,7 +125,7 @@ class TextTypeIO(GraphSONTypeIO): class BooleanTypeIO(GraphSONTypeIO): - graphson_base_type = 'Boolean' + graphson_base_type = None cql_type = 'boolean' @classmethod diff --git a/tests/integration/advanced/graph/__init__.py b/tests/integration/advanced/graph/__init__.py index 0573cf2557..6c9458dd02 100644 --- a/tests/integration/advanced/graph/__init__.py +++ b/tests/integration/advanced/graph/__init__.py @@ -418,6 +418,8 @@ class ClassicGraphFixtures(GraphFixtures): @staticmethod def datatypes(): data = { + "boolean1": ["Boolean()", True, None], + "boolean2": ["Boolean()", False, None], "point1": ["Point()", Point(.5, .13), GraphSON1Deserializer.deserialize_point], "point2": ["Point()", Point(-5, .0), GraphSON1Deserializer.deserialize_point], diff --git a/tests/integration/advanced/graph/test_graph_datatype.py b/tests/integration/advanced/graph/test_graph_datatype.py index d4d28b80df..222b1f5ace 100644 --- a/tests/integration/advanced/graph/test_graph_datatype.py +++ b/tests/integration/advanced/graph/test_graph_datatype.py @@ -28,6 +28,7 @@ from cassandra.graph.query import GraphProtocol from cassandra.graph.types import VertexProperty +from tests.util import wait_until from tests.integration.advanced.graph import BasicGraphUnitTestCase, ClassicGraphFixtures, \ ClassicGraphSchema, CoreGraphSchema from tests.integration.advanced.graph import VertexLabel, GraphTestConfiguration, GraphUnitTestCase @@ -94,14 +95,18 @@ def _test_all_datatypes(self, schema, graphson): schema.create_vertex_label(self.session, vertex_label, execution_profile=ep) vertex = list(schema.add_vertex(self.session, vertex_label, property_name, value, execution_profile=ep))[0] - vertex_properties = list(schema.get_vertex_properties( - self.session, vertex, execution_profile=ep)) + def get_vertex_properties(): + return list(schema.get_vertex_properties( + self.session, vertex, execution_profile=ep)) + prop_returned = 1 if DSE_VERSION < Version('5.1') else 2 # include pkid >=5.1 + wait_until( + lambda: len(get_vertex_properties()) == prop_returned, 0.2, 15) + + vertex_properties = get_vertex_properties() if graphson == GraphProtocol.GRAPHSON_1_0: vertex_properties = [vp.as_vertex_property() for vp in vertex_properties] - prop_returned = 1 if DSE_VERSION < Version('5.1') else 2 # include pkid >=5.1 - self.assertEqual(len(vertex_properties), prop_returned) for vp in vertex_properties: if vp.label == 'pkid': continue @@ -109,7 +114,7 @@ def _test_all_datatypes(self, schema, graphson): self.assertIsInstance(vp, VertexProperty) self.assertEqual(vp.label, property_name) if graphson == GraphProtocol.GRAPHSON_1_0: - deserialized_value = deserializer(vp.value) + deserialized_value = deserializer(vp.value) if deserializer else vp.value self.assertEqual(deserialized_value, value) else: self.assertEqual(vp.value, value) @@ -171,10 +176,15 @@ def __test_udt(self, schema, graphson, address_class, address_with_tags_class, schema.create_vertex_label(self.session, vertex_label, execution_profile=ep) vertex = list(schema.add_vertex(self.session, vertex_label, property_name, value, execution_profile=ep))[0] - vertex_properties = list(schema.get_vertex_properties( - self.session, vertex, execution_profile=ep)) - self.assertEqual(len(vertex_properties), 2) # include pkid + def get_vertex_properties(): + return list(schema.get_vertex_properties( + self.session, vertex, execution_profile=ep)) + + wait_until( + lambda: len(get_vertex_properties()) == 2, 0.2, 15) + + vertex_properties = get_vertex_properties() for vp in vertex_properties: if vp.label == 'pkid': continue From e513c54e5f9fa3cdfd030c82a446ac1421441c0f Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 20 May 2020 09:20:49 -0400 Subject: [PATCH 1810/2005] Bump gremlinpython dependency to 3.4.6 --- CHANGELOG.rst | 1 + Jenkinsfile | 8 ++++++-- setup.py | 2 +- test-datastax-requirements.txt | 2 +- 4 files changed, 9 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 0df3dd6a20..a32679e13b 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -13,6 +13,7 @@ Bug Fixes Others ------ * Bump geomet dependency version to 0.2 (PYTHON-1243) +* Bump gremlinpython dependency version to 3.4.6 (PYTHON-1212) 3.23.0 ====== diff --git a/Jenkinsfile b/Jenkinsfile index 36151926f9..774d357c29 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -623,7 +623,11 @@ pipeline { axis { name 'CASSANDRA_VERSION' values '3.11', // Current Apache Cassandra - 'dse-6.8.0' // Current DataStax Enterprise + 'dse-5.0', // Long Term Support DataStax Enterprise + 'dse-5.1', // Legacy DataStax Enterprise + 'dse-6.0', // Previous DataStax Enterprise + 'dse-6.7', // Previous DataStax Enterprise + 'dse-6.8' // Current DataStax Enterprise } axis { name 'PYTHON_VERSION' @@ -631,7 +635,7 @@ pipeline { } axis { name 'CYTHON_ENABLED' - values 'False', 'True' + values 'False' } } diff --git a/setup.py b/setup.py index bee1052e90..745d05dfb3 100644 --- a/setup.py +++ b/setup.py @@ -410,7 +410,7 @@ def run_setup(extensions): dependencies.append('futures') _EXTRAS_REQUIRE = { - 'graph': ['gremlinpython==3.3.4'] + 'graph': ['gremlinpython==3.4.6'] } setup( diff --git a/test-datastax-requirements.txt b/test-datastax-requirements.txt index 69cc3a9484..3a47b8de16 100644 --- a/test-datastax-requirements.txt +++ b/test-datastax-requirements.txt @@ -1,3 +1,3 @@ -r test-requirements.txt kerberos -gremlinpython==3.3.4 +gremlinpython==3.4.6 From 7dcda6da6331e1b9c7628cd69b5254d4c8791a81 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 20 May 2020 18:42:13 -0400 Subject: [PATCH 1811/2005] Wait the simulacron http server is ready before running tests --- Jenkinsfile | 9 +++++---- tests/integration/simulacron/utils.py | 4 +++- tests/util.py | 14 ++++++++------ 3 files changed, 16 insertions(+), 11 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 774d357c29..9ba138cf4a 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -290,6 +290,11 @@ def describePerCommitStage() { currentBuild.displayName = "Per-Commit (${env.EVENT_LOOP_MANAGER} | ${type.capitalize()})" currentBuild.description = "Per-Commit build and ${type} testing of ${serverDescription} against Python v2.7.14 and v3.5.6 using ${env.EVENT_LOOP_MANAGER} event loop manager" } + + sh label: 'Describe the python environment', script: '''#!/bin/bash -lex + python -V + pip freeze + ''' } def describeScheduledTestingStage() { @@ -623,10 +628,6 @@ pipeline { axis { name 'CASSANDRA_VERSION' values '3.11', // Current Apache Cassandra - 'dse-5.0', // Long Term Support DataStax Enterprise - 'dse-5.1', // Legacy DataStax Enterprise - 'dse-6.0', // Previous DataStax Enterprise - 'dse-6.7', // Previous DataStax Enterprise 'dse-6.8' // Current DataStax Enterprise } axis { diff --git a/tests/integration/simulacron/utils.py b/tests/integration/simulacron/utils.py index 870b60bd46..b1d9debaf3 100644 --- a/tests/integration/simulacron/utils.py +++ b/tests/integration/simulacron/utils.py @@ -19,6 +19,7 @@ from cassandra.metadata import SchemaParserV4, SchemaParserDSE68 +from tests.util import wait_until_not_raised from tests.integration import CASSANDRA_VERSION, SIMULACRON_JAR, DSE_VERSION DEFAULT_CLUSTER = "python_simulacron_cluster" @@ -110,7 +111,8 @@ def submit_request(self, query): request.add_header("Content-Type", 'application/json') request.add_header("Content-Length", len(data)) - connection = opener.open(request) + # wait that simulacron is ready and listening + connection = wait_until_not_raised(lambda: opener.open(request), 1, 10) return connection.read().decode('utf-8') def prime_server_versions(self): diff --git a/tests/util.py b/tests/util.py index c5dfd8a387..5c7ac2416f 100644 --- a/tests/util.py +++ b/tests/util.py @@ -15,6 +15,7 @@ import time from functools import wraps + def wait_until(condition, delay, max_attempts): """ Executes a function at regular intervals while the condition @@ -44,22 +45,23 @@ def wait_until_not_raised(condition, delay, max_attempts): """ def wrapped_condition(): try: - condition() + result = condition() except: - return False + return False, None - return True + return True, result attempt = 0 while attempt < (max_attempts-1): attempt += 1 - if wrapped_condition(): - return + success, result = wrapped_condition() + if success: + return result time.sleep(delay) # last attempt, let the exception raise - condition() + return condition() def late(seconds=1): From c2bf6255cb8c6af4a2d34edd914078db8159811d Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 21 May 2020 11:09:58 -0400 Subject: [PATCH 1812/2005] Add use_default_tempdir cloud config options --- CHANGELOG.rst | 1 + cassandra/datastax/cloud/__init__.py | 3 +- tests/unit/advanced/cloud/test_cloud.py | 42 +++++++++++++++++++++++-- 3 files changed, 42 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index d6b1971059..7e1033e566 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,6 +5,7 @@ Not released Features -------- * Make geomet an optional dependency at runtime (PYTHON-1237) +* Add use_default_tempdir cloud config options (PYTHON-1245) Bug Fixes --------- diff --git a/cassandra/datastax/cloud/__init__.py b/cassandra/datastax/cloud/__init__.py index 1459b28d8c..ecb4a73fd4 100644 --- a/cassandra/datastax/cloud/__init__.py +++ b/cassandra/datastax/cloud/__init__.py @@ -97,8 +97,9 @@ def get_cloud_config(cloud_config, create_pyopenssl_context=False): def read_cloud_config_from_zip(cloud_config, create_pyopenssl_context): secure_bundle = cloud_config['secure_connect_bundle'] + use_default_tempdir = cloud_config.get('use_default_tempdir', None) with ZipFile(secure_bundle) as zipfile: - base_dir = os.path.dirname(secure_bundle) + base_dir = tempfile.gettempdir() if use_default_tempdir else os.path.dirname(secure_bundle) tmp_dir = tempfile.mkdtemp(dir=base_dir) try: zipfile.extractall(path=tmp_dir) diff --git a/tests/unit/advanced/cloud/test_cloud.py b/tests/unit/advanced/cloud/test_cloud.py index e6001fb474..ab18f0af72 100644 --- a/tests/unit/advanced/cloud/test_cloud.py +++ b/tests/unit/advanced/cloud/test_cloud.py @@ -6,23 +6,29 @@ # You may obtain a copy of the License at # # http://www.datastax.com/terms/datastax-dse-driver-license-terms +import tempfile +import os +import shutil +import six + try: import unittest2 as unittest except ImportError: import unittest # noqa -import os - +from cassandra import DriverException from cassandra.datastax import cloud from mock import patch +from tests import notwindows class CloudTests(unittest.TestCase): current_path = os.path.dirname(os.path.abspath(__file__)) + creds_path = os.path.join(current_path, './creds.zip') config_zip = { - 'secure_connect_bundle': os.path.join(current_path, './creds.zip') + 'secure_connect_bundle': creds_path } metadata_json = """ {"region":"local", @@ -75,3 +81,33 @@ def test_parse_metadata_info(self): ] for host_id in host_ids: self.assertIn(host_id, config.host_ids) + + @notwindows + def test_use_default_tempdir(self): + tmpdir = tempfile.mkdtemp() + + def clean_tmp_dir(): + os.chmod(tmpdir, 0o777) + shutil.rmtree(tmpdir) + self.addCleanup(clean_tmp_dir) + + tmp_creds_path = os.path.join(tmpdir, 'creds.zip') + shutil.copyfile(self.creds_path, tmp_creds_path) + os.chmod(tmpdir, 0o544) + config = { + 'secure_connect_bundle': tmp_creds_path + } + + # The directory is not writtable.. we expect a permission error + exc = PermissionError if six.PY3 else OSError + with self.assertRaises(exc): + cloud.get_cloud_config(config) + + # With use_default_tempdir, we expect an connection refused + # since the cluster doesn't exist + with self.assertRaises(DriverException): + config = { + 'secure_connect_bundle': tmp_creds_path, + 'use_default_tempdir': True + } + cloud.get_cloud_config(config) From 63525402596b59128e2ed8e7c0bf231976fad2f5 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 21 May 2020 12:26:33 -0400 Subject: [PATCH 1813/2005] Set the jenkins config to always run the unit tests --- Jenkinsfile | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 9ba138cf4a..97c9ad676e 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -88,22 +88,17 @@ def installDriverAndCompileExtensions() { } def executeStandardTests() { - /* - * Run the cython unit tests, this is not done in travis because it takes too much time for the - * whole matrix to build with cython - */ - if (env.CYTHON_ENABLED == 'True') { - sh label: 'Execute Cython unit tests', script: '''#!/bin/bash -lex - # Load CCM environment variables - set -o allexport - . ${HOME}/environment.txt - set +o allexport - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} VERIFY_CYTHON=True nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml tests/unit/ || true - EVENT_LOOP_MANAGER=eventlet VERIFY_CYTHON=True nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || true - EVENT_LOOP_MANAGER=gevent VERIFY_CYTHON=True nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || true - ''' - } + sh label: 'Execute unit tests', script: '''#!/bin/bash -lex + # Load CCM environment variables + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml tests/unit/ || true + EVENT_LOOP_MANAGER=eventlet VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || true + EVENT_LOOP_MANAGER=gevent VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || true + ''' sh label: 'Execute Simulacron integration tests', script: '''#!/bin/bash -lex # Load CCM environment variables From dfd06e890c459b4eb28216f6479dd3e97e90f3c5 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 21 May 2020 15:49:02 -0400 Subject: [PATCH 1814/2005] Document cloud use_default_tempdir --- cassandra/cluster.py | 5 ++++- docs/cloud.rst | 19 +++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 7120bdb9e1..f69625b1be 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -993,7 +993,10 @@ def default_retry_policy(self, policy): { # path to the secure connect bundle - 'secure_connect_bundle': '/path/to/secure-connect-dbname.zip' + 'secure_connect_bundle': '/path/to/secure-connect-dbname.zip', + + # optional config options + 'use_default_tempdir': True # use the system temp dir for the zip extraction } The zip file will be temporarily extracted in the same directory to diff --git a/docs/cloud.rst b/docs/cloud.rst index e8cc2fa750..7ff7693736 100644 --- a/docs/cloud.rst +++ b/docs/cloud.rst @@ -19,6 +19,25 @@ To connect to a DataStax Astra cluster: cluster = Cluster(cloud=cloud_config, auth_provider=auth_provider) session = cluster.connect() +Cloud Config Options +==================== + +use_default_tempdir ++++++++++++++++++++ + +The secure connect bundle needs to be extracted to load the certificates into the SSLContext. +By default, the zip location is used as the base dir for the extraction. In some environments, +the zip location file system is read-only (e.g Azure Function). With *use_default_tempdir* set to *True*, +the default temporary directory of the system will be used as base dir. + +.. code:: python + + cloud_config = { + 'secure_connect_bundle': '/path/to/secure-connect-dbname.zip', + 'use_default_tempdir': True + } + ... + Astra Differences ================== In most circumstances, the client code for interacting with an Astra cluster will be the same as interacting with any other Cassandra cluster. The exceptions being: From ed7c0eebb0227cd84242d0be07978882d4d7bc1c Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 1 Jun 2020 15:44:36 -0400 Subject: [PATCH 1815/2005] Fix tox utf8 issue and travis lz4 --- .travis.yml | 3 ++- tox.ini | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index f1fff4bb63..b485e21227 100644 --- a/.travis.yml +++ b/.travis.yml @@ -24,7 +24,8 @@ addons: - libev-dev install: - - pip install tox-travis lz4 + - pip install tox-travis + - if [[ $TRAVIS_PYTHON_VERSION != pypy3.5 ]]; then pip install lz4; fi script: - tox diff --git a/tox.ini b/tox.ini index 9f0d510045..e71f6294a0 100644 --- a/tox.ini +++ b/tox.ini @@ -18,6 +18,7 @@ deps = {[base]deps} setenv = LIBEV_EMBED=0 CARES_EMBED=0 + LC_ALL=en_US.UTF-8 changedir = {envtmpdir} commands = nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/ From 9dab6c2d5015e8f1e3387637c3a0503653dfcbfc Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 1 Jun 2020 15:46:51 -0400 Subject: [PATCH 1816/2005] Fix travis build image link --- README.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index 6bd3e45749..358f588d32 100644 --- a/README.rst +++ b/README.rst @@ -1,8 +1,8 @@ DataStax Driver for Apache Cassandra ==================================== -.. image:: https://travis-ci.org/datastax/python-driver.png?branch=master - :target: https://travis-ci.org/datastax/python-driver +.. image:: https://travis-ci.com/datastax/python-driver.png?branch=master + :target: https://travis-ci.com/github/datastax/python-driver A modern, `feature-rich `_ and highly-tunable Python client library for Apache Cassandra (2.1+) and DataStax Enterprise (4.7+) using exclusively Cassandra's binary protocol and Cassandra Query Language v3. From 3317f42c2cc5d81083795edfa0986903d818e04c Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 1 Jun 2020 16:40:53 -0400 Subject: [PATCH 1817/2005] Use gevent 1.4 for travis --- tox.ini | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tox.ini b/tox.ini index e71f6294a0..fd50a6c1d6 100644 --- a/tox.ini +++ b/tox.ini @@ -25,7 +25,7 @@ commands = nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/ [testenv:gevent_loop] deps = {[base]deps} - gevent + gevent>=1.4,<1.5 setenv = LIBEV_EMBED=0 CARES_EMBED=0 @@ -37,7 +37,7 @@ commands = [testenv:eventlet_loop] deps = {[base]deps} - gevent + gevent>=1.4,<1.5 setenv = LIBEV_EMBED=0 CARES_EMBED=0 From a6a66cac2931c4f66d248ed7091d68f6932de5b0 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 2 Jun 2020 09:46:04 -0400 Subject: [PATCH 1818/2005] ensure the connection max request id's is respected --- cassandra/connection.py | 2 +- cassandra/pool.py | 2 +- .../integration/simulacron/test_connection.py | 40 +++++- tests/unit/test_host_connection_pool.py | 126 ++++++++++-------- 4 files changed, 112 insertions(+), 58 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index f30be682a6..3d154de033 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -1443,7 +1443,7 @@ def __init__(self, connection, owner): log.debug("Sending options message heartbeat on idle connection (%s) %s", id(connection), connection.endpoint) with connection.lock: - if connection.in_flight <= connection.max_request_id: + if connection.in_flight < connection.max_request_id: connection.in_flight += 1 connection.send_msg(OptionsMessage(), connection.get_request_id(), self._options_callback) else: diff --git a/cassandra/pool.py b/cassandra/pool.py index 87e8f03716..cd27656046 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -418,7 +418,7 @@ def borrow_connection(self, timeout): remaining = timeout while True: with conn.lock: - if conn.in_flight <= conn.max_request_id: + if conn.in_flight < conn.max_request_id: conn.in_flight += 1 return conn, conn.get_request_id() if timeout is not None: diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index afe2685dbf..11bfef7fb7 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -24,7 +24,7 @@ from cassandra import OperationTimedOut from cassandra.cluster import (EXEC_PROFILE_DEFAULT, Cluster, ExecutionProfile, _Scheduler, NoHostAvailable) -from cassandra.policies import HostStateListener, RoundRobinPolicy +from cassandra.policies import HostStateListener, RoundRobinPolicy, WhiteListRoundRobinPolicy from tests import connection_class, thread_pool_executor_class from tests.util import late @@ -32,7 +32,7 @@ from tests.integration.util import assert_quiescent_pool_state # important to import the patch PROTOCOL_VERSION from the simulacron module from tests.integration.simulacron import SimulacronBase, PROTOCOL_VERSION -from cassandra.connection import DEFAULT_CQL_VERSION +from cassandra.connection import DEFAULT_CQL_VERSION, Connection from tests.unit.cython.utils import cythontest from tests.integration.simulacron.utils import (NO_THEN, PrimeOptions, prime_query, prime_request, @@ -475,3 +475,39 @@ def test_driver_recovers_nework_isolation(self): time.sleep(idle_heartbeat_timeout + idle_heartbeat_interval + 2) self.assertIsNotNone(session.execute("SELECT * from system.local")) + + def test_max_in_flight(self): + """ Verify we don't exceed max_in_flight when borrowing connections or sending heartbeats """ + Connection.max_in_flight = 50 + start_and_prime_singledc() + profile = ExecutionProfile(request_timeout=1, load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1'])) + cluster = Cluster( + protocol_version=PROTOCOL_VERSION, + compression=False, + execution_profiles={EXEC_PROFILE_DEFAULT: profile}, + idle_heartbeat_interval=.1, + idle_heartbeat_timeout=.1, + ) + session = cluster.connect(wait_for_all_pools=True) + self.addCleanup(cluster.shutdown) + + query = session.prepare("INSERT INTO table1 (id) VALUES (?)") + + prime_request(PauseReads()) + + futures = [] + # + 50 because simulacron doesn't immediately block all queries + for i in range(Connection.max_in_flight + 50): + futures.append(session.execute_async(query, ['a'])) + + prime_request(ResumeReads()) + + for future in futures: + # We're veryfing we don't get an assertion error from Connection.get_request_id, + # so skip any valid errors + try: + future.result() + except OperationTimedOut: + pass + except NoHostAvailable: + pass diff --git a/tests/unit/test_host_connection_pool.py b/tests/unit/test_host_connection_pool.py index 78af47651b..e62488b400 100644 --- a/tests/unit/test_host_connection_pool.py +++ b/tests/unit/test_host_connection_pool.py @@ -22,11 +22,14 @@ from cassandra.cluster import Session from cassandra.connection import Connection -from cassandra.pool import Host, HostConnectionPool, NoConnectionsAvailable +from cassandra.pool import HostConnection, HostConnectionPool +from cassandra.pool import Host, NoConnectionsAvailable from cassandra.policies import HostDistance, SimpleConvictionPolicy -class HostConnectionPoolTests(unittest.TestCase): +class _PoolTests(unittest.TestCase): + PoolImpl = None + uses_single_connection = None def make_session(self): session = NonCallableMagicMock(spec=Session, keyspace='foobarkeyspace') @@ -41,7 +44,7 @@ def test_borrow_and_return(self): conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100) session.cluster.connection_factory.return_value = conn - pool = HostConnectionPool(host, HostDistance.LOCAL, session) + pool = self.PoolImpl(host, HostDistance.LOCAL, session) session.cluster.connection_factory.assert_called_once_with(host.endpoint) c, request_id = pool.borrow_connection(timeout=0.01) @@ -51,7 +54,8 @@ def test_borrow_and_return(self): pool.return_connection(conn) self.assertEqual(0, conn.in_flight) - self.assertNotIn(conn, pool._trash) + if not self.uses_single_connection: + self.assertNotIn(conn, pool._trash) def test_failed_wait_for_connection(self): host = Mock(spec=Host, address='ip1') @@ -59,7 +63,7 @@ def test_failed_wait_for_connection(self): conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100) session.cluster.connection_factory.return_value = conn - pool = HostConnectionPool(host, HostDistance.LOCAL, session) + pool = self.PoolImpl(host, HostDistance.LOCAL, session) session.cluster.connection_factory.assert_called_once_with(host.endpoint) pool.borrow_connection(timeout=0.01) @@ -77,7 +81,7 @@ def test_successful_wait_for_connection(self): conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100, lock=Lock()) session.cluster.connection_factory.return_value = conn - pool = HostConnectionPool(host, HostDistance.LOCAL, session) + pool = self.PoolImpl(host, HostDistance.LOCAL, session) session.cluster.connection_factory.assert_called_once_with(host.endpoint) pool.borrow_connection(timeout=0.01) @@ -95,48 +99,6 @@ def get_second_conn(): t.join() self.assertEqual(0, conn.in_flight) - def test_all_connections_trashed(self): - host = Mock(spec=Host, address='ip1') - session = self.make_session() - conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100, lock=Lock()) - session.cluster.connection_factory.return_value = conn - session.cluster.get_core_connections_per_host.return_value = 1 - - # manipulate the core connection setting so that we can - # trash the only connection - pool = HostConnectionPool(host, HostDistance.LOCAL, session) - session.cluster.get_core_connections_per_host.return_value = 0 - pool._maybe_trash_connection(conn) - session.cluster.get_core_connections_per_host.return_value = 1 - - submit_called = Event() - - def fire_event(*args, **kwargs): - submit_called.set() - - session.submit.side_effect = fire_event - - def get_conn(): - conn.reset_mock() - c, request_id = pool.borrow_connection(1.0) - self.assertIs(conn, c) - self.assertEqual(1, conn.in_flight) - conn.set_keyspace_blocking.assert_called_once_with('foobarkeyspace') - pool.return_connection(c) - - t = Thread(target=get_conn) - t.start() - - submit_called.wait() - self.assertEqual(1, pool._scheduled_for_creation) - session.submit.assert_called_once_with(pool._create_new_connection) - - # now run the create_new_connection call - pool._create_new_connection() - - t.join() - self.assertEqual(0, conn.in_flight) - def test_spawn_when_at_max(self): host = Mock(spec=Host, address='ip1') session = self.make_session() @@ -147,7 +109,7 @@ def test_spawn_when_at_max(self): # core conns = 1, max conns = 2 session.cluster.get_max_connections_per_host.return_value = 2 - pool = HostConnectionPool(host, HostDistance.LOCAL, session) + pool = self.PoolImpl(host, HostDistance.LOCAL, session) session.cluster.connection_factory.assert_called_once_with(host.endpoint) pool.borrow_connection(timeout=0.01) @@ -160,7 +122,8 @@ def test_spawn_when_at_max(self): # purposes of this test, as long as it results in a new connection # creation being scheduled self.assertRaises(NoConnectionsAvailable, pool.borrow_connection, 0) - session.submit.assert_called_once_with(pool._create_new_connection) + if not self.uses_single_connection: + session.submit.assert_called_once_with(pool._create_new_connection) def test_return_defunct_connection(self): host = Mock(spec=Host, address='ip1') @@ -169,7 +132,7 @@ def test_return_defunct_connection(self): max_request_id=100, signaled_error=False) session.cluster.connection_factory.return_value = conn - pool = HostConnectionPool(host, HostDistance.LOCAL, session) + pool = self.PoolImpl(host, HostDistance.LOCAL, session) session.cluster.connection_factory.assert_called_once_with(host.endpoint) pool.borrow_connection(timeout=0.01) @@ -188,7 +151,7 @@ def test_return_defunct_connection_on_down_host(self): max_request_id=100, signaled_error=False) session.cluster.connection_factory.return_value = conn - pool = HostConnectionPool(host, HostDistance.LOCAL, session) + pool = self.PoolImpl(host, HostDistance.LOCAL, session) session.cluster.connection_factory.assert_called_once_with(host.endpoint) pool.borrow_connection(timeout=0.01) @@ -205,10 +168,11 @@ def test_return_defunct_connection_on_down_host(self): def test_return_closed_connection(self): host = Mock(spec=Host, address='ip1') session = self.make_session() - conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=True, max_request_id=100, signaled_error=False) + conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=True, max_request_id=100, + signaled_error=False) session.cluster.connection_factory.return_value = conn - pool = HostConnectionPool(host, HostDistance.LOCAL, session) + pool = self.PoolImpl(host, HostDistance.LOCAL, session) session.cluster.connection_factory.assert_called_once_with(host.endpoint) pool.borrow_connection(timeout=0.01) @@ -241,3 +205,57 @@ def test_host_equality(self): self.assertEqual(a, b, 'Two Host instances should be equal when sharing.') self.assertNotEqual(a, c, 'Two Host instances should NOT be equal when using two different addresses.') self.assertNotEqual(b, c, 'Two Host instances should NOT be equal when using two different addresses.') + + +class HostConnectionPoolTests(_PoolTests): + PoolImpl = HostConnectionPool + uses_single_connection = False + + def test_all_connections_trashed(self): + host = Mock(spec=Host, address='ip1') + session = self.make_session() + conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100, + lock=Lock()) + session.cluster.connection_factory.return_value = conn + session.cluster.get_core_connections_per_host.return_value = 1 + + # manipulate the core connection setting so that we can + # trash the only connection + pool = self.PoolImpl(host, HostDistance.LOCAL, session) + session.cluster.get_core_connections_per_host.return_value = 0 + pool._maybe_trash_connection(conn) + session.cluster.get_core_connections_per_host.return_value = 1 + + submit_called = Event() + + def fire_event(*args, **kwargs): + submit_called.set() + + session.submit.side_effect = fire_event + + def get_conn(): + conn.reset_mock() + c, request_id = pool.borrow_connection(1.0) + self.assertIs(conn, c) + self.assertEqual(1, conn.in_flight) + conn.set_keyspace_blocking.assert_called_once_with('foobarkeyspace') + pool.return_connection(c) + + t = Thread(target=get_conn) + t.start() + + submit_called.wait() + self.assertEqual(1, pool._scheduled_for_creation) + session.submit.assert_called_once_with(pool._create_new_connection) + + # now run the create_new_connection call + pool._create_new_connection() + + t.join() + self.assertEqual(0, conn.in_flight) + + +class HostConnectionTests(_PoolTests): + PoolImpl = HostConnection + uses_single_connection = True + From f9d69f37c4855ba941e945b0cb237c8a9ca0ffbc Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 2 Jun 2020 09:59:17 -0400 Subject: [PATCH 1819/2005] PYTHON-1196: Add test to verify we can handle TCP backpressure --- Jenkinsfile | 1 + .../simulacron/test_backpressure.py | 179 ++++++++++++++++++ .../integration/simulacron/test_connection.py | 2 +- tests/integration/simulacron/utils.py | 27 +++ 4 files changed, 208 insertions(+), 1 deletion(-) create mode 100644 tests/integration/simulacron/test_backpressure.py diff --git a/Jenkinsfile b/Jenkinsfile index 97c9ad676e..b8a116c6cf 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -184,6 +184,7 @@ def executeEventLoopTests() { "tests/integration/standard/test_connection.py" "tests/integration/standard/test_control_connection.py" "tests/integration/standard/test_metrics.py" + "tests/integration/simulacron/test_backpressure.py" "tests/integration/standard/test_query.py" "tests/integration/simulacron/test_endpoint.py" "tests/integration/long/test_ssl.py" diff --git a/tests/integration/simulacron/test_backpressure.py b/tests/integration/simulacron/test_backpressure.py new file mode 100644 index 0000000000..b7b428f64f --- /dev/null +++ b/tests/integration/simulacron/test_backpressure.py @@ -0,0 +1,179 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import time + +from cassandra import OperationTimedOut +from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT, NoHostAvailable +from cassandra.policies import RoundRobinPolicy, WhiteListRoundRobinPolicy +from tests.integration import requiressimulacron, libevtest +from tests.integration.simulacron import SimulacronBase, PROTOCOL_VERSION +from tests.integration.simulacron.utils import ResumeReads, PauseReads, prime_request, start_and_prime_singledc + + +@requiressimulacron +@libevtest +class TCPBackpressureTests(SimulacronBase): + def setUp(self): + self.callback_successes = 0 + self.callback_errors = 0 + + def callback_success(self, results): + self.callback_successes += 1 + + def callback_error(self, results): + self.callback_errors += 1 + + def _fill_buffers(self, session, query, expected_blocked=3, **execute_kwargs): + futures = [] + buffer = '1' * 50000 + for _ in range(100000): + future = session.execute_async(query, [buffer], **execute_kwargs) + futures.append(future) + + total_blocked = 0 + for pool in session.get_pools(): + if not pool._connection._socket_writable: + total_blocked += 1 + if total_blocked == expected_blocked: + break + else: + raise Exception("Unable to fill TCP send buffer on expected number of nodes") + return futures + + def test_paused_connections(self): + """ Verify all requests come back as expected if node resumes within query timeout """ + start_and_prime_singledc() + profile = ExecutionProfile(request_timeout=500, load_balancing_policy=RoundRobinPolicy()) + cluster = Cluster( + protocol_version=PROTOCOL_VERSION, + compression=False, + execution_profiles={EXEC_PROFILE_DEFAULT: profile}, + ) + session = cluster.connect(wait_for_all_pools=True) + self.addCleanup(cluster.shutdown) + + query = session.prepare("INSERT INTO table1 (id) VALUES (?)") + + prime_request(PauseReads()) + futures = self._fill_buffers(session, query) + + # Make sure we actually have some stuck in-flight requests + for in_flight in [pool._connection.in_flight for pool in session.get_pools()]: + self.assertGreater(in_flight, 100) + time.sleep(.5) + for in_flight in [pool._connection.in_flight for pool in session.get_pools()]: + self.assertGreater(in_flight, 100) + + prime_request(ResumeReads()) + + for future in futures: + try: + future.result() + except NoHostAvailable as e: + # We shouldn't have any timeouts here, but all of the queries beyond what can fit + # in the tcp buffer will have returned with a ConnectionBusy exception + self.assertIn("ConnectionBusy", str(e)) + + # Verify that we can continue sending queries without any problems + for host in session.cluster.metadata.all_hosts(): + session.execute(query, ["a"], host=host) + + def test_queued_requests_timeout(self): + """ Verify that queued requests timeout as expected """ + start_and_prime_singledc() + profile = ExecutionProfile(request_timeout=.1, load_balancing_policy=RoundRobinPolicy()) + cluster = Cluster( + protocol_version=PROTOCOL_VERSION, + compression=False, + execution_profiles={EXEC_PROFILE_DEFAULT: profile}, + ) + session = cluster.connect(wait_for_all_pools=True) + self.addCleanup(cluster.shutdown) + + query = session.prepare("INSERT INTO table1 (id) VALUES (?)") + + prime_request(PauseReads()) + + futures = [] + for i in range(1000): + future = session.execute_async(query, [str(i)]) + future.add_callbacks(callback=self.callback_success, errback=self.callback_error) + futures.append(future) + + successes = 0 + for future in futures: + try: + future.result() + successes += 1 + except OperationTimedOut: + pass + + # Simulacron will respond to a couple queries before cutting off reads, so we'll just verify + # that only "a few" successes happened here + self.assertLess(successes, 50) + self.assertLess(self.callback_successes, 50) + self.assertEqual(self.callback_errors, len(futures) - self.callback_successes) + + def test_cluster_busy(self): + """ Verify that once TCP buffer is full we get busy exceptions rather than timeouts """ + start_and_prime_singledc() + profile = ExecutionProfile(load_balancing_policy=RoundRobinPolicy()) + cluster = Cluster( + protocol_version=PROTOCOL_VERSION, + compression=False, + execution_profiles={EXEC_PROFILE_DEFAULT: profile}, + ) + session = cluster.connect(wait_for_all_pools=True) + self.addCleanup(cluster.shutdown) + + query = session.prepare("INSERT INTO table1 (id) VALUES (?)") + + prime_request(PauseReads()) + + # These requests will get stuck in the TCP buffer and we have no choice but to let them time out + self._fill_buffers(session, query, expected_blocked=3) + + # Now that our send buffer is completely full, verify we immediately get busy exceptions rather than timing out + for i in range(1000): + with self.assertRaises(NoHostAvailable) as e: + session.execute(query, [str(i)]) + self.assertIn("ConnectionBusy", str(e.exception)) + + def test_node_busy(self): + """ Verify that once TCP buffer is full, queries continue to get re-routed to other nodes """ + start_and_prime_singledc() + profile = ExecutionProfile(load_balancing_policy=RoundRobinPolicy()) + cluster = Cluster( + protocol_version=PROTOCOL_VERSION, + compression=False, + execution_profiles={EXEC_PROFILE_DEFAULT: profile}, + ) + session = cluster.connect(wait_for_all_pools=True) + self.addCleanup(cluster.shutdown) + + query = session.prepare("INSERT INTO table1 (id) VALUES (?)") + + prime_request(PauseReads(dc_id=0, node_id=0)) + + blocked_profile = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(["127.0.0.1"])) + cluster.add_execution_profile('blocked_profile', blocked_profile) + + # Fill our blocked node's tcp buffer until we get a busy exception + self._fill_buffers(session, query, expected_blocked=1, execution_profile='blocked_profile') + + # Now that our send buffer is completely full on one node, + # verify queries get re-routed to other nodes and queries complete successfully + for i in range(1000): + session.execute(query, [str(i)]) + diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index 11bfef7fb7..4ef97247a6 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -39,7 +39,7 @@ start_and_prime_cluster_defaults, start_and_prime_singledc, clear_queries, RejectConnections, - RejectType, AcceptConnections) + RejectType, AcceptConnections, PauseReads, ResumeReads) class TrackDownListener(HostStateListener): diff --git a/tests/integration/simulacron/utils.py b/tests/integration/simulacron/utils.py index b1d9debaf3..ba9573fd23 100644 --- a/tests/integration/simulacron/utils.py +++ b/tests/integration/simulacron/utils.py @@ -338,6 +338,33 @@ def method(self): return "DELETE" +class _PauseOrResumeReads(SimulacronRequest): + def __init__(self, cluster_name=DEFAULT_CLUSTER, dc_id=None, node_id=None): + self.path = "pause-reads/{}".format(cluster_name) + if dc_id is not None: + self.path += "/{}".format(dc_id) + if node_id is not None: + self.path += "/{}".format(node_id) + elif node_id: + raise Exception("Can't set node_id without dc_id") + + @property + def method(self): + raise NotImplementedError() + + +class PauseReads(_PauseOrResumeReads): + @property + def method(self): + return "PUT" + + +class ResumeReads(_PauseOrResumeReads): + @property + def method(self): + return "DELETE" + + def prime_driver_defaults(): """ Function to prime the necessary queries so the test harness can run From e14a7a097fb651b2c1875f7173cfe04fd500889b Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 2 Jun 2020 11:02:51 -0400 Subject: [PATCH 1820/2005] PYTHON-1248: Libevreactor: Raise ConnectionBusy if tcp send buffer is full --- CHANGELOG.rst | 1 + Jenkinsfile | 2 +- cassandra/cluster.py | 9 ++++++--- cassandra/connection.py | 3 +++ cassandra/io/libevreactor.py | 4 ++++ tests/integration/simulacron/test_backpressure.py | 2 +- 6 files changed, 16 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 7e1033e566..da1b5dfd0f 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -6,6 +6,7 @@ Features -------- * Make geomet an optional dependency at runtime (PYTHON-1237) * Add use_default_tempdir cloud config options (PYTHON-1245) +* Tcp flow control for libevreactor (PYTHON-1248) Bug Fixes --------- diff --git a/Jenkinsfile b/Jenkinsfile index b8a116c6cf..e133beb5db 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -601,7 +601,7 @@ pipeline { EVENT_LOOP_MANAGER = "${params.EVENT_LOOP_MANAGER.toLowerCase()}" EXECUTE_LONG_TESTS = "${params.EXECUTE_LONG_TESTS ? 'True' : 'False'}" CCM_ENVIRONMENT_SHELL = '/usr/local/bin/ccm_environment.sh' - CCM_MAX_HEAP_SIZE = '1536M' + CCM_MAX_HEAP_SIZE = '1280M' } stages { diff --git a/cassandra/cluster.py b/cassandra/cluster.py index f69625b1be..616fb7f147 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -48,7 +48,7 @@ from cassandra.connection import (ConnectionException, ConnectionShutdown, ConnectionHeartbeat, ProtocolVersionUnsupported, EndPoint, DefaultEndPoint, DefaultEndPointFactory, - ContinuousPagingState, SniEndPointFactory) + ContinuousPagingState, SniEndPointFactory, ConnectionBusy) from cassandra.cqltypes import UserType from cassandra.encoder import Encoder from cassandra.protocol import (QueryMessage, ResultMessage, @@ -4445,7 +4445,9 @@ def _query(self, host, message=None, cb=None): except NoConnectionsAvailable as exc: log.debug("All connections for host %s are at capacity, moving to the next host", host) self._errors[host] = exc - return None + except ConnectionBusy as exc: + log.debug("Connection for host %s is busy, moving to the next host", host) + self._errors[host] = exc except Exception as exc: log.debug("Error querying host %s", host, exc_info=True) self._errors[host] = exc @@ -4453,7 +4455,8 @@ def _query(self, host, message=None, cb=None): self._metrics.on_connection_error() if connection: pool.return_connection(connection) - return None + + return None @property def has_more_pages(self): diff --git a/cassandra/connection.py b/cassandra/connection.py index 3d154de033..6ce3e44a30 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -692,6 +692,7 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self._requests = {} self._iobuf = io.BytesIO() self._continuous_paging_sessions = {} + self._socket_writable = True if ssl_options: self._check_hostname = bool(self.ssl_options.pop('check_hostname', False)) @@ -926,6 +927,8 @@ def send_msg(self, msg, request_id, cb, encoder=ProtocolHandler.encode_message, raise ConnectionShutdown("Connection to %s is defunct" % self.endpoint) elif self.is_closed: raise ConnectionShutdown("Connection to %s is closed" % self.endpoint) + elif not self._socket_writable: + raise ConnectionBusy("Connection %s is overloaded" % self.endpoint) # queue the decoder function with the request # this allows us to inject custom functions per request to encode, decode messages diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 2487419784..917e16aea8 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -310,6 +310,8 @@ def handle_write(self, watcher, revents, errno=None): with self._deque_lock: next_msg = self.deque.popleft() except IndexError: + if not self._socket_writable: + self._socket_writable = True return try: @@ -317,6 +319,8 @@ def handle_write(self, watcher, revents, errno=None): except socket.error as err: if (err.args[0] in NONBLOCKING or err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE)): + if err.args[0] in NONBLOCKING: + self._socket_writable = False with self._deque_lock: self.deque.appendleft(next_msg) else: diff --git a/tests/integration/simulacron/test_backpressure.py b/tests/integration/simulacron/test_backpressure.py index b7b428f64f..69c38da8fe 100644 --- a/tests/integration/simulacron/test_backpressure.py +++ b/tests/integration/simulacron/test_backpressure.py @@ -45,7 +45,7 @@ def _fill_buffers(self, session, query, expected_blocked=3, **execute_kwargs): for pool in session.get_pools(): if not pool._connection._socket_writable: total_blocked += 1 - if total_blocked == expected_blocked: + if total_blocked >= expected_blocked: break else: raise Exception("Unable to fill TCP send buffer on expected number of nodes") From 748a5a4b91790f61046f875be269ce44ffb4c7d1 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 2 Jun 2020 14:19:30 -0400 Subject: [PATCH 1821/2005] Update all python patch versions --- Jenkinsfile | 72 ++++++++++++++++++++++++++--------------------------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index e133beb5db..152ad629b1 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -284,7 +284,7 @@ def describePerCommitStage() { } currentBuild.displayName = "Per-Commit (${env.EVENT_LOOP_MANAGER} | ${type.capitalize()})" - currentBuild.description = "Per-Commit build and ${type} testing of ${serverDescription} against Python v2.7.14 and v3.5.6 using ${env.EVENT_LOOP_MANAGER} event loop manager" + currentBuild.description = "Per-Commit build and ${type} testing of ${serverDescription} against Python v2.7.18 and v3.5.9 using ${env.EVENT_LOOP_MANAGER} event loop manager" } sh label: 'Describe the python environment', script: '''#!/bin/bash -lex @@ -402,7 +402,7 @@ pipeline {
dse-6.7 DataStax Enterprise v6.7.x
dse-6.8.0DataStax Enterprise v6.8.0
dse-6.8 DataStax Enterprise v6.8.x (CURRENTLY UNDER DEVELOPMENT)
''') choice( name: 'ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION', - choices: ['2.7.14', '3.4.9', '3.5.6', '3.6.6', '3.7.4', '3.8.0'], + choices: ['2.7.18', '3.4.10', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], description: 'Python version to use for adhoc BUILD-AND-EXECUTE-TESTS ONLY!') choice( name: 'ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION', @@ -555,43 +555,43 @@ pipeline { triggers { parameterizedCron((branchPatternCron.matcher(env.BRANCH_NAME).matches() && !riptanoPatternCron.matcher(GIT_URL).find()) ? """ # Every weeknight (Monday - Friday) around 4:00 AM - # These schedules will run with and without Cython enabled for Python v2.7.14 and v3.5.6 - H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.14;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 - H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.5.6;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 + # These schedules will run with and without Cython enabled for Python v2.7.18 and v3.5.9 + H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 + H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 # Every Saturday around 12:00, 4:00 and 8:00 PM - # These schedules are for weekly libev event manager runs with and without Cython for most of the Python versions (excludes v3.5.6.x) - H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.14;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 - H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.4.9;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 - H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.6.6;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 - H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.7.4;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 - H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.8.0;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 - # These schedules are for weekly gevent event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.9.x) - H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.14;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.6;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.6;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.4;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.0;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - # These schedules are for weekly eventlet event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.9.x) - H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.14;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.6;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.6;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.4;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.0;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + # These schedules are for weekly libev event manager runs with and without Cython for most of the Python versions (excludes v3.5.9.x) + H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 + H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.4.10;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 + H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 + H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 + H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 + # These schedules are for weekly gevent event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.10.x) + H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + # These schedules are for weekly eventlet event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.10.x) + H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 # Every Sunday around 12:00 and 4:00 AM - # These schedules are for weekly asyncore event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.9.x) - H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.14;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.6;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.6;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.4;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.0;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - # These schedules are for weekly twisted event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.9.x) - H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.14;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.6;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.6;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.4;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.0;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + # These schedules are for weekly asyncore event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.10.x) + H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + # These schedules are for weekly twisted event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.10.x) + H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 """ : "") } @@ -628,7 +628,7 @@ pipeline { } axis { name 'PYTHON_VERSION' - values '2.7.14', '3.5.6' + values '2.7.18', '3.5.9' } axis { name 'CYTHON_ENABLED' From bbdefc7e15f599c9179ea26f4fc31ce117e76b09 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 4 Jun 2020 15:05:51 -0400 Subject: [PATCH 1822/2005] Avoid memory issue by running backpressure tests separately --- Jenkinsfile | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 152ad629b1..49b5277b64 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -107,7 +107,13 @@ def executeStandardTests() { set +o allexport SIMULACRON_JAR="${HOME}/simulacron.jar" - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true + + # Run backpressure tests separately to avoid memory issue + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_1_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_paused_connections || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_2_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_queued_requests_timeout || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_3_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_cluster_busy || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_4_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_node_busy || true ''' sh label: 'Execute CQL engine integration tests', script: '''#!/bin/bash -lex @@ -184,7 +190,6 @@ def executeEventLoopTests() { "tests/integration/standard/test_connection.py" "tests/integration/standard/test_control_connection.py" "tests/integration/standard/test_metrics.py" - "tests/integration/simulacron/test_backpressure.py" "tests/integration/standard/test_query.py" "tests/integration/simulacron/test_endpoint.py" "tests/integration/long/test_ssl.py" From 64c302ee3b6d55174dedae15abc6222a921668a0 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 5 Jun 2020 14:16:00 -0400 Subject: [PATCH 1823/2005] Fix graph elementMap() result deserialization --- CHANGELOG.rst | 1 + Jenkinsfile | 2 +- cassandra/datastax/graph/__init__.py | 2 +- cassandra/datastax/graph/graphson.py | 14 ++++- cassandra/datastax/graph/types.py | 51 ++++++++++++++++++- docs/api/cassandra/datastax/graph/index.rst | 3 ++ .../advanced/graph/test_graph_query.py | 23 ++++++++- 7 files changed, 90 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index da1b5dfd0f..d0d6d6d695 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -12,6 +12,7 @@ Bug Fixes --------- * Unable to connect to a cloud cluster using Ubuntu 20.04 (PYTHON-1238) * [GRAPH] Can't write data in a Boolean field using the Fluent API (PYTHON-1239) +* [GRAPH] Fix elementMap() result deserialization (PYTHON-1233) Others ------ diff --git a/Jenkinsfile b/Jenkinsfile index 49b5277b64..87b20804ca 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -606,7 +606,7 @@ pipeline { EVENT_LOOP_MANAGER = "${params.EVENT_LOOP_MANAGER.toLowerCase()}" EXECUTE_LONG_TESTS = "${params.EXECUTE_LONG_TESTS ? 'True' : 'False'}" CCM_ENVIRONMENT_SHELL = '/usr/local/bin/ccm_environment.sh' - CCM_MAX_HEAP_SIZE = '1280M' + CCM_MAX_HEAP_SIZE = '1536M' } stages { diff --git a/cassandra/datastax/graph/__init__.py b/cassandra/datastax/graph/__init__.py index d828c7f707..11785c84f6 100644 --- a/cassandra/datastax/graph/__init__.py +++ b/cassandra/datastax/graph/__init__.py @@ -13,7 +13,7 @@ # limitations under the License. -from cassandra.datastax.graph.types import Element, Vertex, VertexProperty, Edge, Path +from cassandra.datastax.graph.types import Element, Vertex, VertexProperty, Edge, Path, T from cassandra.datastax.graph.query import ( GraphOptions, GraphProtocol, GraphStatement, SimpleGraphStatement, Result, graph_object_row_factory, single_object_row_factory, diff --git a/cassandra/datastax/graph/graphson.py b/cassandra/datastax/graph/graphson.py index 956d7d7f18..4b333eb1bf 100644 --- a/cassandra/datastax/graph/graphson.py +++ b/cassandra/datastax/graph/graphson.py @@ -34,7 +34,7 @@ from cassandra.cqltypes import cql_types_from_string from cassandra.metadata import UserType from cassandra.util import Polygon, Point, LineString, Duration -from cassandra.datastax.graph.types import Vertex, VertexProperty, Edge, Path +from cassandra.datastax.graph.types import Vertex, VertexProperty, Edge, Path, T __all__ = ['GraphSON1Serializer', 'GraphSON1Deserializer', 'GraphSON1TypeDeserializer', 'GraphSON2Serializer', 'GraphSON2Deserializer', 'GraphSON2Reader', @@ -745,6 +745,15 @@ def deserialize(cls, value, reader=None): return udt_class(**dict(kwargs)) +class TTypeIO(GraphSONTypeIO): + prefix = 'g' + graphson_base_type = 'T' + + @classmethod + def deserialize(cls, value, reader=None): + return T.name_to_value[value] + + class _BaseGraphSONSerializer(object): _serializers = OrderedDict() @@ -1120,7 +1129,8 @@ def get_serializer(self, value): class GraphSON3Deserializer(GraphSON2Deserializer): _TYPES = GraphSON2Deserializer._TYPES + [MapTypeIO, ListTypeIO, SetTypeIO, TupleTypeIO, - UserTypeIO, DseDurationTypeIO, BulkSetTypeIO] + UserTypeIO, DseDurationTypeIO, + TTypeIO, BulkSetTypeIO] _deserializers = {t.graphson_type: t for t in _TYPES} diff --git a/cassandra/datastax/graph/types.py b/cassandra/datastax/graph/types.py index ae22cd4bfe..9817c99d7d 100644 --- a/cassandra/datastax/graph/types.py +++ b/cassandra/datastax/graph/types.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__all__ = ['Element', 'Vertex', 'Edge', 'VertexProperty', 'Path'] +__all__ = ['Element', 'Vertex', 'Edge', 'VertexProperty', 'Path', 'T'] class Element(object): @@ -159,3 +159,52 @@ def __str__(self): def __repr__(self): return "%s(%r, %r)" % (self.__class__.__name__, self.labels, [o.value for o in self.objects]) + + +class T(object): + """ + Represents a collection of tokens for more concise Traversal definitions. + """ + + name = None + val = None + + # class attributes + id = None + """ + """ + + key = None + """ + """ + label = None + """ + """ + value = None + """ + """ + + def __init__(self, name, val): + self.name = name + self.val = val + + def __str__(self): + return self.name + + def __repr__(self): + return "T.%s" % (self.name, ) + + +T.id = T("id", 1) +T.id_ = T("id_", 2) +T.key = T("key", 3) +T.label = T("label", 4) +T.value = T("value", 5) + +T.name_to_value = { + 'id': T.id, + 'id_': T.id_, + 'key': T.key, + 'label': T.label, + 'value': T.value +} diff --git a/docs/api/cassandra/datastax/graph/index.rst b/docs/api/cassandra/datastax/graph/index.rst index 18a0e7c511..dafd5f65fd 100644 --- a/docs/api/cassandra/datastax/graph/index.rst +++ b/docs/api/cassandra/datastax/graph/index.rst @@ -81,6 +81,9 @@ .. autoclass:: Path :members: +.. autoclass:: T + :members: + .. autoclass:: GraphSON1Serializer :members: diff --git a/tests/integration/advanced/graph/test_graph_query.py b/tests/integration/advanced/graph/test_graph_query.py index 1ccfc4a90c..0eda67894d 100644 --- a/tests/integration/advanced/graph/test_graph_query.py +++ b/tests/integration/advanced/graph/test_graph_query.py @@ -35,8 +35,9 @@ from cassandra.graph import (SimpleGraphStatement, single_object_row_factory, Result, GraphOptions, GraphProtocol, to_bigint) from cassandra.datastax.graph.query import _graph_options +from cassandra.datastax.graph.types import T -from tests.integration import DSE_VERSION, requiredse +from tests.integration import DSE_VERSION, requiredse, greaterthanorequaldse68 from tests.integration.advanced.graph import BasicGraphUnitTestCase, GraphTestConfiguration, \ validate_classic_vertex, GraphUnitTestCase, validate_classic_edge, validate_path_result_type, \ validate_line_edge, validate_generic_vertex_result_type, \ @@ -542,6 +543,26 @@ def _test_query_bulkset(self, schema, graphson): self.assertEqual(len(results), 5) self.assertEqual(results.count(35), 2) + @greaterthanorequaldse68 + def _test_elementMap_query(self, schema, graphson): + """ + Test to validate that an elementMap can be serialized properly. + """ + self.execute_graph(schema.fixtures.classic(), graphson) + rs = self.execute_graph('''g.V().has('name','marko').elementMap()''', graphson) + results_list = self.resultset_to_list(rs) + self.assertEqual(len(results_list), 1) + row = results_list[0] + if graphson == GraphProtocol.GRAPHSON_3_0: + self.assertIn(T.id, row) + self.assertIn(T.label, row) + if schema is CoreGraphSchema: + self.assertEqual(row[T.id], 'dseg:/person/marko') + self.assertEqual(row[T.label], 'person') + else: + self.assertIn('id', row) + self.assertIn('label', row) + @GraphTestConfiguration.generate_tests(schema=ClassicGraphSchema) class ClassicGraphQueryTest(GenericGraphQueryTest): From 86168e03869394a4b3d285aba98b5624d63ddbd2 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 8 Jun 2020 15:03:26 -0400 Subject: [PATCH 1824/2005] Fix PlainTextAuthProvider fails with unicode chars and Python3 --- CHANGELOG.rst | 1 + cassandra/auth.py | 3 ++- tests/unit/test_auth.py | 32 ++++++++++++++++++++++++++++++++ 3 files changed, 35 insertions(+), 1 deletion(-) create mode 100644 tests/unit/test_auth.py diff --git a/CHANGELOG.rst b/CHANGELOG.rst index d0d6d6d695..5549ac50af 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -11,6 +11,7 @@ Features Bug Fixes --------- * Unable to connect to a cloud cluster using Ubuntu 20.04 (PYTHON-1238) +* PlainTextAuthProvider fails with unicode chars and Python3 (PYTHON-1241) * [GRAPH] Can't write data in a Boolean field using the Fluent API (PYTHON-1239) * [GRAPH] Fix elementMap() result deserialization (PYTHON-1233) diff --git a/cassandra/auth.py b/cassandra/auth.py index 910592f7ac..3d2f751ac0 100644 --- a/cassandra/auth.py +++ b/cassandra/auth.py @@ -277,7 +277,8 @@ def get_initial_challenge(self): def evaluate_challenge(self, challenge): if challenge == six.b('PLAIN-START'): - return six.b("\x00%s\x00%s" % (self.username, self.password)) + data = "\x00%s\x00%s" % (self.username, self.password) + return data if six.PY2 else data.encode() raise Exception('Did not receive a valid challenge response from server') diff --git a/tests/unit/test_auth.py b/tests/unit/test_auth.py new file mode 100644 index 0000000000..7b4196f831 --- /dev/null +++ b/tests/unit/test_auth.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +# # Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six +from cassandra.auth import PlainTextAuthenticator + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + + +class TestPlainTextAuthenticator(unittest.TestCase): + + def test_evaluate_challenge_with_unicode_data(self): + authenticator = PlainTextAuthenticator("johnӁ", "doeӁ") + self.assertEqual( + authenticator.evaluate_challenge(six.ensure_binary('PLAIN-START')), + six.ensure_binary("\x00johnӁ\x00doeӁ") + ) From e3d5515687c15ab9d6026353436a32c485bbdaf9 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 9 Jun 2020 13:18:04 -0400 Subject: [PATCH 1825/2005] Fix Graph execution profiles consistency level are not set to LOCAL_QUORUM for a cloud cluster --- CHANGELOG.rst | 1 + cassandra/cluster.py | 4 ++-- tests/integration/cloud/test_cloud.py | 13 +++++++++++-- 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 5549ac50af..dde4c316dd 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -12,6 +12,7 @@ Bug Fixes --------- * Unable to connect to a cloud cluster using Ubuntu 20.04 (PYTHON-1238) * PlainTextAuthProvider fails with unicode chars and Python3 (PYTHON-1241) +* Graph execution profiles consistency level are not set to LOCAL_QUORUM with a cloud cluster (PYTHON-1240) * [GRAPH] Can't write data in a Boolean field using the Fluent API (PYTHON-1239) * [GRAPH] Fix elementMap() result deserialization (PYTHON-1233) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 616fb7f147..c4d6de124d 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -418,7 +418,7 @@ class GraphExecutionProfile(ExecutionProfile): """ def __init__(self, load_balancing_policy=_NOT_SET, retry_policy=None, - consistency_level=ConsistencyLevel.LOCAL_ONE, serial_consistency_level=None, + consistency_level=_NOT_SET, serial_consistency_level=None, request_timeout=30.0, row_factory=None, graph_options=None, continuous_paging_options=_NOT_SET): """ @@ -443,7 +443,7 @@ def __init__(self, load_balancing_policy=_NOT_SET, retry_policy=None, class GraphAnalyticsExecutionProfile(GraphExecutionProfile): def __init__(self, load_balancing_policy=None, retry_policy=None, - consistency_level=ConsistencyLevel.LOCAL_ONE, serial_consistency_level=None, + consistency_level=_NOT_SET, serial_consistency_level=None, request_timeout=3600. * 24. * 7., row_factory=None, graph_options=None): """ diff --git a/tests/integration/cloud/test_cloud.py b/tests/integration/cloud/test_cloud.py index ef76b71303..5b9b268f5c 100644 --- a/tests/integration/cloud/test_cloud.py +++ b/tests/integration/cloud/test_cloud.py @@ -23,7 +23,7 @@ from ssl import SSLContext, PROTOCOL_TLSv1 from cassandra import DriverException, ConsistencyLevel, InvalidRequest -from cassandra.cluster import NoHostAvailable, ExecutionProfile, Cluster +from cassandra.cluster import NoHostAvailable, ExecutionProfile, Cluster, _execution_profile_to_string from cassandra.connection import SniEndPoint from cassandra.auth import PlainTextAuthProvider from cassandra.policies import TokenAwarePolicy, DCAwareRoundRobinPolicy, ConstantReconnectionPolicy @@ -160,7 +160,16 @@ def test_metadata_ssl_error(self): def test_default_consistency(self): self.connect(self.creds) self.assertEqual(self.session.default_consistency_level, ConsistencyLevel.LOCAL_QUORUM) - self.assertEqual(self.cluster.profile_manager.default.consistency_level, ConsistencyLevel.LOCAL_QUORUM) + # Verify EXEC_PROFILE_DEFAULT, EXEC_PROFILE_GRAPH_DEFAULT, + # EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT, EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT + for ep_key in six.iterkeys(self.cluster.profile_manager.profiles): + ep = self.cluster.profile_manager.profiles[ep_key] + self.assertEqual( + ep.consistency_level, + ConsistencyLevel.LOCAL_QUORUM, + "Expecting LOCAL QUORUM for profile {}, but got {} instead".format( + _execution_profile_to_string(ep_key), ConsistencyLevel.value_to_name[ep.consistency_level] + )) def test_default_consistency_of_execution_profiles(self): cloud_config = {'secure_connect_bundle': self.creds} From 9f444b415cf5e9e4e48f5f620772cbfed9f36e5b Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 15 Jun 2020 08:25:48 -0400 Subject: [PATCH 1826/2005] In some cases, socket.write() return 0 as sent instead of raising NONBLOCKING --- cassandra/io/libevreactor.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 917e16aea8..54e2d0de03 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -330,6 +330,11 @@ def handle_write(self, watcher, revents, errno=None): if sent < len(next_msg): with self._deque_lock: self.deque.appendleft(next_msg[sent:]) + # we've seen some cases that 0 is returned instead of NONBLOCKING. But usually, + # we don't expect this to happen. https://bugs.python.org/issue20951 + if sent == 0: + self._socket_writable = False + return def handle_read(self, watcher, revents, errno=None): if revents & libev.EV_ERROR: From 5cbbd1abd3bf9a84994f7b5ca495e80e586118a9 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 15 Jun 2020 14:50:07 -0400 Subject: [PATCH 1827/2005] Tests: Set MAX HEAP to 1500M when create a cluster with graph workload --- tests/integration/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 52b1286ebb..1c40f9bd46 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -566,7 +566,9 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, # This will enable the Mirroring query handler which will echo our custom payload k,v pairs back - if 'graph' not in workloads: + if 'graph' in workloads: + jvm_args += ['-Xms1500M', '-Xmx1500M'] + else: if PROTOCOL_VERSION >= 4: jvm_args = [" -Dcassandra.custom_query_handler_class=org.apache.cassandra.cql3.CustomPayloadMirroringQueryHandler"] if len(workloads) > 0: From f58ff459199ec6a2cc539acdf1613a808b6f317d Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 15 Jun 2020 16:15:00 -0400 Subject: [PATCH 1828/2005] Improve graph documentation for the core engine --- docs/graph_fluent.rst | 92 ++++++++++++++++++++++++------------------- 1 file changed, 52 insertions(+), 40 deletions(-) diff --git a/docs/graph_fluent.rst b/docs/graph_fluent.rst index fbe0ef57df..03cf8d36c0 100644 --- a/docs/graph_fluent.rst +++ b/docs/graph_fluent.rst @@ -27,7 +27,19 @@ hard to maintain. This fluent API allows you to build Gremlin traversals and wri queries directly in Python. These native traversal queries can be executed explicitly, with a `Session` object, or implicitly:: - g = DseGraph.traversal_source(session=dse_session) + from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_DEFAULT + from cassandra.datastax.graph import GraphProtocol + from cassandra.datastax.graph.fluent import DseGraph + + # Create an execution profile, using GraphSON3 for Core graphs + ep_graphson3 = DseGraph.create_execution_profile( + 'my_core_graph_name', + graph_protocol=GraphProtocol.GRAPHSON_3_0) + cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep_graphson3}) + session = cluster.connect() + + # Execute a fluent graph query + g = DseGraph.traversal_source(session=session) g.addV('genre').property('genreId', 1).property('name', 'Action').next() # implicit execution caused by iterating over results @@ -50,15 +62,24 @@ Configuring a Traversal Execution Profile The fluent api takes advantage of *configuration profiles* to allow different execution configurations for the various query handlers. Graph traversal execution requires a custom execution profile to enable Gremlin-bytecode as -query language. Here is how to accomplish this configuration: +query language. With Core graphs, it is important to use GraphSON3. Here is how +to accomplish this configuration: .. code-block:: python from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_DEFAULT + from cassandra.datastax.graph import GraphProtocol from cassandra.datastax.graph.fluent import DseGraph - ep = DseGraph.create_execution_profile('graph_name') - cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) + # Using GraphSON3 as graph protocol is a requirement with Core graphs. + ep = DseGraph.create_execution_profile( + 'graph_name', + graph_protocol=GraphProtocol.GRAPHSON_3_0) + + # For Classic graphs, GraphSON1, GraphSON2 and GraphSON3 (DSE 6.8+) are supported. + ep_classic = DseGraph.create_execution_profile('classic_graph_name') # default is GraphSON2 + + cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep, 'classic': ep_classic}) session = cluster.connect() g = DseGraph.traversal_source(session) # Build the GraphTraversalSource @@ -71,27 +92,6 @@ If you want to change execution property defaults, please see the :doc:`Executio for a more generalized discussion of the API. Graph traversal queries use the same execution profile defined for DSE graph. If you need to change the default properties, please refer to the :doc:`DSE Graph query documentation page ` -Configuring a Traversal Execution Profile for the Core graph engine -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To execute a traversal query with graphs that use the core engine, you need to configure -a graphson3 execution profile: - -.. code-block:: python - from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_DEFAULT - from cassandra.datastax.graph import GraphProtocol - from cassandra.datastax.graph.fluent import DseGraph - - ep_graphson3 = DseGraph.create_execution_profile( - 'my_core_graph_name', - graph_protocol=GraphProtocol.GRAPHSON_3_0 - ) - cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep_graphson3}) - - g = DseGraph.traversal_source(session) - print g.V().toList() - - Explicit Graph Traversal Execution with a DSE Session ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -101,19 +101,28 @@ Below is an example of explicit execution. For this example, assume the schema h .. code-block:: python + from cassandra.cluster import Cluster, EXEC_PROFILE_GRAPH_DEFAULT + from cassandra.datastax.graph import GraphProtocol from cassandra.datastax.graph.fluent import DseGraph from pprint import pprint - # create a tinkerpop graphson2 ExecutionProfile - ep = DseGraph.create_execution_profile('graph_name') + ep = DseGraph.create_execution_profile( + 'graph_name', + graph_protocol=GraphProtocol.GRAPHSON_3_0) cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep}) session = cluster.connect() g = DseGraph.traversal_source(session=session) + +Convert a traversal to a bytecode query for classic graphs:: + addV_query = DseGraph.query_from_traversal( - g.addV('genre').property('genreId', 1).property('name', 'Action') + g.addV('genre').property('genreId', 1).property('name', 'Action'), + graph_protocol=GraphProtocol.GRAPHSON_3_0 ) - v_query = DseGraph.query_from_traversal(g.V()) + v_query = DseGraph.query_from_traversal( + g.V(), + graph_protocol=GraphProtocol.GRAPHSON_3_0) for result in session.execute_graph(addV_query): pprint(result.value) @@ -124,7 +133,6 @@ Converting a traversal to a bytecode query for core graphs require some more wor need the cluster context for UDT and tuple types: .. code-block:: python - g = DseGraph.traversal_source(session=session) context = { 'cluster': cluster, 'graph_name': 'the_graph_for_the_query' @@ -135,6 +143,9 @@ need the cluster context for UDT and tuple types: context=context ) + for result in session.execute_graph(addV_query): + pprint(result.value) + Implicit Graph Traversal Execution with TinkerPop ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -185,19 +196,18 @@ python `Future `, you need to bound the batch to a DSE session:: - batch = DseGraph.batch(session, 'graphson2') # bound the session and execution profile + batch = DseGraph.batch(session, 'graphson3') # bound the session and execution profile batch.add( g.addV('genre').property('genreId', 1).property('name', 'Action')) From 1592728ae4e70ffb16b90b3675268f0186a5e9f7 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 15 Jun 2020 19:46:53 -0400 Subject: [PATCH 1829/2005] Set resource_manager_options.worker_options.cores_total for DSE >=6.8 --- tests/integration/__init__.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 1c40f9bd46..207b48b098 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -534,7 +534,17 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, } }) if 'spark' in workloads: - config_options = {"initial_spark_worker_resources": 0.1} + if Version(dse_version) >= Version('6.8'): + config_options = { + "resource_manager_options": { + "worker_options": { + "cores_total": 0.1 + } + } + } + else: + config_options = {"initial_spark_worker_resources": 0.1} + if Version(dse_version) >= Version('6.7'): log.debug("Disabling AlwaysON SQL for a DSE 6.7 Cluster") config_options['alwayson_sql_options'] = {'enabled': False} From d42ac88e25d2e051d35a38e89f9be7a527cff761 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 16 Jun 2020 11:46:58 -0400 Subject: [PATCH 1830/2005] Set resource_manager_options.worker_options.memory_total for DSE >=6.8 --- tests/integration/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 207b48b098..1e1f582804 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -538,7 +538,8 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, config_options = { "resource_manager_options": { "worker_options": { - "cores_total": 0.1 + "cores_total": 0.1, + "memory_total": "64M" } } } From a1ba1f3c2905a625b4c1b05861ab687b2b4f46e9 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 16 Jun 2020 14:16:05 -0400 Subject: [PATCH 1831/2005] Use ccm cassandra-test branch for Windows to get use_single_interface support --- appveyor/appveyor.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/appveyor/appveyor.ps1 b/appveyor/appveyor.ps1 index cc1e6aa76f..5f6840e4e1 100644 --- a/appveyor/appveyor.ps1 +++ b/appveyor/appveyor.ps1 @@ -54,7 +54,7 @@ Start-Process python -ArgumentList "-m pip install psutil pyYaml six numpy" -Wai # Clone ccm from git and use master. If (!(Test-Path $env:CCM_PATH)) { - Start-Process git -ArgumentList "clone https://github.com/pcmanus/ccm.git $($env:CCM_PATH)" -Wait -NoNewWindow + Start-Process git -ArgumentList "clone -b cassandra-test https://github.com/pcmanus/ccm.git $($env:CCM_PATH)" -Wait -NoNewWindow } From e0b7e73c805a3581cc773cb49ff7a6933fa309c5 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 17 Jun 2020 13:43:48 -0400 Subject: [PATCH 1832/2005] release 3.24: changelog & version --- CHANGELOG.rst | 5 +++-- cassandra/__init__.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index dde4c316dd..53a5e22436 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,6 @@ 3.24.0 ====== -Not released +June 18, 2020 Features -------- @@ -12,7 +12,7 @@ Bug Fixes --------- * Unable to connect to a cloud cluster using Ubuntu 20.04 (PYTHON-1238) * PlainTextAuthProvider fails with unicode chars and Python3 (PYTHON-1241) -* Graph execution profiles consistency level are not set to LOCAL_QUORUM with a cloud cluster (PYTHON-1240) +* [GRAPH] Graph execution profiles consistency level are not set to LOCAL_QUORUM with a cloud cluster (PYTHON-1240) * [GRAPH] Can't write data in a Boolean field using the Fluent API (PYTHON-1239) * [GRAPH] Fix elementMap() result deserialization (PYTHON-1233) @@ -20,6 +20,7 @@ Others ------ * Bump geomet dependency version to 0.2 (PYTHON-1243) * Bump gremlinpython dependency version to 3.4.6 (PYTHON-1212) +* Improve fluent graph documentation for core graphs (PYTHON-1244) 3.23.0 ====== diff --git a/cassandra/__init__.py b/cassandra/__init__.py index fd4e516f16..f2bf696035 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 23, 0, 'post0') +__version_info__ = (3, 24, 0) __version__ = '.'.join(map(str, __version_info__)) From c88255f202a21bbbae35f16e603b0f10f2f2cf36 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 17 Jun 2020 13:45:15 -0400 Subject: [PATCH 1833/2005] release 3.24: docs --- docs.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs.yaml b/docs.yaml index 2298db2588..3a33e5a4e8 100644 --- a/docs.yaml +++ b/docs.yaml @@ -22,6 +22,8 @@ sections: # build extensions like libev CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: + - name: '3.24' + ref: e0b7e73c - name: '3.23' ref: a40a2af7 - name: '3.22' From 21cac12b2ca68b1d2abdda97db1b73cf5f3ea450 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 9 Jul 2020 15:27:36 -0400 Subject: [PATCH 1834/2005] PYTHON-1254: Update Getting Started guide to include Astra connection example --- docs/getting_started.rst | 35 ++++++++++++++++++++++++++++++++--- 1 file changed, 32 insertions(+), 3 deletions(-) diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 8cb86a5504..ce31ca5d6f 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -3,16 +3,42 @@ Getting Started First, make sure you have the driver properly :doc:`installed `. -Connecting to Cassandra +Connecting to a Cluster ----------------------- Before we can start executing any queries against a Cassandra cluster we need to setup an instance of :class:`~.Cluster`. As the name suggests, you will typically have one instance of :class:`~.Cluster` for each Cassandra cluster you want to interact with. -The simplest way to create a :class:`~.Cluster` is like this: First, make sure you have the Cassandra driver properly :doc:`installed `. +Connecting to Astra ++++++++++++++++++++ + +If you are a DataStax `Astra `_ user, +here is how to connect to your cluster: + +1. Download the secure connect bundle from your Astra account. +2. Connect to your cluster with + +.. code-block:: python + + from cassandra.cluster import Cluster + from cassandra.auth import PlainTextAuthProvider + + cloud_config = { + 'secure_connect_bundle': '/path/to/secure-connect-dbname.zip' + } + auth_provider = PlainTextAuthProvider(username='user', password='pass') + cluster = Cluster(cloud=cloud_config, auth_provider=auth_provider) + session = cluster.connect() + +See `Astra `_ and :doc:`cloud` for more details. + +Connecting to Cassandra ++++++++++++++++++++++++ +The simplest way to create a :class:`~.Cluster` is like this: + .. code-block:: python from cassandra.cluster import Cluster @@ -52,6 +78,8 @@ To establish connections and begin executing queries we need a cluster = Cluster() session = cluster.connect() +Session Keyspace +---------------- The :meth:`~.Cluster.connect()` method takes an optional ``keyspace`` argument which sets the default keyspace for all queries made through that :class:`~.Session`: @@ -60,7 +88,6 @@ which sets the default keyspace for all queries made through that :class:`~.Sess cluster = Cluster() session = cluster.connect('mykeyspace') - You can always change a Session's keyspace using :meth:`~.Session.set_keyspace` or by executing a ``USE `` query: @@ -70,6 +97,8 @@ by executing a ``USE `` query: # or you can do this instead session.execute('USE users') +Execution Profiles +------------------ Profiles are passed in by ``execution_profiles`` dict. In this case we can construct the base ``ExecutionProfile`` passing all attributes: From 3b50d0554c65b1030547145aef6061e4a854cb27 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 9 Jul 2020 15:29:10 -0400 Subject: [PATCH 1835/2005] update docs hash for 3.24 --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index 3a33e5a4e8..eeccbe16b6 100644 --- a/docs.yaml +++ b/docs.yaml @@ -23,7 +23,7 @@ sections: CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - name: '3.24' - ref: e0b7e73c + ref: 21cac12b - name: '3.23' ref: a40a2af7 - name: '3.22' From 4c582f24c62579f8ea5d2a83e14f9255a8a531b3 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 27 Jul 2020 14:26:06 -0400 Subject: [PATCH 1836/2005] Initial Jenkinsfile with a scripted pipeline --- Jenkinsfile | 816 +++++++++++++++++--------------------------- Jenkinsfile.bak | 873 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 1182 insertions(+), 507 deletions(-) create mode 100644 Jenkinsfile.bak diff --git a/Jenkinsfile b/Jenkinsfile index 87b20804ca..61b2f3ad5c 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -1,39 +1,172 @@ #!groovy - -def initializeEnvironment() { - env.DRIVER_DISPLAY_NAME = 'Cassandra Python Driver' - env.DRIVER_METRIC_TYPE = 'oss' +/* + +There are multiple combinations to test the python driver. + +Test Profiles: + + Full: Execute all unit and integration tests, including long tests. + Standard: Execute unit and integration tests. + Smoke Tests: Execute a small subset of tests. + EVENT_LOOP: Execute a small subset of tests selected to test EVENT_LOOPs. + +Matrix Types: + + Full: All server versions, python runtimes tested with and without Cython. + Develop: Smaller matrix for dev purpose. + Cassandra: All cassandra server versions. + Dse: All dse server versions. + +Parameters: + + EVENT_LOOP: 'LIBEV' (Default), 'GEVENT', 'EVENTLET', 'ASYNCIO', 'ASYNCORE', 'TWISTED' + CYTHON: Default, 'True', 'False' + +*/ + +@Library('dsdrivers-pipeline-lib@develop') +import com.datastax.jenkins.drivers.python.Slack + +slack = new Slack() + +// Define our predefined matrices +matrices = [ + "FULL": [ + "SERVER": ['2.1', '2.2', '3.0', '3.11', '4.0', 'dse-5.0', 'dse-5.1', 'dse-6.0', 'dse-6.7', 'dse-6.8'], + "RUNTIME": ['2.7.18', '3.4.10', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], + "CYTHON": ["True", "False"] + ], + "DEVELOP": [ + "SERVER": ['2.1', '3.11', 'dse-6.8'], + "RUNTIME": ['2.7.18', '3.6.10'], + "CYTHON": ["True", "False"] + ], + "CASSANDRA": [ + "SERVER": ['2.1', '2.2', '3.0', '3.11', '4.0'], + "RUNTIME": ['2.7.18', '3.4.10', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], + "CYTHON": ["True", "False"] + ], + "DSE": [ + "SERVER": ['dse-5.0', 'dse-5.1', 'dse-6.0', 'dse-6.7', 'dse-6.8'], + "RUNTIME": ['2.7.18', '3.4.10', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], + "CYTHON": ["True", "False"] + ] +] + +def getBuildContext() { + /* + Based on schedule, parameters and branch name, configure the build context and env vars. + */ + + def driver_display_name = 'Cassandra Python Driver' if (env.GIT_URL.contains('riptano/python-driver')) { - env.DRIVER_DISPLAY_NAME = 'private ' + env.DRIVER_DISPLAY_NAME - env.DRIVER_METRIC_TYPE = 'oss-private' + driver_display_name = 'private ' + driver_display_name } else if (env.GIT_URL.contains('python-dse-driver')) { - env.DRIVER_DISPLAY_NAME = 'DSE Python Driver' - env.DRIVER_METRIC_TYPE = 'dse' + driver_display_name = 'DSE Python Driver' } - env.GIT_SHA = "${env.GIT_COMMIT.take(7)}" - env.GITHUB_PROJECT_URL = "https://${GIT_URL.replaceFirst(/(git@|http:\/\/|https:\/\/)/, '').replace(':', '/').replace('.git', '')}" - env.GITHUB_BRANCH_URL = "${GITHUB_PROJECT_URL}/tree/${env.BRANCH_NAME}" - env.GITHUB_COMMIT_URL = "${GITHUB_PROJECT_URL}/commit/${env.GIT_COMMIT}" + def git_sha = "${env.GIT_COMMIT.take(7)}" + def github_project_url = "https://${GIT_URL.replaceFirst(/(git@|http:\/\/|https:\/\/)/, '').replace(':', '/').replace('.git', '')}" + def github_branch_url = "${github_project_url}/tree/${env.BRANCH_NAME}" + def github_commit_url = "${github_project_url}/commit/${env.GIT_COMMIT}" - sh label: 'Assign Python global environment', script: '''#!/bin/bash -lex - pyenv global ${PYTHON_VERSION} - ''' + def profile = "${params.PROFILE}" + def EVENT_LOOP = "${params.EVENT_LOOP.toLowerCase()}" + matrixType = "FULL" + developBranchPattern = ~"((dev|long)-)?python-.*" - sh label: 'Install socat; required for unix socket tests', script: '''#!/bin/bash -lex - sudo apt-get install socat - ''' + if (developBranchPattern.matcher(env.BRANCH_NAME).matches()) { + matrixType = "DEVELOP" + if (env.BRANCH_NAME.contains("long")) { + profile = "FULL" + } + } + + // Check if parameters were set explicitly + if (params.MATRIX != "DEFAULT") { + matrixType = params.MATRIX + } + + matrix = matrices[matrixType].clone() + if (params.CYTHON != "DEFAULT") { + matrix["CYTHON"] = [params.CYTHON] + } + + if (params.SERVER_VERSION != "DEFAULT") { + matrix["SERVER"] = [params.SERVER_VERSION] + } + + if (params.PYTHON_VERSION != "DEFAULT") { + matrix["RUNTIME"] = [params.PYTHON_VERSION] + } + + if (params.CI_SCHEDULE == "WEEKNIGHTS") { + matrix["SERVER"] = params.CI_SCHEDULE_SERVER_VERSION.split(' ') + matrix["RUNTIME"] = params.CI_SCHEDULE_PYTHON_VERSION.split(' ') + } + + context = [ + vars: [ + "PROFILE=${profile}", + "EVENT_LOOP=${EVENT_LOOP}", + "DRIVER_DISPLAY_NAME=${driver_display_name}", "GIT_SHA=${git_sha}", "GITHUB_PROJECT_URL=${github_project_url}", + "GITHUB_BRANCH_URL=${github_branch_url}", "GITHUB_COMMIT_URL=${github_commit_url}" + ], + matrix: matrix + ] + + return context +} + +def buildAndTest(context) { + initializeEnvironment() + installDriverAndCompileExtensions() + + try { + executeTests() + } finally { + junit testResults: '*_results.xml' + } +} + +def getMatrixBuilds(buildContext) { + def tasks = [:] + matrix = buildContext.matrix + + matrix["SERVER"].each { serverVersion -> + matrix["RUNTIME"].each { runtimeVersion -> + matrix["CYTHON"].each { cythonFlag -> + def taskVars = [ + "CASSANDRA_VERSION=${serverVersion}", + "PYTHON_VERSION=${runtimeVersion}", + "CYTHON_ENABLED=${cythonFlag}" + ] + def cythonDesc = cythonFlag == "True" ? ", Cython": "" + tasks["${serverVersion}, py${runtimeVersion}${cythonDesc}"] = { + node("${OS_VERSION}") { + checkout scm + + withEnv(taskVars) { + buildAndTest(context) + } + } + } + } + } + } + return tasks +} - sh label: 'Install the latest setuptools', script: '''#!/bin/bash -lex +def initializeEnvironment() { + sh label: 'Initialize the environment', script: '''#!/bin/bash -lex + pyenv global ${PYTHON_VERSION} + sudo apt-get install socat pip install --upgrade pip pip install -U setuptools - ''' - - sh label: 'Install CCM', script: '''#!/bin/bash -lex pip install ${HOME}/ccm ''' - // Determine if server version is Apache Cassandra� or DataStax Enterprise + // Determine if server version is Apache CassandraⓇ or DataStax Enterprise if (env.CASSANDRA_VERSION.split('-')[0] == 'dse') { sh label: 'Install DataStax Enterprise requirements', script: '''#!/bin/bash -lex pip install -r test-datastax-requirements.txt @@ -46,7 +179,6 @@ def initializeEnvironment() { sh label: 'Uninstall the geomet dependency since it is not required for Cassandra', script: '''#!/bin/bash -lex pip uninstall -y geomet ''' - } sh label: 'Install unit test modules', script: '''#!/bin/bash -lex @@ -71,6 +203,7 @@ def initializeEnvironment() { python --version pip --version + pip freeze printenv | sort ''' } @@ -95,9 +228,9 @@ def executeStandardTests() { . ${HOME}/environment.txt set +o allexport - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml tests/unit/ || true - EVENT_LOOP_MANAGER=eventlet VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || true - EVENT_LOOP_MANAGER=gevent VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || true + EVENT_LOOP=${EVENT_LOOP} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml tests/unit/ || true + EVENT_LOOP=eventlet VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || true + EVENT_LOOP=gevent VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || true ''' sh label: 'Execute Simulacron integration tests', script: '''#!/bin/bash -lex @@ -107,13 +240,13 @@ def executeStandardTests() { set +o allexport SIMULACRON_JAR="${HOME}/simulacron.jar" - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true # Run backpressure tests separately to avoid memory issue - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_1_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_paused_connections || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_2_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_queued_requests_timeout || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_3_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_cluster_busy || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_4_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_node_busy || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_1_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_paused_connections || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_2_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_queued_requests_timeout || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_3_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_cluster_busy || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_4_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_node_busy || true ''' sh label: 'Execute CQL engine integration tests', script: '''#!/bin/bash -lex @@ -122,7 +255,7 @@ def executeStandardTests() { . ${HOME}/environment.txt set +o allexport - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true ''' sh label: 'Execute Apache CassandraⓇ integration tests', script: '''#!/bin/bash -lex @@ -131,7 +264,7 @@ def executeStandardTests() { . ${HOME}/environment.txt set +o allexport - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true ''' if (env.CASSANDRA_VERSION.split('-')[0] == 'dse' && env.CASSANDRA_VERSION.split('-')[1] != '4.8') { @@ -141,7 +274,7 @@ def executeStandardTests() { . ${HOME}/environment.txt set +o allexport - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} DSE_VERSION=${DSE_VERSION} ADS_HOME="${HOME}/" VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=dse_results.xml tests/integration/advanced/ || true + EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} DSE_VERSION=${DSE_VERSION} ADS_HOME="${HOME}/" VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=dse_results.xml tests/integration/advanced/ || true ''' } @@ -151,17 +284,17 @@ def executeStandardTests() { . ${HOME}/environment.txt set +o allexport - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CLOUD_PROXY_PATH="${HOME}/proxy/" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=advanced_results.xml tests/integration/cloud/ || true + EVENT_LOOP=${EVENT_LOOP} CLOUD_PROXY_PATH="${HOME}/proxy/" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=advanced_results.xml tests/integration/cloud/ || true ''' - if (env.EXECUTE_LONG_TESTS == 'True') { + if (env.PROFILE == 'FULL') { sh label: 'Execute long running integration tests', script: '''#!/bin/bash -lex # Load CCM environment variable set -o allexport . ${HOME}/environment.txt set +o allexport - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true ''' } } @@ -173,7 +306,7 @@ def executeDseSmokeTests() { . ${HOME}/environment.txt set +o allexport - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} DSE_VERSION=${DSE_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/test_dse.py || true + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} DSE_VERSION=${DSE_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/test_dse.py || true ''' } @@ -194,69 +327,34 @@ def executeEventLoopTests() { "tests/integration/simulacron/test_endpoint.py" "tests/integration/long/test_ssl.py" ) - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml ${EVENT_LOOP_TESTS[@]} || true - ''' -} - -def executeUpgradeTests() { - sh label: 'Execute profile upgrade integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=upgrade_results.xml tests/integration/upgrade || true + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml ${EVENT_LOOP_TESTS[@]} || true ''' } def executeTests() { - switch(params.PROFILE) { + switch(env.PROFILE) { case 'DSE-SMOKE-TEST': executeDseSmokeTests() break - case 'EVENT-LOOP': + case 'EVENT_LOOP': executeEventLoopTests() break - case 'UPGRADE': - executeUpgradeTests() - break default: executeStandardTests() break } } -def notifySlack(status = 'started') { - // Set the global pipeline scoped environment (this is above each matrix) - env.BUILD_STATED_SLACK_NOTIFIED = 'true' - def buildType = 'Commit' - if (params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION') { - buildType = "${params.CI_SCHEDULE.toLowerCase().capitalize()}" - } - - def color = 'good' // Green - if (status.equalsIgnoreCase('aborted')) { - color = '808080' // Grey - } else if (status.equalsIgnoreCase('unstable')) { - color = 'warning' // Orange - } else if (status.equalsIgnoreCase('failed')) { - color = 'danger' // Red - } - - def message = """Build ${status} for ${env.DRIVER_DISPLAY_NAME} [${buildType}] -<${env.GITHUB_BRANCH_URL}|${env.BRANCH_NAME}> - <${env.RUN_DISPLAY_URL}|#${env.BUILD_NUMBER}> - <${env.GITHUB_COMMIT_URL}|${env.GIT_SHA}>""" - if (params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION') { - message += " - ${params.CI_SCHEDULE_PYTHON_VERSION} - ${params.EVENT_LOOP_MANAGER}" - } - if (!status.equalsIgnoreCase('Started')) { - message += """ -${status} after ${currentBuild.durationString - ' and counting'}""" +// TODO move this in the shared lib +def getDriverMetricType() { + metric_type = 'oss' + if (env.GIT_URL.contains('riptano/python-driver')) { + metric_type = 'oss-private' + } else if (env.GIT_URL.contains('python-dse-driver')) { + metric_type = 'dse' } - - slackSend color: "${color}", - channel: "#python-driver-dev-bots", - message: "${message}" + return metric_type } def submitCIMetrics(buildType) { @@ -264,7 +362,8 @@ def submitCIMetrics(buildType) { long durationSec = durationMs / 1000 long nowSec = (currentBuild.startTimeInMillis + durationMs) / 1000 def branchNameNoPeriods = env.BRANCH_NAME.replaceAll('\\.', '_') - def durationMetric = "okr.ci.python.${env.DRIVER_METRIC_TYPE}.${buildType}.${branchNameNoPeriods} ${durationSec} ${nowSec}" + metric_type = getDriverMetricType() + def durationMetric = "okr.ci.python.${metric_type}.${buildType}.${branchNameNoPeriods} ${durationSec} ${nowSec}" timeout(time: 1, unit: 'MINUTES') { withCredentials([string(credentialsId: 'lab-grafana-address', variable: 'LAB_GRAFANA_ADDRESS'), @@ -278,108 +377,24 @@ def submitCIMetrics(buildType) { } } -def describePerCommitStage() { +def describeBuild(buildContext) { script { - def type = 'standard' - def serverDescription = 'current Apache CassandaraⓇ and supported DataStax Enterprise versions' - if (env.BRANCH_NAME ==~ /long-python.*/) { - type = 'long' - } else if (env.BRANCH_NAME ==~ /dev-python.*/) { - type = 'dev' - } - - currentBuild.displayName = "Per-Commit (${env.EVENT_LOOP_MANAGER} | ${type.capitalize()})" - currentBuild.description = "Per-Commit build and ${type} testing of ${serverDescription} against Python v2.7.18 and v3.5.9 using ${env.EVENT_LOOP_MANAGER} event loop manager" + def runtimes = buildContext.matrix["RUNTIME"] + def serverVersions = buildContext.matrix["SERVER"] + def numBuilds = runtimes.size() * serverVersions.size() * buildContext.matrix["CYTHON"].size() + currentBuild.displayName = "${env.PROFILE} (${env.EVENT_LOOP} | ${numBuilds} builds)" + currentBuild.description = "${env.PROFILE} build testing servers (${serverVersions.join(', ')}) against Python (${runtimes.join(', ')}) using ${env.EVENT_LOOP} event loop manager" } - - sh label: 'Describe the python environment', script: '''#!/bin/bash -lex - python -V - pip freeze - ''' } -def describeScheduledTestingStage() { - script { - def type = params.CI_SCHEDULE.toLowerCase().capitalize() - def displayName = "${type} schedule (${env.EVENT_LOOP_MANAGER}" - if (env.CYTHON_ENABLED == 'True') { - displayName += " | Cython" - } - if (params.PROFILE != 'NONE') { - displayName += " | ${params.PROFILE}" - } - displayName += ")" - currentBuild.displayName = displayName - - def serverVersionDescription = "${params.CI_SCHEDULE_SERVER_VERSION.replaceAll(' ', ', ')} server version(s) in the matrix" - def pythonVersionDescription = "${params.CI_SCHEDULE_PYTHON_VERSION.replaceAll(' ', ', ')} Python version(s) in the matrix" - def description = "${type} scheduled testing using ${env.EVENT_LOOP_MANAGER} event loop manager" - if (env.CYTHON_ENABLED == 'True') { - description += ", with Cython enabled" - } - if (params.PROFILE != 'NONE') { - description += ", ${params.PROFILE} profile" - } - description += ", ${serverVersionDescription}, and ${pythonVersionDescription}" - currentBuild.description = description - } -} - -def describeAdhocTestingStage() { - script { - def serverType = params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION.split('-')[0] - def serverDisplayName = 'Apache CassandaraⓇ' - def serverVersion = " v${serverType}" - if (serverType == 'ALL') { - serverDisplayName = "all ${serverDisplayName} and DataStax Enterprise server versions" - serverVersion = '' - } else { - try { - serverVersion = " v${env.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION.split('-')[1]}" - } catch (e) { - ;; // no-op - } - if (serverType == 'dse') { - serverDisplayName = 'DataStax Enterprise' - } - } - def displayName = "${params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION} for v${params.ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION} (${env.EVENT_LOOP_MANAGER}" - if (env.CYTHON_ENABLED == 'True') { - displayName += " | Cython" - } - if (params.PROFILE != 'NONE') { - displayName += " | ${params.PROFILE}" - } - displayName += ")" - currentBuild.displayName = displayName - - def description = "Testing ${serverDisplayName} ${serverVersion} using ${env.EVENT_LOOP_MANAGER} against Python ${params.ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION}" - if (env.CYTHON_ENABLED == 'True') { - description += ", with Cython" - } - if (params.PROFILE == 'NONE') { - if (params.EXECUTE_LONG_TESTS) { - description += ", with" - } else { - description += ", without" - } - description += " long tests executed" - } else { - description += ", ${params.PROFILE} profile" - } - currentBuild.description = description - } -} - -def branchPatternCron = ~"(master)" -def riptanoPatternCron = ~"(riptano)" +def scheduleTriggerJobName = "drivers/python/oss/master" pipeline { agent none // Global pipeline timeout options { - timeout(time: 10, unit: 'HOURS') + timeout(time: 10, unit: 'HOURS') // TODO timeout should be per build buildDiscarder(logRotator(artifactNumToKeepStr: '10', // Keep only the last 10 artifacts numToKeepStr: '50')) // Keep only the last 50 build records } @@ -406,12 +421,73 @@ pipeline { ''') choice( - name: 'ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION', - choices: ['2.7.18', '3.4.10', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], - description: 'Python version to use for adhoc BUILD-AND-EXECUTE-TESTS ONLY!') + name: 'PROFILE', + choices: ['STANDARD', 'FULL', 'DSE-SMOKE-TEST', 'EVENT_LOOP'], + description: '''

Profile to utilize for scheduled or adhoc builds

+ + + + + + + + + + + + + + + + + + + + + + + +
ChoiceDescription
STANDARDExecute the standard tests for the driver
FULLExecute all tests for the driver, including long tests.
DSE-SMOKE-TESTExecute only the DataStax Enterprise smoke tests
EVENT_LOOPExecute only the event loop tests for the specified event loop manager (see: EVENT_LOOP)
''') + choice( + name: 'MATRIX', + choices: ['DEFAULT', 'FULL', 'DEVELOP', 'CASSANDRA', 'DSE'], + description: '''

The matrix for the build.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
ChoiceDescription
DEFAULTDefault to the build context.
FULLAll server versions, python runtimes tested with and without Cython.
DEVELOPSmaller matrix for dev purpose.
CASSANDRAAll cassandra server versions.
DSEAll dse server versions.
''') choice( - name: 'ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION', - choices: ['2.1', // Legacy Apache CassandraⓇ + name: 'PYTHON_VERSION', + choices: ['DEFAULT', '2.7.18', '3.4.10', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], + description: 'Python runtime version. Default to the build context.') + choice( + name: 'SERVER_VERSION', + choices: ['DEFAULT', + '2.1', // Legacy Apache CassandraⓇ '2.2', // Legacy Apache CassandraⓇ '3.0', // Previous Apache CassandraⓇ '3.11', // Current Apache CassandraⓇ @@ -421,7 +497,7 @@ pipeline { 'dse-6.0', // Previous DataStax Enterprise 'dse-6.7', // Previous DataStax Enterprise 'dse-6.8', // Current DataStax Enterprise - 'ALL'], + ], description: '''Apache CassandraⓇ and DataStax Enterprise server version to use for adhoc BUILD-AND-EXECUTE-TESTS ONLY! @@ -429,10 +505,14 @@ pipeline { + + + + - + @@ -440,15 +520,15 @@ pipeline { - + - + - + @@ -471,16 +551,32 @@ pipeline {
Choice Description
DEFAULTDefault to the build context.
2.1Apache CassandaraⓇ; v2.1.xApache CassandraⓇ; v2.1.x
2.2
3.0Apache CassandaraⓇ v3.0.xApache CassandraⓇ v3.0.x
3.11Apache CassandaraⓇ v3.11.xApache CassandraⓇ v3.11.x
4.0Apache CassandaraⓇ v4.x (CURRENTLY UNDER DEVELOPMENT)Apache CassandraⓇ v4.x (CURRENTLY UNDER DEVELOPMENT)
dse-5.0DataStax Enterprise v6.8.x (CURRENTLY UNDER DEVELOPMENT)
''') - booleanParam( + choice( name: 'CYTHON', - defaultValue: false, - description: 'Flag to determine if Cython should be enabled for scheduled or adhoc builds') - booleanParam( - name: 'EXECUTE_LONG_TESTS', - defaultValue: false, - description: 'Flag to determine if long integration tests should be executed for scheduled or adhoc builds') + choices: ['DEFAULT', 'True', 'False'], + description: '''

Flag to determine if Cython should be enabled

+ + + + + + + + + + + + + + + + + + + +
ChoiceDescription
DefaultDefault to the build context.
TrueEnable Cython
FalseDisable Cython
''') choice( - name: 'EVENT_LOOP_MANAGER', + name: 'EVENT_LOOP', choices: ['LIBEV', 'GEVENT', 'EVENTLET', 'ASYNCIO', 'ASYNCORE', 'TWISTED'], description: '''

Event loop manager to utilize for scheduled or adhoc builds

@@ -515,34 +611,6 @@ pipeline {
An event-driven networking engine written in Python and licensed under the open source MIT license
''') - choice( - name: 'PROFILE', - choices: ['NONE', 'DSE-SMOKE-TEST', 'EVENT-LOOP', 'UPGRADE'], - description: '''

Profile to utilize for scheduled or adhoc builds

- - - - - - - - - - - - - - - - - - - - - - - -
ChoiceDescription
NONEExecute the standard tests for the driver
DSE-SMOKE-TESTExecute only the DataStax Enterprise smoke tests
EVENT-LOOPExecute only the event loop tests for the specified event loop manager (see: EVENT_LOOP_MANAGER)
UPGRADEExecute only the upgrade tests
''') choice( name: 'CI_SCHEDULE', choices: ['DO-NOT-CHANGE-THIS-SELECTION', 'WEEKNIGHTS', 'WEEKENDS'], @@ -558,316 +626,50 @@ pipeline { } triggers { - parameterizedCron((branchPatternCron.matcher(env.BRANCH_NAME).matches() && !riptanoPatternCron.matcher(GIT_URL).find()) ? """ + parameterizedCron((scheduleTriggerJobName == env.JOB_NAME) ? """ # Every weeknight (Monday - Friday) around 4:00 AM # These schedules will run with and without Cython enabled for Python v2.7.18 and v3.5.9 - H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 - H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 - - # Every Saturday around 12:00, 4:00 and 8:00 PM - # These schedules are for weekly libev event manager runs with and without Cython for most of the Python versions (excludes v3.5.9.x) - H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 - H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.4.10;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 - H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 - H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 - H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 - # These schedules are for weekly gevent event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.10.x) - H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - # These schedules are for weekly eventlet event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.10.x) - H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - - # Every Sunday around 12:00 and 4:00 AM - # These schedules are for weekly asyncore event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.10.x) - H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - # These schedules are for weekly twisted event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.10.x) - H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.18 3.5.9;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 """ : "") } environment { OS_VERSION = 'ubuntu/bionic64/python-driver' - CYTHON_ENABLED = "${params.CYTHON ? 'True' : 'False'}" - EVENT_LOOP_MANAGER = "${params.EVENT_LOOP_MANAGER.toLowerCase()}" - EXECUTE_LONG_TESTS = "${params.EXECUTE_LONG_TESTS ? 'True' : 'False'}" CCM_ENVIRONMENT_SHELL = '/usr/local/bin/ccm_environment.sh' CCM_MAX_HEAP_SIZE = '1536M' } stages { - stage ('Per-Commit') { - options { - timeout(time: 2, unit: 'HOURS') + stage ('Build and Test') { + agent { + // If I removed this agent block, GIT_URL and GIT_COMMIT aren't set. + // However, this trigger an additional checkout + label "master" } when { beforeAgent true - branch pattern: '((dev|long)-)?python-.*', comparator: 'REGEXP' allOf { - expression { params.ADHOC_BUILD_TYPE == 'BUILD' } - expression { params.CI_SCHEDULE == 'DO-NOT-CHANGE-THIS-SELECTION' } not { buildingTag() } } } - matrix { - axes { - axis { - name 'CASSANDRA_VERSION' - values '3.11', // Current Apache Cassandra - 'dse-6.8' // Current DataStax Enterprise - } - axis { - name 'PYTHON_VERSION' - values '2.7.18', '3.5.9' - } - axis { - name 'CYTHON_ENABLED' - values 'False' - } - } + steps { + script { + context = getBuildContext() + withEnv(context.vars) { + describeBuild(context) + slack.notifyChannel() - agent { - label "${OS_VERSION}" - } + // build and test all builds + parallel getMatrixBuilds(context) - stages { - stage('Initialize-Environment') { - steps { - initializeEnvironment() - script { - if (env.BUILD_STATED_SLACK_NOTIFIED != 'true') { - notifySlack() - } - } - } - } - stage('Describe-Build') { - steps { - describePerCommitStage() - } - } - stage('Install-Driver-And-Compile-Extensions') { - steps { - installDriverAndCompileExtensions() - } - } - stage('Execute-Tests') { - steps { - - script { - if (env.BRANCH_NAME ==~ /long-python.*/) { - withEnv(["EXECUTE_LONG_TESTS=True"]) { - executeTests() - } - } - else { - executeTests() - } - } - } - post { - always { - junit testResults: '*_results.xml' - } - } - } - } - } - post { - always { - node('master') { + // send the metrics submitCIMetrics('commit') + slack.notifyChannel(currentBuild.currentResult) } } - aborted { - notifySlack('aborted') - } - success { - notifySlack('completed') - } - unstable { - notifySlack('unstable') - } - failure { - notifySlack('FAILED') - } - } - } - - stage ('Scheduled-Testing') { - when { - beforeAgent true - allOf { - expression { params.ADHOC_BUILD_TYPE == 'BUILD' } - expression { params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION' } - not { buildingTag() } - } - } - matrix { - axes { - axis { - name 'CASSANDRA_VERSION' - values '2.1', // Legacy Apache Cassandra - '2.2', // Legacy Apache Cassandra - '3.0', // Previous Apache Cassandra - '3.11', // Current Apache Cassandra - 'dse-5.1', // Legacy DataStax Enterprise - 'dse-6.0', // Previous DataStax Enterprise - 'dse-6.7' // Current DataStax Enterprise - } - axis { - name 'CYTHON_ENABLED' - values 'True', 'False' - } - } - when { - beforeAgent true - allOf { - expression { return params.CI_SCHEDULE_SERVER_VERSION.split(' ').any { it =~ /(ALL|${env.CASSANDRA_VERSION})/ } } - } - } - - environment { - PYTHON_VERSION = "${params.CI_SCHEDULE_PYTHON_VERSION}" - } - agent { - label "${OS_VERSION}" - } - - stages { - stage('Initialize-Environment') { - steps { - initializeEnvironment() - script { - if (env.BUILD_STATED_SLACK_NOTIFIED != 'true') { - notifySlack() - } - } - } - } - stage('Describe-Build') { - steps { - describeScheduledTestingStage() - } - } - stage('Install-Driver-And-Compile-Extensions') { - steps { - installDriverAndCompileExtensions() - } - } - stage('Execute-Tests') { - steps { - executeTests() - } - post { - always { - junit testResults: '*_results.xml' - } - } - } - } - } - post { - aborted { - notifySlack('aborted') - } - success { - notifySlack('completed') - } - unstable { - notifySlack('unstable') - } - failure { - notifySlack('FAILED') - } } } - - stage('Adhoc-Testing') { - when { - beforeAgent true - allOf { - expression { params.ADHOC_BUILD_TYPE == 'BUILD-AND-EXECUTE-TESTS' } - not { buildingTag() } - } - } - - environment { - CYTHON_ENABLED = "${params.CYTHON ? 'True' : 'False'}" - PYTHON_VERSION = "${params.ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION}" - } - - matrix { - axes { - axis { - name 'CASSANDRA_VERSION' - values '2.1', // Legacy Apache Cassandra - '2.2', // Legacy Apache Cassandra - '3.0', // Previous Apache Cassandra - '3.11', // Current Apache Cassandra - '4.0', // Development Apache Cassandra - 'dse-5.0', // Long Term Support DataStax Enterprise - 'dse-5.1', // Legacy DataStax Enterprise - 'dse-6.0', // Previous DataStax Enterprise - 'dse-6.7', // Current DataStax Enterprise - 'dse-6.8' // Development DataStax Enterprise - } - } - when { - beforeAgent true - allOf { - expression { params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION ==~ /(ALL|${env.CASSANDRA_VERSION})/ } - } - } - - agent { - label "${OS_VERSION}" - } - - stages { - stage('Describe-Build') { - steps { - describeAdhocTestingStage() - } - } - stage('Initialize-Environment') { - steps { - initializeEnvironment() - } - } - stage('Install-Driver-And-Compile-Extensions') { - steps { - installDriverAndCompileExtensions() - } - } - stage('Execute-Tests') { - steps { - executeTests() - } - post { - always { - junit testResults: '*_results.xml' - } - } - } - } - } - } } } diff --git a/Jenkinsfile.bak b/Jenkinsfile.bak new file mode 100644 index 0000000000..87b20804ca --- /dev/null +++ b/Jenkinsfile.bak @@ -0,0 +1,873 @@ +#!groovy + +def initializeEnvironment() { + env.DRIVER_DISPLAY_NAME = 'Cassandra Python Driver' + env.DRIVER_METRIC_TYPE = 'oss' + if (env.GIT_URL.contains('riptano/python-driver')) { + env.DRIVER_DISPLAY_NAME = 'private ' + env.DRIVER_DISPLAY_NAME + env.DRIVER_METRIC_TYPE = 'oss-private' + } else if (env.GIT_URL.contains('python-dse-driver')) { + env.DRIVER_DISPLAY_NAME = 'DSE Python Driver' + env.DRIVER_METRIC_TYPE = 'dse' + } + + env.GIT_SHA = "${env.GIT_COMMIT.take(7)}" + env.GITHUB_PROJECT_URL = "https://${GIT_URL.replaceFirst(/(git@|http:\/\/|https:\/\/)/, '').replace(':', '/').replace('.git', '')}" + env.GITHUB_BRANCH_URL = "${GITHUB_PROJECT_URL}/tree/${env.BRANCH_NAME}" + env.GITHUB_COMMIT_URL = "${GITHUB_PROJECT_URL}/commit/${env.GIT_COMMIT}" + + sh label: 'Assign Python global environment', script: '''#!/bin/bash -lex + pyenv global ${PYTHON_VERSION} + ''' + + sh label: 'Install socat; required for unix socket tests', script: '''#!/bin/bash -lex + sudo apt-get install socat + ''' + + sh label: 'Install the latest setuptools', script: '''#!/bin/bash -lex + pip install --upgrade pip + pip install -U setuptools + ''' + + sh label: 'Install CCM', script: '''#!/bin/bash -lex + pip install ${HOME}/ccm + ''' + + // Determine if server version is Apache Cassandra� or DataStax Enterprise + if (env.CASSANDRA_VERSION.split('-')[0] == 'dse') { + sh label: 'Install DataStax Enterprise requirements', script: '''#!/bin/bash -lex + pip install -r test-datastax-requirements.txt + ''' + } else { + sh label: 'Install Apache CassandraⓇ requirements', script: '''#!/bin/bash -lex + pip install -r test-requirements.txt + ''' + + sh label: 'Uninstall the geomet dependency since it is not required for Cassandra', script: '''#!/bin/bash -lex + pip uninstall -y geomet + ''' + + } + + sh label: 'Install unit test modules', script: '''#!/bin/bash -lex + pip install nose-ignore-docstring nose-exclude service_identity + ''' + + if (env.CYTHON_ENABLED == 'True') { + sh label: 'Install cython modules', script: '''#!/bin/bash -lex + pip install cython numpy + ''' + } + + sh label: 'Download Apache CassandraⓇ or DataStax Enterprise', script: '''#!/bin/bash -lex + . ${CCM_ENVIRONMENT_SHELL} ${CASSANDRA_VERSION} + ''' + + sh label: 'Display Python and environment information', script: '''#!/bin/bash -le + # Load CCM environment variables + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + python --version + pip --version + printenv | sort + ''' +} + +def installDriverAndCompileExtensions() { + if (env.CYTHON_ENABLED == 'True') { + sh label: 'Install the driver and compile with C extensions with Cython', script: '''#!/bin/bash -lex + python setup.py build_ext --inplace + ''' + } else { + sh label: 'Install the driver and compile with C extensions without Cython', script: '''#!/bin/bash -lex + python setup.py build_ext --inplace --no-cython + ''' + } +} + +def executeStandardTests() { + + sh label: 'Execute unit tests', script: '''#!/bin/bash -lex + # Load CCM environment variables + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml tests/unit/ || true + EVENT_LOOP_MANAGER=eventlet VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || true + EVENT_LOOP_MANAGER=gevent VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || true + ''' + + sh label: 'Execute Simulacron integration tests', script: '''#!/bin/bash -lex + # Load CCM environment variables + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + SIMULACRON_JAR="${HOME}/simulacron.jar" + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true + + # Run backpressure tests separately to avoid memory issue + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_1_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_paused_connections || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_2_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_queued_requests_timeout || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_3_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_cluster_busy || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_4_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_node_busy || true + ''' + + sh label: 'Execute CQL engine integration tests', script: '''#!/bin/bash -lex + # Load CCM environment variables + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true + ''' + + sh label: 'Execute Apache CassandraⓇ integration tests', script: '''#!/bin/bash -lex + # Load CCM environment variables + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true + ''' + + if (env.CASSANDRA_VERSION.split('-')[0] == 'dse' && env.CASSANDRA_VERSION.split('-')[1] != '4.8') { + sh label: 'Execute DataStax Enterprise integration tests', script: '''#!/bin/bash -lex + # Load CCM environment variable + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} DSE_VERSION=${DSE_VERSION} ADS_HOME="${HOME}/" VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=dse_results.xml tests/integration/advanced/ || true + ''' + } + + sh label: 'Execute DataStax Constellation integration tests', script: '''#!/bin/bash -lex + # Load CCM environment variable + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CLOUD_PROXY_PATH="${HOME}/proxy/" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=advanced_results.xml tests/integration/cloud/ || true + ''' + + if (env.EXECUTE_LONG_TESTS == 'True') { + sh label: 'Execute long running integration tests', script: '''#!/bin/bash -lex + # Load CCM environment variable + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true + ''' + } +} + +def executeDseSmokeTests() { + sh label: 'Execute profile DataStax Enterprise smoke test integration tests', script: '''#!/bin/bash -lex + # Load CCM environment variable + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} DSE_VERSION=${DSE_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/test_dse.py || true + ''' +} + +def executeEventLoopTests() { + sh label: 'Execute profile event loop manager integration tests', script: '''#!/bin/bash -lex + # Load CCM environment variable + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + EVENT_LOOP_TESTS=( + "tests/integration/standard/test_cluster.py" + "tests/integration/standard/test_concurrent.py" + "tests/integration/standard/test_connection.py" + "tests/integration/standard/test_control_connection.py" + "tests/integration/standard/test_metrics.py" + "tests/integration/standard/test_query.py" + "tests/integration/simulacron/test_endpoint.py" + "tests/integration/long/test_ssl.py" + ) + EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml ${EVENT_LOOP_TESTS[@]} || true + ''' +} + +def executeUpgradeTests() { + sh label: 'Execute profile upgrade integration tests', script: '''#!/bin/bash -lex + # Load CCM environment variable + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=upgrade_results.xml tests/integration/upgrade || true + ''' +} + +def executeTests() { + switch(params.PROFILE) { + case 'DSE-SMOKE-TEST': + executeDseSmokeTests() + break + case 'EVENT-LOOP': + executeEventLoopTests() + break + case 'UPGRADE': + executeUpgradeTests() + break + default: + executeStandardTests() + break + } +} + +def notifySlack(status = 'started') { + // Set the global pipeline scoped environment (this is above each matrix) + env.BUILD_STATED_SLACK_NOTIFIED = 'true' + + def buildType = 'Commit' + if (params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION') { + buildType = "${params.CI_SCHEDULE.toLowerCase().capitalize()}" + } + + def color = 'good' // Green + if (status.equalsIgnoreCase('aborted')) { + color = '808080' // Grey + } else if (status.equalsIgnoreCase('unstable')) { + color = 'warning' // Orange + } else if (status.equalsIgnoreCase('failed')) { + color = 'danger' // Red + } + + def message = """Build ${status} for ${env.DRIVER_DISPLAY_NAME} [${buildType}] +<${env.GITHUB_BRANCH_URL}|${env.BRANCH_NAME}> - <${env.RUN_DISPLAY_URL}|#${env.BUILD_NUMBER}> - <${env.GITHUB_COMMIT_URL}|${env.GIT_SHA}>""" + if (params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION') { + message += " - ${params.CI_SCHEDULE_PYTHON_VERSION} - ${params.EVENT_LOOP_MANAGER}" + } + if (!status.equalsIgnoreCase('Started')) { + message += """ +${status} after ${currentBuild.durationString - ' and counting'}""" + } + + slackSend color: "${color}", + channel: "#python-driver-dev-bots", + message: "${message}" +} + +def submitCIMetrics(buildType) { + long durationMs = currentBuild.duration + long durationSec = durationMs / 1000 + long nowSec = (currentBuild.startTimeInMillis + durationMs) / 1000 + def branchNameNoPeriods = env.BRANCH_NAME.replaceAll('\\.', '_') + def durationMetric = "okr.ci.python.${env.DRIVER_METRIC_TYPE}.${buildType}.${branchNameNoPeriods} ${durationSec} ${nowSec}" + + timeout(time: 1, unit: 'MINUTES') { + withCredentials([string(credentialsId: 'lab-grafana-address', variable: 'LAB_GRAFANA_ADDRESS'), + string(credentialsId: 'lab-grafana-port', variable: 'LAB_GRAFANA_PORT')]) { + withEnv(["DURATION_METRIC=${durationMetric}"]) { + sh label: 'Send runtime metrics to labgrafana', script: '''#!/bin/bash -lex + echo "${DURATION_METRIC}" | nc -q 5 ${LAB_GRAFANA_ADDRESS} ${LAB_GRAFANA_PORT} + ''' + } + } + } +} + +def describePerCommitStage() { + script { + def type = 'standard' + def serverDescription = 'current Apache CassandaraⓇ and supported DataStax Enterprise versions' + if (env.BRANCH_NAME ==~ /long-python.*/) { + type = 'long' + } else if (env.BRANCH_NAME ==~ /dev-python.*/) { + type = 'dev' + } + + currentBuild.displayName = "Per-Commit (${env.EVENT_LOOP_MANAGER} | ${type.capitalize()})" + currentBuild.description = "Per-Commit build and ${type} testing of ${serverDescription} against Python v2.7.18 and v3.5.9 using ${env.EVENT_LOOP_MANAGER} event loop manager" + } + + sh label: 'Describe the python environment', script: '''#!/bin/bash -lex + python -V + pip freeze + ''' +} + +def describeScheduledTestingStage() { + script { + def type = params.CI_SCHEDULE.toLowerCase().capitalize() + def displayName = "${type} schedule (${env.EVENT_LOOP_MANAGER}" + if (env.CYTHON_ENABLED == 'True') { + displayName += " | Cython" + } + if (params.PROFILE != 'NONE') { + displayName += " | ${params.PROFILE}" + } + displayName += ")" + currentBuild.displayName = displayName + + def serverVersionDescription = "${params.CI_SCHEDULE_SERVER_VERSION.replaceAll(' ', ', ')} server version(s) in the matrix" + def pythonVersionDescription = "${params.CI_SCHEDULE_PYTHON_VERSION.replaceAll(' ', ', ')} Python version(s) in the matrix" + def description = "${type} scheduled testing using ${env.EVENT_LOOP_MANAGER} event loop manager" + if (env.CYTHON_ENABLED == 'True') { + description += ", with Cython enabled" + } + if (params.PROFILE != 'NONE') { + description += ", ${params.PROFILE} profile" + } + description += ", ${serverVersionDescription}, and ${pythonVersionDescription}" + currentBuild.description = description + } +} + +def describeAdhocTestingStage() { + script { + def serverType = params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION.split('-')[0] + def serverDisplayName = 'Apache CassandaraⓇ' + def serverVersion = " v${serverType}" + if (serverType == 'ALL') { + serverDisplayName = "all ${serverDisplayName} and DataStax Enterprise server versions" + serverVersion = '' + } else { + try { + serverVersion = " v${env.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION.split('-')[1]}" + } catch (e) { + ;; // no-op + } + if (serverType == 'dse') { + serverDisplayName = 'DataStax Enterprise' + } + } + def displayName = "${params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION} for v${params.ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION} (${env.EVENT_LOOP_MANAGER}" + if (env.CYTHON_ENABLED == 'True') { + displayName += " | Cython" + } + if (params.PROFILE != 'NONE') { + displayName += " | ${params.PROFILE}" + } + displayName += ")" + currentBuild.displayName = displayName + + def description = "Testing ${serverDisplayName} ${serverVersion} using ${env.EVENT_LOOP_MANAGER} against Python ${params.ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION}" + if (env.CYTHON_ENABLED == 'True') { + description += ", with Cython" + } + if (params.PROFILE == 'NONE') { + if (params.EXECUTE_LONG_TESTS) { + description += ", with" + } else { + description += ", without" + } + description += " long tests executed" + } else { + description += ", ${params.PROFILE} profile" + } + currentBuild.description = description + } +} + +def branchPatternCron = ~"(master)" +def riptanoPatternCron = ~"(riptano)" + +pipeline { + agent none + + // Global pipeline timeout + options { + timeout(time: 10, unit: 'HOURS') + buildDiscarder(logRotator(artifactNumToKeepStr: '10', // Keep only the last 10 artifacts + numToKeepStr: '50')) // Keep only the last 50 build records + } + + parameters { + choice( + name: 'ADHOC_BUILD_TYPE', + choices: ['BUILD', 'BUILD-AND-EXECUTE-TESTS'], + description: '''

Perform a adhoc build operation

+ + + + + + + + + + + + + + + +
ChoiceDescription
BUILDPerforms a Per-Commit build
BUILD-AND-EXECUTE-TESTSPerforms a build and executes the integration and unit tests
''') + choice( + name: 'ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION', + choices: ['2.7.18', '3.4.10', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], + description: 'Python version to use for adhoc BUILD-AND-EXECUTE-TESTS ONLY!') + choice( + name: 'ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION', + choices: ['2.1', // Legacy Apache CassandraⓇ + '2.2', // Legacy Apache CassandraⓇ + '3.0', // Previous Apache CassandraⓇ + '3.11', // Current Apache CassandraⓇ + '4.0', // Development Apache CassandraⓇ + 'dse-5.0', // Long Term Support DataStax Enterprise + 'dse-5.1', // Legacy DataStax Enterprise + 'dse-6.0', // Previous DataStax Enterprise + 'dse-6.7', // Previous DataStax Enterprise + 'dse-6.8', // Current DataStax Enterprise + 'ALL'], + description: '''Apache CassandraⓇ and DataStax Enterprise server version to use for adhoc BUILD-AND-EXECUTE-TESTS ONLY! + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ChoiceDescription
2.1Apache CassandaraⓇ; v2.1.x
2.2Apache CassandarⓇ; v2.2.x
3.0Apache CassandaraⓇ v3.0.x
3.11Apache CassandaraⓇ v3.11.x
4.0Apache CassandaraⓇ v4.x (CURRENTLY UNDER DEVELOPMENT)
dse-5.0DataStax Enterprise v5.0.x (Long Term Support)
dse-5.1DataStax Enterprise v5.1.x
dse-6.0DataStax Enterprise v6.0.x
dse-6.7DataStax Enterprise v6.7.x
dse-6.8DataStax Enterprise v6.8.x (CURRENTLY UNDER DEVELOPMENT)
''') + booleanParam( + name: 'CYTHON', + defaultValue: false, + description: 'Flag to determine if Cython should be enabled for scheduled or adhoc builds') + booleanParam( + name: 'EXECUTE_LONG_TESTS', + defaultValue: false, + description: 'Flag to determine if long integration tests should be executed for scheduled or adhoc builds') + choice( + name: 'EVENT_LOOP_MANAGER', + choices: ['LIBEV', 'GEVENT', 'EVENTLET', 'ASYNCIO', 'ASYNCORE', 'TWISTED'], + description: '''

Event loop manager to utilize for scheduled or adhoc builds

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ChoiceDescription
LIBEVA full-featured and high-performance event loop that is loosely modeled after libevent, but without its limitations and bugs
GEVENTA co-routine -based Python networking library that uses greenlet to provide a high-level synchronous API on top of the libev or libuv event loop
EVENTLETA concurrent networking library for Python that allows you to change how you run your code, not how you write it
ASYNCIOA library to write concurrent code using the async/await syntax
ASYNCOREA module provides the basic infrastructure for writing asynchronous socket service clients and servers
TWISTEDAn event-driven networking engine written in Python and licensed under the open source MIT license
''') + choice( + name: 'PROFILE', + choices: ['NONE', 'DSE-SMOKE-TEST', 'EVENT-LOOP', 'UPGRADE'], + description: '''

Profile to utilize for scheduled or adhoc builds

+ + + + + + + + + + + + + + + + + + + + + + + +
ChoiceDescription
NONEExecute the standard tests for the driver
DSE-SMOKE-TESTExecute only the DataStax Enterprise smoke tests
EVENT-LOOPExecute only the event loop tests for the specified event loop manager (see: EVENT_LOOP_MANAGER)
UPGRADEExecute only the upgrade tests
''') + choice( + name: 'CI_SCHEDULE', + choices: ['DO-NOT-CHANGE-THIS-SELECTION', 'WEEKNIGHTS', 'WEEKENDS'], + description: 'CI testing schedule to execute periodically scheduled builds and tests of the driver (DO NOT CHANGE THIS SELECTION)') + string( + name: 'CI_SCHEDULE_PYTHON_VERSION', + defaultValue: 'DO-NOT-CHANGE-THIS-SELECTION', + description: 'CI testing python version to utilize for scheduled test runs of the driver (DO NOT CHANGE THIS SELECTION)') + string( + name: 'CI_SCHEDULE_SERVER_VERSION', + defaultValue: 'DO-NOT-CHANGE-THIS-SELECTION', + description: 'CI testing server version to utilize for scheduled test runs of the driver (DO NOT CHANGE THIS SELECTION)') + } + + triggers { + parameterizedCron((branchPatternCron.matcher(env.BRANCH_NAME).matches() && !riptanoPatternCron.matcher(GIT_URL).find()) ? """ + # Every weeknight (Monday - Friday) around 4:00 AM + # These schedules will run with and without Cython enabled for Python v2.7.18 and v3.5.9 + H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 + H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 + + # Every Saturday around 12:00, 4:00 and 8:00 PM + # These schedules are for weekly libev event manager runs with and without Cython for most of the Python versions (excludes v3.5.9.x) + H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 + H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.4.10;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 + H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 + H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 + H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 + # These schedules are for weekly gevent event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.10.x) + H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + # These schedules are for weekly eventlet event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.10.x) + H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + + # Every Sunday around 12:00 and 4:00 AM + # These schedules are for weekly asyncore event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.10.x) + H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + # These schedules are for weekly twisted event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.10.x) + H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 + """ : "") + } + + environment { + OS_VERSION = 'ubuntu/bionic64/python-driver' + CYTHON_ENABLED = "${params.CYTHON ? 'True' : 'False'}" + EVENT_LOOP_MANAGER = "${params.EVENT_LOOP_MANAGER.toLowerCase()}" + EXECUTE_LONG_TESTS = "${params.EXECUTE_LONG_TESTS ? 'True' : 'False'}" + CCM_ENVIRONMENT_SHELL = '/usr/local/bin/ccm_environment.sh' + CCM_MAX_HEAP_SIZE = '1536M' + } + + stages { + stage ('Per-Commit') { + options { + timeout(time: 2, unit: 'HOURS') + } + when { + beforeAgent true + branch pattern: '((dev|long)-)?python-.*', comparator: 'REGEXP' + allOf { + expression { params.ADHOC_BUILD_TYPE == 'BUILD' } + expression { params.CI_SCHEDULE == 'DO-NOT-CHANGE-THIS-SELECTION' } + not { buildingTag() } + } + } + + matrix { + axes { + axis { + name 'CASSANDRA_VERSION' + values '3.11', // Current Apache Cassandra + 'dse-6.8' // Current DataStax Enterprise + } + axis { + name 'PYTHON_VERSION' + values '2.7.18', '3.5.9' + } + axis { + name 'CYTHON_ENABLED' + values 'False' + } + } + + agent { + label "${OS_VERSION}" + } + + stages { + stage('Initialize-Environment') { + steps { + initializeEnvironment() + script { + if (env.BUILD_STATED_SLACK_NOTIFIED != 'true') { + notifySlack() + } + } + } + } + stage('Describe-Build') { + steps { + describePerCommitStage() + } + } + stage('Install-Driver-And-Compile-Extensions') { + steps { + installDriverAndCompileExtensions() + } + } + stage('Execute-Tests') { + steps { + + script { + if (env.BRANCH_NAME ==~ /long-python.*/) { + withEnv(["EXECUTE_LONG_TESTS=True"]) { + executeTests() + } + } + else { + executeTests() + } + } + } + post { + always { + junit testResults: '*_results.xml' + } + } + } + } + } + post { + always { + node('master') { + submitCIMetrics('commit') + } + } + aborted { + notifySlack('aborted') + } + success { + notifySlack('completed') + } + unstable { + notifySlack('unstable') + } + failure { + notifySlack('FAILED') + } + } + } + + stage ('Scheduled-Testing') { + when { + beforeAgent true + allOf { + expression { params.ADHOC_BUILD_TYPE == 'BUILD' } + expression { params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION' } + not { buildingTag() } + } + } + matrix { + axes { + axis { + name 'CASSANDRA_VERSION' + values '2.1', // Legacy Apache Cassandra + '2.2', // Legacy Apache Cassandra + '3.0', // Previous Apache Cassandra + '3.11', // Current Apache Cassandra + 'dse-5.1', // Legacy DataStax Enterprise + 'dse-6.0', // Previous DataStax Enterprise + 'dse-6.7' // Current DataStax Enterprise + } + axis { + name 'CYTHON_ENABLED' + values 'True', 'False' + } + } + when { + beforeAgent true + allOf { + expression { return params.CI_SCHEDULE_SERVER_VERSION.split(' ').any { it =~ /(ALL|${env.CASSANDRA_VERSION})/ } } + } + } + + environment { + PYTHON_VERSION = "${params.CI_SCHEDULE_PYTHON_VERSION}" + } + agent { + label "${OS_VERSION}" + } + + stages { + stage('Initialize-Environment') { + steps { + initializeEnvironment() + script { + if (env.BUILD_STATED_SLACK_NOTIFIED != 'true') { + notifySlack() + } + } + } + } + stage('Describe-Build') { + steps { + describeScheduledTestingStage() + } + } + stage('Install-Driver-And-Compile-Extensions') { + steps { + installDriverAndCompileExtensions() + } + } + stage('Execute-Tests') { + steps { + executeTests() + } + post { + always { + junit testResults: '*_results.xml' + } + } + } + } + } + post { + aborted { + notifySlack('aborted') + } + success { + notifySlack('completed') + } + unstable { + notifySlack('unstable') + } + failure { + notifySlack('FAILED') + } + } + } + + + stage('Adhoc-Testing') { + when { + beforeAgent true + allOf { + expression { params.ADHOC_BUILD_TYPE == 'BUILD-AND-EXECUTE-TESTS' } + not { buildingTag() } + } + } + + environment { + CYTHON_ENABLED = "${params.CYTHON ? 'True' : 'False'}" + PYTHON_VERSION = "${params.ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION}" + } + + matrix { + axes { + axis { + name 'CASSANDRA_VERSION' + values '2.1', // Legacy Apache Cassandra + '2.2', // Legacy Apache Cassandra + '3.0', // Previous Apache Cassandra + '3.11', // Current Apache Cassandra + '4.0', // Development Apache Cassandra + 'dse-5.0', // Long Term Support DataStax Enterprise + 'dse-5.1', // Legacy DataStax Enterprise + 'dse-6.0', // Previous DataStax Enterprise + 'dse-6.7', // Current DataStax Enterprise + 'dse-6.8' // Development DataStax Enterprise + } + } + when { + beforeAgent true + allOf { + expression { params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION ==~ /(ALL|${env.CASSANDRA_VERSION})/ } + } + } + + agent { + label "${OS_VERSION}" + } + + stages { + stage('Describe-Build') { + steps { + describeAdhocTestingStage() + } + } + stage('Initialize-Environment') { + steps { + initializeEnvironment() + } + } + stage('Install-Driver-And-Compile-Extensions') { + steps { + installDriverAndCompileExtensions() + } + } + stage('Execute-Tests') { + steps { + executeTests() + } + post { + always { + junit testResults: '*_results.xml' + } + } + } + } + } + } + } +} From 04d1f30c292c70c6052a299ba477add9c69c1d76 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 4 Aug 2020 08:59:39 -0400 Subject: [PATCH 1837/2005] Add Duration type attributes in docs --- cassandra/util.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cassandra/util.py b/cassandra/util.py index ead58c82f6..f896ff4f86 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -1541,8 +1541,11 @@ class Duration(object): """ months = 0 + "" days = 0 + "" nanoseconds = 0 + "" def __init__(self, months=0, days=0, nanoseconds=0): self.months = months From 1d6ac40696b7eb522e116144c830cf8384b8547c Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Tue, 4 Aug 2020 14:25:05 -0400 Subject: [PATCH 1838/2005] disabling schedules temporarily --- Jenkinsfile | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 61b2f3ad5c..890cc425a6 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -387,7 +387,7 @@ def describeBuild(buildContext) { } } -def scheduleTriggerJobName = "drivers/python/oss/master" +def scheduleTriggerJobName = "drivers/python/oss/master/disabled" pipeline { agent none @@ -641,11 +641,11 @@ pipeline { stages { stage ('Build and Test') { - agent { - // If I removed this agent block, GIT_URL and GIT_COMMIT aren't set. - // However, this trigger an additional checkout - label "master" - } + agent none + // // If I removed this agent block, GIT_URL and GIT_COMMIT aren't set. + // // However, this trigger an additional checkout + // label none + // } when { beforeAgent true allOf { From 01583bb1727016c13bb897f1a79211906b41e0d8 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 14 Sep 2020 14:27:00 -0400 Subject: [PATCH 1839/2005] Drop python 3.4 support --- CHANGELOG.rst | 8 ++++++++ Jenkinsfile | 14 +++++++------- README-dev.rst | 2 +- README.rst | 2 +- build.yaml.bak | 25 +++++-------------------- docs/index.rst | 2 +- docs/installation.rst | 2 +- setup.py | 1 - test-requirements.txt | 2 +- tox.ini | 2 +- 10 files changed, 26 insertions(+), 34 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 53a5e22436..1579148446 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,11 @@ +3.25.0 +====== +Not released + +Others +------ +* Drop Python 3.4 support (PYTHON-1220) + 3.24.0 ====== June 18, 2020 diff --git a/Jenkinsfile b/Jenkinsfile index 890cc425a6..abb6092758 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -33,7 +33,7 @@ slack = new Slack() matrices = [ "FULL": [ "SERVER": ['2.1', '2.2', '3.0', '3.11', '4.0', 'dse-5.0', 'dse-5.1', 'dse-6.0', 'dse-6.7', 'dse-6.8'], - "RUNTIME": ['2.7.18', '3.4.10', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], + "RUNTIME": ['2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], "CYTHON": ["True", "False"] ], "DEVELOP": [ @@ -43,12 +43,12 @@ matrices = [ ], "CASSANDRA": [ "SERVER": ['2.1', '2.2', '3.0', '3.11', '4.0'], - "RUNTIME": ['2.7.18', '3.4.10', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], + "RUNTIME": ['2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], "CYTHON": ["True", "False"] ], "DSE": [ "SERVER": ['dse-5.0', 'dse-5.1', 'dse-6.0', 'dse-6.7', 'dse-6.8'], - "RUNTIME": ['2.7.18', '3.4.10', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], + "RUNTIME": ['2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], "CYTHON": ["True", "False"] ] ] @@ -482,7 +482,7 @@ pipeline { ''') choice( name: 'PYTHON_VERSION', - choices: ['DEFAULT', '2.7.18', '3.4.10', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], + choices: ['DEFAULT', '2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], description: 'Python runtime version. Default to the build context.') choice( name: 'SERVER_VERSION', @@ -641,11 +641,11 @@ pipeline { stages { stage ('Build and Test') { - agent none + agent { // // If I removed this agent block, GIT_URL and GIT_COMMIT aren't set. // // However, this trigger an additional checkout - // label none - // } + label "master" + } when { beforeAgent true allOf { diff --git a/README-dev.rst b/README-dev.rst index 8294d4efb8..85a722c3b7 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -176,7 +176,7 @@ Use tee to capture logs and see them on your terminal:: Testing Multiple Python Versions -------------------------------- -If you want to test all of python 2.7, 3.4, 3.5, 3.6, 3.7, and pypy, use tox (this is what +If you want to test all of python 2.7, 3.5, 3.6, 3.7, and pypy, use tox (this is what TravisCI runs):: tox diff --git a/README.rst b/README.rst index 358f588d32..7c5bf1eee1 100644 --- a/README.rst +++ b/README.rst @@ -7,7 +7,7 @@ DataStax Driver for Apache Cassandra A modern, `feature-rich `_ and highly-tunable Python client library for Apache Cassandra (2.1+) and DataStax Enterprise (4.7+) using exclusively Cassandra's binary protocol and Cassandra Query Language v3. -The driver supports Python 2.7, 3.4, 3.5, 3.6, 3.7 and 3.8. +The driver supports Python 2.7, 3.5, 3.6, 3.7 and 3.8. **Note:** DataStax products do not support big-endian systems. diff --git a/build.yaml.bak b/build.yaml.bak index bd40809ef3..100c86558a 100644 --- a/build.yaml.bak +++ b/build.yaml.bak @@ -8,7 +8,7 @@ schedules: EVENT_LOOP_MANAGER='libev' matrix: exclude: - - python: [3.4, 3.6, 3.7, 3.8] + - python: [3.6, 3.7, 3.8] - cassandra: ['2.1', '3.0', '4.0', 'test-dse'] commit_long_test: @@ -20,7 +20,7 @@ schedules: EVENT_LOOP_MANAGER='libev' matrix: exclude: - - python: [3.4, 3.6, 3.7, 3.8] + - python: [3.6, 3.7, 3.8] - cassandra: ['2.1', '3.0', 'test-dse'] commit_branches: @@ -33,7 +33,7 @@ schedules: EXCLUDE_LONG=1 matrix: exclude: - - python: [3.4, 3.6, 3.7, 3.8] + - python: [3.6, 3.7, 3.8] - cassandra: ['2.1', '3.0', 'test-dse'] commit_branches_dev: @@ -46,7 +46,7 @@ schedules: EXCLUDE_LONG=1 matrix: exclude: - - python: [2.7, 3.4, 3.7, 3.6, 3.8] + - python: [2.7, 3.7, 3.6, 3.8] - cassandra: ['2.0', '2.1', '2.2', '3.0', '4.0', 'test-dse', 'dse-4.8', 'dse-5.0', 'dse-6.0', 'dse-6.8'] release_test: @@ -77,9 +77,6 @@ schedules: env_vars: | EVENT_LOOP_MANAGER='gevent' JUST_EVENT_LOOP=1 - matrix: - exclude: - - python: [3.4] weekly_eventlet: schedule: 0 18 * * 6 @@ -89,9 +86,6 @@ schedules: env_vars: | EVENT_LOOP_MANAGER='eventlet' JUST_EVENT_LOOP=1 - matrix: - exclude: - - python: [3.4] weekly_asyncio: schedule: 0 22 * * 6 @@ -113,9 +107,6 @@ schedules: env_vars: | EVENT_LOOP_MANAGER='asyncore' JUST_EVENT_LOOP=1 - matrix: - exclude: - - python: [3.4] weekly_twister: schedule: 0 14 * * 7 @@ -125,9 +116,6 @@ schedules: env_vars: | EVENT_LOOP_MANAGER='twisted' JUST_EVENT_LOOP=1 - matrix: - exclude: - - python: [3.4] upgrade_tests: schedule: adhoc @@ -138,12 +126,11 @@ schedules: JUST_UPGRADE=True matrix: exclude: - - python: [3.4, 3.6, 3.7, 3.8] + - python: [3.6, 3.7, 3.8] - cassandra: ['2.0', '2.1', '2.2', '3.0', '4.0', 'test-dse'] python: - 2.7 - - 3.4 - 3.5 - 3.6 - 3.7 @@ -186,8 +173,6 @@ build: pip install git+ssh://git@github.com/riptano/ccm-private.git@cassandra-7544-native-ports-with-dse-fix - # Remove this pyyaml installation when removing Python 3.4 support - pip install PyYAML==5.2 #pip install $HOME/ccm if [ -n "$CCM_IS_DSE" ]; then diff --git a/docs/index.rst b/docs/index.rst index 4cdd637e0a..978faa17c6 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -4,7 +4,7 @@ A Python client driver for `Apache Cassandra® `_. This driver works exclusively with the Cassandra Query Language v3 (CQL3) and Cassandra's native protocol. Cassandra 2.1+ is supported, including DSE 4.7+. -The driver supports Python 2.7, 3.4, 3.5, 3.6, 3.7 and 3.8. +The driver supports Python 2.7, 3.5, 3.6, 3.7 and 3.8. This driver is open source under the `Apache v2 License `_. diff --git a/docs/installation.rst b/docs/installation.rst index b381425302..3855383602 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -3,7 +3,7 @@ Installation Supported Platforms ------------------- -Python 2.7, 3.4, 3.5, 3.6, 3.7 and 3.8 are supported. Both CPython (the standard Python +Python 2.7, 3.5, 3.6, 3.7 and 3.8 are supported. Both CPython (the standard Python implementation) and `PyPy `_ are supported and tested. Linux, OSX, and Windows are supported. diff --git a/setup.py b/setup.py index 745d05dfb3..aaaa1b4d2d 100644 --- a/setup.py +++ b/setup.py @@ -443,7 +443,6 @@ def run_setup(extensions): 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', diff --git a/test-requirements.txt b/test-requirements.txt index f150a73247..9e62bfdee8 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -15,5 +15,5 @@ cython>=0.20,<0.30 packaging backports.ssl_match_hostname; python_version < '2.7.9' futurist; python_version >= '3.7' -asynctest; python_version > '3.4' +asynctest; python_version >= '3.5' ipaddress; python_version < '3.3.0' diff --git a/tox.ini b/tox.ini index fd50a6c1d6..efb610ac09 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py{27,34,35,36,37,38},pypy +envlist = py{27,35,36,37,38},pypy [base] deps = nose From d96a44b8efd48f7da6fb86941a50eac1b2ab26fc Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 11 Sep 2020 15:49:26 -0400 Subject: [PATCH 1840/2005] Ensure the driver can connect when invalid peer hosts are in system.peers --- CHANGELOG.rst | 4 ++ cassandra/cluster.py | 17 +++++-- tests/unit/test_control_connection.py | 73 ++++++++++++++++++++------- 3 files changed, 71 insertions(+), 23 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 1579148446..f88278a22e 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,10 @@ ====== Not released +Features +-------- +* Ensure the driver can connect when invalid peer hosts are in system.peers (PYTHON-1260) + Others ------ * Drop Python 3.4 support (PYTHON-1220) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index c4d6de124d..ec91ce257a 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3788,12 +3788,14 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, # any new nodes, so we need this additional check. (See PYTHON-90) should_rebuild_token_map = force_token_rebuild or self._cluster.metadata.partitioner is None for row in peers_result: + if not self._is_valid_peer(row): + log.warning( + "Found an invalid row for peer (%s). Ignoring host." % + _NodeInfo.get_broadcast_rpc_address(row)) + continue + endpoint = self._cluster.endpoint_factory.create(row) - tokens = row.get("tokens", None) - if 'tokens' in row and not tokens: # it was selected, but empty - log.warning("Excluding host (%s) with no tokens in system.peers table of %s." % (endpoint, connection.endpoint)) - continue if endpoint in found_hosts: log.warning("Found multiple hosts with the same endpoint (%s). Excluding peer %s", endpoint, row.get("peer")) continue @@ -3820,6 +3822,7 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, host.dse_workload = row.get("workload") host.dse_workloads = row.get("workloads") + tokens = row.get("tokens", None) if partitioner and tokens and self._token_meta_enabled: token_map[host] = tokens @@ -3834,6 +3837,12 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, log.debug("[control connection] Rebuilding token map due to topology changes") self._cluster.metadata.rebuild_token_map(partitioner, token_map) + @staticmethod + def _is_valid_peer(row): + return bool(_NodeInfo.get_broadcast_rpc_address(row) and row.get("host_id") and + row.get("data_center") and row.get("rack") and + ('tokens' not in row or row.get('tokens'))) + def _update_location_info(self, host, datacenter, rack): if host.datacenter == datacenter and host.rack == rack: return False diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index 3e75a0af27..efad1ca5c9 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -127,15 +127,15 @@ def __init__(self): ] self.peer_results = [ - ["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens"], - [["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"]], - ["192.168.1.2", "10.0.0.2", "a", "dc1", "rack1", ["2", "102", "202"]]] + ["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens", "host_id"], + [["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"], "uuid1"], + ["192.168.1.2", "10.0.0.2", "a", "dc1", "rack1", ["2", "102", "202"], "uuid2"]] ] self.peer_results_v2 = [ - ["native_address", "native_port", "peer", "peer_port", "schema_version", "data_center", "rack", "tokens"], - [["192.168.1.1", 9042, "10.0.0.1", 7042, "a", "dc1", "rack1", ["1", "101", "201"]], - ["192.168.1.2", 9042, "10.0.0.2", 7040, "a", "dc1", "rack1", ["2", "102", "202"]]] + ["native_address", "native_port", "peer", "peer_port", "schema_version", "data_center", "rack", "tokens", "host_id"], + [["192.168.1.1", 9042, "10.0.0.1", 7042, "a", "dc1", "rack1", ["1", "101", "201"], "uuid1"], + ["192.168.1.2", 9042, "10.0.0.2", 7040, "a", "dc1", "rack1", ["2", "102", "202"], "uuid2"]] ] self.wait_for_responses = Mock(return_value=_node_meta_results(self.local_results, self.peer_results)) @@ -155,18 +155,18 @@ def sleep(self, amount): class ControlConnectionTest(unittest.TestCase): _matching_schema_preloaded_results = _node_meta_results( - local_results=(["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens"], - [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"]]]), - peer_results=(["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens"], - [["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"]], - ["192.168.1.2", "10.0.0.2", "a", "dc1", "rack1", ["2", "102", "202"]]])) + local_results=(["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens", "host_id"], + [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"], "uuid1"]]), + peer_results=(["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens", "host_id"], + [["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"], "uuid2"], + ["192.168.1.2", "10.0.0.2", "a", "dc1", "rack1", ["2", "102", "202"], "uuid3"]])) _nonmatching_schema_preloaded_results = _node_meta_results( - local_results=(["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens"], - [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"]]]), - peer_results=(["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens"], - [["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"]], - ["192.168.1.2", "10.0.0.2", "b", "dc1", "rack1", ["2", "102", "202"]]])) + local_results=(["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens", "host_id"], + [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"], "uuid1"]]), + peer_results=(["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens", "host_id"], + [["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"], "uuid2"], + ["192.168.1.2", "10.0.0.2", "b", "dc1", "rack1", ["2", "102", "202"], "uuid3"]])) def setUp(self): self.cluster = MockCluster() @@ -275,6 +275,40 @@ def test_refresh_nodes_and_tokens(self): self.assertEqual(self.connection.wait_for_responses.call_count, 1) + def test_refresh_nodes_and_tokens_with_invalid_peers(self): + def refresh_and_validate_added_hosts(): + self.connection.wait_for_responses = Mock(return_value=_node_meta_results( + self.connection.local_results, self.connection.peer_results)) + self.control_connection.refresh_node_list_and_token_map() + self.assertEqual(1, len(self.cluster.added_hosts)) # only one valid peer found + + # peersV1 + del self.connection.peer_results[:] + self.connection.peer_results.extend([ + ["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens", "host_id"], + [["192.168.1.3", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"], 'uuid5'], + # all others are invalid + [None, None, "a", "dc1", "rack1", ["1", "101", "201"], 'uuid1'], + ["192.168.1.7", "10.0.0.1", "a", None, "rack1", ["1", "101", "201"], 'uuid2'], + ["192.168.1.6", "10.0.0.1", "a", "dc1", None, ["1", "101", "201"], 'uuid3'], + ["192.168.1.5", "10.0.0.1", "a", "dc1", "rack1", None, 'uuid4'], + ["192.168.1.4", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"], None]]]) + refresh_and_validate_added_hosts() + + # peersV2 + del self.cluster.added_hosts[:] + del self.connection.peer_results[:] + self.connection.peer_results.extend([ + ["native_address", "native_port", "peer", "peer_port", "schema_version", "data_center", "rack", "tokens", "host_id"], + [["192.168.1.4", 9042, "10.0.0.1", 7042, "a", "dc1", "rack1", ["1", "101", "201"], "uuid1"], + # all others are invalid + [None, 9042, None, 7040, "a", "dc1", "rack1", ["2", "102", "202"], "uuid2"], + ["192.168.1.5", 9042, "10.0.0.2", 7040, "a", None, "rack1", ["2", "102", "202"], "uuid2"], + ["192.168.1.5", 9042, "10.0.0.2", 7040, "a", "dc1", None, ["2", "102", "202"], "uuid2"], + ["192.168.1.5", 9042, "10.0.0.2", 7040, "a", "dc1", "rack1", None, "uuid2"], + ["192.168.1.5", 9042, "10.0.0.2", 7040, "a", "dc1", "rack1", ["2", "102", "202"], None]]]) + refresh_and_validate_added_hosts() + def test_refresh_nodes_and_tokens_uses_preloaded_results_if_given(self): """ refresh_nodes_and_tokens uses preloaded results if given for shared table queries @@ -311,7 +345,7 @@ def test_refresh_nodes_and_tokens_no_partitioner(self): def test_refresh_nodes_and_tokens_add_host(self): self.connection.peer_results[1].append( - ["192.168.1.3", "10.0.0.3", "a", "dc1", "rack1", ["3", "103", "203"]] + ["192.168.1.3", "10.0.0.3", "a", "dc1", "rack1", ["3", "103", "203"], "uuid3"] ) self.cluster.scheduler.schedule = lambda delay, f, *args, **kwargs: f(*args, **kwargs) self.control_connection.refresh_node_list_and_token_map() @@ -319,6 +353,7 @@ def test_refresh_nodes_and_tokens_add_host(self): self.assertEqual(self.cluster.added_hosts[0].address, "192.168.1.3") self.assertEqual(self.cluster.added_hosts[0].datacenter, "dc1") self.assertEqual(self.cluster.added_hosts[0].rack, "rack1") + self.assertEqual(self.cluster.added_hosts[0].host_id, "uuid3") def test_refresh_nodes_and_tokens_remove_host(self): del self.connection.peer_results[1][1] @@ -482,7 +517,7 @@ def test_refresh_nodes_and_tokens_add_host_detects_port(self): del self.connection.peer_results[:] self.connection.peer_results.extend(self.connection.peer_results_v2) self.connection.peer_results[1].append( - ["192.168.1.3", 555, "10.0.0.3", 666, "a", "dc1", "rack1", ["3", "103", "203"]] + ["192.168.1.3", 555, "10.0.0.3", 666, "a", "dc1", "rack1", ["3", "103", "203"], "uuid3"] ) self.connection.wait_for_responses = Mock(return_value=_node_meta_results( self.connection.local_results, self.connection.peer_results)) @@ -502,7 +537,7 @@ def test_refresh_nodes_and_tokens_add_host_detects_invalid_port(self): del self.connection.peer_results[:] self.connection.peer_results.extend(self.connection.peer_results_v2) self.connection.peer_results[1].append( - ["192.168.1.3", -1, "10.0.0.3", 0, "a", "dc1", "rack1", ["3", "103", "203"]] + ["192.168.1.3", -1, "10.0.0.3", 0, "a", "dc1", "rack1", ["3", "103", "203"], "uuid3"] ) self.connection.wait_for_responses = Mock(return_value=_node_meta_results( self.connection.local_results, self.connection.peer_results)) From dc3f2f8074ee08bd44bafd5604c7fab06566e9c6 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 14 Oct 2020 14:26:15 -0400 Subject: [PATCH 1841/2005] Fix asyncore race condition cause logging exception on shutdown --- CHANGELOG.rst | 4 ++++ cassandra/io/asyncorereactor.py | 23 ++++++++--------------- 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f88278a22e..a3217d6cda 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -6,6 +6,10 @@ Features -------- * Ensure the driver can connect when invalid peer hosts are in system.peers (PYTHON-1260) +Bug Fixes +--------- +* Asyncore race condition cause logging exception on shutdown (PYTHON-1266) + Others ------ * Drop Python 3.4 support (PYTHON-1220) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index e07aab4697..681552e589 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -36,20 +36,7 @@ from cassandra.connection import Connection, ConnectionShutdown, NONBLOCKING, Timer, TimerManager -# TODO: Remove when Python 2 is removed -class LogWrapper(object): - """ PYTHON-1228. If our logger has disappeared, there's nothing we can do, so just execute nothing """ - def __init__(self): - self._log = logging.getLogger(__name__) - - def __getattr__(self, name): - try: - return getattr(self._log, name) - except: - return lambda *args, **kwargs: None - - -log = LogWrapper() +log = logging.getLogger(__name__) _dispatcher_map = {} @@ -262,7 +249,13 @@ def _run_loop(self): self._loop_dispatcher.loop(self.timer_resolution) self._timers.service_timeouts() except Exception: - log.debug("Asyncore event loop stopped unexepectedly", exc_info=True) + try: + log.debug("Asyncore event loop stopped unexpectedly", exc_info=True) + except Exception: + # TODO: Remove when Python 2 support is removed + # PYTHON-1266. If our logger has disappeared, there's nothing we + # can do, so just log nothing. + pass break self._started = False From 501c134b4b94df47ff8588c79d98a4d3b3c25244 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 6 Aug 2020 10:38:12 -0400 Subject: [PATCH 1842/2005] initial support of protocol v5 checksumming --- cassandra/__init__.py | 8 ++ cassandra/connection.py | 91 +++++++++++++--- cassandra/marshal.py | 1 + cassandra/protocol.py | 35 ++++++- cassandra/segment.py | 222 ++++++++++++++++++++++++++++++++++++++++ 5 files changed, 342 insertions(+), 15 deletions(-) create mode 100644 cassandra/segment.py diff --git a/cassandra/__init__.py b/cassandra/__init__.py index f2bf696035..47fd2f6bec 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -235,6 +235,14 @@ def has_continuous_paging_support(cls, version): def has_continuous_paging_next_pages(cls, version): return version >= cls.DSE_V2 + @classmethod + def has_checksumming_support(cls, version): + return cls.V5 <= version < cls.DSE_V1 + + @classmethod + def has_cql_frame_compression_support(cls, version): + return version != cls.V5 + class WriteType(object): """ diff --git a/cassandra/connection.py b/cassandra/connection.py index 6ce3e44a30..13ffa5ecde 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -42,11 +42,15 @@ AuthResponseMessage, AuthChallengeMessage, AuthSuccessMessage, ProtocolException, RegisterMessage, ReviseRequestMessage) +from cassandra.segment import SegmentCodec, CrcException from cassandra.util import OrderedDict log = logging.getLogger(__name__) +segment_codec_no_compression = SegmentCodec() +segment_codec_lz4 = None + # We use an ordered dictionary and specifically add lz4 before # snappy so that lz4 will be preferred. Changing the order of this # will change the compression preferences for the driver. @@ -88,6 +92,7 @@ def lz4_decompress(byts): return lz4_block.decompress(byts[3::-1] + byts[4:]) locally_supported_compressions['lz4'] = (lz4_compress, lz4_decompress) + segment_codec_lz4 = SegmentCodec(lz4_compress, lz4_decompress) try: import snappy @@ -426,6 +431,10 @@ class ProtocolError(Exception): pass +class CrcMismatchException(ConnectionException): + pass + + class ContinuousPagingState(object): """ A class for specifying continuous paging state, only supported starting with DSE_V2. @@ -657,6 +666,7 @@ class Connection(object): allow_beta_protocol_version = False _iobuf = None + _frame_iobuf = None _current_frame = None _socket = None @@ -667,6 +677,8 @@ class Connection(object): _check_hostname = False _product_type = None + _is_checksumming_enabled = False + def __init__(self, host='127.0.0.1', port=9042, authenticator=None, ssl_options=None, sockopts=None, compression=True, cql_version=None, protocol_version=ProtocolVersion.MAX_SUPPORTED, is_control_connection=False, @@ -691,6 +703,7 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self._push_watchers = defaultdict(set) self._requests = {} self._iobuf = io.BytesIO() + self._frame_iobuf = io.BytesIO() self._continuous_paging_sessions = {} self._socket_writable = True @@ -933,7 +946,14 @@ def send_msg(self, msg, request_id, cb, encoder=ProtocolHandler.encode_message, # queue the decoder function with the request # this allows us to inject custom functions per request to encode, decode messages self._requests[request_id] = (cb, decoder, result_metadata) - msg = encoder(msg, request_id, self.protocol_version, compressor=self.compressor, allow_beta_protocol_version=self.allow_beta_protocol_version) + msg = encoder(msg, request_id, self.protocol_version, compressor=self.compressor, + allow_beta_protocol_version=self.allow_beta_protocol_version) + + if self._is_checksumming_enabled: + buffer = io.BytesIO() + self._segment_codec.encode(buffer, msg) + msg = buffer.getvalue() + self.push(msg) return len(msg) @@ -1012,7 +1032,7 @@ def control_conn_disposed(self): @defunct_on_error def _read_frame_header(self): - buf = self._iobuf.getvalue() + buf = self._frame_iobuf.getvalue() pos = len(buf) if pos: version = int_from_buf_item(buf[0]) & PROTOCOL_VERSION_MASK @@ -1029,26 +1049,57 @@ def _read_frame_header(self): return pos def _reset_frame(self): - self._iobuf = io.BytesIO(self._iobuf.read()) - self._iobuf.seek(0, 2) # io.SEEK_END == 2 (constant not present in 2.6) + self._frame_iobuf = io.BytesIO(self._frame_iobuf.read()) + self._frame_iobuf.seek(0, 2) # 2 == SEEK_END self._current_frame = None + def _reset_io_buffer(self): + self._iobuf = io.BytesIO(self._iobuf.read()) + self._iobuf.seek(0, 2) # 2 == SEEK_END + + @defunct_on_error + def _process_segment_buffer(self): + if self._iobuf.tell(): + try: + segment_header = self._segment_codec.decode_header(self._iobuf) + if segment_header: + segment = self._segment_codec.decode(self._iobuf, segment_header) + if segment: + self._frame_iobuf.write(segment.payload) + except CrcException as exc: + # re-raise an exception that inherits from ConnectionException + raise CrcMismatchException(str(exc), self.endpoint) + def process_io_buffer(self): while True: + if self._is_checksumming_enabled: + self._process_segment_buffer() + else: + # TODO, try to avoid having 2 io buffers when protocol != V5 + self._frame_iobuf.write(self._iobuf.getvalue()) + + self._reset_io_buffer() + if not self._current_frame: pos = self._read_frame_header() else: - pos = self._iobuf.tell() + pos = self._frame_iobuf.tell() if not self._current_frame or pos < self._current_frame.end_pos: + if self._is_checksumming_enabled and self._iobuf.tell(): + # TODO keep the current segment frame? + # We have a multi-segments message and we need to read more data to complete + # the current cql frame + continue + # we don't have a complete header yet or we # already saw a header, but we don't have a # complete message yet return else: frame = self._current_frame - self._iobuf.seek(frame.body_offset) - msg = self._iobuf.read(frame.end_pos - frame.body_offset) + self._frame_iobuf.seek(frame.body_offset) + msg = self._frame_iobuf.read(frame.end_pos - frame.body_offset) self.process_msg(frame, msg) self._reset_frame() @@ -1185,11 +1236,17 @@ def _handle_options_response(self, options_response): compression_type = k break - # set the decompressor here, but set the compressor only after - # a successful Ready message - self._compression_type = compression_type - self._compressor, self.decompressor = \ - locally_supported_compressions[compression_type] + if (compression_type == 'snappy' and + ProtocolVersion.has_checksumming_support(self.protocol_version)): + log.debug("Snappy compression is not supported with protocol version %s and checksumming.", + self.protocol_version) + compression_type = None + else: + # set the decompressor here, but set the compressor only after + # a successful Ready message + self._compression_type = compression_type + self._compressor, self.decompressor = \ + locally_supported_compressions[compression_type] self._send_startup_message(compression_type, no_compact=self.no_compact) @@ -1210,6 +1267,7 @@ def _send_startup_message(self, compression=None, no_compact=False): def _handle_startup_response(self, startup_response, did_authenticate=False): if self.is_defunct: return + if isinstance(startup_response, ReadyMessage): if self.authenticator: log.warning("An authentication challenge was not sent, " @@ -1220,6 +1278,15 @@ def _handle_startup_response(self, startup_response, did_authenticate=False): log.debug("Got ReadyMessage on new connection (%s) from %s", id(self), self.endpoint) if self._compressor: self.compressor = self._compressor + + if ProtocolVersion.has_checksumming_support(self.protocol_version): + self._is_checksumming_enabled = True + if self.compressor: + self._segment_codec = segment_codec_lz4 + else: + self._segment_codec = segment_codec_no_compression + log.debug("Enabling protocol checksumming on connection (%s).", id(self)) + self.connected_event.set() elif isinstance(startup_response, AuthenticateMessage): log.debug("Got AuthenticateMessage on new connection (%s) from %s: %s", diff --git a/cassandra/marshal.py b/cassandra/marshal.py index 7533ebd307..43cb627b08 100644 --- a/cassandra/marshal.py +++ b/cassandra/marshal.py @@ -28,6 +28,7 @@ def _make_packer(format_string): int8_pack, int8_unpack = _make_packer('>b') uint64_pack, uint64_unpack = _make_packer('>Q') uint32_pack, uint32_unpack = _make_packer('>I') +uint32_le_pack, uint32_le_unpack = _make_packer('H') uint8_pack, uint8_unpack = _make_packer('>B') float_pack, float_unpack = _make_packer('>f') diff --git a/cassandra/protocol.py b/cassandra/protocol.py index eac9ebb8b5..f3b5189f13 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -31,7 +31,7 @@ UserAggregateDescriptor, SchemaTargetType) from cassandra.marshal import (int32_pack, int32_unpack, uint16_pack, uint16_unpack, uint8_pack, int8_unpack, uint64_pack, header_pack, - v3_header_pack, uint32_pack) + v3_header_pack, uint32_pack, uint32_le_unpack, uint32_le_pack) from cassandra.cqltypes import (AsciiType, BytesType, BooleanType, CounterColumnType, DateType, DecimalType, DoubleType, FloatType, Int32Type, @@ -1115,7 +1115,8 @@ def encode_message(cls, msg, stream_id, protocol_version, compressor, allow_beta msg.send_body(body, protocol_version) body = body.getvalue() - if compressor and len(body) > 0: + if (ProtocolVersion.has_cql_frame_compression_support(protocol_version) + and compressor and len(body) > 0): body = compressor(body) flags |= COMPRESSED_FLAG @@ -1155,7 +1156,8 @@ def decode_message(cls, protocol_version, user_type_map, stream_id, flags, opcod :param decompressor: optional decompression function to inflate the body :return: a message decoded from the body and frame attributes """ - if flags & COMPRESSED_FLAG: + if (ProtocolVersion.has_cql_frame_compression_support(protocol_version) and + flags & COMPRESSED_FLAG): if decompressor is None: raise RuntimeError("No de-compressor available for compressed frame!") body = decompressor(body) @@ -1271,6 +1273,33 @@ def read_int(f): return int32_unpack(f.read(4)) +def read_uint_le(f, size=4): + """ + Read a sequence of little endian bytes and return an unsigned integer. + """ + + if size == 4: + value = uint32_le_unpack(f.read(4)) + else: + value = 0 + for i in range(size): + value |= (read_byte(f) & 0xFF) << 8 * i + + return value + + +def write_uint_le(f, i, size=4): + """ + Write an unsigned integer on a sequence of little endian bytes. + """ + if size == 4: + f.write(uint32_le_pack(i)) + else: + for j in range(size): + shift = j * 8 + write_byte(f, i >> shift & 0xFF) + + def write_int(f, i): f.write(int32_pack(i)) diff --git a/cassandra/segment.py b/cassandra/segment.py new file mode 100644 index 0000000000..b62099473b --- /dev/null +++ b/cassandra/segment.py @@ -0,0 +1,222 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import zlib +import six + +from cassandra import DriverException +from cassandra.marshal import int32_pack +from cassandra.protocol import write_uint_le, read_uint_le + +CRC24_INIT = 0x875060 +CRC24_POLY = 0x1974F0B +CRC24_LENGTH = 3 +CRC32_LENGTH = 4 +CRC32_INITIAL = zlib.crc32(b"\xfa\x2d\x55\xca") + + +class CrcException(Exception): + """ + CRC mismatch error. This can happen when decoding a segment. + + TODO: here to avoid import cycles. In the next major, the exceptions + should be declared in a separated exceptions.py file. + """ + pass + + +def compute_crc24(data, length): + crc = CRC24_INIT + + for _ in range(length): + crc ^= (data & 0xff) << 16 + data >>= 8 + + for i in range(8): + crc <<= 1 + if crc & 0x1000000 != 0: + crc ^= CRC24_POLY + + return crc + + +def compute_crc32(data, value): + crc32 = zlib.crc32(data, value) + if six.PY2: + crc32 &= 0xffffffff + + return crc32 + + +class SegmentHeader(object): + + payload_length = None + uncompressed_payload_length = None + is_self_contained = None + + def __init__(self, payload_length, uncompressed_payload_length, is_self_contained): + self.payload_length = payload_length + self.uncompressed_payload_length = uncompressed_payload_length + self.is_self_contained = is_self_contained + + def length(self): + return SegmentCodec.UNCOMPRESSED_HEADER_LENGTH if self.uncompressed_payload_length < 0 \ + else SegmentCodec.COMPRESSED_HEADER_LENGTH + + +class Segment(object): + + MAX_PAYLOAD_LENGTH = 128 * 1024 - 1 + + payload = None + is_self_contained = None + + def __init__(self, payload, is_self_contained): + self.payload = payload + self.is_self_contained = is_self_contained + + +class SegmentCodec(object): + + COMPRESSED_HEADER_LENGTH = 5 + UNCOMPRESSED_HEADER_LENGTH = 3 + FLAG_OFFSET = 17 + + compressor = None + decompressor = None + + def __init__(self, compressor=None, decompressor=None): + self.compressor = compressor + self.decompressor = decompressor + + @property + def header_length(self): + return self.COMPRESSED_HEADER_LENGTH if self.compression \ + else self.UNCOMPRESSED_HEADER_LENGTH + + @property + def header_length_with_crc(self): + return self.header_length + CRC24_LENGTH + + @property + def compression(self): + return self.compressor and self.decompressor + + def compress(self, data): + # the uncompressed length is already encoded in the header, so + # we remove it here + return self.compressor(data)[4:] + + def decompress(self, encoded_data, uncompressed_length): + return self.decompressor(int32_pack(uncompressed_length) + encoded_data) + + def encode_header(self, buffer, payload_length, uncompressed_length, is_self_contained): + if payload_length > Segment.MAX_PAYLOAD_LENGTH: + raise DriverException('Payload length exceed Segment.MAX_PAYLOAD_LENGTH') + + header_data = payload_length + + flag_offset = self.FLAG_OFFSET + if self.compression: + header_data |= uncompressed_length << flag_offset + flag_offset += 17 + + if is_self_contained: + header_data |= 1 << flag_offset + + write_uint_le(buffer, header_data, self.header_length) + header_crc = compute_crc24(header_data, self.header_length) + write_uint_le(buffer, header_crc, CRC24_LENGTH) + + def _encode_segment(self, buffer, payload, is_self_contained): + uncompressed_payload = payload + uncompressed_payload_length = len(payload) + + if self.compression: + compressed_payload = self.compress(uncompressed_payload) + if len(compressed_payload) >= uncompressed_payload_length: + encoded_payload = uncompressed_payload + uncompressed_payload_length = 0 + else: + encoded_payload = compressed_payload + else: + encoded_payload = uncompressed_payload + + payload_length = len(encoded_payload) + self.encode_header(buffer, payload_length, uncompressed_payload_length, is_self_contained) + + payload_crc = compute_crc32(encoded_payload, CRC32_INITIAL) + buffer.write(encoded_payload) + write_uint_le(buffer, payload_crc) + + def encode(self, buffer, msg): + msg_length = len(msg) + + if msg_length > Segment.MAX_PAYLOAD_LENGTH: + payloads = [] + for i in range(0, msg_length, Segment.MAX_PAYLOAD_LENGTH): + payloads.append(msg[i:i + Segment.MAX_PAYLOAD_LENGTH]) + else: + payloads = [msg] + + is_self_contained = len(payloads) == 1 + for payload in payloads: + self._encode_segment(buffer, payload, is_self_contained) + + def decode_header(self, buffer): + if buffer.tell() < self.header_length_with_crc: + return + + buffer.seek(0) + header_data = read_uint_le(buffer, self.header_length) + + expected_header_crc = read_uint_le(buffer, CRC24_LENGTH) + actual_header_crc = compute_crc24(header_data, self.header_length) + if actual_header_crc != expected_header_crc: + raise CrcException('CRC mismatch on header {:x}. Received {:x}", computed {:x}.'.format( + header_data, expected_header_crc, actual_header_crc)) + + payload_length = header_data & Segment.MAX_PAYLOAD_LENGTH + header_data >>= 17 + + if self.compression: + uncompressed_payload_length = header_data & Segment.MAX_PAYLOAD_LENGTH + header_data >>= 17 + else: + uncompressed_payload_length = -1 + + is_self_contained = (header_data & 1) == 1 + + return SegmentHeader(payload_length, uncompressed_payload_length, is_self_contained) + + def decode(self, buffer, header): + marker = buffer.tell() + buffer_size = buffer.seek(0, 2) # 2 == SEEK_END + buffer.seek(marker) + if buffer_size < self.header_length_with_crc + header.payload_length + CRC32_LENGTH: + return + + encoded_payload = buffer.read(header.payload_length) + expected_payload_crc = read_uint_le(buffer) + + actual_payload_crc = compute_crc32(encoded_payload, CRC32_INITIAL) + if actual_payload_crc != expected_payload_crc: + raise CrcException('CRC mismatch on payload. Received {:x}", computed {:x}.'.format( + expected_payload_crc, actual_payload_crc)) + + payload = encoded_payload + if self.compression and header.uncompressed_payload_length > 0: + payload = self.decompress(encoded_payload, header.uncompressed_payload_length) + + return Segment(payload, header.is_self_contained) From 66de36063c49f748b6b13b24d8c441dededd32da Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 31 Aug 2020 14:29:24 -0400 Subject: [PATCH 1843/2005] Add test_segment.py + some cleanup --- cassandra/__init__.py | 4 - cassandra/connection.py | 32 +++--- cassandra/protocol.py | 5 +- cassandra/segment.py | 40 +++---- tests/unit/test_segment.py | 206 +++++++++++++++++++++++++++++++++++++ 5 files changed, 248 insertions(+), 39 deletions(-) create mode 100644 tests/unit/test_segment.py diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 47fd2f6bec..100df2df17 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -239,10 +239,6 @@ def has_continuous_paging_next_pages(cls, version): def has_checksumming_support(cls, version): return cls.V5 <= version < cls.DSE_V1 - @classmethod - def has_cql_frame_compression_support(cls, version): - return version != cls.V5 - class WriteType(object): """ diff --git a/cassandra/connection.py b/cassandra/connection.py index 13ffa5ecde..7054a389d0 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -1059,13 +1059,17 @@ def _reset_io_buffer(self): @defunct_on_error def _process_segment_buffer(self): - if self._iobuf.tell(): + readable_bytes = self._iobuf.tell() + if readable_bytes >= self._segment_codec.header_length_with_crc: try: + self._iobuf.seek(0) segment_header = self._segment_codec.decode_header(self._iobuf) - if segment_header: + if readable_bytes >= segment_header.segment_length: segment = self._segment_codec.decode(self._iobuf, segment_header) - if segment: - self._frame_iobuf.write(segment.payload) + self._frame_iobuf.write(segment.payload) + else: + # not enough data to read the segment + self._iobuf.seek(0, 2) except CrcException as exc: # re-raise an exception that inherits from ConnectionException raise CrcMismatchException(str(exc), self.endpoint) @@ -1075,7 +1079,9 @@ def process_io_buffer(self): if self._is_checksumming_enabled: self._process_segment_buffer() else: - # TODO, try to avoid having 2 io buffers when protocol != V5 + # We should probably refactor the IO buffering stuff out of the Connection + # class to handle this in a better way. That would make the segment and frame + # decoding code clearer. self._frame_iobuf.write(self._iobuf.getvalue()) self._reset_io_buffer() @@ -1087,9 +1093,8 @@ def process_io_buffer(self): if not self._current_frame or pos < self._current_frame.end_pos: if self._is_checksumming_enabled and self._iobuf.tell(): - # TODO keep the current segment frame? - # We have a multi-segments message and we need to read more data to complete - # the current cql frame + # We have a multi-segments message and we need to read more + # data to complete the current cql frame continue # we don't have a complete header yet or we @@ -1236,10 +1241,12 @@ def _handle_options_response(self, options_response): compression_type = k break + # If snappy compression is selected with v5+checksumming, the connection + # will fail with OTO. Only lz4 is supported if (compression_type == 'snappy' and ProtocolVersion.has_checksumming_support(self.protocol_version)): - log.debug("Snappy compression is not supported with protocol version %s and checksumming.", - self.protocol_version) + log.debug("Snappy compression is not supported with protocol version %s and " + "checksumming. Consider installing lz4. Disabling compression.", self.protocol_version) compression_type = None else: # set the decompressor here, but set the compressor only after @@ -1281,10 +1288,7 @@ def _handle_startup_response(self, startup_response, did_authenticate=False): if ProtocolVersion.has_checksumming_support(self.protocol_version): self._is_checksumming_enabled = True - if self.compressor: - self._segment_codec = segment_codec_lz4 - else: - self._segment_codec = segment_codec_no_compression + self._segment_codec = segment_codec_lz4 if self.compressor else segment_codec_no_compression log.debug("Enabling protocol checksumming on connection (%s).", id(self)) self.connected_event.set() diff --git a/cassandra/protocol.py b/cassandra/protocol.py index f3b5189f13..c454824637 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -1115,7 +1115,8 @@ def encode_message(cls, msg, stream_id, protocol_version, compressor, allow_beta msg.send_body(body, protocol_version) body = body.getvalue() - if (ProtocolVersion.has_cql_frame_compression_support(protocol_version) + # With checksumming, the compression is done at the segment frame encoding + if (not ProtocolVersion.has_checksumming_support(protocol_version) and compressor and len(body) > 0): body = compressor(body) flags |= COMPRESSED_FLAG @@ -1156,7 +1157,7 @@ def decode_message(cls, protocol_version, user_type_map, stream_id, flags, opcod :param decompressor: optional decompression function to inflate the body :return: a message decoded from the body and frame attributes """ - if (ProtocolVersion.has_cql_frame_compression_support(protocol_version) and + if (not ProtocolVersion.has_checksumming_support(protocol_version) and flags & COMPRESSED_FLAG): if decompressor is None: raise RuntimeError("No de-compressor available for compressed frame!") diff --git a/cassandra/segment.py b/cassandra/segment.py index b62099473b..e3881c4402 100644 --- a/cassandra/segment.py +++ b/cassandra/segment.py @@ -28,10 +28,11 @@ class CrcException(Exception): """ - CRC mismatch error. This can happen when decoding a segment. + CRC mismatch error. - TODO: here to avoid import cycles. In the next major, the exceptions - should be declared in a separated exceptions.py file. + TODO: here to avoid import cycles with cassandra.connection. In the next + major, the exceptions should be declared in a separated exceptions.py + file. """ pass @@ -70,9 +71,14 @@ def __init__(self, payload_length, uncompressed_payload_length, is_self_containe self.uncompressed_payload_length = uncompressed_payload_length self.is_self_contained = is_self_contained - def length(self): - return SegmentCodec.UNCOMPRESSED_HEADER_LENGTH if self.uncompressed_payload_length < 0 \ + @property + def segment_length(self): + """ + Return the total length of the segment, including the CRC. + """ + hl = SegmentCodec.UNCOMPRESSED_HEADER_LENGTH if self.uncompressed_payload_length < 1 \ else SegmentCodec.COMPRESSED_HEADER_LENGTH + return hl + CRC24_LENGTH + self.payload_length + CRC32_LENGTH class Segment(object): @@ -107,7 +113,8 @@ def header_length(self): @property def header_length_with_crc(self): - return self.header_length + CRC24_LENGTH + return (self.COMPRESSED_HEADER_LENGTH if self.compression + else self.UNCOMPRESSED_HEADER_LENGTH) + CRC24_LENGTH @property def compression(self): @@ -135,11 +142,14 @@ def encode_header(self, buffer, payload_length, uncompressed_length, is_self_con if is_self_contained: header_data |= 1 << flag_offset - write_uint_le(buffer, header_data, self.header_length) + write_uint_le(buffer, header_data, size=self.header_length) header_crc = compute_crc24(header_data, self.header_length) - write_uint_le(buffer, header_crc, CRC24_LENGTH) + write_uint_le(buffer, header_crc, size=CRC24_LENGTH) def _encode_segment(self, buffer, payload, is_self_contained): + """ + Encode a message to a single segment. + """ uncompressed_payload = payload uncompressed_payload_length = len(payload) @@ -155,12 +165,14 @@ def _encode_segment(self, buffer, payload, is_self_contained): payload_length = len(encoded_payload) self.encode_header(buffer, payload_length, uncompressed_payload_length, is_self_contained) - payload_crc = compute_crc32(encoded_payload, CRC32_INITIAL) buffer.write(encoded_payload) write_uint_le(buffer, payload_crc) def encode(self, buffer, msg): + """ + Encode a message to one of more segments. + """ msg_length = len(msg) if msg_length > Segment.MAX_PAYLOAD_LENGTH: @@ -175,10 +187,6 @@ def encode(self, buffer, msg): self._encode_segment(buffer, payload, is_self_contained) def decode_header(self, buffer): - if buffer.tell() < self.header_length_with_crc: - return - - buffer.seek(0) header_data = read_uint_le(buffer, self.header_length) expected_header_crc = read_uint_le(buffer, CRC24_LENGTH) @@ -201,12 +209,6 @@ def decode_header(self, buffer): return SegmentHeader(payload_length, uncompressed_payload_length, is_self_contained) def decode(self, buffer, header): - marker = buffer.tell() - buffer_size = buffer.seek(0, 2) # 2 == SEEK_END - buffer.seek(marker) - if buffer_size < self.header_length_with_crc + header.payload_length + CRC32_LENGTH: - return - encoded_payload = buffer.read(header.payload_length) expected_payload_crc = read_uint_le(buffer) diff --git a/tests/unit/test_segment.py b/tests/unit/test_segment.py new file mode 100644 index 0000000000..bf661bb961 --- /dev/null +++ b/tests/unit/test_segment.py @@ -0,0 +1,206 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +import io + +from cassandra import DriverException +from cassandra.segment import Segment, CrcException +from cassandra.connection import segment_codec_no_compression, segment_codec_lz4 + + +class SegmentCodecTest(unittest.TestCase): + + small_msg = b'b' * 50 + max_msg = b'b' * Segment.MAX_PAYLOAD_LENGTH + large_msg = b'b' * (Segment.MAX_PAYLOAD_LENGTH + 1) + + @staticmethod + def _header_to_bits(data): + # unpack a header to bits + # data should be the little endian bytes sequence + if len(data) > 6: # compressed + data = data[:5] + bits = ''.join(['{:08b}'.format(b) for b in reversed(data)]) + # return the compressed payload length, the uncompressed payload length, + # the self contained flag and the padding as bits + return bits[23:40] + bits[6:23] + bits[5:6] + bits[:5] + else: # uncompressed + data = data[:3] + bits = ''.join(['{:08b}'.format(b) for b in reversed(data)]) + # return the payload length, the self contained flag and + # the padding as bits + return bits[7:24] + bits[6:7] + bits[:6] + + def test_encode_uncompressed_header(self): + buffer = io.BytesIO() + segment_codec_no_compression.encode_header(buffer, len(self.small_msg), -1, True) + self.assertEqual(buffer.tell(), 6) + self.assertEqual( + self._header_to_bits(buffer.getvalue()), + "00000000000110010" + "1" + "000000") + + def test_encode_compressed_header(self): + buffer = io.BytesIO() + compressed_length = len(segment_codec_lz4.compress(self.small_msg)) + segment_codec_lz4.encode_header(buffer, compressed_length, len(self.small_msg), True) + + self.assertEqual(buffer.tell(), 8) + self.assertEqual( + self._header_to_bits(buffer.getvalue()), + "{:017b}".format(compressed_length) + "00000000000110010" + "1" + "00000") + + def test_encode_uncompressed_header_with_max_payload(self): + buffer = io.BytesIO() + segment_codec_no_compression.encode_header(buffer, len(self.max_msg), -1, True) + self.assertEqual(buffer.tell(), 6) + self.assertEqual( + self._header_to_bits(buffer.getvalue()), + "11111111111111111" + "1" + "000000") + + def test_encode_header_fails_if_payload_too_big(self): + buffer = io.BytesIO() + for codec in [segment_codec_no_compression, segment_codec_lz4]: + with self.assertRaises(DriverException): + codec.encode_header(buffer, len(self.large_msg), -1, False) + + def test_encode_uncompressed_header_not_self_contained_msg(self): + buffer = io.BytesIO() + # simulate the first chunk with the max size + segment_codec_no_compression.encode_header(buffer, len(self.max_msg), -1, False) + self.assertEqual(buffer.tell(), 6) + self.assertEqual( + self._header_to_bits(buffer.getvalue()), + ("11111111111111111" + "0" # not self contained + "000000")) + + def test_encode_compressed_header_with_max_payload(self): + buffer = io.BytesIO() + compressed_length = len(segment_codec_lz4.compress(self.max_msg)) + segment_codec_lz4.encode_header(buffer, compressed_length, len(self.max_msg), True) + self.assertEqual(buffer.tell(), 8) + self.assertEqual( + self._header_to_bits(buffer.getvalue()), + "{:017b}".format(compressed_length) + "11111111111111111" + "1" + "00000") + + def test_encode_compressed_header_not_self_contained_msg(self): + buffer = io.BytesIO() + # simulate the first chunk with the max size + compressed_length = len(segment_codec_lz4.compress(self.max_msg)) + segment_codec_lz4.encode_header(buffer, compressed_length, len(self.max_msg), False) + self.assertEqual(buffer.tell(), 8) + self.assertEqual( + self._header_to_bits(buffer.getvalue()), + ("{:017b}".format(compressed_length) + + "11111111111111111" + "0" # not self contained + "00000")) + + def test_decode_uncompressed_header(self): + buffer = io.BytesIO() + segment_codec_no_compression.encode_header(buffer, len(self.small_msg), -1, True) + buffer.seek(0) + header = segment_codec_no_compression.decode_header(buffer) + self.assertEqual(header.uncompressed_payload_length, -1) + self.assertEqual(header.payload_length, len(self.small_msg)) + self.assertEqual(header.is_self_contained, True) + + def test_decode_compressed_header(self): + buffer = io.BytesIO() + compressed_length = len(segment_codec_lz4.compress(self.small_msg)) + segment_codec_lz4.encode_header(buffer, compressed_length, len(self.small_msg), True) + buffer.seek(0) + header = segment_codec_lz4.decode_header(buffer) + self.assertEqual(header.uncompressed_payload_length, len(self.small_msg)) + self.assertEqual(header.payload_length, compressed_length) + self.assertEqual(header.is_self_contained, True) + + def test_decode_header_fails_if_corrupted(self): + buffer = io.BytesIO() + segment_codec_no_compression.encode_header(buffer, len(self.small_msg), -1, True) + # corrupt one byte + buffer.seek(buffer.tell()-1) + buffer.write(b'0') + buffer.seek(0) + + with self.assertRaises(CrcException): + segment_codec_no_compression.decode_header(buffer) + + def test_decode_uncompressed_self_contained_segment(self): + buffer = io.BytesIO() + segment_codec_no_compression.encode(buffer, self.small_msg) + + buffer.seek(0) + header = segment_codec_no_compression.decode_header(buffer) + segment = segment_codec_no_compression.decode(buffer, header) + + self.assertEqual(header.is_self_contained, True) + self.assertEqual(header.uncompressed_payload_length, -1) + self.assertEqual(header.payload_length, len(self.small_msg)) + self.assertEqual(segment.payload, self.small_msg) + + def test_decode_compressed_self_contained_segment(self): + buffer = io.BytesIO() + segment_codec_lz4.encode(buffer, self.small_msg) + + buffer.seek(0) + header = segment_codec_lz4.decode_header(buffer) + segment = segment_codec_lz4.decode(buffer, header) + + self.assertEqual(header.is_self_contained, True) + self.assertEqual(header.uncompressed_payload_length, len(self.small_msg)) + self.assertGreater(header.uncompressed_payload_length, header.payload_length) + self.assertEqual(segment.payload, self.small_msg) + + def test_decode_multi_segments(self): + buffer = io.BytesIO() + segment_codec_no_compression.encode(buffer, self.large_msg) + + buffer.seek(0) + # We should have 2 segments to read + headers = [] + segments = [] + headers.append(segment_codec_no_compression.decode_header(buffer)) + segments.append(segment_codec_no_compression.decode(buffer, headers[0])) + headers.append(segment_codec_no_compression.decode_header(buffer)) + segments.append(segment_codec_no_compression.decode(buffer, headers[1])) + + self.assertTrue(all([h.is_self_contained is False for h in headers])) + decoded_msg = segments[0].payload + segments[1].payload + self.assertEqual(decoded_msg, self.large_msg) + + def test_decode_fails_if_corrupted(self): + buffer = io.BytesIO() + segment_codec_lz4.encode(buffer, self.small_msg) + buffer.seek(buffer.tell()-1) + buffer.write(b'0') + buffer.seek(0) + header = segment_codec_lz4.decode_header(buffer) + with self.assertRaises(CrcException): + segment_codec_lz4.decode(buffer, header) + + def test_decode_tiny_msg_not_compressed(self): + buffer = io.BytesIO() + segment_codec_lz4.encode(buffer, b'b') + buffer.seek(0) + header = segment_codec_lz4.decode_header(buffer) + segment = segment_codec_lz4.decode(buffer, header) + self.assertEqual(header.uncompressed_payload_length, 0) + self.assertEqual(header.payload_length, 1) + self.assertEqual(segment.payload, b'b') From 31ac39cd852cbc4e99b4a8049c9333b8c98f1b27 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 31 Aug 2020 14:47:31 -0400 Subject: [PATCH 1844/2005] Fix some unit tests to read the proper connection cql frame buffer --- tests/unit/io/test_twistedreactor.py | 6 +++--- tests/unit/io/utils.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/unit/io/test_twistedreactor.py b/tests/unit/io/test_twistedreactor.py index f3a2f05328..ef79943d7d 100644 --- a/tests/unit/io/test_twistedreactor.py +++ b/tests/unit/io/test_twistedreactor.py @@ -148,12 +148,12 @@ def test_handle_read__incomplete(self): # incomplete header self.obj_ut._iobuf.write(b'\x84\x00\x00\x00\x00') self.obj_ut.handle_read() - self.assertEqual(self.obj_ut._iobuf.getvalue(), b'\x84\x00\x00\x00\x00') + self.assertEqual(self.obj_ut._frame_iobuf.getvalue(), b'\x84\x00\x00\x00\x00') # full header, but incomplete body self.obj_ut._iobuf.write(b'\x00\x00\x00\x15') self.obj_ut.handle_read() - self.assertEqual(self.obj_ut._iobuf.getvalue(), + self.assertEqual(self.obj_ut._frame_iobuf.getvalue(), b'\x84\x00\x00\x00\x00\x00\x00\x00\x15') self.assertEqual(self.obj_ut._current_frame.end_pos, 30) @@ -174,7 +174,7 @@ def test_handle_read__fullmessage(self): self.obj_ut._iobuf.write( b'\x84\x01\x00\x02\x03\x00\x00\x00\x15' + body + extra) self.obj_ut.handle_read() - self.assertEqual(self.obj_ut._iobuf.getvalue(), extra) + self.assertEqual(self.obj_ut._frame_iobuf.getvalue(), extra) self.obj_ut.process_msg.assert_called_with( _Frame(version=4, flags=1, stream=2, opcode=3, body_offset=9, end_pos=9 + len(body)), body) diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index 168a5e8b76..b587c24bfd 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -435,7 +435,7 @@ def test_partial_header_read(self): self.get_socket(c).recv.return_value = message[0:1] c.handle_read(*self.null_handle_function_args) - self.assertEqual(c._iobuf.getvalue(), message[0:1]) + self.assertEqual(c._frame_iobuf.getvalue(), message[0:1]) self.get_socket(c).recv.return_value = message[1:] c.handle_read(*self.null_handle_function_args) @@ -461,7 +461,7 @@ def test_partial_message_read(self): # read in the first nine bytes self.get_socket(c).recv.return_value = message[:9] c.handle_read(*self.null_handle_function_args) - self.assertEqual(c._iobuf.getvalue(), message[:9]) + self.assertEqual(c._frame_iobuf.getvalue(), message[:9]) # ... then read in the rest self.get_socket(c).recv.return_value = message[9:] From bd05fe6aeec9df67445a720067d7f8434e1b1e8b Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 31 Aug 2020 15:04:36 -0400 Subject: [PATCH 1845/2005] Fix python2 bits formatting and missing lz4 deps with tox --- tests/unit/test_segment.py | 41 +++++++++++++++++++++----------------- tox.ini | 1 + 2 files changed, 24 insertions(+), 18 deletions(-) diff --git a/tests/unit/test_segment.py b/tests/unit/test_segment.py index bf661bb961..85d4f38cbd 100644 --- a/tests/unit/test_segment.py +++ b/tests/unit/test_segment.py @@ -17,13 +17,18 @@ except ImportError: import unittest # noqa -import io +import six from cassandra import DriverException from cassandra.segment import Segment, CrcException from cassandra.connection import segment_codec_no_compression, segment_codec_lz4 +def to_bits(b): + if six.PY2: + b = six.byte2int(b) + return '{:08b}'.format(b) + class SegmentCodecTest(unittest.TestCase): small_msg = b'b' * 50 @@ -36,19 +41,19 @@ def _header_to_bits(data): # data should be the little endian bytes sequence if len(data) > 6: # compressed data = data[:5] - bits = ''.join(['{:08b}'.format(b) for b in reversed(data)]) + bits = ''.join([to_bits(b) for b in reversed(data)]) # return the compressed payload length, the uncompressed payload length, # the self contained flag and the padding as bits return bits[23:40] + bits[6:23] + bits[5:6] + bits[:5] else: # uncompressed data = data[:3] - bits = ''.join(['{:08b}'.format(b) for b in reversed(data)]) + bits = ''.join([to_bits(b) for b in reversed(data)]) # return the payload length, the self contained flag and # the padding as bits return bits[7:24] + bits[6:7] + bits[:6] def test_encode_uncompressed_header(self): - buffer = io.BytesIO() + buffer = six.BytesIO() segment_codec_no_compression.encode_header(buffer, len(self.small_msg), -1, True) self.assertEqual(buffer.tell(), 6) self.assertEqual( @@ -56,7 +61,7 @@ def test_encode_uncompressed_header(self): "00000000000110010" + "1" + "000000") def test_encode_compressed_header(self): - buffer = io.BytesIO() + buffer = six.BytesIO() compressed_length = len(segment_codec_lz4.compress(self.small_msg)) segment_codec_lz4.encode_header(buffer, compressed_length, len(self.small_msg), True) @@ -66,7 +71,7 @@ def test_encode_compressed_header(self): "{:017b}".format(compressed_length) + "00000000000110010" + "1" + "00000") def test_encode_uncompressed_header_with_max_payload(self): - buffer = io.BytesIO() + buffer = six.BytesIO() segment_codec_no_compression.encode_header(buffer, len(self.max_msg), -1, True) self.assertEqual(buffer.tell(), 6) self.assertEqual( @@ -74,13 +79,13 @@ def test_encode_uncompressed_header_with_max_payload(self): "11111111111111111" + "1" + "000000") def test_encode_header_fails_if_payload_too_big(self): - buffer = io.BytesIO() + buffer = six.BytesIO() for codec in [segment_codec_no_compression, segment_codec_lz4]: with self.assertRaises(DriverException): codec.encode_header(buffer, len(self.large_msg), -1, False) def test_encode_uncompressed_header_not_self_contained_msg(self): - buffer = io.BytesIO() + buffer = six.BytesIO() # simulate the first chunk with the max size segment_codec_no_compression.encode_header(buffer, len(self.max_msg), -1, False) self.assertEqual(buffer.tell(), 6) @@ -91,7 +96,7 @@ def test_encode_uncompressed_header_not_self_contained_msg(self): "000000")) def test_encode_compressed_header_with_max_payload(self): - buffer = io.BytesIO() + buffer = six.BytesIO() compressed_length = len(segment_codec_lz4.compress(self.max_msg)) segment_codec_lz4.encode_header(buffer, compressed_length, len(self.max_msg), True) self.assertEqual(buffer.tell(), 8) @@ -100,7 +105,7 @@ def test_encode_compressed_header_with_max_payload(self): "{:017b}".format(compressed_length) + "11111111111111111" + "1" + "00000") def test_encode_compressed_header_not_self_contained_msg(self): - buffer = io.BytesIO() + buffer = six.BytesIO() # simulate the first chunk with the max size compressed_length = len(segment_codec_lz4.compress(self.max_msg)) segment_codec_lz4.encode_header(buffer, compressed_length, len(self.max_msg), False) @@ -113,7 +118,7 @@ def test_encode_compressed_header_not_self_contained_msg(self): "00000")) def test_decode_uncompressed_header(self): - buffer = io.BytesIO() + buffer = six.BytesIO() segment_codec_no_compression.encode_header(buffer, len(self.small_msg), -1, True) buffer.seek(0) header = segment_codec_no_compression.decode_header(buffer) @@ -122,7 +127,7 @@ def test_decode_uncompressed_header(self): self.assertEqual(header.is_self_contained, True) def test_decode_compressed_header(self): - buffer = io.BytesIO() + buffer = six.BytesIO() compressed_length = len(segment_codec_lz4.compress(self.small_msg)) segment_codec_lz4.encode_header(buffer, compressed_length, len(self.small_msg), True) buffer.seek(0) @@ -132,7 +137,7 @@ def test_decode_compressed_header(self): self.assertEqual(header.is_self_contained, True) def test_decode_header_fails_if_corrupted(self): - buffer = io.BytesIO() + buffer = six.BytesIO() segment_codec_no_compression.encode_header(buffer, len(self.small_msg), -1, True) # corrupt one byte buffer.seek(buffer.tell()-1) @@ -143,7 +148,7 @@ def test_decode_header_fails_if_corrupted(self): segment_codec_no_compression.decode_header(buffer) def test_decode_uncompressed_self_contained_segment(self): - buffer = io.BytesIO() + buffer = six.BytesIO() segment_codec_no_compression.encode(buffer, self.small_msg) buffer.seek(0) @@ -156,7 +161,7 @@ def test_decode_uncompressed_self_contained_segment(self): self.assertEqual(segment.payload, self.small_msg) def test_decode_compressed_self_contained_segment(self): - buffer = io.BytesIO() + buffer = six.BytesIO() segment_codec_lz4.encode(buffer, self.small_msg) buffer.seek(0) @@ -169,7 +174,7 @@ def test_decode_compressed_self_contained_segment(self): self.assertEqual(segment.payload, self.small_msg) def test_decode_multi_segments(self): - buffer = io.BytesIO() + buffer = six.BytesIO() segment_codec_no_compression.encode(buffer, self.large_msg) buffer.seek(0) @@ -186,7 +191,7 @@ def test_decode_multi_segments(self): self.assertEqual(decoded_msg, self.large_msg) def test_decode_fails_if_corrupted(self): - buffer = io.BytesIO() + buffer = six.BytesIO() segment_codec_lz4.encode(buffer, self.small_msg) buffer.seek(buffer.tell()-1) buffer.write(b'0') @@ -196,7 +201,7 @@ def test_decode_fails_if_corrupted(self): segment_codec_lz4.decode(buffer, header) def test_decode_tiny_msg_not_compressed(self): - buffer = io.BytesIO() + buffer = six.BytesIO() segment_codec_lz4.encode(buffer, b'b') buffer.seek(0) header = segment_codec_lz4.decode_header(buffer) diff --git a/tox.ini b/tox.ini index efb610ac09..d013a480f0 100644 --- a/tox.ini +++ b/tox.ini @@ -12,6 +12,7 @@ deps = nose pure-sasl kerberos futurist + lz4 [testenv] deps = {[base]deps} From d992e818edf93cc886100e5e4007e10ea8e4d4bc Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 9 Sep 2020 15:26:54 -0400 Subject: [PATCH 1846/2005] Avoid using 2 io buffers when checksumming is not used --- CHANGELOG.rst | 1 + cassandra/connection.py | 111 ++++++++++++------ .../standard/test_custom_protocol_handler.py | 11 +- tests/integration/standard/test_query.py | 4 +- tests/unit/io/test_twistedreactor.py | 6 +- tests/unit/io/utils.py | 18 +-- tests/unit/test_connection.py | 4 +- tests/unit/test_segment.py | 9 +- tox.ini | 5 +- 9 files changed, 114 insertions(+), 55 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index a3217d6cda..9aaf719542 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,6 +5,7 @@ Not released Features -------- * Ensure the driver can connect when invalid peer hosts are in system.peers (PYTHON-1260) +* Implement protocol v5 checksumming (PYTHON-1258) Bug Fixes --------- diff --git a/cassandra/connection.py b/cassandra/connection.py index 7054a389d0..477eaf2f28 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -27,6 +27,8 @@ from threading import Thread, Event, RLock, Condition import time import ssl +import weakref + if 'gevent.monkey' in sys.modules: from gevent.queue import Queue, Empty @@ -610,6 +612,55 @@ def int_from_buf_item(i): int_from_buf_item = ord +class _ConnectionIOBuffer(object): + """ + Abstraction class to ease the use of the different connection io buffers. With + protocol V5 and checksumming, the data is read, validated and copied to another + cql frame buffer. + """ + _io_buffer = None + _cql_frame_buffer = None + _connection = None + + def __init__(self, connection): + self._io_buffer = io.BytesIO() + self._connection = weakref.proxy(connection) + + @property + def io_buffer(self): + return self._io_buffer + + @property + def cql_frame_buffer(self): + return self._cql_frame_buffer if self.is_checksumming_enabled else \ + self._io_buffer + + def set_checksumming_buffer(self): + self.reset_io_buffer() + self._cql_frame_buffer = io.BytesIO() + + @property + def is_checksumming_enabled(self): + return self._connection._is_checksumming_enabled + + def readable_io_bytes(self): + return self.io_buffer.tell() + + def readable_cql_frame_bytes(self): + return self.cql_frame_buffer.tell() + + def reset_io_buffer(self): + self._io_buffer = io.BytesIO(self._io_buffer.read()) + self._io_buffer.seek(0, 2) # 2 == SEEK_END + + def reset_cql_frame_buffer(self): + if self.is_checksumming_enabled: + self._cql_frame_buffer = io.BytesIO(self._cql_frame_buffer.read()) + self._cql_frame_buffer.seek(0, 2) # 2 == SEEK_END + else: + self.reset_io_buffer() + + class Connection(object): CALLBACK_ERR_THREAD_THRESHOLD = 100 @@ -665,8 +716,6 @@ class Connection(object): allow_beta_protocol_version = False - _iobuf = None - _frame_iobuf = None _current_frame = None _socket = None @@ -679,6 +728,11 @@ class Connection(object): _is_checksumming_enabled = False + @property + def _iobuf(self): + # backward compatibility, to avoid any change in the reactors + return self._io_buffer.io_buffer + def __init__(self, host='127.0.0.1', port=9042, authenticator=None, ssl_options=None, sockopts=None, compression=True, cql_version=None, protocol_version=ProtocolVersion.MAX_SUPPORTED, is_control_connection=False, @@ -702,8 +756,7 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self.no_compact = no_compact self._push_watchers = defaultdict(set) self._requests = {} - self._iobuf = io.BytesIO() - self._frame_iobuf = io.BytesIO() + self._io_buffer = _ConnectionIOBuffer(self) self._continuous_paging_sessions = {} self._socket_writable = True @@ -844,6 +897,12 @@ def _connect_socket(self): for args in self.sockopts: self._socket.setsockopt(*args) + def _enable_checksumming(self): + self._io_buffer.set_checksumming_buffer() + self._is_checksumming_enabled = True + self._segment_codec = segment_codec_lz4 if self.compressor else segment_codec_no_compression + log.debug("Enabling protocol checksumming on connection (%s).", id(self)) + def close(self): raise NotImplementedError() @@ -1032,7 +1091,7 @@ def control_conn_disposed(self): @defunct_on_error def _read_frame_header(self): - buf = self._frame_iobuf.getvalue() + buf = self._io_buffer.cql_frame_buffer.getvalue() pos = len(buf) if pos: version = int_from_buf_item(buf[0]) & PROTOCOL_VERSION_MASK @@ -1048,28 +1107,19 @@ def _read_frame_header(self): self._current_frame = _Frame(version, flags, stream, op, header_size, body_len + header_size) return pos - def _reset_frame(self): - self._frame_iobuf = io.BytesIO(self._frame_iobuf.read()) - self._frame_iobuf.seek(0, 2) # 2 == SEEK_END - self._current_frame = None - - def _reset_io_buffer(self): - self._iobuf = io.BytesIO(self._iobuf.read()) - self._iobuf.seek(0, 2) # 2 == SEEK_END - @defunct_on_error def _process_segment_buffer(self): - readable_bytes = self._iobuf.tell() + readable_bytes = self._io_buffer.readable_io_bytes() if readable_bytes >= self._segment_codec.header_length_with_crc: try: - self._iobuf.seek(0) - segment_header = self._segment_codec.decode_header(self._iobuf) + self._io_buffer.io_buffer.seek(0) + segment_header = self._segment_codec.decode_header(self._io_buffer.io_buffer) if readable_bytes >= segment_header.segment_length: segment = self._segment_codec.decode(self._iobuf, segment_header) - self._frame_iobuf.write(segment.payload) + self._io_buffer.cql_frame_buffer.write(segment.payload) else: # not enough data to read the segment - self._iobuf.seek(0, 2) + self._io_buffer.io_buffer.seek(0, 2) except CrcException as exc: # re-raise an exception that inherits from ConnectionException raise CrcMismatchException(str(exc), self.endpoint) @@ -1078,21 +1128,15 @@ def process_io_buffer(self): while True: if self._is_checksumming_enabled: self._process_segment_buffer() - else: - # We should probably refactor the IO buffering stuff out of the Connection - # class to handle this in a better way. That would make the segment and frame - # decoding code clearer. - self._frame_iobuf.write(self._iobuf.getvalue()) - - self._reset_io_buffer() + self._io_buffer.reset_io_buffer() if not self._current_frame: pos = self._read_frame_header() else: - pos = self._frame_iobuf.tell() + pos = self._io_buffer.readable_cql_frame_bytes() if not self._current_frame or pos < self._current_frame.end_pos: - if self._is_checksumming_enabled and self._iobuf.tell(): + if self._is_checksumming_enabled and self._io_buffer.readable_io_bytes(): # We have a multi-segments message and we need to read more # data to complete the current cql frame continue @@ -1103,10 +1147,11 @@ def process_io_buffer(self): return else: frame = self._current_frame - self._frame_iobuf.seek(frame.body_offset) - msg = self._frame_iobuf.read(frame.end_pos - frame.body_offset) + self._io_buffer.cql_frame_buffer.seek(frame.body_offset) + msg = self._io_buffer.cql_frame_buffer.read(frame.end_pos - frame.body_offset) self.process_msg(frame, msg) - self._reset_frame() + self._io_buffer.reset_cql_frame_buffer() + self._current_frame = None @defunct_on_error def process_msg(self, header, body): @@ -1287,9 +1332,7 @@ def _handle_startup_response(self, startup_response, did_authenticate=False): self.compressor = self._compressor if ProtocolVersion.has_checksumming_support(self.protocol_version): - self._is_checksumming_enabled = True - self._segment_codec = segment_codec_lz4 if self.compressor else segment_codec_no_compression - log.debug("Enabling protocol checksumming on connection (%s).", id(self)) + self._enable_checksumming() self.connected_event.set() elif isinstance(startup_response, AuthenticateMessage): diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index d5108ed47b..bf549511c8 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -25,7 +25,7 @@ from tests.integration import use_singledc, drop_keyspace_shutdown_cluster, \ greaterthanorequalcass30, execute_with_long_wait_retry, greaterthanorequaldse51, greaterthanorequalcass3_10, \ - greaterthanorequalcass31, TestCluster + TestCluster, greaterthanorequalcass40, requirecassandra from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES from tests.integration.standard.utils import create_table_with_all_types, get_all_primitive_params from six import binary_type @@ -124,7 +124,8 @@ def test_custom_raw_row_results_all_types(self): self.assertEqual(len(CustomResultMessageTracked.checked_rev_row_set), len(PRIMITIVE_DATATYPES)-1) cluster.shutdown() - @greaterthanorequalcass31 + @requirecassandra + @greaterthanorequalcass40 def test_protocol_divergence_v5_fail_by_continuous_paging(self): """ Test to validate that V5 and DSE_V1 diverge. ContinuousPagingOptions is not supported by V5 @@ -170,7 +171,8 @@ def test_protocol_divergence_v4_fail_by_flag_uses_int(self): self._protocol_divergence_fail_by_flag_uses_int(ProtocolVersion.V4, uses_int_query_flag=False, int_flag=True) - @greaterthanorequalcass3_10 + @requirecassandra + @greaterthanorequalcass40 def test_protocol_v5_uses_flag_int(self): """ Test to validate that the _PAGE_SIZE_FLAG is treated correctly using write_uint for V5 @@ -196,7 +198,8 @@ def test_protocol_dsev1_uses_flag_int(self): self._protocol_divergence_fail_by_flag_uses_int(ProtocolVersion.DSE_V1, uses_int_query_flag=True, int_flag=True) - @greaterthanorequalcass3_10 + @requirecassandra + @greaterthanorequalcass40 def test_protocol_divergence_v5_fail_by_flag_uses_int(self): """ Test to validate that the _PAGE_SIZE_FLAG is treated correctly using write_uint for V5 diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index d6401a987e..ea0e326ff5 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -28,7 +28,7 @@ from cassandra.policies import HostDistance, RoundRobinPolicy, WhiteListRoundRobinPolicy from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, \ greaterthanprotocolv3, MockLoggingHandler, get_supported_protocol_versions, local, get_cluster, setup_keyspace, \ - USE_CASS_EXTERNAL, greaterthanorequalcass40, DSE_VERSION, TestCluster + USE_CASS_EXTERNAL, greaterthanorequalcass40, DSE_VERSION, TestCluster, requirecassandra from tests import notwindows from tests.integration import greaterthanorequalcass30, get_node @@ -1408,6 +1408,8 @@ def test_setting_keyspace(self): """ self._check_set_keyspace_in_statement(self.session) + @requirecassandra + @greaterthanorequalcass40 def test_setting_keyspace_and_session(self): """ Test we can still send the keyspace independently even the session diff --git a/tests/unit/io/test_twistedreactor.py b/tests/unit/io/test_twistedreactor.py index ef79943d7d..e7c34cb4b5 100644 --- a/tests/unit/io/test_twistedreactor.py +++ b/tests/unit/io/test_twistedreactor.py @@ -148,12 +148,12 @@ def test_handle_read__incomplete(self): # incomplete header self.obj_ut._iobuf.write(b'\x84\x00\x00\x00\x00') self.obj_ut.handle_read() - self.assertEqual(self.obj_ut._frame_iobuf.getvalue(), b'\x84\x00\x00\x00\x00') + self.assertEqual(self.obj_ut._io_buffer.cql_frame_buffer.getvalue(), b'\x84\x00\x00\x00\x00') # full header, but incomplete body self.obj_ut._iobuf.write(b'\x00\x00\x00\x15') self.obj_ut.handle_read() - self.assertEqual(self.obj_ut._frame_iobuf.getvalue(), + self.assertEqual(self.obj_ut._io_buffer.cql_frame_buffer.getvalue(), b'\x84\x00\x00\x00\x00\x00\x00\x00\x15') self.assertEqual(self.obj_ut._current_frame.end_pos, 30) @@ -174,7 +174,7 @@ def test_handle_read__fullmessage(self): self.obj_ut._iobuf.write( b'\x84\x01\x00\x02\x03\x00\x00\x00\x15' + body + extra) self.obj_ut.handle_read() - self.assertEqual(self.obj_ut._frame_iobuf.getvalue(), extra) + self.assertEqual(self.obj_ut._io_buffer.cql_frame_buffer.getvalue(), extra) self.obj_ut.process_msg.assert_called_with( _Frame(version=4, flags=1, stream=2, opcode=3, body_offset=9, end_pos=9 + len(body)), body) diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index b587c24bfd..848513f031 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -309,14 +309,14 @@ def chunk(size): for message, expected_size in messages: message_chunks = message - c._iobuf = io.BytesIO() + c._io_buffer._io_buffer = io.BytesIO() c.process_io_buffer.reset_mock() c.handle_read(*self.null_handle_function_args) - c._iobuf.seek(0, os.SEEK_END) + c._io_buffer.io_buffer.seek(0, os.SEEK_END) # Ensure the message size is the good one and that the # message has been processed if it is non-empty - self.assertEqual(c._iobuf.tell(), expected_size) + self.assertEqual(c._io_buffer.io_buffer.tell(), expected_size) if expected_size == 0: c.process_io_buffer.assert_not_called() else: @@ -435,11 +435,11 @@ def test_partial_header_read(self): self.get_socket(c).recv.return_value = message[0:1] c.handle_read(*self.null_handle_function_args) - self.assertEqual(c._frame_iobuf.getvalue(), message[0:1]) + self.assertEqual(c._io_buffer.cql_frame_buffer.getvalue(), message[0:1]) self.get_socket(c).recv.return_value = message[1:] c.handle_read(*self.null_handle_function_args) - self.assertEqual(six.binary_type(), c._iobuf.getvalue()) + self.assertEqual(six.binary_type(), c._io_buffer.io_buffer.getvalue()) # let it write out a StartupMessage c.handle_write(*self.null_handle_function_args) @@ -461,12 +461,12 @@ def test_partial_message_read(self): # read in the first nine bytes self.get_socket(c).recv.return_value = message[:9] c.handle_read(*self.null_handle_function_args) - self.assertEqual(c._frame_iobuf.getvalue(), message[:9]) + self.assertEqual(c._io_buffer.cql_frame_buffer.getvalue(), message[:9]) # ... then read in the rest self.get_socket(c).recv.return_value = message[9:] c.handle_read(*self.null_handle_function_args) - self.assertEqual(six.binary_type(), c._iobuf.getvalue()) + self.assertEqual(six.binary_type(), c._io_buffer.io_buffer.getvalue()) # let it write out a StartupMessage c.handle_write(*self.null_handle_function_args) @@ -501,7 +501,7 @@ def test_mixed_message_and_buffer_sizes(self): for i in range(1, 15): c.process_io_buffer.reset_mock() - c._iobuf = io.BytesIO() + c._io_buffer._io_buffer = io.BytesIO() message = io.BytesIO(six.b('a') * (2**i)) def recv_side_effect(*args): @@ -511,7 +511,7 @@ def recv_side_effect(*args): self.get_socket(c).recv.side_effect = recv_side_effect c.handle_read(*self.null_handle_function_args) - if c._iobuf.tell(): + if c._io_buffer.io_buffer.tell(): c.process_io_buffer.assert_called_once() else: c.process_io_buffer.assert_not_called() diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index 68577a396e..21b8862772 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -100,7 +100,7 @@ def test_bad_protocol_version(self, *args): header = self.make_header_prefix(SupportedMessage, version=0x7f) options = self.make_options_body() message = self.make_msg(header, options) - c._iobuf = BytesIO() + c._iobuf._io_buffer = BytesIO() c._iobuf.write(message) c.process_io_buffer() @@ -117,7 +117,7 @@ def test_negative_body_length(self, *args): # read in a SupportedMessage response header = self.make_header_prefix(SupportedMessage) message = header + int32_pack(-13) - c._iobuf = BytesIO() + c._iobuf._io_buffer = BytesIO() c._iobuf.write(message) c.process_io_buffer() diff --git a/tests/unit/test_segment.py b/tests/unit/test_segment.py index 85d4f38cbd..fc49339d68 100644 --- a/tests/unit/test_segment.py +++ b/tests/unit/test_segment.py @@ -60,6 +60,7 @@ def test_encode_uncompressed_header(self): self._header_to_bits(buffer.getvalue()), "00000000000110010" + "1" + "000000") + @unittest.skipUnless(segment_codec_lz4, ' lz4 not installed') def test_encode_compressed_header(self): buffer = six.BytesIO() compressed_length = len(segment_codec_lz4.compress(self.small_msg)) @@ -80,7 +81,7 @@ def test_encode_uncompressed_header_with_max_payload(self): def test_encode_header_fails_if_payload_too_big(self): buffer = six.BytesIO() - for codec in [segment_codec_no_compression, segment_codec_lz4]: + for codec in [c for c in [segment_codec_no_compression, segment_codec_lz4] if c is not None]: with self.assertRaises(DriverException): codec.encode_header(buffer, len(self.large_msg), -1, False) @@ -95,6 +96,7 @@ def test_encode_uncompressed_header_not_self_contained_msg(self): "0" # not self contained "000000")) + @unittest.skipUnless(segment_codec_lz4, ' lz4 not installed') def test_encode_compressed_header_with_max_payload(self): buffer = six.BytesIO() compressed_length = len(segment_codec_lz4.compress(self.max_msg)) @@ -104,6 +106,7 @@ def test_encode_compressed_header_with_max_payload(self): self._header_to_bits(buffer.getvalue()), "{:017b}".format(compressed_length) + "11111111111111111" + "1" + "00000") + @unittest.skipUnless(segment_codec_lz4, ' lz4 not installed') def test_encode_compressed_header_not_self_contained_msg(self): buffer = six.BytesIO() # simulate the first chunk with the max size @@ -126,6 +129,7 @@ def test_decode_uncompressed_header(self): self.assertEqual(header.payload_length, len(self.small_msg)) self.assertEqual(header.is_self_contained, True) + @unittest.skipUnless(segment_codec_lz4, ' lz4 not installed') def test_decode_compressed_header(self): buffer = six.BytesIO() compressed_length = len(segment_codec_lz4.compress(self.small_msg)) @@ -160,6 +164,7 @@ def test_decode_uncompressed_self_contained_segment(self): self.assertEqual(header.payload_length, len(self.small_msg)) self.assertEqual(segment.payload, self.small_msg) + @unittest.skipUnless(segment_codec_lz4, ' lz4 not installed') def test_decode_compressed_self_contained_segment(self): buffer = six.BytesIO() segment_codec_lz4.encode(buffer, self.small_msg) @@ -190,6 +195,7 @@ def test_decode_multi_segments(self): decoded_msg = segments[0].payload + segments[1].payload self.assertEqual(decoded_msg, self.large_msg) + @unittest.skipUnless(segment_codec_lz4, ' lz4 not installed') def test_decode_fails_if_corrupted(self): buffer = six.BytesIO() segment_codec_lz4.encode(buffer, self.small_msg) @@ -200,6 +206,7 @@ def test_decode_fails_if_corrupted(self): with self.assertRaises(CrcException): segment_codec_lz4.decode(buffer, header) + @unittest.skipUnless(segment_codec_lz4, ' lz4 not installed') def test_decode_tiny_msg_not_compressed(self): buffer = six.BytesIO() segment_codec_lz4.encode(buffer, b'b') diff --git a/tox.ini b/tox.ini index d013a480f0..d883a1f973 100644 --- a/tox.ini +++ b/tox.ini @@ -12,10 +12,11 @@ deps = nose pure-sasl kerberos futurist - lz4 +lz4_dependency = py27,py35,py36,py37,py38: lz4 [testenv] deps = {[base]deps} + {[base]lz4_dependency} setenv = LIBEV_EMBED=0 CARES_EMBED=0 @@ -26,6 +27,7 @@ commands = nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/ [testenv:gevent_loop] deps = {[base]deps} + {[base]lz4_dependency} gevent>=1.4,<1.5 setenv = LIBEV_EMBED=0 @@ -38,6 +40,7 @@ commands = [testenv:eventlet_loop] deps = {[base]deps} + {[base]lz4_dependency} gevent>=1.4,<1.5 setenv = LIBEV_EMBED=0 From ed76b47a8b8fc735dfae28049c4fd5a60557bc98 Mon Sep 17 00:00:00 2001 From: Radovan Date: Thu, 24 Sep 2020 20:52:35 +0300 Subject: [PATCH 1847/2005] Don't create Cluster with hosts if having cloud config We set out to build a Flask app that talks to Astra. The CQL plugin we used turned out to be crashing if we didn't give it hosts to connect to (https://github.com/thegeorgeous/flask-cqlalchemy/blob/master/flask_cqlalchemy/__init__.py#L48). But we also had to use the `cloud` key to pass in Astra credentials (via `setup_kwargs` nearby the link above). This exploded on us in cqlengine.connection#setup() where the hosts passed into Cluster() made that constructor blow up because: `ValueError: contact_points, endpoint_factory, ssl_context, and ssl_options cannot be specified with a cloud configuration` We're not sure this is the right place to patch, but it helped us at least. We figured coming out with a patch directly, instead of an issue, might save some time. --- cassandra/cqlengine/connection.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 884e04ed74..c844ec7675 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -98,7 +98,11 @@ def setup(self): if self.lazy_connect: return - self.cluster = Cluster(self.hosts, **self.cluster_options) + if 'cloud' in self.cluster_options: + self.cluster = Cluster(**self.cluster_options) + else: + self.cluster = Cluster(self.hosts, **self.cluster_options) + try: self.session = self.cluster.connect() log.debug(format_log_context("connection initialized with internally created session", connection=self.name)) From d3769c36766856d71256a0f7617f1275a4679004 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Fri, 23 Oct 2020 15:59:04 -0400 Subject: [PATCH 1848/2005] Add a test and docs --- CHANGELOG.rst | 1 + cassandra/cqlengine/connection.py | 4 ++++ docs/cloud.rst | 33 +++++++++++++++++++++++++++ tests/integration/cloud/test_cloud.py | 19 +++++++++++++-- 4 files changed, 55 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 9aaf719542..1e1d265bfd 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -6,6 +6,7 @@ Features -------- * Ensure the driver can connect when invalid peer hosts are in system.peers (PYTHON-1260) * Implement protocol v5 checksumming (PYTHON-1258) +* Fix the default cqlengine connection mechanism to work with Astra (PYTHON-1265) Bug Fixes --------- diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index c844ec7675..90e6d90317 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -99,6 +99,8 @@ def setup(self): return if 'cloud' in self.cluster_options: + if self.hosts: + log.warning("Ignoring hosts %s because a cloud config was provided.", self.hosts) self.cluster = Cluster(**self.cluster_options) else: self.cluster = Cluster(self.hosts, **self.cluster_options) @@ -305,6 +307,8 @@ def set_session(s): log.debug("cqlengine default connection initialized with %s", s) +# TODO next major: if a cloud config is specified in kwargs, hosts will be ignored. +# This function should be refactored to reflect this change. PYTHON-1265 def setup( hosts, default_keyspace, diff --git a/docs/cloud.rst b/docs/cloud.rst index 7ff7693736..3607ef8b4e 100644 --- a/docs/cloud.rst +++ b/docs/cloud.rst @@ -54,3 +54,36 @@ Limitations Event loops ^^^^^^^^^^^ Evenlet isn't yet supported for python 3.7+ due to an `issue in Eventlet `_. + + +CqlEngine +========= + +When using the object mapper, you can configure cqlengine with :func:`~.cqlengine.connection.set_session`: + +.. code:: python + + from cassandra.cqlengine import connection + ... + + c = Cluster(cloud={'secure_connect_bundle':'/path/to/secure-connect-test.zip'}, + auth_provider=PlainTextAuthProvider('user', 'pass')) + s = c.connect('myastrakeyspace') + connection.set_session(s) + ... + +If you are using some third-party libraries (flask, django, etc.), you might not be able to change the +configuration mechanism. For this reason, the `hosts` argument of the default +:func:`~.cqlengine.connection.setup` function will be ignored if a `cloud` config is provided: + +.. code:: python + + from cassandra.cqlengine import connection + ... + + connection.setup( + None, # or anything else + "myastrakeyspace", cloud={ + 'secure_connect_bundle':'/path/to/secure-connect-test.zip' + }, + auth_provider=PlainTextAuthProvider('user', 'pass')) diff --git a/tests/integration/cloud/test_cloud.py b/tests/integration/cloud/test_cloud.py index 5b9b268f5c..cd41b8f0e0 100644 --- a/tests/integration/cloud/test_cloud.py +++ b/tests/integration/cloud/test_cloud.py @@ -13,6 +13,10 @@ # limitations under the License from cassandra.datastax.cloud import parse_metadata_info from cassandra.query import SimpleStatement +from cassandra.cqlengine import connection +from cassandra.cqlengine.management import sync_table, create_keyspace_simple +from cassandra.cqlengine.models import Model +from cassandra.cqlengine import columns try: import unittest2 as unittest @@ -30,7 +34,7 @@ from mock import patch -from tests.integration import requirescloudproxy, TestCluster +from tests.integration import requirescloudproxy from tests.util import wait_until_not_raised from tests.integration.cloud import CloudProxyCluster, CLOUD_PROXY_SERVER @@ -143,7 +147,7 @@ def test_resolve_and_reconnect_on_node_down(self): wait_until_not_raised( lambda: self.assertEqual(len(self.hosts_up()), 3), 0.02, 250) - mocked_resolve.assert_called_once() + mocked_resolve.assert_called() def test_metadata_unreachable(self): with self.assertRaises(DriverException) as cm: @@ -234,3 +238,14 @@ def test_consistency_guardrails(self): self.session.execute(statement) except InvalidRequest: self.fail("InvalidRequest was incorrectly raised for write query at LOCAL QUORUM!") + + def test_cqlengine_can_connect(self): + class TestModel(Model): + id = columns.Integer(primary_key=True) + val = columns.Text() + + connection.setup(None, "test", cloud={'secure_connect_bundle': self.creds}) + create_keyspace_simple('test', 1) + sync_table(TestModel) + TestModel.objects.create(id=42, value='test') + self.assertEqual(len(TestModel.objects.all()), 1) From 8331eca6cc96d8bd3af2e37bc64693747515c2b6 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Mon, 26 Oct 2020 13:52:27 -0400 Subject: [PATCH 1849/2005] Update security documentation and examples to use PROTOCOL_TLS --- CHANGELOG.rst | 1 + cassandra/cluster.py | 2 +- docs/security.rst | 28 +++++++++++++-------------- tests/integration/cloud/test_cloud.py | 4 ++-- tests/integration/long/test_ssl.py | 12 ++++++------ 5 files changed, 24 insertions(+), 23 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 1e1d265bfd..a01a7ee49a 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -15,6 +15,7 @@ Bug Fixes Others ------ * Drop Python 3.4 support (PYTHON-1220) +* Update security documentation and examples to use PROTOCOL_TLS (PYTHON-1264) 3.24.0 ====== diff --git a/cassandra/cluster.py b/cassandra/cluster.py index ec91ce257a..cedcf8207b 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -785,7 +785,7 @@ def default_retry_policy(self, policy): By default, a ``ca_certs`` value should be supplied (the value should be a string pointing to the location of the CA certs file), and you probably - want to specify ``ssl_version`` as ``ssl.PROTOCOL_TLSv1`` to match + want to specify ``ssl_version`` as ``ssl.PROTOCOL_TLS`` to match Cassandra's default protocol. .. versionchanged:: 3.3.0 diff --git a/docs/security.rst b/docs/security.rst index 4cf3163fb0..6dd2624c24 100644 --- a/docs/security.rst +++ b/docs/security.rst @@ -119,9 +119,9 @@ The driver configuration: .. code-block:: python from cassandra.cluster import Cluster, Session - from ssl import SSLContext, PROTOCOL_TLSv1 + from ssl import SSLContext, PROTOCOL_TLS - ssl_context = SSLContext(PROTOCOL_TLSv1) + ssl_context = SSLContext(PROTOCOL_TLS) cluster = Cluster(['127.0.0.1'], ssl_context=ssl_context) session = cluster.connect() @@ -147,9 +147,9 @@ to `CERT_REQUIRED`. Otherwise, the loaded verify certificate will have no effect .. code-block:: python from cassandra.cluster import Cluster, Session - from ssl import SSLContext, PROTOCOL_TLSv1, CERT_REQUIRED + from ssl import SSLContext, PROTOCOL_TLS, CERT_REQUIRED - ssl_context = SSLContext(PROTOCOL_TLSv1) + ssl_context = SSLContext(PROTOCOL_TLS) ssl_context.load_verify_locations('/path/to/rootca.crt') ssl_context.verify_mode = CERT_REQUIRED @@ -161,9 +161,9 @@ Additionally, you can also force the driver to verify the `hostname` of the serv .. code-block:: python from cassandra.cluster import Cluster, Session - from ssl import SSLContext, PROTOCOL_TLSv1, CERT_REQUIRED + from ssl import SSLContext, PROTOCOL_TLS, CERT_REQUIRED - ssl_context = SSLContext(PROTOCOL_TLSv1) + ssl_context = SSLContext(PROTOCOL_TLS) ssl_context.load_verify_locations('/path/to/rootca.crt') ssl_context.verify_mode = CERT_REQUIRED ssl_context.check_hostname = True @@ -228,9 +228,9 @@ Finally, you can use that configuration with the following driver code: .. code-block:: python from cassandra.cluster import Cluster, Session - from ssl import SSLContext, PROTOCOL_TLSv1 + from ssl import SSLContext, PROTOCOL_TLS - ssl_context = SSLContext(PROTOCOL_TLSv1) + ssl_context = SSLContext(PROTOCOL_TLS) ssl_context.load_cert_chain( certfile='/path/to/client.crt_signed', keyfile='/path/to/client.key') @@ -251,9 +251,9 @@ The following driver code specifies that the connection should use two-way verif .. code-block:: python from cassandra.cluster import Cluster, Session - from ssl import SSLContext, PROTOCOL_TLSv1, CERT_REQUIRED + from ssl import SSLContext, PROTOCOL_TLS, CERT_REQUIRED - ssl_context = SSLContext(PROTOCOL_TLSv1) + ssl_context = SSLContext(PROTOCOL_TLS) ssl_context.load_verify_locations('/path/to/rootca.crt') ssl_context.verify_mode = CERT_REQUIRED ssl_context.load_cert_chain( @@ -275,7 +275,7 @@ for more details about ``SSLContext`` configuration. from cassandra.cluster import Cluster from cassandra.io.twistedreactor import TwistedConnection - ssl_context = SSL.Context(SSL.TLSv1_METHOD) + ssl_context = SSL.Context(SSL.TLSv1_2_METHOD) ssl_context.set_verify(SSL.VERIFY_PEER, callback=lambda _1, _2, _3, _4, ok: ok) ssl_context.use_certificate_file('/path/to/client.crt_signed') ssl_context.use_privatekey_file('/path/to/client.key') @@ -303,7 +303,7 @@ deprecated in the next major release. By default, a ``ca_certs`` value should be supplied (the value should be a string pointing to the location of the CA certs file), and you probably -want to specify ``ssl_version`` as ``ssl.PROTOCOL_TLSv1`` to match +want to specify ``ssl_version`` as ``ssl.PROTOCOL_TLS`` to match Cassandra's default protocol. For example: @@ -311,11 +311,11 @@ For example: .. code-block:: python from cassandra.cluster import Cluster - from ssl import PROTOCOL_TLSv1, CERT_REQUIRED + from ssl import PROTOCOL_TLS, CERT_REQUIRED ssl_opts = { 'ca_certs': '/path/to/my/ca.certs', - 'ssl_version': PROTOCOL_TLSv1, + 'ssl_version': PROTOCOL_TLS, 'cert_reqs': CERT_REQUIRED # Certificates are required and validated } cluster = Cluster(ssl_options=ssl_opts) diff --git a/tests/integration/cloud/test_cloud.py b/tests/integration/cloud/test_cloud.py index cd41b8f0e0..e0b9e2d382 100644 --- a/tests/integration/cloud/test_cloud.py +++ b/tests/integration/cloud/test_cloud.py @@ -24,7 +24,7 @@ import unittest # noqa import six -from ssl import SSLContext, PROTOCOL_TLSv1 +from ssl import SSLContext, PROTOCOL_TLS from cassandra import DriverException, ConsistencyLevel, InvalidRequest from cassandra.cluster import NoHostAvailable, ExecutionProfile, Cluster, _execution_profile_to_string @@ -92,7 +92,7 @@ def test_support_overriding_auth_provider(self): def test_error_overriding_ssl_context(self): with self.assertRaises(ValueError) as cm: - self.connect(self.creds, ssl_context=SSLContext(PROTOCOL_TLSv1)) + self.connect(self.creds, ssl_context=SSLContext(PROTOCOL_TLS)) self.assertIn('cannot be specified with a cloud configuration', str(cm.exception)) diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index 7698849945..4de46f4649 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -54,11 +54,11 @@ USES_PYOPENSSL = "twisted" in EVENT_LOOP_MANAGER or "eventlet" in EVENT_LOOP_MANAGER if "twisted" in EVENT_LOOP_MANAGER: import OpenSSL - ssl_version = OpenSSL.SSL.TLSv1_METHOD + ssl_version = OpenSSL.SSL.TLSv1_2_METHOD verify_certs = {'cert_reqs': SSL.VERIFY_PEER, 'check_hostname': True} else: - ssl_version = ssl.PROTOCOL_TLSv1 + ssl_version = ssl.PROTOCOL_TLS verify_certs = {'cert_reqs': ssl.CERT_REQUIRED, 'check_hostname': True} @@ -404,7 +404,7 @@ def test_can_connect_with_sslcontext_certificate(self): @test_category connection:ssl """ if USES_PYOPENSSL: - ssl_context = SSL.Context(SSL.TLSv1_METHOD) + ssl_context = SSL.Context(SSL.TLSv1_2_METHOD) ssl_context.load_verify_locations(CLIENT_CA_CERTS) else: ssl_context = ssl.SSLContext(ssl_version) @@ -428,7 +428,7 @@ def test_can_connect_with_ssl_client_auth_password_private_key(self): ssl_options = {} if USES_PYOPENSSL: - ssl_context = SSL.Context(SSL.TLSv1_METHOD) + ssl_context = SSL.Context(SSL.TLSv1_2_METHOD) ssl_context.use_certificate_file(abs_driver_certfile) with open(abs_driver_keyfile) as keyfile: key = crypto.load_privatekey(crypto.FILETYPE_PEM, keyfile.read(), b'cassandra') @@ -449,7 +449,7 @@ def test_can_connect_with_ssl_context_ca_host_match(self): """ ssl_options = {} if USES_PYOPENSSL: - ssl_context = SSL.Context(SSL.TLSv1_METHOD) + ssl_context = SSL.Context(SSL.TLSv1_2_METHOD) ssl_context.use_certificate_file(DRIVER_CERTFILE) with open(DRIVER_KEYFILE_ENCRYPTED) as keyfile: key = crypto.load_privatekey(crypto.FILETYPE_PEM, keyfile.read(), b'cassandra') @@ -472,7 +472,7 @@ def test_can_connect_with_ssl_context_ca_host_match(self): def test_cannot_connect_ssl_context_with_invalid_hostname(self): ssl_options = {} if USES_PYOPENSSL: - ssl_context = SSL.Context(SSL.TLSv1_METHOD) + ssl_context = SSL.Context(SSL.TLSv1_2_METHOD) ssl_context.use_certificate_file(DRIVER_CERTFILE) with open(DRIVER_KEYFILE_ENCRYPTED) as keyfile: key = crypto.load_privatekey(crypto.FILETYPE_PEM, keyfile.read(), b"cassandra") From 5c058524fbddabc6840d3189bf6df608aff77503 Mon Sep 17 00:00:00 2001 From: Aleksandr Sorokoumov Date: Tue, 10 Nov 2020 15:15:13 +0100 Subject: [PATCH 1850/2005] PYTHON-1269 Update list of CQL keywords * Add MBEAN and MBEANS to reserved words (CASSANDRA-15663) * Move UNSET outside of DSE specific (CASSANDRA-15663) --- CHANGELOG.rst | 1 + cassandra/metadata.py | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index a01a7ee49a..97e3f2e8f9 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -11,6 +11,7 @@ Features Bug Fixes --------- * Asyncore race condition cause logging exception on shutdown (PYTHON-1266) +* Update list of reserved keywords (PYTHON-1269) Others ------ diff --git a/cassandra/metadata.py b/cassandra/metadata.py index df38fc6670..a82fbe48e3 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -52,18 +52,18 @@ 'counter', 'create', 'custom', 'date', 'decimal', 'default', 'delete', 'desc', 'describe', 'deterministic', 'distinct', 'double', 'drop', 'entries', 'execute', 'exists', 'filtering', 'finalfunc', 'float', 'from', 'frozen', 'full', 'function', 'functions', 'grant', 'if', 'in', 'index', 'inet', 'infinity', 'initcond', 'input', 'insert', 'int', 'into', 'is', 'json', - 'key', 'keys', 'keyspace', 'keyspaces', 'language', 'limit', 'list', 'login', 'map', 'materialized', 'modify', 'monotonic', 'nan', 'nologin', - 'norecursive', 'nosuperuser', 'not', 'null', 'of', 'on', 'options', 'or', 'order', 'password', 'permission', + 'key', 'keys', 'keyspace', 'keyspaces', 'language', 'limit', 'list', 'login', 'map', 'materialized', 'mbean', 'mbeans', 'modify', 'monotonic', + 'nan', 'nologin', 'norecursive', 'nosuperuser', 'not', 'null', 'of', 'on', 'options', 'or', 'order', 'password', 'permission', 'permissions', 'primary', 'rename', 'replace', 'returns', 'revoke', 'role', 'roles', 'schema', 'select', 'set', 'sfunc', 'smallint', 'static', 'storage', 'stype', 'superuser', 'table', 'text', 'time', 'timestamp', 'timeuuid', - 'tinyint', 'to', 'token', 'trigger', 'truncate', 'ttl', 'tuple', 'type', 'unlogged', 'update', 'use', 'user', + 'tinyint', 'to', 'token', 'trigger', 'truncate', 'ttl', 'tuple', 'type', 'unlogged', 'unset', 'update', 'use', 'user', 'users', 'using', 'uuid', 'values', 'varchar', 'varint', 'view', 'where', 'with', 'writetime', # DSE specifics "node", "nodes", "plan", "active", "application", "applications", "java", "executor", "executors", "std_out", "std_err", "renew", "delegation", "no", "redact", "token", "lowercasestring", "cluster", "authentication", "schemes", "scheme", "internal", "ldap", "kerberos", "remote", "object", "method", "call", "calls", "search", "schema", "config", "rows", - "columns", "profiles", "commit", "reload", "unset", "rebuild", "field", "workpool", "any", "submission", "indices", + "columns", "profiles", "commit", "reload", "rebuild", "field", "workpool", "any", "submission", "indices", "restrict", "unrestrict" )) """ From 95fae2fe82c6a2cff2be926d7ea6f9d930e81b22 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 1 Feb 2021 12:42:19 -0600 Subject: [PATCH 1851/2005] typos --- cassandra/cluster.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index cedcf8207b..45e1fb410b 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1570,7 +1570,7 @@ def set_core_connections_per_host(self, host_distance, core_connections): If :attr:`~.Cluster.protocol_version` is set to 3 or higher, this is not supported (there is always one connection per host, unless the host is remote and :attr:`connect_to_remote_hosts` is :const:`False`) - and using this will result in an :exc:`~.UnsupporteOperation`. + and using this will result in an :exc:`~.UnsupportedOperation`. """ if self.protocol_version >= 3: raise UnsupportedOperation( @@ -1603,7 +1603,7 @@ def set_max_connections_per_host(self, host_distance, max_connections): If :attr:`~.Cluster.protocol_version` is set to 3 or higher, this is not supported (there is always one connection per host, unless the host is remote and :attr:`connect_to_remote_hosts` is :const:`False`) - and using this will result in an :exc:`~.UnsupporteOperation`. + and using this will result in an :exc:`~.UnsupportedOperation`. """ if self.protocol_version >= 3: raise UnsupportedOperation( From a3a8cd098c0786df09cc885fef8f3bd65afe48cf Mon Sep 17 00:00:00 2001 From: Sam Tunnicliffe Date: Tue, 8 Dec 2020 10:21:41 +0000 Subject: [PATCH 1852/2005] PYTHON-1232 Bring protocol v5 out of beta --- cassandra/__init__.py | 11 +++++-- tests/integration/__init__.py | 9 +++--- tests/integration/standard/test_cluster.py | 36 ++++++++++++++-------- 3 files changed, 37 insertions(+), 19 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 100df2df17..4a273daff7 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -161,7 +161,12 @@ class ProtocolVersion(object): V5 = 5 """ - v5, in beta from 3.x+ + v5, in beta from 3.x+. Finalised in 4.0-beta4 + """ + + V6 = 6 + """ + v6, in beta from 4.0-beta4 """ DSE_V1 = 0x41 @@ -174,12 +179,12 @@ class ProtocolVersion(object): DSE private protocol v2, supported in DSE 6.0+ """ - SUPPORTED_VERSIONS = (DSE_V2, DSE_V1, V5, V4, V3, V2, V1) + SUPPORTED_VERSIONS = (DSE_V2, DSE_V1, V6, V5, V4, V3, V2, V1) """ A tuple of all supported protocol versions """ - BETA_VERSIONS = (V5,) + BETA_VERSIONS = (V6,) """ A tuple of all beta protocol versions """ diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 1e1f582804..77923853f1 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -207,8 +207,6 @@ def get_default_protocol(): if DSE_VERSION: return ProtocolVersion.DSE_V2 else: - global ALLOW_BETA_PROTOCOL - ALLOW_BETA_PROTOCOL = True return ProtocolVersion.V5 if CASSANDRA_VERSION >= Version('3.10'): if DSE_VERSION: @@ -234,9 +232,12 @@ def get_supported_protocol_versions(): 3.X -> 4, 3 3.10(C*) -> 5(beta),4,3 3.10(DSE) -> DSE_V1,4,3 - 4.0(C*) -> 5(beta),4,3 + 4.0(C*) -> 6(beta),5,4,3 4.0(DSE) -> DSE_v2, DSE_V1,4,3 ` """ + if CASSANDRA_VERSION >= Version('4.0-beta4'): + if not DSE_VERSION: + return (3, 4, 5, 6) if CASSANDRA_VERSION >= Version('4.0-a'): if DSE_VERSION: return (3, 4, ProtocolVersion.DSE_V1, ProtocolVersion.DSE_V2) @@ -316,7 +317,7 @@ def _id_and_mark(f): notprotocolv1 = unittest.skipUnless(PROTOCOL_VERSION > 1, 'Protocol v1 not supported') lessthenprotocolv4 = unittest.skipUnless(PROTOCOL_VERSION < 4, 'Protocol versions 4 or greater not supported') greaterthanprotocolv3 = unittest.skipUnless(PROTOCOL_VERSION >= 4, 'Protocol versions less than 4 are not supported') -protocolv5 = unittest.skipUnless(5 in get_supported_protocol_versions(), 'Protocol versions less than 5 are not supported') +protocolv6 = unittest.skipUnless(6 in get_supported_protocol_versions(), 'Protocol versions less than 6 are not supported') greaterthancass20 = unittest.skipUnless(CASSANDRA_VERSION >= Version('2.1'), 'Cassandra version 2.1 or greater required') greaterthancass21 = unittest.skipUnless(CASSANDRA_VERSION >= Version('2.2'), 'Cassandra version 2.2 or greater required') greaterthanorequalcass30 = unittest.skipUnless(CASSANDRA_VERSION >= Version('3.0'), 'Cassandra version 3.0 or greater required') diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index cdb6f1f3b7..a99076ebaa 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -42,7 +42,7 @@ from tests import notwindows from tests.integration import use_singledc, get_server_versions, CASSANDRA_VERSION, \ execute_until_pass, execute_with_long_wait_retry, get_node, MockLoggingHandler, get_unsupported_lower_protocol, \ - get_unsupported_upper_protocol, protocolv5, local, CASSANDRA_IP, greaterthanorequalcass30, lessthanorequalcass40, \ + get_unsupported_upper_protocol, protocolv6, local, CASSANDRA_IP, greaterthanorequalcass30, lessthanorequalcass40, \ DSE_VERSION, TestCluster, PROTOCOL_VERSION from tests.integration.util import assert_quiescent_pool_state import sys @@ -261,6 +261,18 @@ def test_protocol_negotiation(self): elif DSE_VERSION and DSE_VERSION >= Version("5.1"): self.assertEqual(updated_protocol_version, cassandra.ProtocolVersion.DSE_V1) self.assertEqual(updated_cluster_version, cassandra.ProtocolVersion.DSE_V1) + elif CASSANDRA_VERSION >= Version('4.0-beta4'): + self.assertEqual(updated_protocol_version, cassandra.ProtocolVersion.V5) + self.assertEqual(updated_cluster_version, cassandra.ProtocolVersion.V5) + elif CASSANDRA_VERSION >= Version('4.0-a'): + self.assertEqual(updated_protocol_version, cassandra.ProtocolVersion.V4) + self.assertEqual(updated_cluster_version, cassandra.ProtocolVersion.V4) + elif CASSANDRA_VERSION >= Version('3.11'): + self.assertEqual(updated_protocol_version, cassandra.ProtocolVersion.V4) + self.assertEqual(updated_cluster_version, cassandra.ProtocolVersion.V4) + elif CASSANDRA_VERSION >= Version('3.0'): + self.assertEqual(updated_protocol_version, cassandra.ProtocolVersion.V4) + self.assertEqual(updated_cluster_version, cassandra.ProtocolVersion.V4) elif CASSANDRA_VERSION >= Version('2.2'): self.assertEqual(updated_protocol_version, 4) self.assertEqual(updated_cluster_version, 4) @@ -1473,42 +1485,42 @@ def test_prepare_on_ignored_hosts(self): cluster.shutdown() -@protocolv5 +@protocolv6 class BetaProtocolTest(unittest.TestCase): - @protocolv5 + @protocolv6 def test_invalid_protocol_version_beta_option(self): """ - Test cluster connection with protocol v5 and beta flag not set + Test cluster connection with protocol v6 and beta flag not set @since 3.7.0 - @jira_ticket PYTHON-614 - @expected_result client shouldn't connect with V5 and no beta flag set + @jira_ticket PYTHON-614, PYTHON-1232 + @expected_result client shouldn't connect with V6 and no beta flag set @test_category connection """ - cluster = TestCluster(protocol_version=cassandra.ProtocolVersion.V5, allow_beta_protocol_version=False) + cluster = TestCluster(protocol_version=cassandra.ProtocolVersion.V6, allow_beta_protocol_version=False) try: with self.assertRaises(NoHostAvailable): cluster.connect() except Exception as e: self.fail("Unexpected error encountered {0}".format(e.message)) - @protocolv5 + @protocolv6 def test_valid_protocol_version_beta_options_connect(self): """ Test cluster connection with protocol version 5 and beta flag set @since 3.7.0 - @jira_ticket PYTHON-614 - @expected_result client should connect with protocol v5 and beta flag set. + @jira_ticket PYTHON-614, PYTHON-1232 + @expected_result client should connect with protocol v6 and beta flag set. @test_category connection """ - cluster = Cluster(protocol_version=cassandra.ProtocolVersion.V5, allow_beta_protocol_version=True) + cluster = Cluster(protocol_version=cassandra.ProtocolVersion.V6, allow_beta_protocol_version=True) session = cluster.connect() - self.assertEqual(cluster.protocol_version, cassandra.ProtocolVersion.V5) + self.assertEqual(cluster.protocol_version, cassandra.ProtocolVersion.V6) self.assertTrue(session.execute("select release_version from system.local")[0]) cluster.shutdown() From 64d47649a16a2862724d20082d2f5e947fe1fed6 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Sat, 6 Feb 2021 14:09:29 -0500 Subject: [PATCH 1853/2005] Make sure the protocol v5 downgrade for C* <4 if it is not explicitly specified --- cassandra/cluster.py | 10 +++++++++- cassandra/protocol.py | 4 ++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 45e1fb410b..7e101afba8 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -63,7 +63,7 @@ BatchMessage, RESULT_KIND_PREPARED, RESULT_KIND_SET_KEYSPACE, RESULT_KIND_ROWS, RESULT_KIND_SCHEMA_CHANGE, ProtocolHandler, - RESULT_KIND_VOID) + RESULT_KIND_VOID, ProtocolException) from cassandra.metadata import Metadata, protect_name, murmur3, _NodeInfo from cassandra.policies import (TokenAwarePolicy, DCAwareRoundRobinPolicy, SimpleConvictionPolicy, ExponentialReconnectionPolicy, HostDistance, @@ -3548,6 +3548,14 @@ def _try_connect(self, host): break except ProtocolVersionUnsupported as e: self._cluster.protocol_downgrade(host.endpoint, e.startup_version) + except ProtocolException as e: + # protocol v5 is out of beta in C* >=4.0-beta5 and is now the default driver + # protocol version. If the protocol version was not explicitly specified, + # and that the server raises a beta protocol error, we should downgrade. + if not self._cluster._protocol_version_explicit and e.is_beta_protocol_error: + self._cluster.protocol_downgrade(host.endpoint, self._cluster.protocol_version) + else: + raise log.debug("[control connection] Established new connection %r, " "registering watchers and refreshing schema and topology", diff --git a/cassandra/protocol.py b/cassandra/protocol.py index c454824637..ed92a76679 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -180,6 +180,10 @@ class ProtocolException(ErrorMessageSub): summary = 'Protocol error' error_code = 0x000A + @property + def is_beta_protocol_error(self): + return 'USE_BETA flag is unset' in str(self) + class BadCredentials(ErrorMessageSub): summary = 'Bad credentials' From 942ec529fade2782169903bdc9d063f94170affb Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Sat, 6 Feb 2021 14:09:52 -0500 Subject: [PATCH 1854/2005] some test fixes --- cassandra/__init__.py | 4 ++-- tests/integration/__init__.py | 2 +- tests/integration/standard/test_cluster.py | 2 +- tests/unit/test_cluster.py | 2 ++ 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 4a273daff7..1e16bca287 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -161,12 +161,12 @@ class ProtocolVersion(object): V5 = 5 """ - v5, in beta from 3.x+. Finalised in 4.0-beta4 + v5, in beta from 3.x+. Finalised in 4.0-beta5 """ V6 = 6 """ - v6, in beta from 4.0-beta4 + v6, in beta from 4.0-beta5 """ DSE_V1 = 0x41 diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 77923853f1..9d350af707 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -235,7 +235,7 @@ def get_supported_protocol_versions(): 4.0(C*) -> 6(beta),5,4,3 4.0(DSE) -> DSE_v2, DSE_V1,4,3 ` """ - if CASSANDRA_VERSION >= Version('4.0-beta4'): + if CASSANDRA_VERSION >= Version('4.0-beta5'): if not DSE_VERSION: return (3, 4, 5, 6) if CASSANDRA_VERSION >= Version('4.0-a'): diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index a99076ebaa..c7d8266fd9 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -261,7 +261,7 @@ def test_protocol_negotiation(self): elif DSE_VERSION and DSE_VERSION >= Version("5.1"): self.assertEqual(updated_protocol_version, cassandra.ProtocolVersion.DSE_V1) self.assertEqual(updated_cluster_version, cassandra.ProtocolVersion.DSE_V1) - elif CASSANDRA_VERSION >= Version('4.0-beta4'): + elif CASSANDRA_VERSION >= Version('4.0-beta5'): self.assertEqual(updated_protocol_version, cassandra.ProtocolVersion.V5) self.assertEqual(updated_cluster_version, cassandra.ProtocolVersion.V5) elif CASSANDRA_VERSION >= Version('4.0-a'): diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 249c0a17cc..620f642084 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -209,6 +209,8 @@ def test_protocol_downgrade_test(self): lower = ProtocolVersion.get_lower_supported(ProtocolVersion.DSE_V2) self.assertEqual(ProtocolVersion.DSE_V1, lower) lower = ProtocolVersion.get_lower_supported(ProtocolVersion.DSE_V1) + self.assertEqual(ProtocolVersion.V5,lower) + lower = ProtocolVersion.get_lower_supported(ProtocolVersion.V5) self.assertEqual(ProtocolVersion.V4,lower) lower = ProtocolVersion.get_lower_supported(ProtocolVersion.V4) self.assertEqual(ProtocolVersion.V3,lower) From 8d62b28fae3d54520c460ab1fd49cd36f24d8d50 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 10 Feb 2021 08:24:13 -0500 Subject: [PATCH 1855/2005] make sure simulacron/test_empty_column uses protocolv4 --- tests/integration/simulacron/test_empty_column.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/simulacron/test_empty_column.py b/tests/integration/simulacron/test_empty_column.py index bd7fe6ead0..91c76985e1 100644 --- a/tests/integration/simulacron/test_empty_column.py +++ b/tests/integration/simulacron/test_empty_column.py @@ -27,8 +27,8 @@ from cassandra.cqlengine.connection import set_session from cassandra.cqlengine.models import Model -from tests.integration import PROTOCOL_VERSION, requiressimulacron -from tests.integration.simulacron import SimulacronCluster +from tests.integration import requiressimulacron +from tests.integration.simulacron import PROTOCOL_VERSION, SimulacronCluster from tests.integration.simulacron.utils import PrimeQuery, prime_request From 56053664652ac99f646e3472ce2854aa858fa483 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 10 Feb 2021 08:26:21 -0500 Subject: [PATCH 1856/2005] Enable checksumming before sending the auth message --- cassandra/connection.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cassandra/connection.py b/cassandra/connection.py index 477eaf2f28..ca1e7531db 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -1345,6 +1345,9 @@ def _handle_startup_response(self, startup_response, did_authenticate=False): "if DSE authentication is configured with transitional mode" % (self.host,)) raise AuthenticationFailed('Remote end requires authentication') + if ProtocolVersion.has_checksumming_support(self.protocol_version): + self._enable_checksumming() + if isinstance(self.authenticator, dict): log.debug("Sending credentials-based auth response on %s", self) cm = CredentialsMessage(creds=self.authenticator) From 4a00c2233a0e93c493383cd789071731721f1dc8 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 10 Feb 2021 12:08:06 -0500 Subject: [PATCH 1857/2005] make sure to enable compression before sending the auth creds --- cassandra/connection.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index ca1e7531db..48b3caefed 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -897,6 +897,10 @@ def _connect_socket(self): for args in self.sockopts: self._socket.setsockopt(*args) + def _enable_compression(self): + if self._compressor: + self.compressor = self._compressor + def _enable_checksumming(self): self._io_buffer.set_checksumming_buffer() self._is_checksumming_enabled = True @@ -1328,8 +1332,7 @@ def _handle_startup_response(self, startup_response, did_authenticate=False): self.authenticator.__class__.__name__) log.debug("Got ReadyMessage on new connection (%s) from %s", id(self), self.endpoint) - if self._compressor: - self.compressor = self._compressor + self._enable_compression() if ProtocolVersion.has_checksumming_support(self.protocol_version): self._enable_checksumming() @@ -1345,6 +1348,7 @@ def _handle_startup_response(self, startup_response, did_authenticate=False): "if DSE authentication is configured with transitional mode" % (self.host,)) raise AuthenticationFailed('Remote end requires authentication') + self._enable_compression() if ProtocolVersion.has_checksumming_support(self.protocol_version): self._enable_checksumming() From 3ab8baedd2b07d713534641563169c049b3476c8 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Sat, 27 Feb 2021 13:42:05 -0500 Subject: [PATCH 1858/2005] Make sure no data is lost when there is not enough data in the buffer to read a segment --- cassandra/connection.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index 48b3caefed..77e12fa714 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -1122,8 +1122,9 @@ def _process_segment_buffer(self): segment = self._segment_codec.decode(self._iobuf, segment_header) self._io_buffer.cql_frame_buffer.write(segment.payload) else: - # not enough data to read the segment - self._io_buffer.io_buffer.seek(0, 2) + # not enough data to read the segment. reset the buffer pointer at the + # beginning to not lose what we previously read (header). + self._io_buffer.io_buffer.seek(0) except CrcException as exc: # re-raise an exception that inherits from ConnectionException raise CrcMismatchException(str(exc), self.endpoint) From eba143ab84bc42654cffd341310ef92c1ff8c072 Mon Sep 17 00:00:00 2001 From: Sam Tunnicliffe Date: Mon, 1 Mar 2021 16:15:45 +0000 Subject: [PATCH 1859/2005] Yield from process_io_buffer when containing an incomplete segment --- cassandra/connection.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index 77e12fa714..0d8a50e76f 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -621,6 +621,7 @@ class _ConnectionIOBuffer(object): _io_buffer = None _cql_frame_buffer = None _connection = None + _segment_consumed = False def __init__(self, connection): self._io_buffer = io.BytesIO() @@ -643,6 +644,10 @@ def set_checksumming_buffer(self): def is_checksumming_enabled(self): return self._connection._is_checksumming_enabled + @property + def has_consumed_segment(self): + return self._segment_consumed; + def readable_io_bytes(self): return self.io_buffer.tell() @@ -1118,23 +1123,33 @@ def _process_segment_buffer(self): try: self._io_buffer.io_buffer.seek(0) segment_header = self._segment_codec.decode_header(self._io_buffer.io_buffer) + if readable_bytes >= segment_header.segment_length: segment = self._segment_codec.decode(self._iobuf, segment_header) + self._io_buffer._segment_consumed = True self._io_buffer.cql_frame_buffer.write(segment.payload) else: # not enough data to read the segment. reset the buffer pointer at the # beginning to not lose what we previously read (header). + self._io_buffer._segment_consumed = False self._io_buffer.io_buffer.seek(0) except CrcException as exc: # re-raise an exception that inherits from ConnectionException raise CrcMismatchException(str(exc), self.endpoint) + else: + self._io_buffer._segment_consumed = False def process_io_buffer(self): while True: - if self._is_checksumming_enabled: + if self._is_checksumming_enabled and self._io_buffer.readable_io_bytes(): self._process_segment_buffer() self._io_buffer.reset_io_buffer() + if self._is_checksumming_enabled and not self._io_buffer.has_consumed_segment: + # We couldn't read an entire segment from the io buffer, so return + # control to allow more bytes to be read off the wire + return + if not self._current_frame: pos = self._read_frame_header() else: From 457a4e11d61b71bf44c38ce45113bbbb6841c113 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 17 Mar 2021 09:23:35 -0400 Subject: [PATCH 1860/2005] Fix simulacron.test_policies.test_delay_can_be_0 to not use protocol v5 --- tests/integration/simulacron/test_policies.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/simulacron/test_policies.py b/tests/integration/simulacron/test_policies.py index 855a4de3ca..da093be43c 100644 --- a/tests/integration/simulacron/test_policies.py +++ b/tests/integration/simulacron/test_policies.py @@ -184,7 +184,7 @@ def test_delay_can_be_0(self): spec = ExecutionProfile(load_balancing_policy=RoundRobinPolicy(), speculative_execution_policy=ConstantSpeculativeExecutionPolicy(0, number_of_requests)) - cluster = Cluster(compression=False) + cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False) cluster.add_execution_profile("spec", spec) session = cluster.connect(wait_for_all_pools=True) self.addCleanup(cluster.shutdown) From 6b963f6e1b2da11eeb96eab5b9023c05a6f76d34 Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Wed, 17 Mar 2021 13:42:04 -0400 Subject: [PATCH 1861/2005] ssl tests: update all SSL certs --- tests/integration/long/ssl/127.0.0.1.keystore | Bin 3775 -> 4317 bytes .../integration/long/ssl/cassandra.truststore | Bin 846 -> 1074 bytes tests/integration/long/ssl/client.crt_signed | 28 +++++----- tests/integration/long/ssl/client.key | 52 +++++++++--------- .../integration/long/ssl/client_encrypted.key | 52 +++++++++--------- tests/integration/long/ssl/rootCa.crt | 28 +++++----- 6 files changed, 80 insertions(+), 80 deletions(-) diff --git a/tests/integration/long/ssl/127.0.0.1.keystore b/tests/integration/long/ssl/127.0.0.1.keystore index 3855f00a1ae458fb330deaaf11d623e203ef3ccc..98193ab54e271fc4d241442371204f3a63457262 100644 GIT binary patch literal 4317 zcmY+EbyyP)zr_cPk)zd-(j6Ppof1knj1&+U4HifZL<~|oN5~k6lr+kulo|-qjUX|) zd-r$m^WOK~_mA^@pXZ$OJ%4_nNV+LvA`&Q)E(Js(9IG393ML{WLLup*fk?WDf9y~w zlGN`%B2p(Hk`(@rZT*kTK~(?xqNXGwMj?ThP$cjaDg>hZU;CeNRv;7mH@1Wz@{Qn0e6~EhAT0YzBsB8;(BdvY@x^z3+P>d&=m%=i2Eb|#(^z5Y88n-y z=9#zx&Sa_Wb^wQ~)=~uiQn#ORFz*KsMYfANwOySmyC(f{^yx4+USG zw~p`Dvsi3fhV@=FkAW00671DKEt0vJRixTXju{!BJq=g@cBsqRw;om?^-2iwA@9ib z;L6)nvQ{ivade*BKpeU+E956`VN@-`?7`8aH>1ZM7ix-Lj49k&>!#(#dq-KR3XOXZ&f88C($O_eZ1q?A^5#*I*K$Y1$=Zy~mi_{~99f&9Q>Lb` zGpagag$P~fC${0Vx|%926!(e1CG_?+G7z$yi&3_tD}|{I&-e2CB~{YYgq$J5OtLgS~wUJZuQqb(+YNvUY~tLAm;}sWq8%h zRwF1-B%fVpt+)$@B~ZdXs6;KNSaoIE6*Y#7DG*O@x1>ggE?v;YGhBk%_Q{UJs$%4? z*GrLIPf?4GOT{v0^9U=tkaq zpajE3BL3i+T5SpY|C^za6Nia(Hb=fmFPfJpUB)CFG~cMI%&SWP&v+ z_Bq|)Gw1pfF~_c4mAF8(I~ln47yK)6m+tVBOYPnSC5E1qW`m)48cKfk{ZBug;vY`6 zvwO!^`K2o87+$kwuc&`jg9Z~tuRfXLq?gpm*Mq0gH7d0=n%_I?O%@Q<{iz)N`bz`F zxs)+=g!6XbXA*yr?%kK)sT4v3x{F>fU;9rW+S>8*Gc&9b<)SBz zf%+6Xx|yuiB5bF)J{qTib5bKrubwH6M6 z{OLQBKu!hrt=eY0{py&%Q&EUJ9_`9rha64%rBfFANbt(Gr3Ts*eWPO|e~g9QhjRTJ zl@v@cCJqJw3?K=R1&9Nn{}Sx~S8la8JG+d3YCS)%1g;YkyOBc?~s8| zNXla>l45N)?+yX>W}!R!>;~^vyn*3>b4RW zHw8Mga{iTE{6rr2^0jiW!M#q0`c|>5qs`+cixbVmTKkst$4(pVAA2*nz6yb74Frvc zOC9LDlJB<}889w8A!t1An327=FdR1l{ydI)Akkc?v7HAoW?zoy!unQHj^c%^G11z9 zHvKeXzuQ98fewDBa?&>8WJeACrG%5-Uk8KM7dS76Qk`6Gz7zkc_XaODIZ8-}4P|C4 z3bhi{sUNCR8XjJK<@^HCog=3|Qx~hgV7!1WT$;^dti2lu3}%Hx@|nZf#}P%-gU;Di zb2dx|A@va1jyFUJTv>Mj_Wi^sW2t<~VqL)Rc(^$mpH9$^hNWpiVUg8@D|af^a%Pa@ zM{q8G#Jv-BQL2JoCf$49koRWyhb)$4!59HTXwoiGcC(VE?-*hRI&u4V2yotXK+nBf zjnq92D+Q_ZJF1*3cQ3s1rJ|zl&bd0G{g&&JZy{Nu5ihMKv&nF<(3u;bT&D=m zPDnjhdQ=v`lJbgGL*M4n*qHGAZ~Y+r_40&Z-XZNKrWr0Ir5hG@^07v`xW*-CH}gXA z)Ta-esI27Ybd7k{%Sd~A(h+n}I{}w1JUMI9gHC}Na68uQU(ckaW~aJ}H$z3sD{PN) zZN&qVF6Z;fNFOzOp#VyhuXEwtKi*mUw60mUiCpA7$VwiHnUL%ZC$sULRQNo3P}gja zJBBcGIxph0KO;o+^~H(8$WV88p&AVaX>2PJrRW)HKxovx-XXJ!K~|@ zQ+iF66KgR`qZVFF$33|Mq1!JN63+Q;xw*xAueF8kmrQ(m2ED5S*3%a9@yYU)MgdK$ z7dAj_L$or_SBiWO-r+1nFqc*FCpPkyXjE2o6QtUbnTgvY%{}Om>TLY z7`UWXchCrlOduV*pFg9~preouL@ob|P?Bl#s|9>fWEbk~TNwQN3^JBIui!XcWlo37 zDz)>xoR z8Q^u53JH-R<`L|-YQ9xE*FO(JUnZVjn)a8E-o#7xg`zrr z@T^sG6pp;UDmfnI2z^nKqTWu2Qv-dnlNW$Azm67?+=J}bU1wR*<$o*EHf|SzN;c{^ zmF2(Vc6}W&gfmd8G_a~bR6F%)qsoCuTn%xVVQ^q*H@|I4^00ow$M$!I2-rn}-NO(| zA`yk{uOaP_WcK?$YhonYe+JgPze!f#5@tRW0jmlv)tY?juM+&NmrqV9rS|Twn@pAg zal|QD#DRqwgR_H^+o(9LS<2#LCVvXRrgL%k<48Xqta8%-`1V(&UqqFN&-I7at%8{E z^+9}%D8az)W1RP|@=zrk%`2;u7j1J`J2(@zo05*l3T|TB+RM}$s3hJvUE2v|%aw*! z&7>9O(B_kFTAm?k?-J>vhhDatST<@-HJcj=Y8~YISZcVaM8Zxd1GJF+mGn)ngp=t8 z-6vHy+0J2!s2(}4n$#7y9CL|&JQOuqcw=RqI2$df zb|QBg?4P(u-`K<5sAtz@!xK6RGZlRE!B_g#pSKWw(mB5B+kG|SeCjW4+HErZ7VDlA)|MK9B-kq!>URa=v67N@45>M=idD`|^|^A5pr+&FQc-i=2T>ba)|B}X1SXX<-ArAW0~fEeFdy05t$d9C zfdgml6)Ks2#yH$Ed_AG2d$p|7tJB_7dL`D5Uzl&ASW@7T=Z5u7SahX=v!9@2ZEc?k z7BSAT5s7VCPEZPq`&1^v(okcVI`lMfdJ+&)Cq+|!{ZRU2^BraYqZ#*VUX$3XM%C~* z{4dFlBG()pBhyC-N!V{xYi>-64Mr7*qr?io*jQth=P}99*Mxg1{0H{9&vu-BooXrm zZOqSf9j+tMS4q|kpVmYB|FV1T8STYRSzVT-_xtJ)N9^KCx zCrs+6k5Y9pPTyLiWX1U_7-(pxt;IJ23EGN^F)g|n#B+{D&QA#%yyes6?ReQiD4&s_ z%nph4(8xp(iN9pMBbjgdycP7Dh034aU2-BqNn9MHNQwdTKy|*~o z6PNaf=1E&Qh5K%woW$>%vvs&i*3k$zI4!>kR5%`?h^7y7Q8h_y91GtN^ACGi@_wVE zbpl=F6c^$(%sxUS&l}{y;^iye^h8*$X=QwyaYr?qe7;it#^rl(K89@CH~Ci%`rXYM z{AsUZSl<%FELE6Zn;*Zcq8@$e++f`ILd?VV`D_g7%Z6V6nm727#;0}7sc^Whst8N sC0X?xQf7B?>xN&mfJ_xqQ`U+zvD&K1$6_jWmpSO2zuUwlM6sy<0)%8G`v3p{ literal 3775 zcmeI!X*8Sp8UXMll2{TAu|&(LeN9Php>&2SYO5-ZjwxcVQfV!%twofgvF~D6)KWw( zRa>WsYUmbJYOijJDj}Al*L3d8ojZ5V{WRb1J^%B5c%F0K^WWa*eSW)(yNdt-0QCC< z^&=7kT%7>`ATh5&Kw!!ja5WfUGa4t9J0B8RI=O8R%@0Gn2 z3=Z}1a`$ob|1H4*|F#B0XN)+CR}F)~Vlir1ELL699>uG^7h-V#4F8rnNIM0WCHS4%USpfPcdp9N0!XLS@0=JJbzSb)vL6}c_0uqI6kIwL6q6h zS?v^UMEewR!o!w*hp1%}X%^|(_$JL+gOg34Hxz|WFg)RJ=O<=tK`eW%!nZq-XURuk z5ZN;jb9(rqb$dj&gQ~oRB=b-Sg?pX`?p9kF&W^xraQ&df2IG2fm-RpCF`wj(D@f#g z*Rj7zEcEK?=T{jy=MAGT`a3={qo=<*812-&pF1;n6sWyGNLq6|I$|qR^dKQStEDe@ z%+zr+;;XjA1rP@i0Q}<#?OpEP2>}6FjGR+9Sir${!aP>mU_4>Gzsq7Yr)z-=j&mTC z>90^wFF>z(xaVN{LEyUD^LQB`oVyduH`sECzWW@;Zqzn^8Ff>Q+=lBoaPJx*E4Y2i zH_y3R5v1Fq-mfBjB=^c^pm>#LKwL@>(=~9*WW=iIv;X5+lu!d&T#598TK2ZR{-u}M zJX$MxSBg|Atv6#UU_IDXH1bk!3G=>7g%)<4Zfw%}i5AA>2>0=@PZ=g$%O#9>ysvFu z&|Zd{std--D2%3UO$W3Ud1ap}Fw;LH{(QNrWKp<#>kU3XqUqqGMvb-;)bE0Z#KS(~ z(*mS|Y1Cam9dox<{ZG!vu$_Aq0|anm)o?19y@CC;V9rnh00jHK?3!ntrf8$XQLayOfg2RKdi zNajnvS!i`2c6>lvNtR8!FjR&t1}t@M+xa!0=}fssK#h`Pwp>@VMgsZ}p}XBRQRW^G z9|U5b9nIZthN^^#>{E#rukd_Fa2dAX=8NC2{YFXVQSxW4GR8ID_v6Z%!iMy7tyi ze&J!h0cKk+&f6o6MQQJ7XDyi*7ZfZGqh8oX?2?6=ZoD-pAIM+L&@;7+u)E(Jb!WLz z`IJvSbCGCF@pjg8``I_I^JugOXw9`a?MDXEMpk!@sMs?bb%CR-a9e%EYB>Iu@Mp9~fI}=5HXiOJWc$g{b>5_ICjG7BwyWDz&~e_J=lEin zO6$7Q>!KkyC-pq#gsSXU_c~>s5U-B(ARsI18+SLypUR!Kd&uPwxgl(FIk1dSYr{CA zoHZhuM7y*0KA_ychle-@+8;|_Jrql9QIxk+1O$d7i$AdWgJM_CQNpMrbl60o>$X*} z8+D4T8@G-Zy6)dqdVQ?Eq~^gNwP`^E*er8Tf8NFKnbC!KybWa zob|;WjAU-*QL$%@kf2Ortbd~T+&=vbomnAMVUu>=S?6a5BaHF!WN)cNlUaG=S?mA{ zd!@FUkhOZ#YyQCxVo30^9;gp*Y&p3EkvMGNQwPSBNROR+A#zqMxV4mcvD#X38ETah zm#I(_xT!xgBk7$X_IXu^vH2v_|L2n1dGBA($*m+#Yqih;9}*;a*#_<}cwM$~u_e&C zmg^mFsxxxbD~vo*yxK3uE}K)$8S!4oY2%w64@tFns%eVx{aw>*FJyb)+CrG}g#9ga2zjv5buRhh(o=>c3hOuFKaQwu2ctUS(+)CLw zq3|_B6hz5${Lxi_?^Jjm>6R2gJQ_;jOYaRb9L^Gc{N-rd2%Ww5Tl-38y#a#WvsVdwN29t<60cAdw1fB z?M9r!d_{x+>v-v=oeDTJ^qVGrmjilF4hk3&h_8qP1O$NuISB(@Q3w-OR_Ab@8QU&~ z8h2IkPaWoOY3={c2izVX)UbPe(D}v(oIS?quZbe?pF|P(jVMSA-!Eq25ct=Re@_bk zT@bD6k)-jp21-3Hj_iN>a7>s0$NMSoHFj|2&J59dXK*{UCTE%+lPHpsNU&clm`Ix3 z1sF7EiQ*<2UWIs{Jc+hDdh<%y3<-k!vJnm2%e3WgmTixJ-D8!0P4~1=ecE9E z>*JG~d=yz8nUta6(#NFik4uG3a|j6@=PlgjgPQ#PWA2B@om$iM`nr1k}4j%-$xGrkF; zQ~>iU0l(u1b77!2z~Orh^G}!ljTK40Yy!T`50D<65nyy2bpdY74OFN;|7Wu z+afqcv}jchOCdCsH5aZP9z;tgcD-G#PL0Vl(Oq1sH|G;(3S9vGQ1I1j*{hyVBNyFR zTM{UiGvj$8cWB9<$%XQ;NL}+pd)Y|A?*Uv1gR^|I>alX-1p`je6fT?lhVI82o-QMO zLQ~D8)JrYH`<0D<(v>`Vnt#h;$9t&t0txoDhEjAV)$~Ej&XsZL3JwiXUCFXr>FJDP zJ&FxakLj<~-J!n<&nfrbveajsb}_hf7nV0)m2A-na(_~t|3p#qr(s>IyUi1MoOco| z;>VQ^qXDPLX97~5O=S$N9J;6Ye{laF+<#pe|LfomSh=UNY!x;XvU7j*di!{GX$p+~ E4@6Q;7ytkO diff --git a/tests/integration/long/ssl/cassandra.truststore b/tests/integration/long/ssl/cassandra.truststore index 4de3d319197650252f27bea858bd279736f31241..b31e34b8aaad16a24f56d19d3f30f097727816a1 100644 GIT binary patch literal 1074 zcmV-21kL*}f&?xC0Ru3C1Lp<_Duzgg_YDCD0ic2d*aU(D)G&er&@h4n%mxW6hDe6@ z4FLxRpn?OvFoFZQ0s#Opf&;P!2`Yw2hW8Bt2LUiC1_~;MNQUiYLVpZyXo~SzUK~8{rIu&2=o!sj&~mMDhuK3pYAXVJQ%tSPR+L(kP;%# z6t;Rl6-YihrCU@K@;O|f0k1<;11d<5AS&W@(wV3mhbsHa#t?Z*RCh*Q>Jz) z#xH#;*w`l3MzW#$ZjUd6nq>-r=((f|?BR|xrVc2JZRiZF-d4s8g9W6FpS9$^m{eXI z(gAnnIYUrjebAHu$O5@^ojmp3VU$dP2%KwC@P{vpcs^uk?0xB}SMxsI&X&c%)dV+} zFQVi~UiiO~Z_3w;YuJDU|68Ro?{#4nrXkPiF1c2C?LF%Tz26%IyxlO!&jTC@m%0Va zx>a)%T*cX43*P<7AeEUw8-;W#8gJ$cc#zwNxg9P7 z;}y$!0_YIgCNb^ce=ODLa-RMOkChZJzF{8;YyMu@SApoDMFMTSw=RNWG7+m#&nStR zxI7$^9-|-&S|a$>4#(SZhAaUWz82z=RO*DBHEX+Ji;3fFQMTO$E0%M;JlI%l+}4uiP2)e2T2T?31=m;s+WC3I{`wczLYN_Xr0+D_v z_b|-Z&ae=)+)w<5V+BZ2v0Z>a25Q+(B;f0ExINz1I6MA1p}Q%>MRZ6CMrjX#W|@7; zWDf0+>9Tj{{=_hg+e$w4?)+PHDO?6C6pj_f)&!Q0s{etpihnU9RL6T literal 846 zcmezO_TO6u1_mY|W(3o0Mfv$9$%#O2{zRYPAO_Y5JyQcq1_ow+gC=HfgC@q$Od|P< z1Rgwe)iLpO*4w`O`X%AZI_g^uc-c6$+C196^D;7WvoaVs8FCwNvN4CUun9AT1{(?* z@Pjy9JRB*BC5go)i4}(YKrKKaZXUL@%+#EeVweCk57-UPi3W1wyha8FhK2@4hK7d5 zrcvU&#z3xtIh0EkJDM1kke$xR%D~*j$j<=uC>K)`BO}9}U9W$wKE2oRW_9zYo7}Ia zaMXL8$tld{*Zif4$D1nV3cP2e$Sa@ zdp7@mle3^N%Sq`==HerXXG+D--np%pD}8g*zSZ%quE*Xr_@#w>Tbpkd-00PqzU9f} zL=*OJr(Ziu^W43h&hzy3-G?DepQFs?KmFSn(__ocsv5!SeY*Z_;GTxVvHI#hiZ^u^ z&E|Z4jODP=r>ip>41RFfYkgxeKe}S+$#X}%@9}od>tlbmS8}UNb?uiAx2Dd_a&9ds zjyvIXdde;N*7$9boYN|AFj~Ax=>Hn$bS*?>!GYcx)3+a=b=x!UXTyIBg Date: Thu, 18 Mar 2021 10:47:17 -0400 Subject: [PATCH 1862/2005] release 3.25: changelog & version --- CHANGELOG.rst | 2 +- cassandra/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 97e3f2e8f9..d2d577c957 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,6 @@ 3.25.0 ====== -Not released +March 18, 2021 Features -------- diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 1e16bca287..5739d5d98e 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 24, 0) +__version_info__ = (3, 25, 0) __version__ = '.'.join(map(str, __version_info__)) From 15d715f4e686032b02ce785eca1d176d2b25e32b Mon Sep 17 00:00:00 2001 From: Alan Boudreault Date: Thu, 18 Mar 2021 10:48:29 -0400 Subject: [PATCH 1863/2005] release 3.25: docs --- docs.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs.yaml b/docs.yaml index eeccbe16b6..8e29b942e3 100644 --- a/docs.yaml +++ b/docs.yaml @@ -22,6 +22,8 @@ sections: # build extensions like libev CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: + - name: '3.25' + ref: a83c36a5 - name: '3.24' ref: 21cac12b - name: '3.23' From 3ad2d1ecc1704aeef27214151111918e055ca13c Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Tue, 14 Sep 2021 23:43:29 -0500 Subject: [PATCH 1864/2005] Fixes to the Travis build. (#1111) These fixes were originally implemented by user tbbharaj in https://github.com/datastax/python-driver/pull/1108. Extracting them into their own PR since 1108 is still being worked and I'd very much like to benefit from this work across _all_ PRs against python-driver. Major thanks to tbbharaj for the original work here. --- .travis.yml | 1 + tox.ini | 1 + 2 files changed, 2 insertions(+) diff --git a/.travis.yml b/.travis.yml index b485e21227..7e1e374822 100644 --- a/.travis.yml +++ b/.travis.yml @@ -24,6 +24,7 @@ addons: - libev-dev install: + - pip install --upgrade setuptools - pip install tox-travis - if [[ $TRAVIS_PYTHON_VERSION != pypy3.5 ]]; then pip install lz4; fi diff --git a/tox.ini b/tox.ini index d883a1f973..6d94e11247 100644 --- a/tox.ini +++ b/tox.ini @@ -12,6 +12,7 @@ deps = nose pure-sasl kerberos futurist + greenlet>=0.4.14,<0.4.17 lz4_dependency = py27,py35,py36,py37,py38: lz4 [testenv] From a51ed116471a63a65c63db6356a3ade9efdd1b85 Mon Sep 17 00:00:00 2001 From: Piotr Sarna Date: Wed, 15 Sep 2021 07:06:54 +0200 Subject: [PATCH 1865/2005] Merge pull request #1103 from psarna/fix_deprecation_in_tracing Tracing code uses a deprecated mechanism for fetching the first row when populating traces. The behavior is now fixed. --- cassandra/query.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/query.py b/cassandra/query.py index 0e7a41dc2d..f7a5b8fdf5 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -996,7 +996,8 @@ def populate(self, max_wait=2.0, wait_for_complete=True, query_cl=None): SimpleStatement(self._SELECT_SESSIONS_FORMAT, consistency_level=query_cl), (self.trace_id,), time_spent, max_wait) # PYTHON-730: There is race condition that the duration mutation is written before started_at the for fast queries - is_complete = session_results and session_results[0].duration is not None and session_results[0].started_at is not None + session_row = session_results.one() if session_results else None + is_complete = session_row is not None and session_row.duration is not None and session_row.started_at is not None if not session_results or (wait_for_complete and not is_complete): time.sleep(self._BASE_RETRY_SLEEP * (2 ** attempt)) attempt += 1 @@ -1006,7 +1007,6 @@ def populate(self, max_wait=2.0, wait_for_complete=True, query_cl=None): else: log.debug("Fetching parital trace info for trace ID: %s", self.trace_id) - session_row = session_results[0] self.request_type = session_row.request self.duration = timedelta(microseconds=session_row.duration) if is_complete else None self.started_at = session_row.started_at From 1d9077d3f4c937929acc14f45c7693e76dde39a9 Mon Sep 17 00:00:00 2001 From: Ultrabug Date: Fri, 17 Sep 2021 19:40:42 +0200 Subject: [PATCH 1866/2005] Merge pull request #1103 from numberly/fix_empty_paging This commit provides a fix to the situation when iterating on a ResultSet, the driver aborts the iteration if the server returns an empty page even if there are next pages available. Python driver is affected by the same problem as JAVA-2934 This fix is similar to https://github.com/datastax/java-driver/pull/1544 --- cassandra/cluster.py | 1 + tests/unit/test_resultset.py | 13 +++++++++++++ 2 files changed, 14 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 7e101afba8..c2d2e719ac 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -5141,6 +5141,7 @@ def next(self): if not self.response_future._continuous_paging_session: self.fetch_next_page() self._page_iter = iter(self._current_rows) + return self.next() return next(self._page_iter) diff --git a/tests/unit/test_resultset.py b/tests/unit/test_resultset.py index 1af3e849b6..b37c3a2594 100644 --- a/tests/unit/test_resultset.py +++ b/tests/unit/test_resultset.py @@ -41,6 +41,19 @@ def test_iter_paged(self): type(response_future).has_more_pages = PropertyMock(side_effect=(True, True, False)) # after init to avoid side effects being consumed by init self.assertListEqual(list(itr), expected) + def test_iter_paged_with_empty_pages(self): + expected = list(range(10)) + response_future = Mock(has_more_pages=True, _continuous_paging_session=None) + response_future.result.side_effect = [ + ResultSet(Mock(), []), + ResultSet(Mock(), [0, 1, 2, 3, 4]), + ResultSet(Mock(), []), + ResultSet(Mock(), [5, 6, 7, 8, 9]), + ] + rs = ResultSet(response_future, []) + itr = iter(rs) + self.assertListEqual(list(itr), expected) + def test_list_non_paged(self): # list access on RS for backwards-compatibility expected = list(range(10)) From 12a8adce943fe37a05ad6580e8bd302b65c2d93a Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Fri, 17 Sep 2021 12:47:48 -0500 Subject: [PATCH 1867/2005] Comment update following off of https://github.com/datastax/python-driver/pull/1110 --- cassandra/cluster.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index c2d2e719ac..dc850ae809 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -5141,6 +5141,11 @@ def next(self): if not self.response_future._continuous_paging_session: self.fetch_next_page() self._page_iter = iter(self._current_rows) + + # Some servers can return empty pages in this case; Scylla is known to do + # so in some circumstances. Guard against this by recursing to handle + # the next(iter) call. If we have an empty page in that case it will + # get handled by the StopIteration handler when we recurse. return self.next() return next(self._page_iter) From 175942852bcfc97bac823834a0b170b0faa4adb0 Mon Sep 17 00:00:00 2001 From: Oren Efraimov Date: Tue, 23 Nov 2021 19:10:50 +0200 Subject: [PATCH 1868/2005] Merge pull request #1116 from Orenef11/fix_default_argument_value Removing Python mutable defaults from methods in tests/integration/__init__.py Co-authored-by: Efraimov Oren --- tests/integration/__init__.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 9d350af707..70ec11c213 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -383,15 +383,15 @@ def get_node(node_id): return CCM_CLUSTER.nodes['node%s' % node_id] -def use_multidc(dc_list, workloads=[]): +def use_multidc(dc_list, workloads=None): use_cluster(MULTIDC_CLUSTER_NAME, dc_list, start=True, workloads=workloads) -def use_singledc(start=True, workloads=[], use_single_interface=USE_SINGLE_INTERFACE): +def use_singledc(start=True, workloads=None, use_single_interface=USE_SINGLE_INTERFACE): use_cluster(CLUSTER_NAME, [3], start=start, workloads=workloads, use_single_interface=use_single_interface) -def use_single_node(start=True, workloads=[], configuration_options={}, dse_options={}): +def use_single_node(start=True, workloads=None, configuration_options=None, dse_options=None): use_cluster(SINGLE_NODE_CLUSTER_NAME, [1], start=start, workloads=workloads, configuration_options=configuration_options, dse_options=dse_options) @@ -453,10 +453,11 @@ def start_cluster_wait_for_up(cluster): def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, set_keyspace=True, ccm_options=None, - configuration_options={}, dse_options={}, use_single_interface=USE_SINGLE_INTERFACE): + configuration_options=None, dse_options=None, use_single_interface=USE_SINGLE_INTERFACE): + configuration_options = configuration_options or {} + dse_options = dse_options or {} + workloads = workloads or [] dse_cluster = True if DSE_VERSION else False - if not workloads: - workloads = [] if ccm_options is None and DSE_VERSION: ccm_options = {"version": CCM_VERSION} From 387150acc365b6cf1daaee58c62db13e4929099a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Jastrz=C4=99bski?= Date: Tue, 23 Nov 2021 18:18:33 +0100 Subject: [PATCH 1869/2005] Merge pull request #1114 from haaawk/stream_ids_fix Stop reusing stream ids of requests that have timed out due to client-side timeout (#1114) * ResponseFuture: do not return the stream ID on client timeout When a timeout occurs, the ResponseFuture associated with the query returns its stream ID to the associated connection's free stream ID pool - so that the stream ID can be immediately reused by another query. However, that it incorrect and dangerous. If query A times out before it receives a response from the cluster, a different query B might be issued on the same connection and stream. If response for query A arrives earlier than the response for query B, the first one might be misinterpreted as the response for query B. This commit changes the logic so that stream IDs are not returned on timeout - now, they are only returned after receiving a response. * Connection: fix tracking of in_flight requests This commit fixes tracking of in_flight requests. Before it, in case of a client-side timeout, the response ID was not returned to the pool, but the in_flight counter was decremented anyway. This counter is used to determine if there is a need to wait for stream IDs to be freed - without this patch, it could happen that the driver throught that it can initiate another request due to in_flight counter being low, but there weren't any free stream IDs to allocate, so an assertion was triggered and the connection was defuncted and opened again. Now, requests timed out on the client side are tracked in the orphaned_request_ids field, and the in_flight counter is decremented only after the response is received. * Connection: notify owning pool about released orphaned streams Before this patch, the following situation could occur: 1. On a single connection, multiple requests are spawned up to the maximum concurrency, 2. We want to issue more requests but we need to wait on a condition variable because requests spawned in 1. took all stream IDs and we need to wait until some of them are freed, 3. All requests from point 1. time out on the client side - we cannot free their stream IDs until the database node responds, 4. Responses for requests issued in point 1. arrive, but the Connection class has no access to the condition variable mentioned in point 2., so no requests from point 2. are admitted, 5. Requests from point 2. waiting on the condition variable time out despite there are stream IDs available. This commit adds an _on_orphaned_stream_released field to the Connection class, and now it notifies the owning pool in case a timed out request receives a late response and a stream ID is freed by calling _on_orphaned_stream_released callback. * HostConnection: implement replacing overloaded connections In a situation of very high overload or poor networking conditions, it might happen that there is a large number of outstanding requests on a single connection. Each request reserves a stream ID which cannot be reused until a response for it arrives, even if the request already timed out on the client side. Because the pool of available stream IDs for a single connection is limited, such situation might cause the set of free stream IDs to shrink to a very small size (including zero), which will drastically reduce the available concurrency on the connection, or even render it unusable for some time. In order to prevent this, the following strategy is adopted: when the number of orphaned stream IDs reaches a certain threshold (e.g. 75% of all available stream IDs), the connection becomes marked as overloaded. Meanwhile, a new connection is opened - when it becomes available, it replaces the old one, and the old connection is moved to "trash" where it waits until all its outstanding requests either respond or time out. This feature is implemented for HostConnection but not for HostConnectionPool, which means that it will only work for clusters which use protocol v3 or newer. This fix is heavily inspired by the fix for JAVA-1519. Co-authored-by: Piotr Dulikowski --- cassandra/cluster.py | 11 ++- cassandra/connection.py | 32 +++++++- cassandra/pool.py | 97 +++++++++++++++++++----- tests/unit/.noseids | Bin 0 -> 30098 bytes tests/unit/test_host_connection_pool.py | 20 ++--- tests/unit/test_response_future.py | 28 +++++++ 6 files changed, 158 insertions(+), 30 deletions(-) create mode 100644 tests/unit/.noseids diff --git a/cassandra/cluster.py b/cassandra/cluster.py index dc850ae809..cf78725f17 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -4361,10 +4361,17 @@ def _on_timeout(self, _attempts=0): pool = self.session._pools.get(self._current_host) if pool and not pool.is_shutdown: + # Do not return the stream ID to the pool yet. We cannot reuse it + # because the node might still be processing the query and will + # return a late response to that query - if we used such stream + # before the response to the previous query has arrived, the new + # query could get a response from the old query with self._connection.lock: - self._connection.request_ids.append(self._req_id) + self._connection.orphaned_request_ids.add(self._req_id) + if len(self._connection.orphaned_request_ids) >= self._connection.orphaned_threshold: + self._connection.orphaned_threshold_reached = True - pool.return_connection(self._connection) + pool.return_connection(self._connection, stream_was_orphaned=True) errors = self._errors if not errors: diff --git a/cassandra/connection.py b/cassandra/connection.py index 0d8a50e76f..0869584663 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -690,6 +690,7 @@ class Connection(object): # The current number of operations that are in flight. More precisely, # the number of request IDs that are currently in use. + # This includes orphaned requests. in_flight = 0 # Max concurrent requests allowed per connection. This is set optimistically high, allowing @@ -707,6 +708,20 @@ class Connection(object): # request_ids set highest_request_id = 0 + # Tracks the request IDs which are no longer waited on (timed out), but + # cannot be reused yet because the node might still send a response + # on this stream + orphaned_request_ids = None + + # Set to true if the orphaned stream ID count cross configured threshold + # and the connection will be replaced + orphaned_threshold_reached = False + + # If the number of orphaned streams reaches this threshold, this connection + # will become marked and will be replaced with a new connection by the + # owning pool (currently, only HostConnection supports this) + orphaned_threshold = 3 * max_in_flight // 4 + is_defunct = False is_closed = False lock = None @@ -733,6 +748,8 @@ class Connection(object): _is_checksumming_enabled = False + _on_orphaned_stream_released = None + @property def _iobuf(self): # backward compatibility, to avoid any change in the reactors @@ -742,7 +759,7 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, ssl_options=None, sockopts=None, compression=True, cql_version=None, protocol_version=ProtocolVersion.MAX_SUPPORTED, is_control_connection=False, user_type_map=None, connect_timeout=None, allow_beta_protocol_version=False, no_compact=False, - ssl_context=None): + ssl_context=None, on_orphaned_stream_released=None): # TODO next major rename host to endpoint and remove port kwarg. self.endpoint = host if isinstance(host, EndPoint) else DefaultEndPoint(host, port) @@ -764,6 +781,8 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self._io_buffer = _ConnectionIOBuffer(self) self._continuous_paging_sessions = {} self._socket_writable = True + self.orphaned_request_ids = set() + self._on_orphaned_stream_released = on_orphaned_stream_released if ssl_options: self._check_hostname = bool(self.ssl_options.pop('check_hostname', False)) @@ -1188,11 +1207,22 @@ def process_msg(self, header, body): decoder = paging_session.decoder result_metadata = None else: + need_notify_of_release = False + with self.lock: + if stream_id in self.orphaned_request_ids: + self.in_flight -= 1 + self.orphaned_request_ids.remove(stream_id) + need_notify_of_release = True + if need_notify_of_release and self._on_orphaned_stream_released: + self._on_orphaned_stream_released() + try: callback, decoder, result_metadata = self._requests.pop(stream_id) # This can only happen if the stream_id was # removed due to an OperationTimedOut except KeyError: + with self.lock: + self.request_ids.append(stream_id) return try: diff --git a/cassandra/pool.py b/cassandra/pool.py index cd27656046..c82dfe9a6b 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -390,6 +390,10 @@ def __init__(self, host, host_distance, session): # this is used in conjunction with the connection streams. Not using the connection lock because the connection can be replaced in the lifetime of the pool. self._stream_available_condition = Condition(self._lock) self._is_replacing = False + # Contains connections which shouldn't be used anymore + # and are waiting until all requests time out or complete + # so that we can dispose of them. + self._trash = set() if host_distance == HostDistance.IGNORED: log.debug("Not opening connection to ignored host %s", self.host) @@ -399,13 +403,13 @@ def __init__(self, host, host_distance, session): return log.debug("Initializing connection for host %s", self.host) - self._connection = session.cluster.connection_factory(host.endpoint) + self._connection = session.cluster.connection_factory(host.endpoint, on_orphaned_stream_released=self.on_orphaned_stream_released) self._keyspace = session.keyspace if self._keyspace: self._connection.set_keyspace_blocking(self._keyspace) log.debug("Finished initializing connection for host %s", self.host) - def borrow_connection(self, timeout): + def _get_connection(self): if self.is_shutdown: raise ConnectionException( "Pool for %s is shutdown" % (self.host,), self.host) @@ -413,12 +417,25 @@ def borrow_connection(self, timeout): conn = self._connection if not conn: raise NoConnectionsAvailable() + return conn + + def borrow_connection(self, timeout): + conn = self._get_connection() + if conn.orphaned_threshold_reached: + with self._lock: + if not self._is_replacing: + self._is_replacing = True + self._session.submit(self._replace, conn) + log.debug( + "Connection to host %s reached orphaned stream limit, replacing...", + self.host + ) start = time.time() remaining = timeout while True: with conn.lock: - if conn.in_flight < conn.max_request_id: + if not (conn.orphaned_threshold_reached and conn.is_closed) and conn.in_flight < conn.max_request_id: conn.in_flight += 1 return conn, conn.get_request_id() if timeout is not None: @@ -426,15 +443,19 @@ def borrow_connection(self, timeout): if remaining < 0: break with self._stream_available_condition: - self._stream_available_condition.wait(remaining) + if conn.orphaned_threshold_reached and conn.is_closed: + conn = self._get_connection() + else: + self._stream_available_condition.wait(remaining) raise NoConnectionsAvailable("All request IDs are currently in use") - def return_connection(self, connection): - with connection.lock: - connection.in_flight -= 1 - with self._stream_available_condition: - self._stream_available_condition.notify() + def return_connection(self, connection, stream_was_orphaned=False): + if not stream_was_orphaned: + with connection.lock: + connection.in_flight -= 1 + with self._stream_available_condition: + self._stream_available_condition.notify() if connection.is_defunct or connection.is_closed: if connection.signaled_error and not self.shutdown_on_error: @@ -461,6 +482,24 @@ def return_connection(self, connection): return self._is_replacing = True self._session.submit(self._replace, connection) + else: + if connection in self._trash: + with connection.lock: + if connection.in_flight == len(connection.orphaned_request_ids): + with self._lock: + if connection in self._trash: + self._trash.remove(connection) + log.debug("Closing trashed connection (%s) to %s", id(connection), self.host) + connection.close() + return + + def on_orphaned_stream_released(self): + """ + Called when a response for an orphaned stream (timed out on the client + side) was received. + """ + with self._stream_available_condition: + self._stream_available_condition.notify() def _replace(self, connection): with self._lock: @@ -469,7 +508,7 @@ def _replace(self, connection): log.debug("Replacing connection (%s) to %s", id(connection), self.host) try: - conn = self._session.cluster.connection_factory(self.host.endpoint) + conn = self._session.cluster.connection_factory(self.host.endpoint, on_orphaned_stream_released=self.on_orphaned_stream_released) if self._keyspace: conn.set_keyspace_blocking(self._keyspace) self._connection = conn @@ -477,9 +516,15 @@ def _replace(self, connection): log.warning("Failed reconnecting %s. Retrying." % (self.host.endpoint,)) self._session.submit(self._replace, connection) else: - with self._lock: - self._is_replacing = False - self._stream_available_condition.notify() + with connection.lock: + with self._lock: + if connection.orphaned_threshold_reached: + if connection.in_flight == len(connection.orphaned_request_ids): + connection.close() + else: + self._trash.add(connection) + self._is_replacing = False + self._stream_available_condition.notify() def shutdown(self): with self._lock: @@ -493,6 +538,16 @@ def shutdown(self): self._connection.close() self._connection = None + trash_conns = None + with self._lock: + if self._trash: + trash_conns = self._trash + self._trash = set() + + if trash_conns is not None: + for conn in self._trash: + conn.close() + def _set_keyspace_for_all_conns(self, keyspace, callback): if self.is_shutdown or not self._connection: return @@ -548,7 +603,7 @@ def __init__(self, host, host_distance, session): log.debug("Initializing new connection pool for host %s", self.host) core_conns = session.cluster.get_core_connections_per_host(host_distance) - self._connections = [session.cluster.connection_factory(host.endpoint) + self._connections = [session.cluster.connection_factory(host.endpoint, on_orphaned_stream_released=self.on_orphaned_stream_released) for i in range(core_conns)] self._keyspace = session.keyspace @@ -652,7 +707,7 @@ def _add_conn_if_under_max(self): log.debug("Going to open new connection to host %s", self.host) try: - conn = self._session.cluster.connection_factory(self.host.endpoint) + conn = self._session.cluster.connection_factory(self.host.endpoint, on_orphaned_stream_released=self.on_orphaned_stream_released) if self._keyspace: conn.set_keyspace_blocking(self._session.keyspace) self._next_trash_allowed_at = time.time() + _MIN_TRASH_INTERVAL @@ -712,9 +767,10 @@ def _wait_for_conn(self, timeout): raise NoConnectionsAvailable() - def return_connection(self, connection): + def return_connection(self, connection, stream_was_orphaned=False): with connection.lock: - connection.in_flight -= 1 + if not stream_was_orphaned: + connection.in_flight -= 1 in_flight = connection.in_flight if connection.is_defunct or connection.is_closed: @@ -750,6 +806,13 @@ def return_connection(self, connection): else: self._signal_available_conn() + def on_orphaned_stream_released(self): + """ + Called when a response for an orphaned stream (timed out on the client + side) was received. + """ + self._signal_available_conn() + def _maybe_trash_connection(self, connection): core_conns = self._session.cluster.get_core_connections_per_host(self.host_distance) did_trash = False diff --git a/tests/unit/.noseids b/tests/unit/.noseids new file mode 100644 index 0000000000000000000000000000000000000000..1c956146fc04cc0b92d287530f5167fb38737082 GIT binary patch literal 30098 zcmcg#cX%At5r;rX140PI*noSJkR=PtHrO;7Ng z{r3iE(SLU~wua}JMXv#09638YJXUrpVyvv|dTlZKU$yR+9eZ2RHJ5}t<_pjF#sb^) z$MoWoZX2RFW>`*8q)#;ZaQ>r}MUW)VaP-Wn77W_1cut9MTBa%)#deXJ_}#9F|%rJD?(0 zc#akLrehN@kLC5YE>Ca;e1jd1p$t5?g@(GQxDKL?ph&L#X< z^PIpngl6crRuGyee642sWletA`BPzH=kZUhd~tY3KWt48#3DT5E{P;v-PWzTZyKJK zO8jx*fn{ijWFutvD@pm65uRiPGY!2z)fyJ+fU9*<`uQ(hUaXoZ6H z;McX&37kL|wE>+(geP($T-=IK*1a-fJc$!ybt^^%TxeOhw5p~1^Nw3Vyz@En z+UDenDmkkQI5945os;LgU;sW&G`;)X|xL=*poTIRCC84jE)FM`!tzk5Pvv1%-W#q?hb;(aqWu*W$xahlwvYQh+Iu8>?31^@~ z991mQ9IrS7SXJGpt^P_jtTmbQf}@!)PN5W6v!Tgl*CEAdw_n2sB>h?n5bCeF0@j`( zJQI#zD9CkecTH-jh1G?cmZauNqO0-F+ zDY{Ngo0mh7z(zI#BiRI;!h$g14BHC5i49u%m{}j3W<}UZHxunko7u4B0>QIaLc-|E zGz^QQ1-a$X#Y1=2g140oZv&~ZZGO_FwIB{HD{?g<6fBh?GQ{*$_JKDV)(FQ@+LM$F z+UHG~hRQQcO=>y>0Rn6*u#50#!Gws{u}q_m);-KdvV3=VbvgwMYvG`RDH{0F21AFi zH^Jd5P7Y&TICxcIP=X>sV1$#vmUIHCIgxFgz&o>mtBwt8-h?xDh5|-8Y3xp?Azf^j z9MKpjk!^WIQq{4Y6MjP;eBuN&&I!6T9h4HJkjM0en=t#UIg!_=BfFTiOJMHc#N3>Y znYP3yIB_$i>^qCXR^B%Dusb=CGdygq3~`_Zse=qJzl)Q@#&oIIAcm^8*w=1O)OCn@ zP^cYF=Fm+W!cnuR$><5UvuijZX`*eAUSJr)^X3C|rhed3Hu>6Rp+?oCE<1fRtP$>^ z0OXh|I?XwgCyt_UR1%3ItqR2IX4Qhqf)zkQ$u;egwkYb93E0ENw)}8-s1GiXru0qP z`*ny!Qkd=KWG3@Ly-6u%776J(PAU`Kg=9Nv0=b@(#o_KOJg^fmnig?royx+Mb^|B1 zow?LdCsiF%b|9n@oz#R~f^;;}xRH~_?%rr5GD0ztn>dN&8;q|PKuI7Bb?9*1W~OK# zCkuIA&00Z{1<4}HQ|;$OAI()p0zJ+qO`bV?C3>2^r56O{1gDmTPa%T|1$OCWQ7H-h!y(z94Q^|v+=2^ zHmTsC=#yrXjZe)XQnN6m5$?WaDpCH&%q=8!%+or>)Lkgf^qK!OsF6vnEU<8sX+!aAfO*eX}BD2O@q2n{S2s5O3+zyPA%)H{Gj4JmY}fV4zPaLt8$5?a(V`6w=2iV_+p z31v_adP#@s1P~|$^YfrB&pb^up2baLB9{g_5NPW4JROQwzy=K3^#d~LXLB>zmCK~& z1XhtWlOmPpaC6y{$E9MP62+!eFe#8w5(QJw<>oV%$A^*>P}3;G?gFNQn+#g)?c5}` z_d+5vmWah2+$^^C!oq_FSCoOqjXs=WF(VGMreAH6@FV+`1YXGWyRmr6+a3`JyzT;*|;5nsLzU=E%El?55q4E z!~<6RY%kP~-wZyNSP5i%fc&tt#>!$M(*U{zoTpf6$c`K$Wys<{CjojZO@o#uOYIoQ zrd zV!!*0VM8ylFqbgli%%CI_C3tlBMH-^(@eKh-iw(DP@kp}e1bswS>`3-wSApseDEq- z^UZ=w@UJTW-u$EyPN4{9EXZoliBtYQc;Ns$u?-!|8Z=Dcg&Hgw8E^+0OWo>9J-kF= zK-Wf7H!lsZ>z|#f^ilz3MLR+DVIv%wls$q-Rg#MOD#1DXP=bN>UA&Sj3tnI+FIy&; zg@@2BZ|i_{#mNAr2+wF}^A_HI(EKu7$|*Y*?Oyk?;0|@*LTVqpAlyC2-6)_`k z*RgOr(i1l*iL??>Dgv&`Tq>~Bpu}F!!s^&^SvA!e@p=Obubc%N*)Ez+e8siF0(*V7 zIN%8#yp;voX?^ygtxMs479M+YctrO?<$mC44z#zr*uxU9<2SO9*gFUbr5&`;2Uu9- ztm(D|jV`sS(s!dSex>uxLrYg@xAmj5f{FlLN^(TLv4g1u=LaxBHb64YuGMe~@`fcI4NI`$jE zmVkI~W5Ju4l^v3Cm>xxPnQlZ-%gWnX;IHW)K6V}thBppydU0$ca9xln9bi_W`jRIs zuzU46G^I^Oe=+D@zf=btaRagoXoIgnM-!bQ-m!cx3;&e|m~>2hCjnfj3tcG~6TrCw zmIy(=lM8xx#&px6wUV%fi)T)>#&>a%xM@HVnrAt+s44#4Tx@pr$0o_)JzN~N^~S-4 z`Yt>hiOP5{7j%ynp`cqW;vp^)Jz7Kui581^9~YY*Eh1u*w21c)$U*hnXZkDkIn?;U zQq*4#31}`X2%R3~kbVn}j2`sbfqxHdgSM;>ET7QA1&_f>b}q1dy3lBHqpZLOxv+fFpc`{RF$D?t32>Sp;X>S=ix}AA6g+(a zn;=ZrgC`-R1NtZz`qmgdj!+6vSZD@f==4K-ta3pMSL>QF3Cr{KFNi;Ax*2c<7+0CXz(c^8B{&Y z1(@&=u|h$h*bQ!-o`AD*i+Df9g^T-V$89NHAiBuiq zHKc?^8QLI$5w&^mI=UKwll?3gY+?(HH67SqP^~)fJRHdUq=h$}qm}eI zE?|t(I#i7>TBoWT=lppt=!q%$X!f*SoWYt(Jn`r(7db3+oA8>35BGtQ!56p~3}-TE zB107hF3I@xJk%GtaEH3%g4cAcC0bEm;(}dGWxrU=s`Ter@K}o4i@w}vYpD;%P+X9{ z>rm=WnG(f($?n&TagA^c><_;=%?wo~+=J9YMaP;T6a-QKU*RLUa17(XUP#D0)G>*# za+4VEE(za(`|eJuo{+`YxM`p#%2EjIJXnF$)b>ut_^)#l*wF(4y2DRzanLhEA~vhY_5hd!kq<|)3>;39%i8#(fc+xz5RpGgM$$@ zvf_0`L8+tfaPzux5MHjRKz*WVa^K}9g_ps*xr6rANViMXf$n?Ugk}dJ1OW~Gng$CV z2$5$#N=usaS_uve^dQCexrrVagecW8nhR7wz=tdJ2i)B9_L|7at8F)NIR7Cxjfor) z)pXORHWbA}FS&xwSSXMmaWmO72ow1{1M>MXHy?LEJ{CM;QP7P=thz)0kXy&3JD9l? zq6hUKD9Kc?BNl&aNy~D;dHjTX9(g`3Q^T5DH8jg{7UeDDpK_DBejwp`i)OW|idaA6 zrZhJYr5@bc&$*dyp}S&mk^oV}M+v&5)luvFMR-I1m9FUMCEs6Ii6@6-p{w~Z)VXGD zbGqa37!A;pN4H-to5{j`cv9Gu51BZ%{fY^3Bo)!K&1O2U?R5OeK)_M&9i7}dg&utj zZR}vFQG-U@$OlT2*?P5I;F&NQg^uOd%OR1cCpN>TmN9D6+7sE|@D}@5WKl1a~nae}k1h#KtEv{89f8r;zs~0kmCB%wg8=F7#vzZ)>4c_v{ z%6{{3_7{F?qq)@Jp-_OYL7P4NDh>$y=(mjgm7m4ld=~Kyu#AQMH-1WO3p?R;Q$5|^ z`5EkJ6-t6ZzL)z4Kb6gyRLboh?w|bVBl+l^-tAxf9L6&_ Date: Tue, 23 Nov 2021 11:12:58 -0600 Subject: [PATCH 1870/2005] Removing file unexpectedly included in previous PR --- tests/unit/.noseids | Bin 30098 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 tests/unit/.noseids diff --git a/tests/unit/.noseids b/tests/unit/.noseids deleted file mode 100644 index 1c956146fc04cc0b92d287530f5167fb38737082..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 30098 zcmcg#cX%At5r;rX140PI*noSJkR=PtHrO;7Ng z{r3iE(SLU~wua}JMXv#09638YJXUrpVyvv|dTlZKU$yR+9eZ2RHJ5}t<_pjF#sb^) z$MoWoZX2RFW>`*8q)#;ZaQ>r}MUW)VaP-Wn77W_1cut9MTBa%)#deXJ_}#9F|%rJD?(0 zc#akLrehN@kLC5YE>Ca;e1jd1p$t5?g@(GQxDKL?ph&L#X< z^PIpngl6crRuGyee642sWletA`BPzH=kZUhd~tY3KWt48#3DT5E{P;v-PWzTZyKJK zO8jx*fn{ijWFutvD@pm65uRiPGY!2z)fyJ+fU9*<`uQ(hUaXoZ6H z;McX&37kL|wE>+(geP($T-=IK*1a-fJc$!ybt^^%TxeOhw5p~1^Nw3Vyz@En z+UDenDmkkQI5945os;LgU;sW&G`;)X|xL=*poTIRCC84jE)FM`!tzk5Pvv1%-W#q?hb;(aqWu*W$xahlwvYQh+Iu8>?31^@~ z991mQ9IrS7SXJGpt^P_jtTmbQf}@!)PN5W6v!Tgl*CEAdw_n2sB>h?n5bCeF0@j`( zJQI#zD9CkecTH-jh1G?cmZauNqO0-F+ zDY{Ngo0mh7z(zI#BiRI;!h$g14BHC5i49u%m{}j3W<}UZHxunko7u4B0>QIaLc-|E zGz^QQ1-a$X#Y1=2g140oZv&~ZZGO_FwIB{HD{?g<6fBh?GQ{*$_JKDV)(FQ@+LM$F z+UHG~hRQQcO=>y>0Rn6*u#50#!Gws{u}q_m);-KdvV3=VbvgwMYvG`RDH{0F21AFi zH^Jd5P7Y&TICxcIP=X>sV1$#vmUIHCIgxFgz&o>mtBwt8-h?xDh5|-8Y3xp?Azf^j z9MKpjk!^WIQq{4Y6MjP;eBuN&&I!6T9h4HJkjM0en=t#UIg!_=BfFTiOJMHc#N3>Y znYP3yIB_$i>^qCXR^B%Dusb=CGdygq3~`_Zse=qJzl)Q@#&oIIAcm^8*w=1O)OCn@ zP^cYF=Fm+W!cnuR$><5UvuijZX`*eAUSJr)^X3C|rhed3Hu>6Rp+?oCE<1fRtP$>^ z0OXh|I?XwgCyt_UR1%3ItqR2IX4Qhqf)zkQ$u;egwkYb93E0ENw)}8-s1GiXru0qP z`*ny!Qkd=KWG3@Ly-6u%776J(PAU`Kg=9Nv0=b@(#o_KOJg^fmnig?royx+Mb^|B1 zow?LdCsiF%b|9n@oz#R~f^;;}xRH~_?%rr5GD0ztn>dN&8;q|PKuI7Bb?9*1W~OK# zCkuIA&00Z{1<4}HQ|;$OAI()p0zJ+qO`bV?C3>2^r56O{1gDmTPa%T|1$OCWQ7H-h!y(z94Q^|v+=2^ zHmTsC=#yrXjZe)XQnN6m5$?WaDpCH&%q=8!%+or>)Lkgf^qK!OsF6vnEU<8sX+!aAfO*eX}BD2O@q2n{S2s5O3+zyPA%)H{Gj4JmY}fV4zPaLt8$5?a(V`6w=2iV_+p z31v_adP#@s1P~|$^YfrB&pb^up2baLB9{g_5NPW4JROQwzy=K3^#d~LXLB>zmCK~& z1XhtWlOmPpaC6y{$E9MP62+!eFe#8w5(QJw<>oV%$A^*>P}3;G?gFNQn+#g)?c5}` z_d+5vmWah2+$^^C!oq_FSCoOqjXs=WF(VGMreAH6@FV+`1YXGWyRmr6+a3`JyzT;*|;5nsLzU=E%El?55q4E z!~<6RY%kP~-wZyNSP5i%fc&tt#>!$M(*U{zoTpf6$c`K$Wys<{CjojZO@o#uOYIoQ zrd zV!!*0VM8ylFqbgli%%CI_C3tlBMH-^(@eKh-iw(DP@kp}e1bswS>`3-wSApseDEq- z^UZ=w@UJTW-u$EyPN4{9EXZoliBtYQc;Ns$u?-!|8Z=Dcg&Hgw8E^+0OWo>9J-kF= zK-Wf7H!lsZ>z|#f^ilz3MLR+DVIv%wls$q-Rg#MOD#1DXP=bN>UA&Sj3tnI+FIy&; zg@@2BZ|i_{#mNAr2+wF}^A_HI(EKu7$|*Y*?Oyk?;0|@*LTVqpAlyC2-6)_`k z*RgOr(i1l*iL??>Dgv&`Tq>~Bpu}F!!s^&^SvA!e@p=Obubc%N*)Ez+e8siF0(*V7 zIN%8#yp;voX?^ygtxMs479M+YctrO?<$mC44z#zr*uxU9<2SO9*gFUbr5&`;2Uu9- ztm(D|jV`sS(s!dSex>uxLrYg@xAmj5f{FlLN^(TLv4g1u=LaxBHb64YuGMe~@`fcI4NI`$jE zmVkI~W5Ju4l^v3Cm>xxPnQlZ-%gWnX;IHW)K6V}thBppydU0$ca9xln9bi_W`jRIs zuzU46G^I^Oe=+D@zf=btaRagoXoIgnM-!bQ-m!cx3;&e|m~>2hCjnfj3tcG~6TrCw zmIy(=lM8xx#&px6wUV%fi)T)>#&>a%xM@HVnrAt+s44#4Tx@pr$0o_)JzN~N^~S-4 z`Yt>hiOP5{7j%ynp`cqW;vp^)Jz7Kui581^9~YY*Eh1u*w21c)$U*hnXZkDkIn?;U zQq*4#31}`X2%R3~kbVn}j2`sbfqxHdgSM;>ET7QA1&_f>b}q1dy3lBHqpZLOxv+fFpc`{RF$D?t32>Sp;X>S=ix}AA6g+(a zn;=ZrgC`-R1NtZz`qmgdj!+6vSZD@f==4K-ta3pMSL>QF3Cr{KFNi;Ax*2c<7+0CXz(c^8B{&Y z1(@&=u|h$h*bQ!-o`AD*i+Df9g^T-V$89NHAiBuiq zHKc?^8QLI$5w&^mI=UKwll?3gY+?(HH67SqP^~)fJRHdUq=h$}qm}eI zE?|t(I#i7>TBoWT=lppt=!q%$X!f*SoWYt(Jn`r(7db3+oA8>35BGtQ!56p~3}-TE zB107hF3I@xJk%GtaEH3%g4cAcC0bEm;(}dGWxrU=s`Ter@K}o4i@w}vYpD;%P+X9{ z>rm=WnG(f($?n&TagA^c><_;=%?wo~+=J9YMaP;T6a-QKU*RLUa17(XUP#D0)G>*# za+4VEE(za(`|eJuo{+`YxM`p#%2EjIJXnF$)b>ut_^)#l*wF(4y2DRzanLhEA~vhY_5hd!kq<|)3>;39%i8#(fc+xz5RpGgM$$@ zvf_0`L8+tfaPzux5MHjRKz*WVa^K}9g_ps*xr6rANViMXf$n?Ugk}dJ1OW~Gng$CV z2$5$#N=usaS_uve^dQCexrrVagecW8nhR7wz=tdJ2i)B9_L|7at8F)NIR7Cxjfor) z)pXORHWbA}FS&xwSSXMmaWmO72ow1{1M>MXHy?LEJ{CM;QP7P=thz)0kXy&3JD9l? zq6hUKD9Kc?BNl&aNy~D;dHjTX9(g`3Q^T5DH8jg{7UeDDpK_DBejwp`i)OW|idaA6 zrZhJYr5@bc&$*dyp}S&mk^oV}M+v&5)luvFMR-I1m9FUMCEs6Ii6@6-p{w~Z)VXGD zbGqa37!A;pN4H-to5{j`cv9Gu51BZ%{fY^3Bo)!K&1O2U?R5OeK)_M&9i7}dg&utj zZR}vFQG-U@$OlT2*?P5I;F&NQg^uOd%OR1cCpN>TmN9D6+7sE|@D}@5WKl1a~nae}k1h#KtEv{89f8r;zs~0kmCB%wg8=F7#vzZ)>4c_v{ z%6{{3_7{F?qq)@Jp-_OYL7P4NDh>$y=(mjgm7m4ld=~Kyu#AQMH-1WO3p?R;Q$5|^ z`5EkJ6-t6ZzL)z4Kb6gyRLboh?w|bVBl+l^-tAxf9L6&_ Date: Mon, 24 Jan 2022 14:42:08 -0600 Subject: [PATCH 1871/2005] Merge pull request #1117 from datastax/remove_unittest2 PYTHON-1289 Removing unittest2 from the dependency list --- test-requirements.txt | 1 - tests/__init__.py | 5 +---- tests/integration/__init__.py | 5 +---- tests/integration/advanced/__init__.py | 5 +---- tests/integration/advanced/graph/fluent/__init__.py | 5 +---- tests/integration/advanced/graph/fluent/test_graph.py | 5 +---- tests/integration/advanced/graph/test_graph_datatype.py | 5 +---- tests/integration/advanced/graph/test_graph_query.py | 5 +---- tests/integration/advanced/test_adv_metadata.py | 5 +---- tests/integration/advanced/test_auth.py | 5 +---- tests/integration/advanced/test_cont_paging.py | 5 +---- .../integration/advanced/test_cqlengine_where_operators.py | 5 +---- tests/integration/advanced/test_geometry.py | 5 +---- tests/integration/advanced/test_unixsocketendpoint.py | 5 +---- tests/integration/cloud/__init__.py | 5 +---- tests/integration/cloud/test_cloud.py | 5 +---- tests/integration/cqlengine/__init__.py | 5 +---- tests/integration/cqlengine/advanced/test_cont_paging.py | 5 +---- tests/integration/cqlengine/base.py | 5 +---- tests/integration/cqlengine/columns/test_static_column.py | 5 +---- tests/integration/cqlengine/columns/test_validation.py | 5 +---- tests/integration/cqlengine/columns/test_value_io.py | 5 +---- tests/integration/cqlengine/connections/test_connection.py | 5 +---- tests/integration/cqlengine/management/test_management.py | 5 +---- tests/integration/cqlengine/model/test_model.py | 5 +---- tests/integration/cqlengine/model/test_model_io.py | 5 +---- tests/integration/cqlengine/model/test_udts.py | 5 +---- .../cqlengine/operators/test_where_operators.py | 5 +---- tests/integration/cqlengine/query/test_named.py | 5 +---- tests/integration/cqlengine/query/test_queryset.py | 5 +---- .../cqlengine/statements/test_assignment_clauses.py | 5 +---- .../cqlengine/statements/test_base_statement.py | 5 +---- .../cqlengine/statements/test_insert_statement.py | 5 +---- .../cqlengine/statements/test_select_statement.py | 5 +---- .../cqlengine/statements/test_update_statement.py | 5 +---- .../integration/cqlengine/statements/test_where_clause.py | 5 +---- tests/integration/cqlengine/test_ifexists.py | 5 +---- tests/integration/cqlengine/test_ifnotexists.py | 5 +---- tests/integration/cqlengine/test_lwt_conditional.py | 5 +---- tests/integration/cqlengine/test_ttl.py | 5 +---- tests/integration/long/__init__.py | 5 +---- tests/integration/long/test_consistency.py | 5 +---- tests/integration/long/test_failure_types.py | 5 +---- tests/integration/long/test_ipv6.py | 5 +---- tests/integration/long/test_large_data.py | 5 +---- tests/integration/long/test_loadbalancingpolicies.py | 5 +---- tests/integration/long/test_policies.py | 5 +---- tests/integration/long/test_schema.py | 5 +---- tests/integration/long/test_ssl.py | 5 +---- tests/integration/simulacron/__init__.py | 5 +---- tests/integration/simulacron/advanced/test_insights.py | 5 +---- tests/integration/simulacron/test_cluster.py | 5 +---- tests/integration/simulacron/test_connection.py | 5 +---- tests/integration/simulacron/test_empty_column.py | 5 +---- tests/integration/simulacron/test_endpoint.py | 5 +---- tests/integration/simulacron/test_policies.py | 5 +---- tests/integration/standard/__init__.py | 5 +---- tests/integration/standard/test_authentication.py | 5 +---- tests/integration/standard/test_client_warnings.py | 5 +---- tests/integration/standard/test_cluster.py | 5 +---- tests/integration/standard/test_concurrent.py | 5 +---- tests/integration/standard/test_connection.py | 5 +---- tests/integration/standard/test_control_connection.py | 5 +---- tests/integration/standard/test_custom_cluster.py | 5 +---- tests/integration/standard/test_custom_payload.py | 5 +---- tests/integration/standard/test_custom_protocol_handler.py | 5 +---- .../integration/standard/test_cython_protocol_handlers.py | 5 +---- tests/integration/standard/test_dse.py | 5 +---- tests/integration/standard/test_metadata.py | 5 +---- tests/integration/standard/test_metrics.py | 5 +---- tests/integration/standard/test_policies.py | 5 +---- tests/integration/standard/test_prepared_statements.py | 5 +---- tests/integration/standard/test_query.py | 5 +---- tests/integration/standard/test_query_paging.py | 5 +---- tests/integration/standard/test_routing.py | 5 +---- tests/integration/standard/test_row_factories.py | 5 +---- tests/integration/standard/test_single_interface.py | 5 +---- tests/integration/standard/test_types.py | 5 +---- tests/integration/standard/test_udts.py | 5 +---- tests/integration/upgrade/__init__.py | 5 +---- tests/integration/upgrade/test_upgrade.py | 5 +---- tests/stress_tests/test_load.py | 5 +---- tests/stress_tests/test_multi_inserts.py | 5 +---- tests/unit/advanced/cloud/test_cloud.py | 5 +---- tests/unit/advanced/test_auth.py | 5 +---- tests/unit/advanced/test_execution_profile.py | 5 +---- tests/unit/advanced/test_geometry.py | 5 +---- tests/unit/advanced/test_graph.py | 5 +---- tests/unit/advanced/test_insights.py | 5 +---- tests/unit/advanced/test_metadata.py | 5 +---- tests/unit/advanced/test_policies.py | 5 +---- tests/unit/cqlengine/test_columns.py | 5 +---- tests/unit/cqlengine/test_connection.py | 5 +---- tests/unit/cqlengine/test_udt.py | 5 +---- tests/unit/cython/test_bytesio.py | 5 +---- tests/unit/cython/test_types.py | 5 +---- tests/unit/cython/test_utils.py | 7 ++----- tests/unit/cython/utils.py | 5 +---- tests/unit/io/test_asyncorereactor.py | 5 +---- tests/unit/io/test_eventletreactor.py | 5 +---- tests/unit/io/test_geventreactor.py | 5 +---- tests/unit/io/test_libevreactor.py | 5 +---- tests/unit/io/test_twistedreactor.py | 5 +---- tests/unit/io/utils.py | 5 +---- tests/unit/test_auth.py | 5 +---- tests/unit/test_cluster.py | 5 +---- tests/unit/test_concurrent.py | 5 +---- tests/unit/test_connection.py | 5 +---- tests/unit/test_control_connection.py | 5 +---- tests/unit/test_endpoints.py | 5 +---- tests/unit/test_exception.py | 5 +---- tests/unit/test_host_connection_pool.py | 5 +---- tests/unit/test_marshalling.py | 5 +---- tests/unit/test_metadata.py | 5 +---- tests/unit/test_orderedmap.py | 5 +---- tests/unit/test_parameter_binding.py | 5 +---- tests/unit/test_policies.py | 5 +---- tests/unit/test_protocol.py | 5 +---- tests/unit/test_query.py | 5 +---- tests/unit/test_response_future.py | 5 +---- tests/unit/test_resultset.py | 5 +---- tests/unit/test_row_factories.py | 5 +---- tests/unit/test_segment.py | 5 +---- tests/unit/test_sortedset.py | 5 +---- tests/unit/test_time_util.py | 5 +---- tests/unit/test_timestamps.py | 5 +---- tests/unit/test_types.py | 5 +---- tests/unit/test_util_types.py | 5 +---- 128 files changed, 128 insertions(+), 510 deletions(-) diff --git a/test-requirements.txt b/test-requirements.txt index 9e62bfdee8..996cf4341f 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -3,7 +3,6 @@ scales nose mock>1.1 ccm>=2.1.2 -unittest2 pytz sure pure-sasl diff --git a/tests/__init__.py b/tests/__init__.py index cea5a872c6..48c589c424 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import logging import sys import socket diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 70ec11c213..d3c3332649 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -18,10 +18,7 @@ from tests import connection_class, EVENT_LOOP_MANAGER Cluster.connection_class = connection_class -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from packaging.version import Version import logging diff --git a/tests/integration/advanced/__init__.py b/tests/integration/advanced/__init__.py index b2820e037b..e2fa1a4a4a 100644 --- a/tests/integration/advanced/__init__.py +++ b/tests/integration/advanced/__init__.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from six.moves.urllib.request import build_opener, Request, HTTPHandler import re diff --git a/tests/integration/advanced/graph/fluent/__init__.py b/tests/integration/advanced/graph/fluent/__init__.py index 3bb81e78e3..3962029f45 100644 --- a/tests/integration/advanced/graph/fluent/__init__.py +++ b/tests/integration/advanced/graph/fluent/__init__.py @@ -35,10 +35,7 @@ VertexLabel) from tests.integration import requiredse -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import ipaddress diff --git a/tests/integration/advanced/graph/fluent/test_graph.py b/tests/integration/advanced/graph/fluent/test_graph.py index 02611c12c0..d46a74a146 100644 --- a/tests/integration/advanced/graph/fluent/test_graph.py +++ b/tests/integration/advanced/graph/fluent/test_graph.py @@ -28,10 +28,7 @@ from tests.integration.advanced.graph.fluent import ( BaseExplicitExecutionTest, create_traversal_profiles, check_equality_base) -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest @greaterthanorequaldse60 diff --git a/tests/integration/advanced/graph/test_graph_datatype.py b/tests/integration/advanced/graph/test_graph_datatype.py index 222b1f5ace..0445ce8030 100644 --- a/tests/integration/advanced/graph/test_graph_datatype.py +++ b/tests/integration/advanced/graph/test_graph_datatype.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import time import six diff --git a/tests/integration/advanced/graph/test_graph_query.py b/tests/integration/advanced/graph/test_graph_query.py index 0eda67894d..9bc23e611a 100644 --- a/tests/integration/advanced/graph/test_graph_query.py +++ b/tests/integration/advanced/graph/test_graph_query.py @@ -22,10 +22,7 @@ import json import time -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra import OperationTimedOut, ConsistencyLevel, InvalidRequest from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT, NoHostAvailable diff --git a/tests/integration/advanced/test_adv_metadata.py b/tests/integration/advanced/test_adv_metadata.py index b3af6fa5d1..8228bfe220 100644 --- a/tests/integration/advanced/test_adv_metadata.py +++ b/tests/integration/advanced/test_adv_metadata.py @@ -20,10 +20,7 @@ greaterthanorequaldse68, use_single_node, DSE_VERSION, requiredse, TestCluster) -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import logging import time diff --git a/tests/integration/advanced/test_auth.py b/tests/integration/advanced/test_auth.py index 7e9aa8c23e..3443419ab4 100644 --- a/tests/integration/advanced/test_auth.py +++ b/tests/integration/advanced/test_auth.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import logging import os import subprocess diff --git a/tests/integration/advanced/test_cont_paging.py b/tests/integration/advanced/test_cont_paging.py index c5f1cbfff3..2e75d7061d 100644 --- a/tests/integration/advanced/test_cont_paging.py +++ b/tests/integration/advanced/test_cont_paging.py @@ -18,10 +18,7 @@ import logging log = logging.getLogger(__name__) -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from itertools import cycle, count from six.moves import range diff --git a/tests/integration/advanced/test_cqlengine_where_operators.py b/tests/integration/advanced/test_cqlengine_where_operators.py index 8ade3db09d..b2e4d4ba9e 100644 --- a/tests/integration/advanced/test_cqlengine_where_operators.py +++ b/tests/integration/advanced/test_cqlengine_where_operators.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import os import time diff --git a/tests/integration/advanced/test_geometry.py b/tests/integration/advanced/test_geometry.py index 8bee144d19..6a6737bd50 100644 --- a/tests/integration/advanced/test_geometry.py +++ b/tests/integration/advanced/test_geometry.py @@ -18,10 +18,7 @@ from cassandra.util import OrderedMap, sortedset from collections import namedtuple -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from uuid import uuid1 from cassandra.util import Point, LineString, Polygon from cassandra.cqltypes import LineStringType, PointType, PolygonType diff --git a/tests/integration/advanced/test_unixsocketendpoint.py b/tests/integration/advanced/test_unixsocketendpoint.py index 10cbc1b362..f2795d1a68 100644 --- a/tests/integration/advanced/test_unixsocketendpoint.py +++ b/tests/integration/advanced/test_unixsocketendpoint.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import time import subprocess diff --git a/tests/integration/cloud/__init__.py b/tests/integration/cloud/__init__.py index ca05ae4ce5..a6a4ab7a5d 100644 --- a/tests/integration/cloud/__init__.py +++ b/tests/integration/cloud/__init__.py @@ -13,10 +13,7 @@ # limitations under the License from cassandra.cluster import Cluster -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import os import subprocess diff --git a/tests/integration/cloud/test_cloud.py b/tests/integration/cloud/test_cloud.py index e0b9e2d382..ef4909a257 100644 --- a/tests/integration/cloud/test_cloud.py +++ b/tests/integration/cloud/test_cloud.py @@ -18,10 +18,7 @@ from cassandra.cqlengine.models import Model from cassandra.cqlengine import columns -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import six from ssl import SSLContext, PROTOCOL_TLS diff --git a/tests/integration/cqlengine/__init__.py b/tests/integration/cqlengine/__init__.py index e68baaabf1..cd8f031ed1 100644 --- a/tests/integration/cqlengine/__init__.py +++ b/tests/integration/cqlengine/__init__.py @@ -14,10 +14,7 @@ import os import warnings -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra import ConsistencyLevel from cassandra.cqlengine import connection diff --git a/tests/integration/cqlengine/advanced/test_cont_paging.py b/tests/integration/cqlengine/advanced/test_cont_paging.py index 38b4355312..89e05950e3 100644 --- a/tests/integration/cqlengine/advanced/test_cont_paging.py +++ b/tests/integration/cqlengine/advanced/test_cont_paging.py @@ -14,10 +14,7 @@ -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from packaging.version import Version diff --git a/tests/integration/cqlengine/base.py b/tests/integration/cqlengine/base.py index 8a6903350f..bdb62aa2a3 100644 --- a/tests/integration/cqlengine/base.py +++ b/tests/integration/cqlengine/base.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import sys diff --git a/tests/integration/cqlengine/columns/test_static_column.py b/tests/integration/cqlengine/columns/test_static_column.py index 69e222d2b9..0e8ace8c8f 100644 --- a/tests/integration/cqlengine/columns/test_static_column.py +++ b/tests/integration/cqlengine/columns/test_static_column.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from uuid import uuid4 diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 69682fd68d..21fe1581ff 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import sys from datetime import datetime, timedelta, date, tzinfo, time diff --git a/tests/integration/cqlengine/columns/test_value_io.py b/tests/integration/cqlengine/columns/test_value_io.py index 243c2b0fdb..2c82fe16f7 100644 --- a/tests/integration/cqlengine/columns/test_value_io.py +++ b/tests/integration/cqlengine/columns/test_value_io.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from datetime import datetime, timedelta, time from decimal import Decimal diff --git a/tests/integration/cqlengine/connections/test_connection.py b/tests/integration/cqlengine/connections/test_connection.py index c46df31280..92b6992573 100644 --- a/tests/integration/cqlengine/connections/test_connection.py +++ b/tests/integration/cqlengine/connections/test_connection.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra import ConsistencyLevel diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 7edb3e71dd..f37db5e51f 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import mock import logging diff --git a/tests/integration/cqlengine/model/test_model.py b/tests/integration/cqlengine/model/test_model.py index bbd9e0cbb6..859facf0e1 100644 --- a/tests/integration/cqlengine/model/test_model.py +++ b/tests/integration/cqlengine/model/test_model.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from mock import patch diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index 32ace5363f..3c4088cc83 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from uuid import uuid4, UUID import random diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index 82973436ac..1e3adf9a71 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from datetime import datetime, date, time from decimal import Decimal diff --git a/tests/integration/cqlengine/operators/test_where_operators.py b/tests/integration/cqlengine/operators/test_where_operators.py index fdfce1f0b8..555af11025 100644 --- a/tests/integration/cqlengine/operators/test_where_operators.py +++ b/tests/integration/cqlengine/operators/test_where_operators.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.cqlengine.operators import * diff --git a/tests/integration/cqlengine/query/test_named.py b/tests/integration/cqlengine/query/test_named.py index 3a6f83b32e..eb85bbbb85 100644 --- a/tests/integration/cqlengine/query/test_named.py +++ b/tests/integration/cqlengine/query/test_named.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra import ConsistencyLevel from cassandra.cqlengine import operators diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 6bc9d701b8..ec5044b707 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -13,10 +13,7 @@ # limitations under the License. from __future__ import absolute_import -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from datetime import datetime from uuid import uuid4 diff --git a/tests/integration/cqlengine/statements/test_assignment_clauses.py b/tests/integration/cqlengine/statements/test_assignment_clauses.py index 594224d72d..82bf067cb4 100644 --- a/tests/integration/cqlengine/statements/test_assignment_clauses.py +++ b/tests/integration/cqlengine/statements/test_assignment_clauses.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.cqlengine.statements import AssignmentClause, SetUpdateClause, ListUpdateClause, MapUpdateClause, MapDeleteClause, FieldDeleteClause, CounterUpdateClause diff --git a/tests/integration/cqlengine/statements/test_base_statement.py b/tests/integration/cqlengine/statements/test_base_statement.py index 474c45d02b..3b5be60520 100644 --- a/tests/integration/cqlengine/statements/test_base_statement.py +++ b/tests/integration/cqlengine/statements/test_base_statement.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from uuid import uuid4 import six diff --git a/tests/integration/cqlengine/statements/test_insert_statement.py b/tests/integration/cqlengine/statements/test_insert_statement.py index 3bf90ec313..a1dcd08968 100644 --- a/tests/integration/cqlengine/statements/test_insert_statement.py +++ b/tests/integration/cqlengine/statements/test_insert_statement.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import six diff --git a/tests/integration/cqlengine/statements/test_select_statement.py b/tests/integration/cqlengine/statements/test_select_statement.py index 90c14bcfb6..c6d1ac69f4 100644 --- a/tests/integration/cqlengine/statements/test_select_statement.py +++ b/tests/integration/cqlengine/statements/test_select_statement.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.cqlengine.columns import Column from cassandra.cqlengine.statements import SelectStatement, WhereClause diff --git a/tests/integration/cqlengine/statements/test_update_statement.py b/tests/integration/cqlengine/statements/test_update_statement.py index c6ed228d91..99105069dd 100644 --- a/tests/integration/cqlengine/statements/test_update_statement.py +++ b/tests/integration/cqlengine/statements/test_update_statement.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.cqlengine.columns import Column, Set, List, Text from cassandra.cqlengine.operators import * diff --git a/tests/integration/cqlengine/statements/test_where_clause.py b/tests/integration/cqlengine/statements/test_where_clause.py index 3173320f7c..21671be086 100644 --- a/tests/integration/cqlengine/statements/test_where_clause.py +++ b/tests/integration/cqlengine/statements/test_where_clause.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import six from cassandra.cqlengine.operators import EqualsOperator diff --git a/tests/integration/cqlengine/test_ifexists.py b/tests/integration/cqlengine/test_ifexists.py index 2797edd846..1189bc0ff5 100644 --- a/tests/integration/cqlengine/test_ifexists.py +++ b/tests/integration/cqlengine/test_ifexists.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import mock from uuid import uuid4 diff --git a/tests/integration/cqlengine/test_ifnotexists.py b/tests/integration/cqlengine/test_ifnotexists.py index 206101f1b2..260e132731 100644 --- a/tests/integration/cqlengine/test_ifnotexists.py +++ b/tests/integration/cqlengine/test_ifnotexists.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import mock from uuid import uuid4 diff --git a/tests/integration/cqlengine/test_lwt_conditional.py b/tests/integration/cqlengine/test_lwt_conditional.py index 1c418ae6d8..f8459a95ad 100644 --- a/tests/integration/cqlengine/test_lwt_conditional.py +++ b/tests/integration/cqlengine/test_lwt_conditional.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import mock import six diff --git a/tests/integration/cqlengine/test_ttl.py b/tests/integration/cqlengine/test_ttl.py index a9aa32db94..55457ff56a 100644 --- a/tests/integration/cqlengine/test_ttl.py +++ b/tests/integration/cqlengine/test_ttl.py @@ -13,10 +13,7 @@ # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from packaging.version import Version diff --git a/tests/integration/long/__init__.py b/tests/integration/long/__init__.py index 447f4885cc..19e7ed2c64 100644 --- a/tests/integration/long/__init__.py +++ b/tests/integration/long/__init__.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest try: from ccmlib import common diff --git a/tests/integration/long/test_consistency.py b/tests/integration/long/test_consistency.py index bbf446861a..0b9ebab3ed 100644 --- a/tests/integration/long/test_consistency.py +++ b/tests/integration/long/test_consistency.py @@ -28,10 +28,7 @@ force_stop, create_schema, wait_for_down, wait_for_up, start, CoordinatorStats ) -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest ALL_CONSISTENCY_LEVELS = { ConsistencyLevel.ANY, ConsistencyLevel.ONE, ConsistencyLevel.TWO, ConsistencyLevel.QUORUM, diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py index 6bdff8d15d..2ca01066b0 100644 --- a/tests/integration/long/test_failure_types.py +++ b/tests/integration/long/test_failure_types.py @@ -34,10 +34,7 @@ local, CASSANDRA_VERSION, TestCluster) -try: - import unittest2 as unittest -except ImportError: - import unittest +import unittest log = logging.getLogger(__name__) diff --git a/tests/integration/long/test_ipv6.py b/tests/integration/long/test_ipv6.py index a49c1677e8..b63fdebcf3 100644 --- a/tests/integration/long/test_ipv6.py +++ b/tests/integration/long/test_ipv6.py @@ -30,10 +30,7 @@ except ImportError: LibevConnection = None -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest # If more modules do IPV6 testing, this can be moved down to integration.__init__. diff --git a/tests/integration/long/test_large_data.py b/tests/integration/long/test_large_data.py index ce7e4398da..59873204a4 100644 --- a/tests/integration/long/test_large_data.py +++ b/tests/integration/long/test_large_data.py @@ -27,10 +27,7 @@ from tests.integration import use_singledc, PROTOCOL_VERSION, TestCluster from tests.integration.long.utils import create_schema -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest log = logging.getLogger(__name__) diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index f245569a80..7848a21b1d 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -36,10 +36,7 @@ wait_for_down, decommission, start, bootstrap, stop, IP_FORMAT) -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest log = logging.getLogger(__name__) diff --git a/tests/integration/long/test_policies.py b/tests/integration/long/test_policies.py index 0648e6cc93..680d0d7980 100644 --- a/tests/integration/long/test_policies.py +++ b/tests/integration/long/test_policies.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra import ConsistencyLevel, Unavailable from cassandra.cluster import ExecutionProfile, EXEC_PROFILE_DEFAULT diff --git a/tests/integration/long/test_schema.py b/tests/integration/long/test_schema.py index e2945a117b..f1cc80a17a 100644 --- a/tests/integration/long/test_schema.py +++ b/tests/integration/long/test_schema.py @@ -21,10 +21,7 @@ import time -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest log = logging.getLogger(__name__) diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index 4de46f4649..69285001f8 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest +import unittest import os, sys, traceback, logging, ssl, time, math, uuid from cassandra.cluster import NoHostAvailable diff --git a/tests/integration/simulacron/__init__.py b/tests/integration/simulacron/__init__.py index 6543265db2..c959fd6e08 100644 --- a/tests/integration/simulacron/__init__.py +++ b/tests/integration/simulacron/__init__.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from tests.integration import requiredse, CASSANDRA_VERSION, DSE_VERSION, SIMULACRON_JAR, PROTOCOL_VERSION from tests.integration.simulacron.utils import ( diff --git a/tests/integration/simulacron/advanced/test_insights.py b/tests/integration/simulacron/advanced/test_insights.py index 3da14659af..5ddae4ec7c 100644 --- a/tests/integration/simulacron/advanced/test_insights.py +++ b/tests/integration/simulacron/advanced/test_insights.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import time import json diff --git a/tests/integration/simulacron/test_cluster.py b/tests/integration/simulacron/test_cluster.py index b89f564f08..f859a5dd05 100644 --- a/tests/integration/simulacron/test_cluster.py +++ b/tests/integration/simulacron/test_cluster.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import logging from packaging.version import Version diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index 4ef97247a6..0c70d0a1e9 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import logging import time diff --git a/tests/integration/simulacron/test_empty_column.py b/tests/integration/simulacron/test_empty_column.py index 91c76985e1..046aaacf79 100644 --- a/tests/integration/simulacron/test_empty_column.py +++ b/tests/integration/simulacron/test_empty_column.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from collections import namedtuple, OrderedDict diff --git a/tests/integration/simulacron/test_endpoint.py b/tests/integration/simulacron/test_endpoint.py index 691fcc8718..9e2d91b6d3 100644 --- a/tests/integration/simulacron/test_endpoint.py +++ b/tests/integration/simulacron/test_endpoint.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from functools import total_ordering diff --git a/tests/integration/simulacron/test_policies.py b/tests/integration/simulacron/test_policies.py index da093be43c..6d0d081889 100644 --- a/tests/integration/simulacron/test_policies.py +++ b/tests/integration/simulacron/test_policies.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra import OperationTimedOut, WriteTimeout from cassandra.cluster import Cluster, ExecutionProfile, ResponseFuture, EXEC_PROFILE_DEFAULT, NoHostAvailable diff --git a/tests/integration/standard/__init__.py b/tests/integration/standard/__init__.py index e54b6fd6bd..1f14bd6ec4 100644 --- a/tests/integration/standard/__init__.py +++ b/tests/integration/standard/__init__.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest try: from ccmlib import common diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index 9755c5098b..189da45c94 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -22,10 +22,7 @@ USE_CASS_EXTERNAL, start_cluster_wait_for_up, TestCluster from tests.integration.util import assert_quiescent_pool_state -try: - import unittest2 as unittest -except ImportError: - import unittest +import unittest log = logging.getLogger(__name__) diff --git a/tests/integration/standard/test_client_warnings.py b/tests/integration/standard/test_client_warnings.py index c5ce5dc726..5f63b5265a 100644 --- a/tests/integration/standard/test_client_warnings.py +++ b/tests/integration/standard/test_client_warnings.py @@ -13,10 +13,7 @@ # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest +import unittest from cassandra.query import BatchStatement diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index c7d8266fd9..a15c7f32e2 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from collections import deque from copy import copy diff --git a/tests/integration/standard/test_concurrent.py b/tests/integration/standard/test_concurrent.py index 8bd65c7f6f..ad4ef47473 100644 --- a/tests/integration/standard/test_concurrent.py +++ b/tests/integration/standard/test_concurrent.py @@ -26,10 +26,7 @@ from six import next -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest log = logging.getLogger(__name__) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index aaa5a27dfd..76c8216d41 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from functools import partial from mock import patch diff --git a/tests/integration/standard/test_control_connection.py b/tests/integration/standard/test_control_connection.py index db7cff8506..9d579476d2 100644 --- a/tests/integration/standard/test_control_connection.py +++ b/tests/integration/standard/test_control_connection.py @@ -16,10 +16,7 @@ # from cassandra import InvalidRequest -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.protocol import ConfigurationException diff --git a/tests/integration/standard/test_custom_cluster.py b/tests/integration/standard/test_custom_cluster.py index 84e0737086..d0f10d51db 100644 --- a/tests/integration/standard/test_custom_cluster.py +++ b/tests/integration/standard/test_custom_cluster.py @@ -16,10 +16,7 @@ from tests.integration import use_singledc, get_cluster, remove_cluster, local, TestCluster from tests.util import wait_until, wait_until_not_raised -try: - import unittest2 as unittest -except ImportError: - import unittest +import unittest def setup_module(): diff --git a/tests/integration/standard/test_custom_payload.py b/tests/integration/standard/test_custom_payload.py index 9906a8243e..3290852862 100644 --- a/tests/integration/standard/test_custom_payload.py +++ b/tests/integration/standard/test_custom_payload.py @@ -13,10 +13,7 @@ # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest +import unittest import six diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index bf549511c8..7443ce0748 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.protocol import ProtocolHandler, ResultMessage, QueryMessage, UUIDType, read_int from cassandra.query import tuple_factory, SimpleStatement diff --git a/tests/integration/standard/test_cython_protocol_handlers.py b/tests/integration/standard/test_cython_protocol_handlers.py index 4e45553be2..9cb5914f16 100644 --- a/tests/integration/standard/test_cython_protocol_handlers.py +++ b/tests/integration/standard/test_cython_protocol_handlers.py @@ -2,10 +2,7 @@ # Based on test_custom_protocol_handler.py -try: - import unittest2 as unittest -except ImportError: - import unittest +import unittest from itertools import count diff --git a/tests/integration/standard/test_dse.py b/tests/integration/standard/test_dse.py index 1b9b5bef84..7b96094b3f 100644 --- a/tests/integration/standard/test_dse.py +++ b/tests/integration/standard/test_dse.py @@ -21,10 +21,7 @@ from tests.integration import (execute_until_pass, execute_with_long_wait_retry, use_cluster, TestCluster) -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest CCM_IS_DSE = (os.environ.get('CCM_IS_DSE', None) == 'true') diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index bd556f357d..e20f1f0640 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from collections import defaultdict import difflib diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 676a5340ef..ddc1091dc6 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -17,10 +17,7 @@ from cassandra.connection import ConnectionShutdown from cassandra.policies import HostFilterPolicy, RoundRobinPolicy, FallthroughRetryPolicy -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.query import SimpleStatement from cassandra import ConsistencyLevel, WriteTimeout, Unavailable, ReadTimeout diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index 24facf42a0..46e91918ac 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.cluster import ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.policies import HostFilterPolicy, RoundRobinPolicy, SimpleConvictionPolicy, \ diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index 5c79f27346..1ed48d2964 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -15,10 +15,7 @@ from tests.integration import use_singledc, PROTOCOL_VERSION, TestCluster -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra import InvalidRequest, DriverException from cassandra import ConsistencyLevel, ProtocolVersion diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index ea0e326ff5..8d2a3d74e2 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -15,10 +15,7 @@ from cassandra.concurrent import execute_concurrent from cassandra import DriverException -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import logging from cassandra import ProtocolVersion from cassandra import ConsistencyLevel, Unavailable, InvalidRequest, cluster diff --git a/tests/integration/standard/test_query_paging.py b/tests/integration/standard/test_query_paging.py index dac4ec5ce3..8e0ca8becc 100644 --- a/tests/integration/standard/test_query_paging.py +++ b/tests/integration/standard/test_query_paging.py @@ -16,10 +16,7 @@ import logging log = logging.getLogger(__name__) -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from itertools import cycle, count from six.moves import range diff --git a/tests/integration/standard/test_routing.py b/tests/integration/standard/test_routing.py index e1dabba49a..47697ee9c8 100644 --- a/tests/integration/standard/test_routing.py +++ b/tests/integration/standard/test_routing.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from uuid import uuid1 diff --git a/tests/integration/standard/test_row_factories.py b/tests/integration/standard/test_row_factories.py index 93f25d9276..6855e8a410 100644 --- a/tests/integration/standard/test_row_factories.py +++ b/tests/integration/standard/test_row_factories.py @@ -15,10 +15,7 @@ from tests.integration import get_server_versions, use_singledc, \ BasicSharedKeyspaceUnitTestCaseWFunctionTable, BasicSharedKeyspaceUnitTestCase, execute_until_pass, TestCluster -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.cluster import ResultSet, ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.query import tuple_factory, named_tuple_factory, dict_factory, ordered_dict_factory diff --git a/tests/integration/standard/test_single_interface.py b/tests/integration/standard/test_single_interface.py index 91451a52a0..4677eff641 100644 --- a/tests/integration/standard/test_single_interface.py +++ b/tests/integration/standard/test_single_interface.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import six diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index 0592b7d737..f69e88c64f 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from datetime import datetime import math diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index 6d9676f25e..4c7826fb98 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from collections import namedtuple from functools import partial diff --git a/tests/integration/upgrade/__init__.py b/tests/integration/upgrade/__init__.py index d2b9076bc2..e307a3e3cc 100644 --- a/tests/integration/upgrade/__init__.py +++ b/tests/integration/upgrade/__init__.py @@ -27,10 +27,7 @@ import time import logging -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest def setup_module(): diff --git a/tests/integration/upgrade/test_upgrade.py b/tests/integration/upgrade/test_upgrade.py index 31df55c02c..63e1a64b9d 100644 --- a/tests/integration/upgrade/test_upgrade.py +++ b/tests/integration/upgrade/test_upgrade.py @@ -20,10 +20,7 @@ from cassandra.policies import ConstantSpeculativeExecutionPolicy from tests.integration.upgrade import UpgradeBase, UpgradeBaseAuth, UpgradePath, upgrade_paths -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest # Previous Cassandra upgrade diff --git a/tests/stress_tests/test_load.py b/tests/stress_tests/test_load.py index a9771147ce..3492ff2923 100644 --- a/tests/stress_tests/test_load.py +++ b/tests/stress_tests/test_load.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import gc diff --git a/tests/stress_tests/test_multi_inserts.py b/tests/stress_tests/test_multi_inserts.py index 65bbe2a4e4..84dfc5e6f7 100644 --- a/tests/stress_tests/test_multi_inserts.py +++ b/tests/stress_tests/test_multi_inserts.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest +import unittest import os from cassandra.cluster import Cluster diff --git a/tests/unit/advanced/cloud/test_cloud.py b/tests/unit/advanced/cloud/test_cloud.py index ab18f0af72..a7cd83a8ce 100644 --- a/tests/unit/advanced/cloud/test_cloud.py +++ b/tests/unit/advanced/cloud/test_cloud.py @@ -11,10 +11,7 @@ import shutil import six -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra import DriverException from cassandra.datastax import cloud diff --git a/tests/unit/advanced/test_auth.py b/tests/unit/advanced/test_auth.py index bb411afe2b..840073e9e1 100644 --- a/tests/unit/advanced/test_auth.py +++ b/tests/unit/advanced/test_auth.py @@ -15,10 +15,7 @@ import os from puresasl import QOP -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.auth import DSEGSSAPIAuthProvider diff --git a/tests/unit/advanced/test_execution_profile.py b/tests/unit/advanced/test_execution_profile.py index 8592f56a44..478322f95b 100644 --- a/tests/unit/advanced/test_execution_profile.py +++ b/tests/unit/advanced/test_execution_profile.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.cluster import GraphExecutionProfile, GraphAnalyticsExecutionProfile from cassandra.graph import GraphOptions diff --git a/tests/unit/advanced/test_geometry.py b/tests/unit/advanced/test_geometry.py index 4fa2644ff2..d85f1bc293 100644 --- a/tests/unit/advanced/test_geometry.py +++ b/tests/unit/advanced/test_geometry.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import struct import math diff --git a/tests/unit/advanced/test_graph.py b/tests/unit/advanced/test_graph.py index f25a229f42..25dd289dba 100644 --- a/tests/unit/advanced/test_graph.py +++ b/tests/unit/advanced/test_graph.py @@ -15,10 +15,7 @@ import warnings import json -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import six diff --git a/tests/unit/advanced/test_insights.py b/tests/unit/advanced/test_insights.py index 2cc170e485..4f1dd7ac12 100644 --- a/tests/unit/advanced/test_insights.py +++ b/tests/unit/advanced/test_insights.py @@ -13,10 +13,7 @@ # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import logging from mock import sentinel diff --git a/tests/unit/advanced/test_metadata.py b/tests/unit/advanced/test_metadata.py index addd514169..cf730ebec5 100644 --- a/tests/unit/advanced/test_metadata.py +++ b/tests/unit/advanced/test_metadata.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.metadata import ( KeyspaceMetadata, TableMetadataDSE68, diff --git a/tests/unit/advanced/test_policies.py b/tests/unit/advanced/test_policies.py index 79e7410799..b8e4a4e757 100644 --- a/tests/unit/advanced/test_policies.py +++ b/tests/unit/advanced/test_policies.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from mock import Mock diff --git a/tests/unit/cqlengine/test_columns.py b/tests/unit/cqlengine/test_columns.py index bcb174a8c0..a7bf74ec23 100644 --- a/tests/unit/cqlengine/test_columns.py +++ b/tests/unit/cqlengine/test_columns.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.cqlengine.columns import Column diff --git a/tests/unit/cqlengine/test_connection.py b/tests/unit/cqlengine/test_connection.py index 9f8e500c6b..8e3a0b75bd 100644 --- a/tests/unit/cqlengine/test_connection.py +++ b/tests/unit/cqlengine/test_connection.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.cluster import _ConfigMode from cassandra.cqlengine import connection diff --git a/tests/unit/cqlengine/test_udt.py b/tests/unit/cqlengine/test_udt.py index ebe1139fd0..0a126513d5 100644 --- a/tests/unit/cqlengine/test_udt.py +++ b/tests/unit/cqlengine/test_udt.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.cqlengine import columns from cassandra.cqlengine.models import Model diff --git a/tests/unit/cython/test_bytesio.py b/tests/unit/cython/test_bytesio.py index a156fc1272..cd4ea86f52 100644 --- a/tests/unit/cython/test_bytesio.py +++ b/tests/unit/cython/test_bytesio.py @@ -15,10 +15,7 @@ from tests.unit.cython.utils import cyimport, cythontest bytesio_testhelper = cyimport('tests.unit.cython.bytesio_testhelper') -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest class BytesIOTest(unittest.TestCase): diff --git a/tests/unit/cython/test_types.py b/tests/unit/cython/test_types.py index a0d2138c6d..545b82fc11 100644 --- a/tests/unit/cython/test_types.py +++ b/tests/unit/cython/test_types.py @@ -15,10 +15,7 @@ from tests.unit.cython.utils import cyimport, cythontest types_testhelper = cyimport('tests.unit.cython.types_testhelper') -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest class TypesTest(unittest.TestCase): diff --git a/tests/unit/cython/test_utils.py b/tests/unit/cython/test_utils.py index dc8745e471..0e79c235d8 100644 --- a/tests/unit/cython/test_utils.py +++ b/tests/unit/cython/test_utils.py @@ -15,10 +15,7 @@ from tests.unit.cython.utils import cyimport, cythontest utils_testhelper = cyimport('tests.unit.cython.utils_testhelper') -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest class UtilsTest(unittest.TestCase): @@ -26,4 +23,4 @@ class UtilsTest(unittest.TestCase): @cythontest def test_datetime_from_timestamp(self): - utils_testhelper.test_datetime_from_timestamp(self.assertEqual) \ No newline at end of file + utils_testhelper.test_datetime_from_timestamp(self.assertEqual) diff --git a/tests/unit/cython/utils.py b/tests/unit/cython/utils.py index 7f8be22ce0..fc21597c7d 100644 --- a/tests/unit/cython/utils.py +++ b/tests/unit/cython/utils.py @@ -18,10 +18,7 @@ except ImportError: VERIFY_CYTHON = False -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest def cyimport(import_path): """ diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index 4e0e540327..6f493896d0 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from mock import patch import socket diff --git a/tests/unit/io/test_eventletreactor.py b/tests/unit/io/test_eventletreactor.py index ce828cd6d8..e2b6a533a8 100644 --- a/tests/unit/io/test_eventletreactor.py +++ b/tests/unit/io/test_eventletreactor.py @@ -13,10 +13,7 @@ # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from tests.unit.io.utils import TimerTestMixin from tests import notpypy, EVENT_LOOP_MANAGER diff --git a/tests/unit/io/test_geventreactor.py b/tests/unit/io/test_geventreactor.py index ec64ce34c1..466b9ae5d5 100644 --- a/tests/unit/io/test_geventreactor.py +++ b/tests/unit/io/test_geventreactor.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from tests.unit.io.utils import TimerTestMixin diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index a02458edc8..67ab5fc7d6 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from mock import patch, Mock import weakref diff --git a/tests/unit/io/test_twistedreactor.py b/tests/unit/io/test_twistedreactor.py index e7c34cb4b5..b426a820c4 100644 --- a/tests/unit/io/test_twistedreactor.py +++ b/tests/unit/io/test_twistedreactor.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest +import unittest from mock import Mock, patch from cassandra.connection import DefaultEndPoint diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index 848513f031..ddfa2c3198 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -37,10 +37,7 @@ from socket import error as socket_error import ssl -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import time diff --git a/tests/unit/test_auth.py b/tests/unit/test_auth.py index 7b4196f831..68cce526e7 100644 --- a/tests/unit/test_auth.py +++ b/tests/unit/test_auth.py @@ -16,10 +16,7 @@ import six from cassandra.auth import PlainTextAuthenticator -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest class TestPlainTextAuthenticator(unittest.TestCase): diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 620f642084..6755f118fd 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import logging import six diff --git a/tests/unit/test_concurrent.py b/tests/unit/test_concurrent.py index cc6c12cdaa..9f67531a3c 100644 --- a/tests/unit/test_concurrent.py +++ b/tests/unit/test_concurrent.py @@ -13,10 +13,7 @@ # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from itertools import cycle from mock import Mock diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index 21b8862772..f06b67ebe0 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from mock import Mock, ANY, call, patch import six diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index efad1ca5c9..276b2849ca 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import six diff --git a/tests/unit/test_endpoints.py b/tests/unit/test_endpoints.py index 2452e267ba..18f245e64b 100644 --- a/tests/unit/test_endpoints.py +++ b/tests/unit/test_endpoints.py @@ -6,10 +6,7 @@ # You may obtain a copy of the License at # # http://www.datastax.com/terms/datastax-dse-driver-license-terms -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import itertools diff --git a/tests/unit/test_exception.py b/tests/unit/test_exception.py index 3a082f7363..b39b22239c 100644 --- a/tests/unit/test_exception.py +++ b/tests/unit/test_exception.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest +import unittest from cassandra import Unavailable, Timeout, ConsistencyLevel import re diff --git a/tests/unit/test_host_connection_pool.py b/tests/unit/test_host_connection_pool.py index bda48dc76b..86d4bf9843 100644 --- a/tests/unit/test_host_connection_pool.py +++ b/tests/unit/test_host_connection_pool.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from mock import Mock, NonCallableMagicMock from threading import Thread, Event, Lock diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index c2363e0adc..1fdbfa6a4b 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -15,10 +15,7 @@ from cassandra import ProtocolVersion -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import platform from datetime import datetime, date diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index b2143f8c20..b0a8b63b16 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from binascii import unhexlify import logging diff --git a/tests/unit/test_orderedmap.py b/tests/unit/test_orderedmap.py index f2baab40f0..9ca5699204 100644 --- a/tests/unit/test_orderedmap.py +++ b/tests/unit/test_orderedmap.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.util import OrderedMap, OrderedMapSerializedKey from cassandra.cqltypes import EMPTY, UTF8Type, lookup_casstype diff --git a/tests/unit/test_parameter_binding.py b/tests/unit/test_parameter_binding.py index 228f3f4432..8820114dc3 100644 --- a/tests/unit/test_parameter_binding.py +++ b/tests/unit/test_parameter_binding.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.encoder import Encoder from cassandra.protocol import ColumnMetadata diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 5c0c11281b..a31b4f4c1b 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from itertools import islice, cycle from mock import Mock, patch, call diff --git a/tests/unit/test_protocol.py b/tests/unit/test_protocol.py index b43b21eeff..95a7a12b11 100644 --- a/tests/unit/test_protocol.py +++ b/tests/unit/test_protocol.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from mock import Mock diff --git a/tests/unit/test_query.py b/tests/unit/test_query.py index 7c2bfc0d14..2a2901aaff 100644 --- a/tests/unit/test_query.py +++ b/tests/unit/test_query.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import six diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index f76a2d677b..dbd8764ad9 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from collections import deque from threading import RLock diff --git a/tests/unit/test_resultset.py b/tests/unit/test_resultset.py index b37c3a2594..97002d90d7 100644 --- a/tests/unit/test_resultset.py +++ b/tests/unit/test_resultset.py @@ -13,10 +13,7 @@ # limitations under the License. from cassandra.query import named_tuple_factory, dict_factory, tuple_factory -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from mock import Mock, PropertyMock, patch diff --git a/tests/unit/test_row_factories.py b/tests/unit/test_row_factories.py index 13049ba034..70691ad8fd 100644 --- a/tests/unit/test_row_factories.py +++ b/tests/unit/test_row_factories.py @@ -20,10 +20,7 @@ import sys -try: - from unittest import TestCase -except ImportError: - from unittest2 import TestCase +from unittest import TestCase log = logging.getLogger(__name__) diff --git a/tests/unit/test_segment.py b/tests/unit/test_segment.py index fc49339d68..f794b38b1d 100644 --- a/tests/unit/test_segment.py +++ b/tests/unit/test_segment.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import six diff --git a/tests/unit/test_sortedset.py b/tests/unit/test_sortedset.py index 3845c2c31c..49c3658df8 100644 --- a/tests/unit/test_sortedset.py +++ b/tests/unit/test_sortedset.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.util import sortedset from cassandra.cqltypes import EMPTY diff --git a/tests/unit/test_time_util.py b/tests/unit/test_time_util.py index 7025f151d6..2605992d1c 100644 --- a/tests/unit/test_time_util.py +++ b/tests/unit/test_time_util.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra import marshal from cassandra import util diff --git a/tests/unit/test_timestamps.py b/tests/unit/test_timestamps.py index 8903fbc99b..58958cff03 100644 --- a/tests/unit/test_timestamps.py +++ b/tests/unit/test_timestamps.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import mock diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index 562fd2c899..af3b327ef8 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import datetime import tempfile diff --git a/tests/unit/test_util_types.py b/tests/unit/test_util_types.py index b7dc837249..5d6058b394 100644 --- a/tests/unit/test_util_types.py +++ b/tests/unit/test_util_types.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import datetime From e4e290fe2dc5f15f677f33bbb40ed115fce2406d Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Wed, 2 Feb 2022 10:21:02 -0600 Subject: [PATCH 1872/2005] Merge pull request #1119 from datastax/python-1290 PYTHON-1290 Convert asyncio reactor away from @asyncio.coroutine --- cassandra/io/asyncioreactor.py | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/cassandra/io/asyncioreactor.py b/cassandra/io/asyncioreactor.py index 7cb0444a32..ab0e90ae09 100644 --- a/cassandra/io/asyncioreactor.py +++ b/cassandra/io/asyncioreactor.py @@ -46,9 +46,8 @@ def __init__(self, timeout, callback, loop): self._handle = asyncio.run_coroutine_threadsafe(delayed, loop=loop) @staticmethod - @asyncio.coroutine - def _call_delayed_coro(timeout, callback, loop): - yield from asyncio.sleep(timeout, loop=loop) + async def _call_delayed_coro(timeout, callback, loop): + await asyncio.sleep(timeout, loop=loop) return callback() def __lt__(self, other): @@ -136,8 +135,7 @@ def close(self): self._close(), loop=self._loop ) - @asyncio.coroutine - def _close(self): + async def _close(self): log.debug("Closing connection (%s) to %s" % (id(self), self.endpoint)) if self._write_watcher: self._write_watcher.cancel() @@ -174,21 +172,19 @@ def push(self, data): # avoid races/hangs by just scheduling this, not using threadsafe self._loop.create_task(self._push_msg(chunks)) - @asyncio.coroutine - def _push_msg(self, chunks): + async def _push_msg(self, chunks): # This lock ensures all chunks of a message are sequential in the Queue - with (yield from self._write_queue_lock): + with await self._write_queue_lock: for chunk in chunks: self._write_queue.put_nowait(chunk) - @asyncio.coroutine - def handle_write(self): + async def handle_write(self): while True: try: - next_msg = yield from self._write_queue.get() + next_msg = await self._write_queue.get() if next_msg: - yield from self._loop.sock_sendall(self._socket, next_msg) + await self._loop.sock_sendall(self._socket, next_msg) except socket.error as err: log.debug("Exception in send for %s: %s", self, err) self.defunct(err) @@ -196,18 +192,19 @@ def handle_write(self): except asyncio.CancelledError: return - @asyncio.coroutine - def handle_read(self): + async def handle_read(self): while True: try: - buf = yield from self._loop.sock_recv(self._socket, self.in_buffer_size) + buf = await self._loop.sock_recv(self._socket, self.in_buffer_size) self._iobuf.write(buf) # sock_recv expects EWOULDBLOCK if socket provides no data, but # nonblocking ssl sockets raise these instead, so we handle them # ourselves by yielding to the event loop, where the socket will # get the reading/writing it "wants" before retrying except (ssl.SSLWantWriteError, ssl.SSLWantReadError): - yield + # Apparently the preferred way to yield to the event loop from within + # a native coroutine based on https://github.com/python/asyncio/issues/284 + await asyncio.sleep(0) continue except socket.error as err: log.debug("Exception during socket recv for %s: %s", From 8c4c6536d4728e052771651120df6cbb2e730773 Mon Sep 17 00:00:00 2001 From: Andy Salnikov Date: Wed, 16 Mar 2022 13:19:03 -0700 Subject: [PATCH 1873/2005] Merge pull request #1122 from andy-slac/concurrent-execution-profiles Adds one more keyword argument `execution_profile` to the `execute_concurrent` method to pass an execution profile. It is fowarded to `Session.execute_async` call. --- cassandra/concurrent.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index a8bddcbdab..0228f297fe 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -21,7 +21,7 @@ from threading import Condition import sys -from cassandra.cluster import ResultSet +from cassandra.cluster import ResultSet, EXEC_PROFILE_DEFAULT import logging log = logging.getLogger(__name__) @@ -29,7 +29,7 @@ ExecutionResult = namedtuple('ExecutionResult', ['success', 'result_or_exc']) -def execute_concurrent(session, statements_and_parameters, concurrency=100, raise_on_first_error=True, results_generator=False): +def execute_concurrent(session, statements_and_parameters, concurrency=100, raise_on_first_error=True, results_generator=False, execution_profile=EXEC_PROFILE_DEFAULT): """ Executes a sequence of (statement, parameters) tuples concurrently. Each ``parameters`` item must be a sequence or :const:`None`. @@ -56,6 +56,9 @@ def execute_concurrent(session, statements_and_parameters, concurrency=100, rais footprint is marginal CPU overhead (more thread coordination and sorting out-of-order results on-the-fly). + `execution_profile` argument is the execution profile to use for this + request, it is passed directly to :meth:`Session.execute_async`. + A sequence of ``ExecutionResult(success, result_or_exc)`` namedtuples is returned in the same order that the statements were passed in. If ``success`` is :const:`False`, there was an error executing the statement, and ``result_or_exc`` will be @@ -90,7 +93,8 @@ def execute_concurrent(session, statements_and_parameters, concurrency=100, rais if not statements_and_parameters: return [] - executor = ConcurrentExecutorGenResults(session, statements_and_parameters) if results_generator else ConcurrentExecutorListResults(session, statements_and_parameters) + executor = ConcurrentExecutorGenResults(session, statements_and_parameters, execution_profile) \ + if results_generator else ConcurrentExecutorListResults(session, statements_and_parameters, execution_profile) return executor.execute(concurrency, raise_on_first_error) @@ -98,9 +102,10 @@ class _ConcurrentExecutor(object): max_error_recursion = 100 - def __init__(self, session, statements_and_params): + def __init__(self, session, statements_and_params, execution_profile): self.session = session self._enum_statements = enumerate(iter(statements_and_params)) + self._execution_profile = execution_profile self._condition = Condition() self._fail_fast = False self._results_queue = [] @@ -132,7 +137,7 @@ def _execute_next(self): def _execute(self, idx, statement, params): self._exec_depth += 1 try: - future = self.session.execute_async(statement, params, timeout=None) + future = self.session.execute_async(statement, params, timeout=None, execution_profile=self._execution_profile) args = (future, idx) future.add_callbacks( callback=self._on_success, callback_args=args, From 9e4904917dcc895c96daafbfe7cc215f101e8f8c Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Wed, 16 Mar 2022 15:24:32 -0500 Subject: [PATCH 1874/2005] Add tests for recent addition of execution profile support to cassandra.concurrent --- tests/integration/standard/test_concurrent.py | 66 +++++++++++-------- 1 file changed, 38 insertions(+), 28 deletions(-) diff --git a/tests/integration/standard/test_concurrent.py b/tests/integration/standard/test_concurrent.py index ad4ef47473..15da526bde 100644 --- a/tests/integration/standard/test_concurrent.py +++ b/tests/integration/standard/test_concurrent.py @@ -20,7 +20,7 @@ from cassandra.cluster import ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.concurrent import execute_concurrent, execute_concurrent_with_args, ExecutionResult from cassandra.policies import HostDistance -from cassandra.query import tuple_factory, SimpleStatement +from cassandra.query import dict_factory, tuple_factory, SimpleStatement from tests.integration import use_singledc, PROTOCOL_VERSION, TestCluster @@ -35,13 +35,16 @@ def setup_module(): use_singledc() +EXEC_PROFILE_DICT = "dict" + class ClusterTests(unittest.TestCase): @classmethod def setUpClass(cls): cls.cluster = TestCluster( execution_profiles = { - EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=tuple_factory) + EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=tuple_factory), + EXEC_PROFILE_DICT: ExecutionProfile(row_factory=dict_factory) } ) if PROTOCOL_VERSION < 3: @@ -52,11 +55,11 @@ def setUpClass(cls): def tearDownClass(cls): cls.cluster.shutdown() - def execute_concurrent_helper(self, session, query, results_generator=False): + def execute_concurrent_helper(self, session, query, **kwargs): count = 0 while count < 100: try: - return execute_concurrent(session, query, results_generator=False) + return execute_concurrent(session, query, results_generator=False, **kwargs) except (ReadTimeout, WriteTimeout, OperationTimedOut, ReadFailure, WriteFailure): ex_type, ex, tb = sys.exc_info() log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) @@ -65,11 +68,11 @@ def execute_concurrent_helper(self, session, query, results_generator=False): raise RuntimeError("Failed to execute query after 100 attempts: {0}".format(query)) - def execute_concurrent_args_helper(self, session, query, params, results_generator=False): + def execute_concurrent_args_helper(self, session, query, params, results_generator=False, **kwargs): count = 0 while count < 100: try: - return execute_concurrent_with_args(session, query, params, results_generator=results_generator) + return execute_concurrent_with_args(session, query, params, results_generator=results_generator, **kwargs) except (ReadTimeout, WriteTimeout, OperationTimedOut, ReadFailure, WriteFailure): ex_type, ex, tb = sys.exc_info() log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) @@ -77,7 +80,7 @@ def execute_concurrent_args_helper(self, session, query, params, results_generat raise RuntimeError("Failed to execute query after 100 attempts: {0}".format(query)) - def test_execute_concurrent(self): + def execute_concurrent_base(self, test_fn, validate_fn, zip_args=True): for num_statements in (0, 1, 2, 7, 10, 99, 100, 101, 199, 200, 201): # write statement = SimpleStatement( @@ -86,7 +89,9 @@ def test_execute_concurrent(self): statements = cycle((statement, )) parameters = [(i, i) for i in range(num_statements)] - results = self.execute_concurrent_helper(self.session, list(zip(statements, parameters))) + results = \ + test_fn(self.session, list(zip(statements, parameters))) if zip_args else \ + test_fn(self.session, statement, parameters) self.assertEqual(num_statements, len(results)) for success, result in results: self.assertTrue(success) @@ -99,32 +104,37 @@ def test_execute_concurrent(self): statements = cycle((statement, )) parameters = [(i, ) for i in range(num_statements)] - results = self.execute_concurrent_helper(self.session, list(zip(statements, parameters))) + results = \ + test_fn(self.session, list(zip(statements, parameters))) if zip_args else \ + test_fn(self.session, statement, parameters) + validate_fn(num_statements, results) + + def execute_concurrent_valiate_tuple(self, num_statements, results): self.assertEqual(num_statements, len(results)) self.assertEqual([(True, [(i,)]) for i in range(num_statements)], results) - def test_execute_concurrent_with_args(self): - for num_statements in (0, 1, 2, 7, 10, 99, 100, 101, 199, 200, 201): - statement = SimpleStatement( - "INSERT INTO test3rf.test (k, v) VALUES (%s, %s)", - consistency_level=ConsistencyLevel.QUORUM) - parameters = [(i, i) for i in range(num_statements)] - - results = self.execute_concurrent_args_helper(self.session, statement, parameters) + def execute_concurrent_valiate_dict(self, num_statements, results): self.assertEqual(num_statements, len(results)) - for success, result in results: - self.assertTrue(success) - self.assertFalse(result) + self.assertEqual([(True, [{"v":i}]) for i in range(num_statements)], results) - # read - statement = SimpleStatement( - "SELECT v FROM test3rf.test WHERE k=%s", - consistency_level=ConsistencyLevel.QUORUM) - parameters = [(i, ) for i in range(num_statements)] + def test_execute_concurrent(self): + self.execute_concurrent_base(self.execute_concurrent_helper, \ + self.execute_concurrent_valiate_tuple) - results = self.execute_concurrent_args_helper(self.session, statement, parameters) - self.assertEqual(num_statements, len(results)) - self.assertEqual([(True, [(i,)]) for i in range(num_statements)], results) + def test_execute_concurrent_with_args(self): + self.execute_concurrent_base(self.execute_concurrent_args_helper, \ + self.execute_concurrent_valiate_tuple, \ + zip_args=False) + + def test_execute_concurrent_with_execution_profile(self): + def run_fn(*args, **kwargs): + return self.execute_concurrent_helper(*args, execution_profile=EXEC_PROFILE_DICT, **kwargs) + self.execute_concurrent_base(run_fn, self.execute_concurrent_valiate_dict) + + def test_execute_concurrent_with_args_and_execution_profile(self): + def run_fn(*args, **kwargs): + return self.execute_concurrent_args_helper(*args, execution_profile=EXEC_PROFILE_DICT, **kwargs) + self.execute_concurrent_base(run_fn, self.execute_concurrent_valiate_dict, zip_args=False) def test_execute_concurrent_with_args_generator(self): """ From a7295e103023e12152fc0940906071b18356def3 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Thu, 17 Mar 2022 14:06:08 -0500 Subject: [PATCH 1875/2005] PYTHON-1294: Upgrade importlib-metadata to a much newer version --- .travis.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 7e1e374822..906775e90c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,6 +7,7 @@ python: - "3.5" - "3.6" - "3.7" + - "3.8" - "pypy2.7-6.0" - "pypy3.5" @@ -24,7 +25,7 @@ addons: - libev-dev install: - - pip install --upgrade setuptools + - pip install --upgrade setuptools importlib-metadata - pip install tox-travis - if [[ $TRAVIS_PYTHON_VERSION != pypy3.5 ]]; then pip install lz4; fi From 9a645c58ca0ec57f775251f94e55c30aa837b2ad Mon Sep 17 00:00:00 2001 From: Emmanuel Arias Date: Tue, 17 May 2022 15:24:12 -0300 Subject: [PATCH 1876/2005] Merge pull request #1126 from eamanu/fix-typos Fix typos detected by Lintian during the packaging In Debian --- cassandra/cluster.py | 2 +- cassandra/util.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index cf78725f17..c836fb4302 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2391,7 +2391,7 @@ def default_consistency_level(self, cl): *Deprecated:* use execution profiles instead """ warn("Setting the consistency level at the session level will be removed in 4.0. Consider using " - "execution profiles and setting the desired consitency level to the EXEC_PROFILE_DEFAULT profile." + "execution profiles and setting the desired consistency level to the EXEC_PROFILE_DEFAULT profile." , DeprecationWarning) self._validate_set_legacy_config('default_consistency_level', cl) diff --git a/cassandra/util.py b/cassandra/util.py index f896ff4f86..dd5c58b01d 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -797,7 +797,7 @@ class OrderedMap(Mapping): ''' An ordered map that accepts non-hashable types for keys. It also maintains the insertion order of items, behaving as OrderedDict in that regard. These maps - are constructed and read just as normal mapping types, exept that they may + are constructed and read just as normal mapping types, except that they may contain arbitrary collections and other non-hashable items as keys:: >>> od = OrderedMap([({'one': 1, 'two': 2}, 'value'), From e4e34846d548cd2dc8e070d5c3ec43f28b980c03 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Sat, 3 Sep 2022 04:32:59 -0500 Subject: [PATCH 1877/2005] Ninja fix for quorum docstring --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 5739d5d98e..e6cb5c55bb 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -55,7 +55,7 @@ class ConsistencyLevel(object): QUORUM = 4 """ - ``ceil(RF/2)`` replicas must respond to consider the operation a success + ``ceil(RF/2) + 1`` replicas must respond to consider the operation a success """ ALL = 5 From b01372a879eacfc525d79fa299da1288d8342291 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Fri, 16 Dec 2022 15:17:51 -0600 Subject: [PATCH 1878/2005] Applying fixes to Jenkinsfile to get build working in AWS --- Jenkinsfile | 26 +++----------------------- 1 file changed, 3 insertions(+), 23 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index abb6092758..58f189ebee 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -357,26 +357,6 @@ def getDriverMetricType() { return metric_type } -def submitCIMetrics(buildType) { - long durationMs = currentBuild.duration - long durationSec = durationMs / 1000 - long nowSec = (currentBuild.startTimeInMillis + durationMs) / 1000 - def branchNameNoPeriods = env.BRANCH_NAME.replaceAll('\\.', '_') - metric_type = getDriverMetricType() - def durationMetric = "okr.ci.python.${metric_type}.${buildType}.${branchNameNoPeriods} ${durationSec} ${nowSec}" - - timeout(time: 1, unit: 'MINUTES') { - withCredentials([string(credentialsId: 'lab-grafana-address', variable: 'LAB_GRAFANA_ADDRESS'), - string(credentialsId: 'lab-grafana-port', variable: 'LAB_GRAFANA_PORT')]) { - withEnv(["DURATION_METRIC=${durationMetric}"]) { - sh label: 'Send runtime metrics to labgrafana', script: '''#!/bin/bash -lex - echo "${DURATION_METRIC}" | nc -q 5 ${LAB_GRAFANA_ADDRESS} ${LAB_GRAFANA_PORT} - ''' - } - } - } -} - def describeBuild(buildContext) { script { def runtimes = buildContext.matrix["RUNTIME"] @@ -387,7 +367,9 @@ def describeBuild(buildContext) { } } -def scheduleTriggerJobName = "drivers/python/oss/master/disabled" +def scheduleTriggerJobName() { + "drivers/python/oss/master/disabled" +} pipeline { agent none @@ -663,8 +645,6 @@ pipeline { // build and test all builds parallel getMatrixBuilds(context) - // send the metrics - submitCIMetrics('commit') slack.notifyChannel(currentBuild.currentResult) } } From 5503bab7688edbd378b424eb917d056c690c871c Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Fri, 16 Dec 2022 15:47:10 -0600 Subject: [PATCH 1879/2005] Fix to prior fix --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 58f189ebee..f39a79a8b4 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -608,7 +608,7 @@ pipeline { } triggers { - parameterizedCron((scheduleTriggerJobName == env.JOB_NAME) ? """ + parameterizedCron((scheduleTriggerJobName() == env.JOB_NAME) ? """ # Every weeknight (Monday - Friday) around 4:00 AM # These schedules will run with and without Cython enabled for Python v2.7.18 and v3.5.9 H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.18 3.5.9;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 From 3dbe62012389250e11d0f0e6cf731a5278af35d7 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Wed, 21 Dec 2022 14:48:08 -0600 Subject: [PATCH 1880/2005] Smaller smoke test configuration to avoid explosion of test builds in AWS --- Jenkinsfile | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index f39a79a8b4..283220e57d 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -50,6 +50,15 @@ matrices = [ "SERVER": ['dse-5.0', 'dse-5.1', 'dse-6.0', 'dse-6.7', 'dse-6.8'], "RUNTIME": ['2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], "CYTHON": ["True", "False"] + ], + /* + CI-friendly test configuration. Currently-supported Python version + modern C*/DSE instances. + We also avoid cython since it's tested as part of the nightlies. + */ + "SMOKE": [ + "SERVER": ['3.11', '4.0', 'dse-6.8'], + "RUNTIME": ['3.7.7', '3.8.3'], + "CYTHON": ["False"] ] ] @@ -72,7 +81,7 @@ def getBuildContext() { def profile = "${params.PROFILE}" def EVENT_LOOP = "${params.EVENT_LOOP.toLowerCase()}" - matrixType = "FULL" + matrixType = "SMOKE" developBranchPattern = ~"((dev|long)-)?python-.*" if (developBranchPattern.matcher(env.BRANCH_NAME).matches()) { @@ -404,7 +413,7 @@ pipeline { ''') choice( name: 'PROFILE', - choices: ['STANDARD', 'FULL', 'DSE-SMOKE-TEST', 'EVENT_LOOP'], + choices: ['SMOKE', 'STANDARD', 'FULL', 'DSE-SMOKE-TEST', 'EVENT_LOOP'], description: '''

Profile to utilize for scheduled or adhoc builds

@@ -413,6 +422,10 @@ pipeline { + + + + From 51416d9597d991eab22163a06a4b88cdd0939b8a Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Wed, 21 Dec 2022 14:52:21 -0600 Subject: [PATCH 1881/2005] Hey, let's actually update the right things, shall we? --- Jenkinsfile | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 283220e57d..e8281a15b5 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -413,7 +413,7 @@ pipeline {
Choice Description
SMOKEBasic smoke tests for current Python runtimes + C*/DSE versions
STANDARD Execute the standard tests for the driver
''') choice( name: 'PROFILE', - choices: ['SMOKE', 'STANDARD', 'FULL', 'DSE-SMOKE-TEST', 'EVENT_LOOP'], + choices: ['STANDARD', 'FULL', 'DSE-SMOKE-TEST', 'EVENT_LOOP'], description: '''

Profile to utilize for scheduled or adhoc builds

@@ -422,10 +422,6 @@ pipeline { - - - - @@ -445,7 +441,7 @@ pipeline {
Choice Description
SMOKEBasic smoke tests for current Python runtimes + C*/DSE versions
STANDARD Execute the standard tests for the driver
''') choice( name: 'MATRIX', - choices: ['DEFAULT', 'FULL', 'DEVELOP', 'CASSANDRA', 'DSE'], + choices: ['DEFAULT', 'SMOKE', 'FULL', 'DEVELOP', 'CASSANDRA', 'DSE'], description: '''

The matrix for the build.

@@ -458,6 +454,10 @@ pipeline { + + + + From 1c9b4bd34da54d2ad2fccdf8479b62704615bd50 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Wed, 21 Dec 2022 14:56:05 -0600 Subject: [PATCH 1882/2005] Groovy fixes --- Jenkinsfile | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index e8281a15b5..9536f52aa7 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -30,6 +30,9 @@ import com.datastax.jenkins.drivers.python.Slack slack = new Slack() // Define our predefined matrices +// +// Smoke tests are CI-friendly test configuration. Currently-supported Python version + modern C*/DSE instances. +// We also avoid cython since it's tested as part of the nightlies. matrices = [ "FULL": [ "SERVER": ['2.1', '2.2', '3.0', '3.11', '4.0', 'dse-5.0', 'dse-5.1', 'dse-6.0', 'dse-6.7', 'dse-6.8'], @@ -51,10 +54,6 @@ matrices = [ "RUNTIME": ['2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], "CYTHON": ["True", "False"] ], - /* - CI-friendly test configuration. Currently-supported Python version + modern C*/DSE instances. - We also avoid cython since it's tested as part of the nightlies. - */ "SMOKE": [ "SERVER": ['3.11', '4.0', 'dse-6.8'], "RUNTIME": ['3.7.7', '3.8.3'], From dee95953b070b2f72ab564f48236466a3564aaba Mon Sep 17 00:00:00 2001 From: Stefano Rivera Date: Wed, 4 Jan 2023 21:55:21 -0800 Subject: [PATCH 1883/2005] HostFilterPolicyInitTest fix for Python 3.11 (#1131) The AttributeError message for a missing property setter changed in bpo-46730 (https://bugs.python.org/issue46730) --- tests/unit/test_policies.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index a31b4f4c1b..88db23daba 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -1295,7 +1295,10 @@ def test_init_kwargs(self): )) def test_immutable_predicate(self): - expected_message_regex = "can't set attribute" + if sys.version_info >= (3, 11): + expected_message_regex = "has no setter" + else: + expected_message_regex = "can't set attribute" hfp = HostFilterPolicy(child_policy=Mock(name='child_policy'), predicate=Mock(name='predicate')) with self.assertRaisesRegexp(AttributeError, expected_message_regex): From da026e78361963f363c28638547179e24c89935d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomek=20=C5=81asica?= Date: Tue, 24 Jan 2023 21:11:52 +0100 Subject: [PATCH 1884/2005] Handle "log gone" case in the end of _run_loop (#1133) If log is somehow gone and file exception due to the race mention in PYTHON-1266 it will also inevitably fail for the same reason after the loop so we need to catch the exception there as well. --- cassandra/io/asyncorereactor.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 681552e589..074c62f690 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -259,7 +259,13 @@ def _run_loop(self): break self._started = False - log.debug("Asyncore event loop ended") + try: + log.debug("Asyncore event loop ended") + except Exception: + # TODO: Remove when Python 2 support is removed + # PYTHON-1266. If our logger has disappeared, there's nothing we + # can do, so just log nothing. + pass def add_timer(self, timer): self._timers.add_timer(timer) From 3638de4ae878109c691e61742d6e8ba5d0a98ebf Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Tue, 24 Jan 2023 14:25:14 -0600 Subject: [PATCH 1885/2005] Minor refactor of prior commit --- cassandra/io/asyncorereactor.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 074c62f690..0abdbbfe0a 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -248,24 +248,21 @@ def _run_loop(self): try: self._loop_dispatcher.loop(self.timer_resolution) self._timers.service_timeouts() - except Exception: - try: - log.debug("Asyncore event loop stopped unexpectedly", exc_info=True) - except Exception: - # TODO: Remove when Python 2 support is removed - # PYTHON-1266. If our logger has disappeared, there's nothing we - # can do, so just log nothing. - pass + except Exception as exc: + self._maybe_log_debug("Asyncore event loop stopped unexpectedly", exc_info=exc) break self._started = False + self._maybe_log_debug("Asyncore event loop ended") + + def _maybe_log_debug(self, *args, **kwargs): try: - log.debug("Asyncore event loop ended") + log.debug(*args, **kwargs) except Exception: # TODO: Remove when Python 2 support is removed # PYTHON-1266. If our logger has disappeared, there's nothing we # can do, so just log nothing. - pass + pass def add_timer(self, timer): self._timers.add_timer(timer) From 728c7126e1c7a09c6cd470cad2cba55fbbe49cb1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Reis?= Date: Thu, 23 Feb 2023 16:43:39 +0000 Subject: [PATCH 1886/2005] Fix jenkins builds (#1134) * remove master node dependency * set git env vars "manually" * fix branch_name * disable concurrent builds * workaround dse versions --- Jenkinsfile | 85 +++++++++++++++++++++++++++++++++-------------------- 1 file changed, 53 insertions(+), 32 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 9536f52aa7..0fdafb17d2 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -35,12 +35,12 @@ slack = new Slack() // We also avoid cython since it's tested as part of the nightlies. matrices = [ "FULL": [ - "SERVER": ['2.1', '2.2', '3.0', '3.11', '4.0', 'dse-5.0', 'dse-5.1', 'dse-6.0', 'dse-6.7', 'dse-6.8'], + "SERVER": ['2.1', '2.2', '3.0', '3.11', '4.0', 'dse-5.0.15', 'dse-5.1.35', 'dse-6.0.18', 'dse-6.7.17', 'dse-6.8.30'], "RUNTIME": ['2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], "CYTHON": ["True", "False"] ], "DEVELOP": [ - "SERVER": ['2.1', '3.11', 'dse-6.8'], + "SERVER": ['2.1', '3.11', 'dse-6.8.30'], "RUNTIME": ['2.7.18', '3.6.10'], "CYTHON": ["True", "False"] ], @@ -50,20 +50,20 @@ matrices = [ "CYTHON": ["True", "False"] ], "DSE": [ - "SERVER": ['dse-5.0', 'dse-5.1', 'dse-6.0', 'dse-6.7', 'dse-6.8'], + "SERVER": ['dse-5.0.15', 'dse-5.1.35', 'dse-6.0.18', 'dse-6.7.17', 'dse-6.8.30'], "RUNTIME": ['2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], "CYTHON": ["True", "False"] ], "SMOKE": [ - "SERVER": ['3.11', '4.0', 'dse-6.8'], + "SERVER": ['3.11', '4.0', 'dse-6.8.30'], "RUNTIME": ['3.7.7', '3.8.3'], "CYTHON": ["False"] ] ] -def getBuildContext() { +def initializeSlackContext() { /* - Based on schedule, parameters and branch name, configure the build context and env vars. + Based on git branch/commit, configure the build context and env vars. */ def driver_display_name = 'Cassandra Python Driver' @@ -72,11 +72,17 @@ def getBuildContext() { } else if (env.GIT_URL.contains('python-dse-driver')) { driver_display_name = 'DSE Python Driver' } + env.DRIVER_DISPLAY_NAME = driver_display_name + env.GIT_SHA = "${env.GIT_COMMIT.take(7)}" + env.GITHUB_PROJECT_URL = "https://${GIT_URL.replaceFirst(/(git@|http:\/\/|https:\/\/)/, '').replace(':', '/').replace('.git', '')}" + env.GITHUB_BRANCH_URL = "${env.GITHUB_PROJECT_URL}/tree/${env.BRANCH_NAME}" + env.GITHUB_COMMIT_URL = "${env.GITHUB_PROJECT_URL}/commit/${env.GIT_COMMIT}" +} - def git_sha = "${env.GIT_COMMIT.take(7)}" - def github_project_url = "https://${GIT_URL.replaceFirst(/(git@|http:\/\/|https:\/\/)/, '').replace(':', '/').replace('.git', '')}" - def github_branch_url = "${github_project_url}/tree/${env.BRANCH_NAME}" - def github_commit_url = "${github_project_url}/commit/${env.GIT_COMMIT}" +def getBuildContext() { + /* + Based on schedule and parameters, configure the build context and env vars. + */ def profile = "${params.PROFILE}" def EVENT_LOOP = "${params.EVENT_LOOP.toLowerCase()}" @@ -116,9 +122,7 @@ def getBuildContext() { context = [ vars: [ "PROFILE=${profile}", - "EVENT_LOOP=${EVENT_LOOP}", - "DRIVER_DISPLAY_NAME=${driver_display_name}", "GIT_SHA=${git_sha}", "GITHUB_PROJECT_URL=${github_project_url}", - "GITHUB_BRANCH_URL=${github_branch_url}", "GITHUB_COMMIT_URL=${github_commit_url}" + "EVENT_LOOP=${EVENT_LOOP}" ], matrix: matrix ] @@ -152,7 +156,14 @@ def getMatrixBuilds(buildContext) { def cythonDesc = cythonFlag == "True" ? ", Cython": "" tasks["${serverVersion}, py${runtimeVersion}${cythonDesc}"] = { node("${OS_VERSION}") { - checkout scm + scm_variables = checkout scm + env.GIT_COMMIT = scm_variables.get('GIT_COMMIT') + env.GIT_URL = scm_variables.get('GIT_URL') + initializeSlackContext() + + if (env.BUILD_STATED_SLACK_NOTIFIED != 'true') { + slack.notifyChannel() + } withEnv(taskVars) { buildAndTest(context) @@ -203,6 +214,21 @@ def initializeEnvironment() { . ${CCM_ENVIRONMENT_SHELL} ${CASSANDRA_VERSION} ''' + if (env.CASSANDRA_VERSION.split('-')[0] == 'dse') { + env.DSE_FIXED_VERSION = env.CASSANDRA_VERSION.split('-')[1] + sh label: 'Update environment for DataStax Enterprise', script: '''#!/bin/bash -le + cat >> ${HOME}/environment.txt << ENVIRONMENT_EOF +CCM_CASSANDRA_VERSION=${DSE_FIXED_VERSION} # maintain for backwards compatibility +CCM_VERSION=${DSE_FIXED_VERSION} +CCM_SERVER_TYPE=dse +DSE_VERSION=${DSE_FIXED_VERSION} +CCM_IS_DSE=true +CCM_BRANCH=${DSE_FIXED_VERSION} +DSE_BRANCH=${DSE_FIXED_VERSION} +ENVIRONMENT_EOF + ''' + } + sh label: 'Display Python and environment information', script: '''#!/bin/bash -le # Load CCM environment variables set -o allexport @@ -384,6 +410,7 @@ pipeline { // Global pipeline timeout options { + disableConcurrentBuilds() timeout(time: 10, unit: 'HOURS') // TODO timeout should be per build buildDiscarder(logRotator(artifactNumToKeepStr: '10', // Keep only the last 10 artifacts numToKeepStr: '50')) // Keep only the last 50 build records @@ -486,11 +513,11 @@ pipeline { '3.0', // Previous Apache CassandraⓇ '3.11', // Current Apache CassandraⓇ '4.0', // Development Apache CassandraⓇ - 'dse-5.0', // Long Term Support DataStax Enterprise - 'dse-5.1', // Legacy DataStax Enterprise - 'dse-6.0', // Previous DataStax Enterprise - 'dse-6.7', // Previous DataStax Enterprise - 'dse-6.8', // Current DataStax Enterprise + 'dse-5.0.15', // Long Term Support DataStax Enterprise + 'dse-5.1.35', // Legacy DataStax Enterprise + 'dse-6.0.18', // Previous DataStax Enterprise + 'dse-6.7.17', // Previous DataStax Enterprise + 'dse-6.8.30', // Current DataStax Enterprise ], description: '''Apache CassandraⓇ and DataStax Enterprise server version to use for adhoc BUILD-AND-EXECUTE-TESTS ONLY!
DEFAULT Default to the build context.
SMOKEBasic smoke tests for current Python runtimes + C*/DSE versions, no Cython
FULL All server versions, python runtimes tested with and without Cython.
@@ -525,23 +552,23 @@ pipeline { - + - + - + - + - +
Apache CassandraⓇ v4.x (CURRENTLY UNDER DEVELOPMENT)
dse-5.0dse-5.0.15 DataStax Enterprise v5.0.x (Long Term Support)
dse-5.1dse-5.1.35 DataStax Enterprise v5.1.x
dse-6.0dse-6.0.18 DataStax Enterprise v6.0.x
dse-6.7dse-6.7.17 DataStax Enterprise v6.7.x
dse-6.8dse-6.8.30 DataStax Enterprise v6.8.x (CURRENTLY UNDER DEVELOPMENT)
''') @@ -623,7 +650,7 @@ pipeline { parameterizedCron((scheduleTriggerJobName() == env.JOB_NAME) ? """ # Every weeknight (Monday - Friday) around 4:00 AM # These schedules will run with and without Cython enabled for Python v2.7.18 and v3.5.9 - H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.18 3.5.9;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 + H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.18 3.5.9;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1.35 dse-6.0.18 dse-6.7.17 """ : "") } @@ -635,11 +662,6 @@ pipeline { stages { stage ('Build and Test') { - agent { - // // If I removed this agent block, GIT_URL and GIT_COMMIT aren't set. - // // However, this trigger an additional checkout - label "master" - } when { beforeAgent true allOf { @@ -651,8 +673,7 @@ pipeline { script { context = getBuildContext() withEnv(context.vars) { - describeBuild(context) - slack.notifyChannel() + describeBuild(context) // build and test all builds parallel getMatrixBuilds(context) From 02aa886946d3e308f0e646cba8b61bed7a85ea11 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Tue, 28 Feb 2023 14:22:05 -0600 Subject: [PATCH 1887/2005] Merge pull request #1128 from python-driver/python-1304 Contains fixes for PYTHON-1304 and PYTHON-1287, both of which describe test failures caused by recent changes to driver code --- cassandra/pool.py | 8 ++++++-- tests/integration/standard/test_cluster.py | 15 +++++++++++++-- tests/integration/util.py | 12 +++++++++--- 3 files changed, 28 insertions(+), 7 deletions(-) diff --git a/cassandra/pool.py b/cassandra/pool.py index c82dfe9a6b..d61e81cd0d 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -568,7 +568,9 @@ def get_state(self): connection = self._connection open_count = 1 if connection and not (connection.is_closed or connection.is_defunct) else 0 in_flights = [connection.in_flight] if connection else [] - return {'shutdown': self.is_shutdown, 'open_count': open_count, 'in_flights': in_flights} + orphan_requests = [connection.orphaned_request_ids] if connection else [] + return {'shutdown': self.is_shutdown, 'open_count': open_count, \ + 'in_flights': in_flights, 'orphan_requests': orphan_requests} @property def open_count(self): @@ -926,4 +928,6 @@ def get_connections(self): def get_state(self): in_flights = [c.in_flight for c in self._connections] - return {'shutdown': self.is_shutdown, 'open_count': self.open_count, 'in_flights': in_flights} + orphan_requests = [c.orphaned_request_ids for c in self._connections] + return {'shutdown': self.is_shutdown, 'open_count': self.open_count, \ + 'in_flights': in_flights, 'orphan_requests': orphan_requests} diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index a15c7f32e2..deceed58fd 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -16,7 +16,7 @@ from collections import deque from copy import copy -from mock import Mock, call, patch +from mock import Mock, call, patch, ANY import time from uuid import uuid4 import logging @@ -1478,7 +1478,18 @@ def test_prepare_on_ignored_hosts(self): # the length of mock_calls will vary, but all should use the unignored # address for c in cluster.connection_factory.mock_calls: - self.assertEqual(call(DefaultEndPoint(unignored_address)), c) + # PYTHON-1287 + # + # Cluster._prepare_all_queries() will call connection_factory _without_ the + # on_orphaned_stream_released arg introduced in commit + # 387150acc365b6cf1daaee58c62db13e4929099a. The reconnect handler for the + # downed node _will_ add this arg when it tries to rebuild it's conn pool, and + # whether this occurs while running this test amounts to a race condition. So + # to cover this case we assert one of two call styles here... the key is that + # the _only_ address we should see is the unignored_address. + self.assertTrue( \ + c == call(DefaultEndPoint(unignored_address)) or \ + c == call(DefaultEndPoint(unignored_address), on_orphaned_stream_released=ANY)) cluster.shutdown() diff --git a/tests/integration/util.py b/tests/integration/util.py index 6215449d1f..bcc4cb829b 100644 --- a/tests/integration/util.py +++ b/tests/integration/util.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from itertools import chain + from tests.integration import PROTOCOL_VERSION import time @@ -38,14 +40,18 @@ def assert_quiescent_pool_state(test_case, cluster, wait=None): for state in pool_states: test_case.assertFalse(state['shutdown']) test_case.assertGreater(state['open_count'], 0) - test_case.assertTrue(all((i == 0 for i in state['in_flights']))) + no_in_flight = all((i == 0 for i in state['in_flights'])) + orphans_and_inflights = zip(state['orphan_requests'],state['in_flights']) + all_orphaned = all((len(orphans) == inflight for (orphans,inflight) in orphans_and_inflights)) + test_case.assertTrue(no_in_flight or all_orphaned) for holder in cluster.get_connection_holders(): for connection in holder.get_connections(): # all ids are unique req_ids = connection.request_ids + orphan_ids = connection.orphaned_request_ids test_case.assertEqual(len(req_ids), len(set(req_ids))) - test_case.assertEqual(connection.highest_request_id, len(req_ids) - 1) - test_case.assertEqual(connection.highest_request_id, max(req_ids)) + test_case.assertEqual(connection.highest_request_id, len(req_ids) + len(orphan_ids) - 1) + test_case.assertEqual(connection.highest_request_id, max(chain(req_ids, orphan_ids))) if PROTOCOL_VERSION < 3: test_case.assertEqual(connection.highest_request_id, connection.max_request_id) From b0030194e3e3c5c2a932315b2282dab75ec23f67 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Wed, 8 Mar 2023 09:55:53 -0600 Subject: [PATCH 1888/2005] Merge pull request #1137 from python-driver/python-1329 PYTHON-1329 Change expected port numbers if use_single_interface is used --- tests/integration/__init__.py | 19 +++++++++++++++---- .../standard/test_single_interface.py | 4 ++-- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index d3c3332649..a344931a4e 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -52,6 +52,14 @@ SINGLE_NODE_CLUSTER_NAME = 'single_node' MULTIDC_CLUSTER_NAME = 'multidc_test_cluster' +# When use_single_interface is specified ccm will assign distinct port numbers to each +# node in the cluster. This value specifies the default port value used for the first +# node that comes up. +# +# TODO: In the future we may want to make this configurable, but this should only apply +# if a non-standard port were specified when starting up the cluster. +DEFAULT_SINGLE_INTERFACE_PORT=9046 + CCM_CLUSTER = None path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'ccm') @@ -593,7 +601,10 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, wait_for_node_socket(node, 300) log.debug("Binary ports are open") if set_keyspace: - setup_keyspace(ipformat=ipformat) + args = {"ipformat": ipformat} + if use_single_interface: + args["port"] = DEFAULT_SINGLE_INTERFACE_PORT + setup_keyspace(**args) except Exception: log.exception("Failed to start CCM cluster; removing cluster.") @@ -692,7 +703,7 @@ def drop_keyspace_shutdown_cluster(keyspace_name, session, cluster): cluster.shutdown() -def setup_keyspace(ipformat=None, wait=True, protocol_version=None): +def setup_keyspace(ipformat=None, wait=True, protocol_version=None, port=9042): # wait for nodes to startup if wait: time.sleep(10) @@ -703,9 +714,9 @@ def setup_keyspace(ipformat=None, wait=True, protocol_version=None): _protocol_version = PROTOCOL_VERSION if not ipformat: - cluster = TestCluster(protocol_version=_protocol_version) + cluster = TestCluster(protocol_version=_protocol_version, port=port) else: - cluster = TestCluster(contact_points=["::1"], protocol_version=_protocol_version) + cluster = TestCluster(contact_points=["::1"], protocol_version=_protocol_version, port=port) session = cluster.connect() try: diff --git a/tests/integration/standard/test_single_interface.py b/tests/integration/standard/test_single_interface.py index 4677eff641..ffd2bbe9c4 100644 --- a/tests/integration/standard/test_single_interface.py +++ b/tests/integration/standard/test_single_interface.py @@ -22,7 +22,7 @@ from packaging.version import Version from tests.integration import use_singledc, PROTOCOL_VERSION, \ remove_cluster, greaterthanorequalcass40, notdse, \ - CASSANDRA_VERSION, DSE_VERSION, TestCluster + CASSANDRA_VERSION, DSE_VERSION, TestCluster, DEFAULT_SINGLE_INTERFACE_PORT def setup_module(): @@ -39,7 +39,7 @@ def teardown_module(): class SingleInterfaceTest(unittest.TestCase): def setUp(self): - self.cluster = TestCluster() + self.cluster = TestCluster(port=DEFAULT_SINGLE_INTERFACE_PORT) self.session = self.cluster.connect() def tearDown(self): From ff704d6a225c10de270c47bf68d1c1559ba7839e Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Thu, 9 Mar 2023 15:46:56 -0600 Subject: [PATCH 1889/2005] Merge pull request #1139 from python-driver/python-1328 PYTHON-1328: Add explicit wait to give cluster time to get initialized --- tests/integration/standard/test_authentication.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index 189da45c94..b055bc75ec 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -12,14 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. +from packaging.version import Version import logging import time from cassandra.cluster import NoHostAvailable from cassandra.auth import PlainTextAuthProvider, SASLClient, SaslAuthProvider -from tests.integration import use_singledc, get_cluster, remove_cluster, PROTOCOL_VERSION, CASSANDRA_IP, \ - USE_CASS_EXTERNAL, start_cluster_wait_for_up, TestCluster +from tests.integration import use_singledc, get_cluster, remove_cluster, PROTOCOL_VERSION, \ + CASSANDRA_IP, CASSANDRA_VERSION, USE_CASS_EXTERNAL, start_cluster_wait_for_up, TestCluster from tests.integration.util import assert_quiescent_pool_state import unittest @@ -42,12 +43,19 @@ def setup_module(): log.debug("Starting ccm test cluster with %s", config_options) start_cluster_wait_for_up(ccm_cluster) + # PYTHON-1328 + # + # Give the cluster enough time to startup (and perform necessary initialization) + # before executing the test. + if CASSANDRA_VERSION > Version('4.0-a'): + time.sleep(10) def teardown_module(): remove_cluster() # this test messes with config class AuthenticationTests(unittest.TestCase): + """ Tests to cover basic authentication functionality """ @@ -86,6 +94,7 @@ def cluster_as(self, usr, pwd): raise Exception('Unable to connect with creds: {}/{}'.format(usr, pwd)) def test_auth_connect(self): + user = 'u' passwd = 'password' From ffe0097505b96b61c5fd2ec626aa934caf397873 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Thu, 9 Mar 2023 15:48:35 -0600 Subject: [PATCH 1890/2005] Merge pull request #1140 from python-driver/python-1327 PYTHON-1327: Add annotation to note server-side fix for certain C* versions --- tests/integration/standard/test_prepared_statements.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index 1ed48d2964..a643b19c07 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -13,9 +13,12 @@ # limitations under the License. -from tests.integration import use_singledc, PROTOCOL_VERSION, TestCluster +from tests.integration import use_singledc, PROTOCOL_VERSION, TestCluster, CASSANDRA_VERSION import unittest + +from packaging.version import Version + from cassandra import InvalidRequest, DriverException from cassandra import ConsistencyLevel, ProtocolVersion @@ -392,6 +395,9 @@ def test_raise_error_on_prepared_statement_execution_dropped_table(self): with self.assertRaises(InvalidRequest): self.session.execute(prepared, [0]) + @unittest.skipIf((CASSANDRA_VERSION >= Version('3.11.12') and CASSANDRA_VERSION < Version('4.0')) or \ + CASSANDRA_VERSION >= Version('4.0.2'), + "Fixed server-side in Cassandra 3.11.12, 4.0.2") def test_fail_if_different_query_id_on_reprepare(self): """ PYTHON-1124 and CASSANDRA-15252 """ keyspace = "test_fail_if_different_query_id_on_reprepare" From 4da7001b38e65e8d578d0b71b37ef7be3a618c2e Mon Sep 17 00:00:00 2001 From: Karthikeyan Singaravelan Date: Fri, 10 Mar 2023 03:39:01 +0530 Subject: [PATCH 1891/2005] Refactor deprecated unittest aliases for Python 3.11 compatibility. (#1112) --- .../cqlengine/management/test_compaction_settings.py | 6 +++--- .../integration/cqlengine/management/test_management.py | 3 ++- .../cqlengine/model/test_class_construction.py | 3 ++- tests/integration/cqlengine/test_batch_query.py | 3 ++- tests/integration/long/test_ipv6.py | 5 +++-- tests/integration/simulacron/test_connection.py | 3 ++- tests/integration/standard/test_authentication.py | 9 +++++---- tests/integration/standard/test_client_warnings.py | 9 +++++---- tests/integration/standard/test_cluster.py | 7 ++++--- tests/integration/standard/test_metadata.py | 6 +++--- tests/integration/standard/test_single_interface.py | 2 +- tests/integration/standard/test_types.py | 6 +++--- tests/unit/advanced/test_graph.py | 2 +- tests/unit/cqlengine/test_connection.py | 6 ++++-- tests/unit/test_connection.py | 2 +- tests/unit/test_control_connection.py | 4 ++-- tests/unit/test_policies.py | 2 +- tests/unit/test_protocol.py | 3 ++- tests/unit/test_response_future.py | 4 +++- tests/unit/test_timestamps.py | 3 ++- 20 files changed, 51 insertions(+), 37 deletions(-) diff --git a/tests/integration/cqlengine/management/test_compaction_settings.py b/tests/integration/cqlengine/management/test_compaction_settings.py index d5dea12744..673bda29a7 100644 --- a/tests/integration/cqlengine/management/test_compaction_settings.py +++ b/tests/integration/cqlengine/management/test_compaction_settings.py @@ -83,7 +83,7 @@ def test_alter_actually_alters(self): table_meta = _get_table_metadata(tmp) - self.assertRegexpMatches(table_meta.export_as_string(), '.*SizeTieredCompactionStrategy.*') + six.assertRegex(self, table_meta.export_as_string(), '.*SizeTieredCompactionStrategy.*') def test_alter_options(self): @@ -97,11 +97,11 @@ class AlterTable(Model): drop_table(AlterTable) sync_table(AlterTable) table_meta = _get_table_metadata(AlterTable) - self.assertRegexpMatches(table_meta.export_as_string(), ".*'sstable_size_in_mb': '64'.*") + six.assertRegex(self, table_meta.export_as_string(), ".*'sstable_size_in_mb': '64'.*") AlterTable.__options__['compaction']['sstable_size_in_mb'] = '128' sync_table(AlterTable) table_meta = _get_table_metadata(AlterTable) - self.assertRegexpMatches(table_meta.export_as_string(), ".*'sstable_size_in_mb': '128'.*") + six.assertRegex(self, table_meta.export_as_string(), ".*'sstable_size_in_mb': '128'.*") class OptionsTest(BaseCassEngTestCase): diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index f37db5e51f..67f87b10e4 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -13,6 +13,7 @@ # limitations under the License. import unittest +import six import mock import logging from packaging.version import Version @@ -261,7 +262,7 @@ def test_bogus_option_update(self): option = 'no way will this ever be an option' try: ModelWithTableProperties.__options__[option] = 'what was I thinking?' - self.assertRaisesRegexp(KeyError, "Invalid table option.*%s.*" % option, sync_table, ModelWithTableProperties) + six.assertRaisesRegex(self, KeyError, "Invalid table option.*%s.*" % option, sync_table, ModelWithTableProperties) finally: ModelWithTableProperties.__options__.pop(option, None) diff --git a/tests/integration/cqlengine/model/test_class_construction.py b/tests/integration/cqlengine/model/test_class_construction.py index 9c5afecbfc..95ba1f49bd 100644 --- a/tests/integration/cqlengine/model/test_class_construction.py +++ b/tests/integration/cqlengine/model/test_class_construction.py @@ -15,6 +15,7 @@ from uuid import uuid4 import warnings +import six from cassandra.cqlengine import columns, CQLEngineException from cassandra.cqlengine.models import Model, ModelException, ModelDefinitionException, ColumnQueryEvaluator from cassandra.cqlengine.query import ModelQuerySet, DMLQuery @@ -91,7 +92,7 @@ def test_attempting_to_make_duplicate_column_names_fails(self): Tests that trying to create conflicting db column names will fail """ - with self.assertRaisesRegexp(ModelException, r".*more than once$"): + with six.assertRaisesRegex(self, ModelException, r".*more than once$"): class BadNames(Model): words = columns.Text(primary_key=True) content = columns.Text(db_field='words') diff --git a/tests/integration/cqlengine/test_batch_query.py b/tests/integration/cqlengine/test_batch_query.py index 7b78fa9979..07ee2e13bf 100644 --- a/tests/integration/cqlengine/test_batch_query.py +++ b/tests/integration/cqlengine/test_batch_query.py @@ -13,6 +13,7 @@ # limitations under the License. import warnings +import six import sure from cassandra.cqlengine import columns @@ -223,7 +224,7 @@ def my_callback(*args, **kwargs): batch.execute() batch.execute() self.assertEqual(len(w), 2) # package filter setup to warn always - self.assertRegexpMatches(str(w[0].message), r"^Batch.*multiple.*") + six.assertRegex(self, str(w[0].message), r"^Batch.*multiple.*") def test_disable_multiple_callback_warning(self): """ diff --git a/tests/integration/long/test_ipv6.py b/tests/integration/long/test_ipv6.py index b63fdebcf3..6c7d447dfb 100644 --- a/tests/integration/long/test_ipv6.py +++ b/tests/integration/long/test_ipv6.py @@ -13,6 +13,7 @@ # limitations under the License. import os, socket, errno +import six from ccmlib import common from cassandra.cluster import NoHostAvailable @@ -82,7 +83,7 @@ def test_connect(self): def test_error(self): cluster = TestCluster(connection_class=self.connection_class, contact_points=['::1'], port=9043, connect_timeout=10) - self.assertRaisesRegexp(NoHostAvailable, '\(\'Unable to connect.*%s.*::1\', 9043.*Connection refused.*' + six.assertRaisesRegex(self, NoHostAvailable, '\(\'Unable to connect.*%s.*::1\', 9043.*Connection refused.*' % errno.ECONNREFUSED, cluster.connect) def test_error_multiple(self): @@ -90,7 +91,7 @@ def test_error_multiple(self): raise unittest.SkipTest('localhost only resolves one address') cluster = TestCluster(connection_class=self.connection_class, contact_points=['localhost'], port=9043, connect_timeout=10) - self.assertRaisesRegexp(NoHostAvailable, '\(\'Unable to connect.*Tried connecting to \[\(.*\(.*\].*Last error', + six.assertRaisesRegex(self, NoHostAvailable, '\(\'Unable to connect.*Tried connecting to \[\(.*\(.*\].*Last error', cluster.connect) diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index 0c70d0a1e9..e34e69f458 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -14,6 +14,7 @@ import unittest import logging +import six import time from mock import Mock, patch @@ -262,7 +263,7 @@ def connection_factory(self, *args, **kwargs): prime_request(PrimeOptions(then={"result": "no_result", "delay_in_ms": never})) prime_request(RejectConnections("unbind")) - self.assertRaisesRegexp(OperationTimedOut, "Connection defunct by heartbeat", future.result) + six.assertRaisesRegex(self, OperationTimedOut, "Connection defunct by heartbeat", future.result) def test_close_when_query(self): """ diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index b055bc75ec..c23c9eedf2 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -16,6 +16,7 @@ import logging import time +import six from cassandra.cluster import NoHostAvailable from cassandra.auth import PlainTextAuthProvider, SASLClient, SaslAuthProvider @@ -121,7 +122,7 @@ def test_auth_connect(self): def test_connect_wrong_pwd(self): cluster = self.cluster_as('cassandra', 'wrong_pass') try: - self.assertRaisesRegexp(NoHostAvailable, + six.assertRaisesRegex(self, NoHostAvailable, '.*AuthenticationFailed.', cluster.connect) assert_quiescent_pool_state(self, cluster) @@ -131,7 +132,7 @@ def test_connect_wrong_pwd(self): def test_connect_wrong_username(self): cluster = self.cluster_as('wrong_user', 'cassandra') try: - self.assertRaisesRegexp(NoHostAvailable, + six.assertRaisesRegex(self, NoHostAvailable, '.*AuthenticationFailed.*', cluster.connect) assert_quiescent_pool_state(self, cluster) @@ -141,7 +142,7 @@ def test_connect_wrong_username(self): def test_connect_empty_pwd(self): cluster = self.cluster_as('Cassandra', '') try: - self.assertRaisesRegexp(NoHostAvailable, + six.assertRaisesRegex(self, NoHostAvailable, '.*AuthenticationFailed.*', cluster.connect) assert_quiescent_pool_state(self, cluster) @@ -151,7 +152,7 @@ def test_connect_empty_pwd(self): def test_connect_no_auth_provider(self): cluster = TestCluster() try: - self.assertRaisesRegexp(NoHostAvailable, + six.assertRaisesRegex(self, NoHostAvailable, '.*AuthenticationFailed.*', cluster.connect) assert_quiescent_pool_state(self, cluster) diff --git a/tests/integration/standard/test_client_warnings.py b/tests/integration/standard/test_client_warnings.py index 5f63b5265a..166f172a16 100644 --- a/tests/integration/standard/test_client_warnings.py +++ b/tests/integration/standard/test_client_warnings.py @@ -15,6 +15,7 @@ import unittest +import six from cassandra.query import BatchStatement from tests.integration import use_singledc, PROTOCOL_VERSION, local, TestCluster @@ -70,7 +71,7 @@ def test_warning_basic(self): future = self.session.execute_async(self.warn_batch) future.result() self.assertEqual(len(future.warnings), 1) - self.assertRegexpMatches(future.warnings[0], 'Batch.*exceeding.*') + six.assertRegex(self, future.warnings[0], 'Batch.*exceeding.*') def test_warning_with_trace(self): """ @@ -86,7 +87,7 @@ def test_warning_with_trace(self): future = self.session.execute_async(self.warn_batch, trace=True) future.result() self.assertEqual(len(future.warnings), 1) - self.assertRegexpMatches(future.warnings[0], 'Batch.*exceeding.*') + six.assertRegex(self, future.warnings[0], 'Batch.*exceeding.*') self.assertIsNotNone(future.get_query_trace()) @local @@ -105,7 +106,7 @@ def test_warning_with_custom_payload(self): future = self.session.execute_async(self.warn_batch, custom_payload=payload) future.result() self.assertEqual(len(future.warnings), 1) - self.assertRegexpMatches(future.warnings[0], 'Batch.*exceeding.*') + six.assertRegex(self, future.warnings[0], 'Batch.*exceeding.*') self.assertDictEqual(future.custom_payload, payload) @local @@ -124,6 +125,6 @@ def test_warning_with_trace_and_custom_payload(self): future = self.session.execute_async(self.warn_batch, trace=True, custom_payload=payload) future.result() self.assertEqual(len(future.warnings), 1) - self.assertRegexpMatches(future.warnings[0], 'Batch.*exceeding.*') + six.assertRegex(self, future.warnings[0], 'Batch.*exceeding.*') self.assertIsNotNone(future.get_query_trace()) self.assertDictEqual(future.custom_payload, payload) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index deceed58fd..c5f64f6c28 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -23,6 +23,7 @@ import warnings from packaging.version import Version +import six import cassandra from cassandra.cluster import NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT, ControlConnection, Cluster from cassandra.concurrent import execute_concurrent @@ -147,7 +148,7 @@ def test_raise_error_on_control_connection_timeout(self): get_node(1).pause() cluster = TestCluster(contact_points=['127.0.0.1'], connect_timeout=1) - with self.assertRaisesRegexp(NoHostAvailable, "OperationTimedOut\('errors=Timed out creating connection \(1 seconds\)"): + with six.assertRaisesRegex(self, NoHostAvailable, "OperationTimedOut\('errors=Timed out creating connection \(1 seconds\)"): cluster.connect() cluster.shutdown() @@ -535,7 +536,7 @@ def patched_wait_for_responses(*args, **kwargs): # cluster agreement wait used for refresh original_meta = c.metadata.keyspaces start_time = time.time() - self.assertRaisesRegexp(Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata) + six.assertRaisesRegex(self, Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata) end_time = time.time() self.assertGreaterEqual(end_time - start_time, agreement_timeout) self.assertIs(original_meta, c.metadata.keyspaces) @@ -572,7 +573,7 @@ def patched_wait_for_responses(*args, **kwargs): # refresh wait overrides cluster value original_meta = c.metadata.keyspaces start_time = time.time() - self.assertRaisesRegexp(Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata, + six.assertRaisesRegex(self, Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata, max_schema_agreement_wait=agreement_timeout) end_time = time.time() self.assertGreaterEqual(end_time - start_time, agreement_timeout) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index e20f1f0640..6f76c2a9b0 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1590,7 +1590,7 @@ def test_function_no_parameters(self): with self.VerifiedFunction(self, **kwargs) as vf: fn_meta = self.keyspace_function_meta[vf.signature] - self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE FUNCTION.*%s\(\) .*" % kwargs['name']) + six.assertRegex(self, fn_meta.as_cql_query(), "CREATE FUNCTION.*%s\(\) .*" % kwargs['name']) def test_functions_follow_keyspace_alter(self): """ @@ -1638,12 +1638,12 @@ def test_function_cql_called_on_null(self): kwargs['called_on_null_input'] = True with self.VerifiedFunction(self, **kwargs) as vf: fn_meta = self.keyspace_function_meta[vf.signature] - self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE FUNCTION.*\) CALLED ON NULL INPUT RETURNS .*") + six.assertRegex(self, fn_meta.as_cql_query(), "CREATE FUNCTION.*\) CALLED ON NULL INPUT RETURNS .*") kwargs['called_on_null_input'] = False with self.VerifiedFunction(self, **kwargs) as vf: fn_meta = self.keyspace_function_meta[vf.signature] - self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE FUNCTION.*\) RETURNS NULL ON NULL INPUT RETURNS .*") + six.assertRegex(self, fn_meta.as_cql_query(), "CREATE FUNCTION.*\) RETURNS NULL ON NULL INPUT RETURNS .*") class AggregateMetadata(FunctionTest): diff --git a/tests/integration/standard/test_single_interface.py b/tests/integration/standard/test_single_interface.py index ffd2bbe9c4..8d407be958 100644 --- a/tests/integration/standard/test_single_interface.py +++ b/tests/integration/standard/test_single_interface.py @@ -71,4 +71,4 @@ def test_single_interface(self): consistency_level=ConsistencyLevel.ALL)) for pool in self.session.get_pools(): - self.assertEquals(1, pool.get_state()['open_count']) + self.assertEqual(1, pool.get_state()['open_count']) diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index f69e88c64f..828f10b5e2 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -69,7 +69,7 @@ def test_can_insert_blob_type_as_string(self): msg = r'.*Invalid STRING constant \(.*?\) for "b" of type blob.*' else: msg = r'.*Invalid STRING constant \(.*?\) for b of type blob.*' - self.assertRaisesRegexp(InvalidRequest, msg, s.execute, query, params) + six.assertRaisesRegex(self, InvalidRequest, msg, s.execute, query, params) return # In python2, with Cassandra < 2.0, we can manually encode the 'byte str' type as hex for insertion in a blob. @@ -1060,7 +1060,7 @@ def _daterange_round_trip(self, to_insert, expected=None): results = self.session.execute(prep_sel) dr = results[0].dr - # sometimes this is truncated in the assertEquals output on failure; + # sometimes this is truncated in the assertEqual output on failure; if isinstance(expected, six.string_types): self.assertEqual(str(dr), expected) else: @@ -1114,7 +1114,7 @@ def _daterange_round_trip(self, to_insert, expected=None): results= self.session.execute("SELECT * FROM tab WHERE dr = '{0}' ".format(to_insert)) dr = results[0].dr - # sometimes this is truncated in the assertEquals output on failure; + # sometimes this is truncated in the assertEqual output on failure; if isinstance(expected, six.string_types): self.assertEqual(str(dr), expected) else: diff --git a/tests/unit/advanced/test_graph.py b/tests/unit/advanced/test_graph.py index 25dd289dba..77a920a3bf 100644 --- a/tests/unit/advanced/test_graph.py +++ b/tests/unit/advanced/test_graph.py @@ -259,7 +259,7 @@ def test_init_unknown_kwargs(self): with warnings.catch_warnings(record=True) as w: GraphOptions(unknown_param=42) self.assertEqual(len(w), 1) - self.assertRegexpMatches(str(w[0].message), r"^Unknown keyword.*GraphOptions.*") + six.assertRegex(self, str(w[0].message), r"^Unknown keyword.*GraphOptions.*") def test_update(self): opts = GraphOptions(**self.api_params) diff --git a/tests/unit/cqlengine/test_connection.py b/tests/unit/cqlengine/test_connection.py index 8e3a0b75bd..9c3454796a 100644 --- a/tests/unit/cqlengine/test_connection.py +++ b/tests/unit/cqlengine/test_connection.py @@ -14,6 +14,8 @@ import unittest +import six + from cassandra.cluster import _ConfigMode from cassandra.cqlengine import connection from cassandra.query import dict_factory @@ -50,12 +52,12 @@ def test_get_session_fails_without_existing_connection(self): """ Users can't get the default session without having a default connection set. """ - with self.assertRaisesRegexp(connection.CQLEngineException, self.no_registered_connection_msg): + with six.assertRaisesRegex(self, connection.CQLEngineException, self.no_registered_connection_msg): connection.get_session(connection=None) def test_get_cluster_fails_without_existing_connection(self): """ Users can't get the default cluster without having a default connection set. """ - with self.assertRaisesRegexp(connection.CQLEngineException, self.no_registered_connection_msg): + with six.assertRaisesRegex(self, connection.CQLEngineException, self.no_registered_connection_msg): connection.get_cluster(connection=None) diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index f06b67ebe0..97faa5e7fc 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -392,7 +392,7 @@ def send_msg(msg, req_id, msg_callback): connection.defunct.assert_has_calls([call(ANY)] * get_holders.call_count) exc = connection.defunct.call_args_list[0][0][0] self.assertIsInstance(exc, ConnectionException) - self.assertRegexpMatches(exc.args[0], r'^Received unexpected response to OptionsMessage.*') + six.assertRegex(self, exc.args[0], r'^Received unexpected response to OptionsMessage.*') holder.return_connection.assert_has_calls( [call(connection)] * get_holders.call_count) diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index 276b2849ca..53a5d6affc 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -526,7 +526,7 @@ def test_refresh_nodes_and_tokens_add_host_detects_port(self): self.assertEqual(self.cluster.added_hosts[0].broadcast_rpc_address, "192.168.1.3") self.assertEqual(self.cluster.added_hosts[0].broadcast_rpc_port, 555) self.assertEqual(self.cluster.added_hosts[0].broadcast_address, "10.0.0.3") - self.assertEquals(self.cluster.added_hosts[0].broadcast_port, 666) + self.assertEqual(self.cluster.added_hosts[0].broadcast_port, 666) self.assertEqual(self.cluster.added_hosts[0].datacenter, "dc1") self.assertEqual(self.cluster.added_hosts[0].rack, "rack1") @@ -546,7 +546,7 @@ def test_refresh_nodes_and_tokens_add_host_detects_invalid_port(self): self.assertEqual(self.cluster.added_hosts[0].broadcast_rpc_address, "192.168.1.3") self.assertEqual(self.cluster.added_hosts[0].broadcast_rpc_port, None) self.assertEqual(self.cluster.added_hosts[0].broadcast_address, "10.0.0.3") - self.assertEquals(self.cluster.added_hosts[0].broadcast_port, None) + self.assertEqual(self.cluster.added_hosts[0].broadcast_port, None) self.assertEqual(self.cluster.added_hosts[0].datacenter, "dc1") self.assertEqual(self.cluster.added_hosts[0].rack, "rack1") diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 88db23daba..edafb7cb01 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -1301,7 +1301,7 @@ def test_immutable_predicate(self): expected_message_regex = "can't set attribute" hfp = HostFilterPolicy(child_policy=Mock(name='child_policy'), predicate=Mock(name='predicate')) - with self.assertRaisesRegexp(AttributeError, expected_message_regex): + with six.assertRaisesRegex(self, AttributeError, expected_message_regex): hfp.predicate = object() diff --git a/tests/unit/test_protocol.py b/tests/unit/test_protocol.py index 95a7a12b11..3d6828bdc5 100644 --- a/tests/unit/test_protocol.py +++ b/tests/unit/test_protocol.py @@ -14,6 +14,7 @@ import unittest +import six from mock import Mock from cassandra import ProtocolVersion, UnsupportedOperation @@ -172,7 +173,7 @@ def test_keyspace_flag_raises_before_v5(self): keyspace_message = QueryMessage('a', consistency_level=3, keyspace='ks') io = Mock(name='io') - with self.assertRaisesRegexp(UnsupportedOperation, 'Keyspaces.*set'): + with six.assertRaisesRegex(self, UnsupportedOperation, 'Keyspaces.*set'): keyspace_message.send_body(io, protocol_version=4) io.assert_not_called() diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index dbd8764ad9..a9c05976e0 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -16,6 +16,8 @@ from collections import deque from threading import RLock + +import six from mock import Mock, MagicMock, ANY from cassandra import ConsistencyLevel, Unavailable, SchemaTargetType, SchemaChangeType, OperationTimedOut @@ -158,7 +160,7 @@ def test_heartbeat_defunct_deadlock(self): # Simulate ResponseFuture timing out rf._on_timeout() - self.assertRaisesRegexp(OperationTimedOut, "Connection defunct by heartbeat", rf.result) + six.assertRaisesRegex(self, OperationTimedOut, "Connection defunct by heartbeat", rf.result) def test_read_timeout_error_message(self): session = self.make_session() diff --git a/tests/unit/test_timestamps.py b/tests/unit/test_timestamps.py index 58958cff03..fc1be071ad 100644 --- a/tests/unit/test_timestamps.py +++ b/tests/unit/test_timestamps.py @@ -15,6 +15,7 @@ import unittest import mock +import six from cassandra import timestamps from threading import Thread, Lock @@ -105,7 +106,7 @@ def assertLastCallArgRegex(self, call, pattern): last_warn_args, last_warn_kwargs = call self.assertEqual(len(last_warn_args), 1) self.assertEqual(len(last_warn_kwargs), 0) - self.assertRegexpMatches( + six.assertRegex(self, last_warn_args[0], pattern, ) From 6111c0c2d850433552ba87aa61cdda3c88d139eb Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Thu, 9 Mar 2023 16:24:58 -0600 Subject: [PATCH 1892/2005] Remove references to unsupported Python versions from setup.py --- setup.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/setup.py b/setup.py index aaaa1b4d2d..056469aca6 100644 --- a/setup.py +++ b/setup.py @@ -37,8 +37,6 @@ DistutilsExecError) from distutils.cmd import Command -PY3 = sys.version_info[0] == 3 - try: import subprocess has_subprocess = True @@ -406,9 +404,6 @@ def run_setup(extensions): dependencies = ['six >=1.9', 'geomet>=0.1,<0.3'] - if not PY3: - dependencies.append('futures') - _EXTRAS_REQUIRE = { 'graph': ['gremlinpython==3.4.6'] } @@ -442,9 +437,6 @@ def run_setup(extensions): 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: Implementation :: CPython', From 922d7ad565b4d3b3e0aafd6898c2639d968c6534 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Thu, 9 Mar 2023 17:09:13 -0600 Subject: [PATCH 1893/2005] Minor refactor of prior commit: now that we're dropping 2.7.x support we don't really need to leverage six for unit test functions. --- tests/integration/advanced/graph/test_graph.py | 2 +- .../cqlengine/management/test_compaction_settings.py | 6 +++--- tests/integration/cqlengine/management/test_management.py | 2 +- .../cqlengine/model/test_class_construction.py | 2 +- tests/integration/cqlengine/test_batch_query.py | 2 +- tests/integration/long/test_ipv6.py | 4 ++-- tests/integration/simulacron/test_connection.py | 2 +- tests/integration/standard/test_authentication.py | 8 ++++---- tests/integration/standard/test_client_warnings.py | 8 ++++---- tests/integration/standard/test_cluster.py | 6 +++--- tests/integration/standard/test_metadata.py | 6 +++--- tests/integration/standard/test_types.py | 2 +- tests/unit/advanced/test_graph.py | 2 +- tests/unit/cqlengine/test_connection.py | 4 ++-- tests/unit/test_connection.py | 2 +- tests/unit/test_policies.py | 2 +- tests/unit/test_protocol.py | 2 +- tests/unit/test_response_future.py | 2 +- 18 files changed, 32 insertions(+), 32 deletions(-) diff --git a/tests/integration/advanced/graph/test_graph.py b/tests/integration/advanced/graph/test_graph.py index a0b6534c34..277283ea5a 100644 --- a/tests/integration/advanced/graph/test_graph.py +++ b/tests/integration/advanced/graph/test_graph.py @@ -266,6 +266,6 @@ def test_graph_protocol_default_for_core_fallback_to_graphson1_if_no_graph_name( self.assertEqual(ep.row_factory, graph_object_row_factory) regex = re.compile(".*Variable.*is unknown.*", re.S) - with six.assertRaisesRegex(self, SyntaxException, regex): + with self.assertRaisesRegex(SyntaxException, regex): self.execute_graph_queries(CoreGraphSchema.fixtures.classic(), execution_profile=ep, verify_graphson=GraphProtocol.GRAPHSON_1_0) diff --git a/tests/integration/cqlengine/management/test_compaction_settings.py b/tests/integration/cqlengine/management/test_compaction_settings.py index 673bda29a7..604e225586 100644 --- a/tests/integration/cqlengine/management/test_compaction_settings.py +++ b/tests/integration/cqlengine/management/test_compaction_settings.py @@ -83,7 +83,7 @@ def test_alter_actually_alters(self): table_meta = _get_table_metadata(tmp) - six.assertRegex(self, table_meta.export_as_string(), '.*SizeTieredCompactionStrategy.*') + self.assertRegex(table_meta.export_as_string(), '.*SizeTieredCompactionStrategy.*') def test_alter_options(self): @@ -97,11 +97,11 @@ class AlterTable(Model): drop_table(AlterTable) sync_table(AlterTable) table_meta = _get_table_metadata(AlterTable) - six.assertRegex(self, table_meta.export_as_string(), ".*'sstable_size_in_mb': '64'.*") + self.assertRegex(table_meta.export_as_string(), ".*'sstable_size_in_mb': '64'.*") AlterTable.__options__['compaction']['sstable_size_in_mb'] = '128' sync_table(AlterTable) table_meta = _get_table_metadata(AlterTable) - six.assertRegex(self, table_meta.export_as_string(), ".*'sstable_size_in_mb': '128'.*") + self.assertRegex(table_meta.export_as_string(), ".*'sstable_size_in_mb': '128'.*") class OptionsTest(BaseCassEngTestCase): diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 67f87b10e4..2fd35b865e 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -262,7 +262,7 @@ def test_bogus_option_update(self): option = 'no way will this ever be an option' try: ModelWithTableProperties.__options__[option] = 'what was I thinking?' - six.assertRaisesRegex(self, KeyError, "Invalid table option.*%s.*" % option, sync_table, ModelWithTableProperties) + self.assertRaisesRegex(KeyError, "Invalid table option.*%s.*" % option, sync_table, ModelWithTableProperties) finally: ModelWithTableProperties.__options__.pop(option, None) diff --git a/tests/integration/cqlengine/model/test_class_construction.py b/tests/integration/cqlengine/model/test_class_construction.py index 95ba1f49bd..f764e78e5c 100644 --- a/tests/integration/cqlengine/model/test_class_construction.py +++ b/tests/integration/cqlengine/model/test_class_construction.py @@ -92,7 +92,7 @@ def test_attempting_to_make_duplicate_column_names_fails(self): Tests that trying to create conflicting db column names will fail """ - with six.assertRaisesRegex(self, ModelException, r".*more than once$"): + with self.assertRaisesRegex(ModelException, r".*more than once$"): class BadNames(Model): words = columns.Text(primary_key=True) content = columns.Text(db_field='words') diff --git a/tests/integration/cqlengine/test_batch_query.py b/tests/integration/cqlengine/test_batch_query.py index 07ee2e13bf..94496727a7 100644 --- a/tests/integration/cqlengine/test_batch_query.py +++ b/tests/integration/cqlengine/test_batch_query.py @@ -224,7 +224,7 @@ def my_callback(*args, **kwargs): batch.execute() batch.execute() self.assertEqual(len(w), 2) # package filter setup to warn always - six.assertRegex(self, str(w[0].message), r"^Batch.*multiple.*") + self.assertRegex(str(w[0].message), r"^Batch.*multiple.*") def test_disable_multiple_callback_warning(self): """ diff --git a/tests/integration/long/test_ipv6.py b/tests/integration/long/test_ipv6.py index 6c7d447dfb..3e2f2ffc5e 100644 --- a/tests/integration/long/test_ipv6.py +++ b/tests/integration/long/test_ipv6.py @@ -83,7 +83,7 @@ def test_connect(self): def test_error(self): cluster = TestCluster(connection_class=self.connection_class, contact_points=['::1'], port=9043, connect_timeout=10) - six.assertRaisesRegex(self, NoHostAvailable, '\(\'Unable to connect.*%s.*::1\', 9043.*Connection refused.*' + self.assertRaisesRegex(NoHostAvailable, '\(\'Unable to connect.*%s.*::1\', 9043.*Connection refused.*' % errno.ECONNREFUSED, cluster.connect) def test_error_multiple(self): @@ -91,7 +91,7 @@ def test_error_multiple(self): raise unittest.SkipTest('localhost only resolves one address') cluster = TestCluster(connection_class=self.connection_class, contact_points=['localhost'], port=9043, connect_timeout=10) - six.assertRaisesRegex(self, NoHostAvailable, '\(\'Unable to connect.*Tried connecting to \[\(.*\(.*\].*Last error', + self.assertRaisesRegex(NoHostAvailable, '\(\'Unable to connect.*Tried connecting to \[\(.*\(.*\].*Last error', cluster.connect) diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index e34e69f458..1def601d2e 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -263,7 +263,7 @@ def connection_factory(self, *args, **kwargs): prime_request(PrimeOptions(then={"result": "no_result", "delay_in_ms": never})) prime_request(RejectConnections("unbind")) - six.assertRaisesRegex(self, OperationTimedOut, "Connection defunct by heartbeat", future.result) + self.assertRaisesRegex(OperationTimedOut, "Connection defunct by heartbeat", future.result) def test_close_when_query(self): """ diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index c23c9eedf2..2f8ffbb068 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -122,7 +122,7 @@ def test_auth_connect(self): def test_connect_wrong_pwd(self): cluster = self.cluster_as('cassandra', 'wrong_pass') try: - six.assertRaisesRegex(self, NoHostAvailable, + self.assertRaisesRegex(NoHostAvailable, '.*AuthenticationFailed.', cluster.connect) assert_quiescent_pool_state(self, cluster) @@ -132,7 +132,7 @@ def test_connect_wrong_pwd(self): def test_connect_wrong_username(self): cluster = self.cluster_as('wrong_user', 'cassandra') try: - six.assertRaisesRegex(self, NoHostAvailable, + self.assertRaisesRegex(NoHostAvailable, '.*AuthenticationFailed.*', cluster.connect) assert_quiescent_pool_state(self, cluster) @@ -142,7 +142,7 @@ def test_connect_wrong_username(self): def test_connect_empty_pwd(self): cluster = self.cluster_as('Cassandra', '') try: - six.assertRaisesRegex(self, NoHostAvailable, + self.assertRaisesRegex(NoHostAvailable, '.*AuthenticationFailed.*', cluster.connect) assert_quiescent_pool_state(self, cluster) @@ -152,7 +152,7 @@ def test_connect_empty_pwd(self): def test_connect_no_auth_provider(self): cluster = TestCluster() try: - six.assertRaisesRegex(self, NoHostAvailable, + self.assertRaisesRegex(NoHostAvailable, '.*AuthenticationFailed.*', cluster.connect) assert_quiescent_pool_state(self, cluster) diff --git a/tests/integration/standard/test_client_warnings.py b/tests/integration/standard/test_client_warnings.py index 166f172a16..37003d5213 100644 --- a/tests/integration/standard/test_client_warnings.py +++ b/tests/integration/standard/test_client_warnings.py @@ -71,7 +71,7 @@ def test_warning_basic(self): future = self.session.execute_async(self.warn_batch) future.result() self.assertEqual(len(future.warnings), 1) - six.assertRegex(self, future.warnings[0], 'Batch.*exceeding.*') + self.assertRegex(future.warnings[0], 'Batch.*exceeding.*') def test_warning_with_trace(self): """ @@ -87,7 +87,7 @@ def test_warning_with_trace(self): future = self.session.execute_async(self.warn_batch, trace=True) future.result() self.assertEqual(len(future.warnings), 1) - six.assertRegex(self, future.warnings[0], 'Batch.*exceeding.*') + self.assertRegex(future.warnings[0], 'Batch.*exceeding.*') self.assertIsNotNone(future.get_query_trace()) @local @@ -106,7 +106,7 @@ def test_warning_with_custom_payload(self): future = self.session.execute_async(self.warn_batch, custom_payload=payload) future.result() self.assertEqual(len(future.warnings), 1) - six.assertRegex(self, future.warnings[0], 'Batch.*exceeding.*') + self.assertRegex(future.warnings[0], 'Batch.*exceeding.*') self.assertDictEqual(future.custom_payload, payload) @local @@ -125,6 +125,6 @@ def test_warning_with_trace_and_custom_payload(self): future = self.session.execute_async(self.warn_batch, trace=True, custom_payload=payload) future.result() self.assertEqual(len(future.warnings), 1) - six.assertRegex(self, future.warnings[0], 'Batch.*exceeding.*') + self.assertRegex(future.warnings[0], 'Batch.*exceeding.*') self.assertIsNotNone(future.get_query_trace()) self.assertDictEqual(future.custom_payload, payload) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index c5f64f6c28..ae6e3e5a4e 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -148,7 +148,7 @@ def test_raise_error_on_control_connection_timeout(self): get_node(1).pause() cluster = TestCluster(contact_points=['127.0.0.1'], connect_timeout=1) - with six.assertRaisesRegex(self, NoHostAvailable, "OperationTimedOut\('errors=Timed out creating connection \(1 seconds\)"): + with self.assertRaisesRegex(NoHostAvailable, "OperationTimedOut\('errors=Timed out creating connection \(1 seconds\)"): cluster.connect() cluster.shutdown() @@ -536,7 +536,7 @@ def patched_wait_for_responses(*args, **kwargs): # cluster agreement wait used for refresh original_meta = c.metadata.keyspaces start_time = time.time() - six.assertRaisesRegex(self, Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata) + self.assertRaisesRegex(Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata) end_time = time.time() self.assertGreaterEqual(end_time - start_time, agreement_timeout) self.assertIs(original_meta, c.metadata.keyspaces) @@ -573,7 +573,7 @@ def patched_wait_for_responses(*args, **kwargs): # refresh wait overrides cluster value original_meta = c.metadata.keyspaces start_time = time.time() - six.assertRaisesRegex(self, Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata, + self.assertRaisesRegex(Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata, max_schema_agreement_wait=agreement_timeout) end_time = time.time() self.assertGreaterEqual(end_time - start_time, agreement_timeout) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 6f76c2a9b0..b83df22032 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1590,7 +1590,7 @@ def test_function_no_parameters(self): with self.VerifiedFunction(self, **kwargs) as vf: fn_meta = self.keyspace_function_meta[vf.signature] - six.assertRegex(self, fn_meta.as_cql_query(), "CREATE FUNCTION.*%s\(\) .*" % kwargs['name']) + self.assertRegex(fn_meta.as_cql_query(), "CREATE FUNCTION.*%s\(\) .*" % kwargs['name']) def test_functions_follow_keyspace_alter(self): """ @@ -1638,12 +1638,12 @@ def test_function_cql_called_on_null(self): kwargs['called_on_null_input'] = True with self.VerifiedFunction(self, **kwargs) as vf: fn_meta = self.keyspace_function_meta[vf.signature] - six.assertRegex(self, fn_meta.as_cql_query(), "CREATE FUNCTION.*\) CALLED ON NULL INPUT RETURNS .*") + self.assertRegex(fn_meta.as_cql_query(), "CREATE FUNCTION.*\) CALLED ON NULL INPUT RETURNS .*") kwargs['called_on_null_input'] = False with self.VerifiedFunction(self, **kwargs) as vf: fn_meta = self.keyspace_function_meta[vf.signature] - six.assertRegex(self, fn_meta.as_cql_query(), "CREATE FUNCTION.*\) RETURNS NULL ON NULL INPUT RETURNS .*") + self.assertRegex(fn_meta.as_cql_query(), "CREATE FUNCTION.*\) RETURNS NULL ON NULL INPUT RETURNS .*") class AggregateMetadata(FunctionTest): diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index 828f10b5e2..6e2e9f7328 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -69,7 +69,7 @@ def test_can_insert_blob_type_as_string(self): msg = r'.*Invalid STRING constant \(.*?\) for "b" of type blob.*' else: msg = r'.*Invalid STRING constant \(.*?\) for b of type blob.*' - six.assertRaisesRegex(self, InvalidRequest, msg, s.execute, query, params) + self.assertRaisesRegex(InvalidRequest, msg, s.execute, query, params) return # In python2, with Cassandra < 2.0, we can manually encode the 'byte str' type as hex for insertion in a blob. diff --git a/tests/unit/advanced/test_graph.py b/tests/unit/advanced/test_graph.py index 77a920a3bf..a98a48c82f 100644 --- a/tests/unit/advanced/test_graph.py +++ b/tests/unit/advanced/test_graph.py @@ -259,7 +259,7 @@ def test_init_unknown_kwargs(self): with warnings.catch_warnings(record=True) as w: GraphOptions(unknown_param=42) self.assertEqual(len(w), 1) - six.assertRegex(self, str(w[0].message), r"^Unknown keyword.*GraphOptions.*") + self.assertRegex(str(w[0].message), r"^Unknown keyword.*GraphOptions.*") def test_update(self): opts = GraphOptions(**self.api_params) diff --git a/tests/unit/cqlengine/test_connection.py b/tests/unit/cqlengine/test_connection.py index 9c3454796a..962ee06b52 100644 --- a/tests/unit/cqlengine/test_connection.py +++ b/tests/unit/cqlengine/test_connection.py @@ -52,12 +52,12 @@ def test_get_session_fails_without_existing_connection(self): """ Users can't get the default session without having a default connection set. """ - with six.assertRaisesRegex(self, connection.CQLEngineException, self.no_registered_connection_msg): + with self.assertRaisesRegex(connection.CQLEngineException, self.no_registered_connection_msg): connection.get_session(connection=None) def test_get_cluster_fails_without_existing_connection(self): """ Users can't get the default cluster without having a default connection set. """ - with six.assertRaisesRegex(self, connection.CQLEngineException, self.no_registered_connection_msg): + with self.assertRaisesRegex(connection.CQLEngineException, self.no_registered_connection_msg): connection.get_cluster(connection=None) diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index 97faa5e7fc..bc6749a477 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -392,7 +392,7 @@ def send_msg(msg, req_id, msg_callback): connection.defunct.assert_has_calls([call(ANY)] * get_holders.call_count) exc = connection.defunct.call_args_list[0][0][0] self.assertIsInstance(exc, ConnectionException) - six.assertRegex(self, exc.args[0], r'^Received unexpected response to OptionsMessage.*') + self.assertRegex(exc.args[0], r'^Received unexpected response to OptionsMessage.*') holder.return_connection.assert_has_calls( [call(connection)] * get_holders.call_count) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index edafb7cb01..a6c63dcfdc 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -1301,7 +1301,7 @@ def test_immutable_predicate(self): expected_message_regex = "can't set attribute" hfp = HostFilterPolicy(child_policy=Mock(name='child_policy'), predicate=Mock(name='predicate')) - with six.assertRaisesRegex(self, AttributeError, expected_message_regex): + with self.assertRaisesRegex(AttributeError, expected_message_regex): hfp.predicate = object() diff --git a/tests/unit/test_protocol.py b/tests/unit/test_protocol.py index 3d6828bdc5..0f251ffc0e 100644 --- a/tests/unit/test_protocol.py +++ b/tests/unit/test_protocol.py @@ -173,7 +173,7 @@ def test_keyspace_flag_raises_before_v5(self): keyspace_message = QueryMessage('a', consistency_level=3, keyspace='ks') io = Mock(name='io') - with six.assertRaisesRegex(self, UnsupportedOperation, 'Keyspaces.*set'): + with self.assertRaisesRegex(UnsupportedOperation, 'Keyspaces.*set'): keyspace_message.send_body(io, protocol_version=4) io.assert_not_called() diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index a9c05976e0..273490072f 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -160,7 +160,7 @@ def test_heartbeat_defunct_deadlock(self): # Simulate ResponseFuture timing out rf._on_timeout() - six.assertRaisesRegex(self, OperationTimedOut, "Connection defunct by heartbeat", rf.result) + self.assertRaisesRegex(OperationTimedOut, "Connection defunct by heartbeat", rf.result) def test_read_timeout_error_message(self): session = self.make_session() From bf7abff6e541dd720f739733f897279f29f5b7cd Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Mon, 13 Mar 2023 14:19:21 -0500 Subject: [PATCH 1894/2005] Update Travis config to only run versions that will be supported going forward --- .travis.yml | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index 906775e90c..7e59fa486d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,13 +3,9 @@ sudo: false language: python python: - - "2.7" - - "3.5" - - "3.6" - "3.7" - "3.8" - - "pypy2.7-6.0" - - "pypy3.5" + - "pypy3.7" env: - CASS_DRIVER_NO_CYTHON=1 From ee3f3af95cdd66075aa4da2da71a92caad74165a Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Mon, 13 Mar 2023 14:45:55 -0500 Subject: [PATCH 1895/2005] Trying to get to a maximal working Pypy version. Have to go back to 3.6 which isn't ideal... --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 7e59fa486d..9f9c450a77 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,7 +5,7 @@ language: python python: - "3.7" - "3.8" - - "pypy3.7" + - "pypy3.6" env: - CASS_DRIVER_NO_CYTHON=1 From fa9b7af85bf07c381ff955790faa3ec7d13c7713 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Mon, 13 Mar 2023 14:48:45 -0500 Subject: [PATCH 1896/2005] Forgot to add complete extension --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 9f9c450a77..54d3a6c89c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,7 +5,7 @@ language: python python: - "3.7" - "3.8" - - "pypy3.6" + - "pypy3.6-7.0.0" env: - CASS_DRIVER_NO_CYTHON=1 From fdac31e5c93f867166861e1573ee41b00f3e78f7 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Mon, 13 Mar 2023 15:52:34 -0500 Subject: [PATCH 1897/2005] Going back to known good non-2.7 PyPy target. PYTHON-1333 has more detail. --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 54d3a6c89c..4d94d86087 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,7 +5,7 @@ language: python python: - "3.7" - "3.8" - - "pypy3.6-7.0.0" + - "pypy3.5" env: - CASS_DRIVER_NO_CYTHON=1 From f5001b8759d34896a1899b791a5ca57db8ba8069 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Mon, 13 Mar 2023 16:57:23 -0500 Subject: [PATCH 1898/2005] Release 3.26: changelog & version --- CHANGELOG.rst | 33 +++++++++++++++++++++++++++++++++ cassandra/__init__.py | 2 +- 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index d2d577c957..fc7a702534 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,36 @@ +3.26.0 +====== +March 13, 2023 + +Features +-------- +* Add support for execution profiles in execute_concurrent (PR 1122) + +Bug Fixes +--------- +* Handle empty non-final result pages (PR 1110) +* Do not re-use stream IDs for in-flight requests (PR 1114) +* Asyncore race condition cause logging exception on shutdown (PYTHON-1266) + +Others +------ +* Fix deprecation warning in query tracing (PR 1103) +* Remove mutable default values from some tests (PR 1116) +* Remove dependency on unittest2 (PYTHON-1289) +* Fix deprecation warnings for asyncio.coroutine annotation in asyncioreactor (PYTTHON-1290) +* Fix typos in source files (PR 1126) +* HostFilterPolicyInitTest fix for Python 3.11 (PR 1131) +* Fix for DontPrepareOnIgnoredHostsTest (PYTHON-1287) +* tests.integration.simulacron.test_connection failures (PYTHON-1304) +* tests.integration.standard.test_single_interface.py appears to be failing for C* 4.0 (PYTHON-1329) +* Authentication tests appear to be failing fraudulently (PYTHON-1328) +* PreparedStatementTests.test_fail_if_different_query_id_on_reprepare() failing unexpectedly (PTYHON-1327) +* Refactor deprecated unittest aliases for Python 3.11 compatibility (PR 1112) + +Deprecations +------------ +* This release removes support for Python 2.7.x as well as Python 3.5.x and 3.6.x + 3.25.0 ====== March 18, 2021 diff --git a/cassandra/__init__.py b/cassandra/__init__.py index e6cb5c55bb..e14f20c6ed 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 25, 0) +__version_info__ = (3, 26, 0) __version__ = '.'.join(map(str, __version_info__)) From 54ef7d3483a0772ba0d907306bddff6885f9fe7f Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Mon, 27 Mar 2023 20:22:03 -0500 Subject: [PATCH 1899/2005] Doc changes following on from 3.26.0 release --- README-dev.rst | 5 ++--- README.rst | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/README-dev.rst b/README-dev.rst index 85a722c3b7..5c0555f3a7 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -241,11 +241,10 @@ Adding a New Python Runtime Support * Add the new python version to our jenkins image: https://github.com/riptano/openstack-jenkins-drivers/ -* Add the new python version in job-creator: - https://github.com/riptano/job-creator/ +* Add the new python version in the Jenkinsfile and TravisCI configs as appropriate * Run the tests and ensure they all pass * also test all event loops * Update the wheels building repo to support that version: - https://github.com/riptano/python-dse-driver-wheels + https://github.com/datastax/python-driver-wheels diff --git a/README.rst b/README.rst index 7c5bf1eee1..197b698aa0 100644 --- a/README.rst +++ b/README.rst @@ -7,7 +7,7 @@ DataStax Driver for Apache Cassandra A modern, `feature-rich `_ and highly-tunable Python client library for Apache Cassandra (2.1+) and DataStax Enterprise (4.7+) using exclusively Cassandra's binary protocol and Cassandra Query Language v3. -The driver supports Python 2.7, 3.5, 3.6, 3.7 and 3.8. +The driver supports Python 3.7 and 3.8. **Note:** DataStax products do not support big-endian systems. From 17adb81817b353ec0aad92f46b936478f8f57514 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Sat, 8 Apr 2023 10:59:51 -0500 Subject: [PATCH 1900/2005] Fix typo --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index fc7a702534..544585e1ce 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -17,7 +17,7 @@ Others * Fix deprecation warning in query tracing (PR 1103) * Remove mutable default values from some tests (PR 1116) * Remove dependency on unittest2 (PYTHON-1289) -* Fix deprecation warnings for asyncio.coroutine annotation in asyncioreactor (PYTTHON-1290) +* Fix deprecation warnings for asyncio.coroutine annotation in asyncioreactor (PYTHON-1290) * Fix typos in source files (PR 1126) * HostFilterPolicyInitTest fix for Python 3.11 (PR 1131) * Fix for DontPrepareOnIgnoredHostsTest (PYTHON-1287) From 32a2613db39cb8ca14f8b8cb65bfd5ebf2992083 Mon Sep 17 00:00:00 2001 From: Jamie Gillenwater Date: Fri, 21 Apr 2023 10:41:15 -0400 Subject: [PATCH 1901/2005] Add 3.26 for docs (#1147) --- docs.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs.yaml b/docs.yaml index 8e29b942e3..11e9d2d5ac 100644 --- a/docs.yaml +++ b/docs.yaml @@ -22,6 +22,8 @@ sections: # build extensions like libev CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: + - name: '3.26' + ref: f5001b8 - name: '3.25' ref: a83c36a5 - name: '3.24' From f1e9126fc4db56c2f0dc9fb5c10c4c06f0892fc7 Mon Sep 17 00:00:00 2001 From: Jamie Gillenwater Date: Fri, 21 Apr 2023 11:02:16 -0400 Subject: [PATCH 1902/2005] Update driver version supported (#1148) --- docs/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/index.rst b/docs/index.rst index 978faa17c6..005d02bf84 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -4,7 +4,7 @@ A Python client driver for `Apache Cassandra® `_. This driver works exclusively with the Cassandra Query Language v3 (CQL3) and Cassandra's native protocol. Cassandra 2.1+ is supported, including DSE 4.7+. -The driver supports Python 2.7, 3.5, 3.6, 3.7 and 3.8. +The driver supports Python 3.7 and 3.8. This driver is open source under the `Apache v2 License `_. From 9fa742a78a0b242b5f00d30e8a09df9c7467dfa6 Mon Sep 17 00:00:00 2001 From: Jamie Gillenwater Date: Fri, 21 Apr 2023 11:04:13 -0400 Subject: [PATCH 1903/2005] Update ref to include latest version supported (#1149) --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index 11e9d2d5ac..c95a63d5a9 100644 --- a/docs.yaml +++ b/docs.yaml @@ -23,7 +23,7 @@ sections: CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - name: '3.26' - ref: f5001b8 + ref: f1e9126 - name: '3.25' ref: a83c36a5 - name: '3.24' From 058be1e995a71dbf6afc0cbdf3a2b0a429cd5e28 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Mon, 1 May 2023 10:56:12 -0500 Subject: [PATCH 1904/2005] PYTHON-1341 Impl of client-side column-level encryption/decryption (#1150) --- .travis.yml | 1 - Jenkinsfile | 2 +- cassandra/cluster.py | 20 +- cassandra/obj_parser.pyx | 16 +- cassandra/parsing.pxd | 2 + cassandra/parsing.pyx | 4 +- cassandra/policies.py | 181 +++++++++++++++++- cassandra/protocol.py | 46 +++-- cassandra/query.py | 23 ++- cassandra/row_parser.pyx | 12 +- docs/column_encryption.rst | 92 +++++++++ docs/index.rst | 3 + requirements.txt | 1 + .../standard/test_custom_protocol_handler.py | 4 +- tests/integration/standard/test_policies.py | 73 ++++++- tests/unit/test_policies.py | 136 ++++++++++++- tox.ini | 5 +- 17 files changed, 573 insertions(+), 48 deletions(-) create mode 100644 docs/column_encryption.rst diff --git a/.travis.yml b/.travis.yml index 4d94d86087..dbabf61378 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,7 +5,6 @@ language: python python: - "3.7" - "3.8" - - "pypy3.5" env: - CASS_DRIVER_NO_CYTHON=1 diff --git a/Jenkinsfile b/Jenkinsfile index 0fdafb17d2..f3b5d0718b 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -57,7 +57,7 @@ matrices = [ "SMOKE": [ "SERVER": ['3.11', '4.0', 'dse-6.8.30'], "RUNTIME": ['3.7.7', '3.8.3'], - "CYTHON": ["False"] + "CYTHON": ["True", "False"] ] ] diff --git a/cassandra/cluster.py b/cassandra/cluster.py index c836fb4302..6b0d495013 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1003,6 +1003,12 @@ def default_retry_policy(self, policy): load the configuration and certificates. """ + column_encryption_policy = None + """ + An instance of :class:`cassandra.policies.ColumnEncryptionPolicy` specifying encryption materials to be + used for columns in this cluster. + """ + @property def schema_metadata_enabled(self): """ @@ -1104,7 +1110,8 @@ def __init__(self, monitor_reporting_enabled=True, monitor_reporting_interval=30, client_id=None, - cloud=None): + cloud=None, + column_encryption_policy=None): """ ``executor_threads`` defines the number of threads in a pool for handling asynchronous tasks such as extablishing connection pools or refreshing metadata. @@ -1152,6 +1159,9 @@ def __init__(self, self.port = port + if column_encryption_policy is not None: + self.column_encryption_policy = column_encryption_policy + self.endpoint_factory = endpoint_factory or DefaultEndPointFactory(port=self.port) self.endpoint_factory.configure(self) @@ -2535,6 +2545,12 @@ def __init__(self, cluster, hosts, keyspace=None): self.encoder = Encoder() + if self.cluster.column_encryption_policy is not None: + try: + self.client_protocol_handler.column_encryption_policy = self.cluster.column_encryption_policy + except AttributeError: + log.info("Unable to set column encryption policy for session") + # create connection pools in parallel self._initial_connect_futures = set() for host in hosts: @@ -3074,7 +3090,7 @@ def prepare(self, query, custom_payload=None, keyspace=None): prepared_keyspace = keyspace if keyspace else None prepared_statement = PreparedStatement.from_message( response.query_id, response.bind_metadata, response.pk_indexes, self.cluster.metadata, query, prepared_keyspace, - self._protocol_version, response.column_metadata, response.result_metadata_id) + self._protocol_version, response.column_metadata, response.result_metadata_id, self.cluster.column_encryption_policy) prepared_statement.custom_payload = future.custom_payload self.cluster.add_prepared(response.query_id, prepared_statement) diff --git a/cassandra/obj_parser.pyx b/cassandra/obj_parser.pyx index a0b5316a33..cf43771dd7 100644 --- a/cassandra/obj_parser.pyx +++ b/cassandra/obj_parser.pyx @@ -17,9 +17,12 @@ include "ioutils.pyx" from cassandra import DriverException from cassandra.bytesio cimport BytesIOReader from cassandra.deserializers cimport Deserializer, from_binary +from cassandra.deserializers import find_deserializer from cassandra.parsing cimport ParseDesc, ColumnParser, RowParser from cassandra.tuple cimport tuple_new, tuple_set +from cpython.bytes cimport PyBytes_AsStringAndSize + cdef class ListParser(ColumnParser): """Decode a ResultMessage into a list of tuples (or other objects)""" @@ -58,18 +61,29 @@ cdef class TupleRowParser(RowParser): assert desc.rowsize >= 0 cdef Buffer buf + cdef Buffer newbuf cdef Py_ssize_t i, rowsize = desc.rowsize cdef Deserializer deserializer cdef tuple res = tuple_new(desc.rowsize) + ce_policy = desc.column_encryption_policy for i in range(rowsize): # Read the next few bytes get_buf(reader, &buf) # Deserialize bytes to python object deserializer = desc.deserializers[i] + coldesc = desc.coldescs[i] + uses_ce = ce_policy and ce_policy.contains_column(coldesc) try: - val = from_binary(deserializer, &buf, desc.protocol_version) + if uses_ce: + col_type = ce_policy.column_type(coldesc) + decrypted_bytes = ce_policy.decrypt(coldesc, to_bytes(&buf)) + PyBytes_AsStringAndSize(decrypted_bytes, &newbuf.ptr, &newbuf.size) + deserializer = find_deserializer(ce_policy.column_type(coldesc)) + val = from_binary(deserializer, &newbuf, desc.protocol_version) + else: + val = from_binary(deserializer, &buf, desc.protocol_version) except Exception as e: raise DriverException('Failed decoding result column "%s" of type %s: %s' % (desc.colnames[i], desc.coltypes[i].cql_parameterized_type(), diff --git a/cassandra/parsing.pxd b/cassandra/parsing.pxd index aa9478cd14..27dc368b07 100644 --- a/cassandra/parsing.pxd +++ b/cassandra/parsing.pxd @@ -18,6 +18,8 @@ from cassandra.deserializers cimport Deserializer cdef class ParseDesc: cdef public object colnames cdef public object coltypes + cdef public object column_encryption_policy + cdef public list coldescs cdef Deserializer[::1] deserializers cdef public int protocol_version cdef Py_ssize_t rowsize diff --git a/cassandra/parsing.pyx b/cassandra/parsing.pyx index d2bc0a3abe..954767d227 100644 --- a/cassandra/parsing.pyx +++ b/cassandra/parsing.pyx @@ -19,9 +19,11 @@ Module containing the definitions and declarations (parsing.pxd) for parsers. cdef class ParseDesc: """Description of what structure to parse""" - def __init__(self, colnames, coltypes, deserializers, protocol_version): + def __init__(self, colnames, coltypes, column_encryption_policy, coldescs, deserializers, protocol_version): self.colnames = colnames self.coltypes = coltypes + self.column_encryption_policy = column_encryption_policy + self.coldescs = coldescs self.deserializers = deserializers self.protocol_version = protocol_version self.rowsize = len(colnames) diff --git a/cassandra/policies.py b/cassandra/policies.py index fa1e8cf385..26b9aa4c5a 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -12,13 +12,21 @@ # See the License for the specific language governing permissions and # limitations under the License. +from collections import namedtuple +from functools import lru_cache from itertools import islice, cycle, groupby, repeat import logging +import os from random import randint, shuffle from threading import Lock import socket import warnings + +from cryptography.hazmat.primitives import padding +from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes + from cassandra import WriteType as WT +from cassandra.cqltypes import _cqltypes # This is done this way because WriteType was originally @@ -455,7 +463,7 @@ class HostFilterPolicy(LoadBalancingPolicy): A :class:`.LoadBalancingPolicy` subclass configured with a child policy, and a single-argument predicate. This policy defers to the child policy for hosts where ``predicate(host)`` is truthy. Hosts for which - ``predicate(host)`` is falsey will be considered :attr:`.IGNORED`, and will + ``predicate(host)`` is falsy will be considered :attr:`.IGNORED`, and will not be used in a query plan. This can be used in the cases where you need a whitelist or blacklist @@ -491,7 +499,7 @@ def __init__(self, child_policy, predicate): :param child_policy: an instantiated :class:`.LoadBalancingPolicy` that this one will defer to. :param predicate: a one-parameter function that takes a :class:`.Host`. - If it returns a falsey value, the :class:`.Host` will + If it returns a falsy value, the :class:`.Host` will be :attr:`.IGNORED` and not returned in query plans. """ super(HostFilterPolicy, self).__init__() @@ -527,7 +535,7 @@ def predicate(self): def distance(self, host): """ Checks if ``predicate(host)``, then returns - :attr:`~HostDistance.IGNORED` if falsey, and defers to the child policy + :attr:`~HostDistance.IGNORED` if falsy, and defers to the child policy otherwise. """ if self.predicate(host): @@ -616,7 +624,7 @@ class ReconnectionPolicy(object): def new_schedule(self): """ This should return a finite or infinite iterable of delays (each as a - floating point number of seconds) inbetween each failed reconnection + floating point number of seconds) in-between each failed reconnection attempt. Note that if the iterable is finite, reconnection attempts will cease once the iterable is exhausted. """ @@ -626,12 +634,12 @@ def new_schedule(self): class ConstantReconnectionPolicy(ReconnectionPolicy): """ A :class:`.ReconnectionPolicy` subclass which sleeps for a fixed delay - inbetween each reconnection attempt. + in-between each reconnection attempt. """ def __init__(self, delay, max_attempts=64): """ - `delay` should be a floating point number of seconds to wait inbetween + `delay` should be a floating point number of seconds to wait in-between each attempt. `max_attempts` should be a total number of attempts to be made before @@ -655,7 +663,7 @@ def new_schedule(self): class ExponentialReconnectionPolicy(ReconnectionPolicy): """ A :class:`.ReconnectionPolicy` subclass which exponentially increases - the length of the delay inbetween each reconnection attempt up to + the length of the delay in-between each reconnection attempt up to a set maximum delay. A random amount of jitter (+/- 15%) will be added to the pure exponential @@ -715,7 +723,7 @@ class RetryPolicy(object): timeout and unavailable failures. These are failures reported from the server side. Timeouts are configured by `settings in cassandra.yaml `_. - Unavailable failures occur when the coordinator cannot acheive the consistency + Unavailable failures occur when the coordinator cannot achieve the consistency level for a request. For further information see the method descriptions below. @@ -1181,3 +1189,160 @@ def _rethrow(self, *args, **kwargs): on_read_timeout = _rethrow on_write_timeout = _rethrow on_unavailable = _rethrow + + +ColDesc = namedtuple('ColDesc', ['ks', 'table', 'col']) +ColData = namedtuple('ColData', ['key','type']) + +class ColumnEncryptionPolicy(object): + """ + A policy enabling (mostly) transparent encryption and decryption of data before it is + sent to the cluster. + + Key materials and other configurations are specified on a per-column basis. This policy can + then be used by driver structures which are aware of the underlying columns involved in their + work. In practice this includes the following cases: + + * Prepared statements - data for columns specified by the cluster's policy will be transparently + encrypted before they are sent + * Rows returned from any query - data for columns specified by the cluster's policy will be + transparently decrypted before they are returned to the user + + To enable this functionality, create an instance of this class (or more likely a subclass) + before creating a cluster. This policy should then be configured and supplied to the Cluster + at creation time via the :attr:`.Cluster.column_encryption_policy` attribute. + """ + + def encrypt(self, coldesc, obj_bytes): + """ + Encrypt the specified bytes using the cryptography materials for the specified column. + Largely used internally, although this could also be used to encrypt values supplied + to non-prepared statements in a way that is consistent with this policy. + """ + raise NotImplementedError() + + def decrypt(self, coldesc, encrypted_bytes): + """ + Decrypt the specified (encrypted) bytes using the cryptography materials for the + specified column. Used internally; could be used externally as well but there's + not currently an obvious use case. + """ + raise NotImplementedError() + + def add_column(self, coldesc, key): + """ + Provide cryptography materials to be used when encrypted and/or decrypting data + for the specified column. + """ + raise NotImplementedError() + + def contains_column(self, coldesc): + """ + Predicate to determine if a specific column is supported by this policy. + Currently only used internally. + """ + raise NotImplementedError() + + def encode_and_encrypt(self, coldesc, obj): + """ + Helper function to enable use of this policy on simple (i.e. non-prepared) + statements. + """ + raise NotImplementedError() + +AES256_BLOCK_SIZE = 128 +AES256_BLOCK_SIZE_BYTES = int(AES256_BLOCK_SIZE / 8) +AES256_KEY_SIZE = 256 +AES256_KEY_SIZE_BYTES = int(AES256_KEY_SIZE / 8) + +class AES256ColumnEncryptionPolicy(ColumnEncryptionPolicy): + + # CBC uses an IV that's the same size as the block size + # + # TODO: Need to find some way to expose mode options + # (CBC etc.) without leaking classes from the underlying + # impl here + def __init__(self, mode = modes.CBC, iv = os.urandom(AES256_BLOCK_SIZE_BYTES)): + + self.mode = mode + self.iv = iv + + # ColData for a given ColDesc is always preserved. We only create a Cipher + # when there's an actual need to for a given ColDesc + self.coldata = {} + self.ciphers = {} + + def encrypt(self, coldesc, obj_bytes): + + # AES256 has a 128-bit block size so if the input bytes don't align perfectly on + # those blocks we have to pad them. There's plenty of room for optimization here: + # + # * Instances of the PKCS7 padder should be managed in a bounded pool + # * It would be nice if we could get a flag from encrypted data to indicate + # whether it was padded or not + # * Might be able to make this happen with a leading block of flags in encrypted data + padder = padding.PKCS7(AES256_BLOCK_SIZE).padder() + padded_bytes = padder.update(obj_bytes) + padder.finalize() + + cipher = self._get_cipher(coldesc) + encryptor = cipher.encryptor() + return encryptor.update(padded_bytes) + encryptor.finalize() + + def decrypt(self, coldesc, encrypted_bytes): + + cipher = self._get_cipher(coldesc) + decryptor = cipher.decryptor() + padded_bytes = decryptor.update(encrypted_bytes) + decryptor.finalize() + + unpadder = padding.PKCS7(AES256_BLOCK_SIZE).unpadder() + return unpadder.update(padded_bytes) + unpadder.finalize() + + def add_column(self, coldesc, key, type): + + if not coldesc: + raise ValueError("ColDesc supplied to add_column cannot be None") + if not key: + raise ValueError("Key supplied to add_column cannot be None") + if not type: + raise ValueError("Type supplied to add_column cannot be None") + if type not in _cqltypes.keys(): + raise ValueError("Type %s is not a supported type".format(type)) + if not len(key) == AES256_KEY_SIZE_BYTES: + raise ValueError("AES256 column encryption policy expects a 256-bit encryption key") + self.coldata[coldesc] = ColData(key, _cqltypes[type]) + + def contains_column(self, coldesc): + return coldesc in self.coldata + + def encode_and_encrypt(self, coldesc, obj): + if not coldesc: + raise ValueError("ColDesc supplied to encode_and_encrypt cannot be None") + if not obj: + raise ValueError("Object supplied to encode_and_encrypt cannot be None") + coldata = self.coldata.get(coldesc) + if not coldata: + raise ValueError("Could not find ColData for ColDesc %s".format(coldesc)) + return self.encrypt(coldesc, coldata.type.serialize(obj, None)) + + def cache_info(self): + return AES256ColumnEncryptionPolicy._build_cipher.cache_info() + + def column_type(self, coldesc): + return self.coldata[coldesc].type + + def _get_cipher(self, coldesc): + """ + Access relevant state from this instance necessary to create a Cipher and then get one, + hopefully returning a cached instance if we've already done so (and it hasn't been evicted) + """ + + try: + coldata = self.coldata[coldesc] + return AES256ColumnEncryptionPolicy._build_cipher(coldata.key, self.mode, self.iv) + except KeyError: + raise ValueError("Could not find column {}".format(coldesc)) + + # Explicitly use a class method here to avoid caching self + @lru_cache(maxsize=128) + def _build_cipher(key, mode, iv): + return Cipher(algorithms.AES256(key), mode(iv)) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index ed92a76679..5e3610811e 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -29,9 +29,6 @@ AlreadyExists, InvalidRequest, Unauthorized, UnsupportedOperation, UserFunctionDescriptor, UserAggregateDescriptor, SchemaTargetType) -from cassandra.marshal import (int32_pack, int32_unpack, uint16_pack, uint16_unpack, - uint8_pack, int8_unpack, uint64_pack, header_pack, - v3_header_pack, uint32_pack, uint32_le_unpack, uint32_le_pack) from cassandra.cqltypes import (AsciiType, BytesType, BooleanType, CounterColumnType, DateType, DecimalType, DoubleType, FloatType, Int32Type, @@ -40,6 +37,10 @@ UTF8Type, VarcharType, UUIDType, UserType, TupleType, lookup_casstype, SimpleDateType, TimeType, ByteType, ShortType, DurationType) +from cassandra.marshal import (int32_pack, int32_unpack, uint16_pack, uint16_unpack, + uint8_pack, int8_unpack, uint64_pack, header_pack, + v3_header_pack, uint32_pack, uint32_le_unpack, uint32_le_pack) +from cassandra.policies import ColDesc from cassandra import WriteType from cassandra.cython_deps import HAVE_CYTHON, HAVE_NUMPY from cassandra import util @@ -723,11 +724,11 @@ class ResultMessage(_MessageType): def __init__(self, kind): self.kind = kind - def recv(self, f, protocol_version, user_type_map, result_metadata): + def recv(self, f, protocol_version, user_type_map, result_metadata, column_encryption_policy): if self.kind == RESULT_KIND_VOID: return elif self.kind == RESULT_KIND_ROWS: - self.recv_results_rows(f, protocol_version, user_type_map, result_metadata) + self.recv_results_rows(f, protocol_version, user_type_map, result_metadata, column_encryption_policy) elif self.kind == RESULT_KIND_SET_KEYSPACE: self.new_keyspace = read_string(f) elif self.kind == RESULT_KIND_PREPARED: @@ -738,32 +739,40 @@ def recv(self, f, protocol_version, user_type_map, result_metadata): raise DriverException("Unknown RESULT kind: %d" % self.kind) @classmethod - def recv_body(cls, f, protocol_version, user_type_map, result_metadata): + def recv_body(cls, f, protocol_version, user_type_map, result_metadata, column_encryption_policy): kind = read_int(f) msg = cls(kind) - msg.recv(f, protocol_version, user_type_map, result_metadata) + msg.recv(f, protocol_version, user_type_map, result_metadata, column_encryption_policy) return msg - def recv_results_rows(self, f, protocol_version, user_type_map, result_metadata): + def recv_results_rows(self, f, protocol_version, user_type_map, result_metadata, column_encryption_policy): self.recv_results_metadata(f, user_type_map) column_metadata = self.column_metadata or result_metadata rowcount = read_int(f) rows = [self.recv_row(f, len(column_metadata)) for _ in range(rowcount)] self.column_names = [c[2] for c in column_metadata] self.column_types = [c[3] for c in column_metadata] + col_descs = [ColDesc(md[0], md[1], md[2]) for md in column_metadata] + + def decode_val(val, col_md, col_desc): + uses_ce = column_encryption_policy and column_encryption_policy.contains_column(col_desc) + col_type = column_encryption_policy.column_type(col_desc) if uses_ce else col_md[3] + raw_bytes = column_encryption_policy.decrypt(col_desc, val) if uses_ce else val + return col_type.from_binary(raw_bytes, protocol_version) + + def decode_row(row): + return tuple(decode_val(val, col_md, col_desc) for val, col_md, col_desc in zip(row, column_metadata, col_descs)) + try: - self.parsed_rows = [ - tuple(ctype.from_binary(val, protocol_version) - for ctype, val in zip(self.column_types, row)) - for row in rows] + self.parsed_rows = [decode_row(row) for row in rows] except Exception: for row in rows: - for i in range(len(row)): + for val, col_md, col_desc in zip(row, column_metadata, col_descs): try: - self.column_types[i].from_binary(row[i], protocol_version) + decode_val(val, col_md, col_desc) except Exception as e: - raise DriverException('Failed decoding result column "%s" of type %s: %s' % (self.column_names[i], - self.column_types[i].cql_parameterized_type(), + raise DriverException('Failed decoding result column "%s" of type %s: %s' % (col_md[2], + col_md[3].cql_parameterized_type(), str(e))) def recv_results_prepared(self, f, protocol_version, user_type_map): @@ -1099,6 +1108,9 @@ class _ProtocolHandler(object): result decoding implementations. """ + column_encryption_policy = None + """Instance of :class:`cassandra.policies.ColumnEncryptionPolicy` in use by this handler""" + @classmethod def encode_message(cls, msg, stream_id, protocol_version, compressor, allow_beta_protocol_version): """ @@ -1193,7 +1205,7 @@ def decode_message(cls, protocol_version, user_type_map, stream_id, flags, opcod log.warning("Unknown protocol flags set: %02x. May cause problems.", flags) msg_class = cls.message_types_by_opcode[opcode] - msg = msg_class.recv_body(body, protocol_version, user_type_map, result_metadata) + msg = msg_class.recv_body(body, protocol_version, user_type_map, result_metadata, cls.column_encryption_policy) msg.stream_id = stream_id msg.trace_id = trace_id msg.custom_payload = custom_payload diff --git a/cassandra/query.py b/cassandra/query.py index f7a5b8fdf5..dc72d7cbcd 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -31,6 +31,7 @@ from cassandra.util import unix_time_from_uuid1 from cassandra.encoder import Encoder import cassandra.encoder +from cassandra.policies import ColDesc from cassandra.protocol import _UNSET_VALUE from cassandra.util import OrderedDict, _sanitize_identifiers @@ -442,12 +443,14 @@ class PreparedStatement(object): query_string = None result_metadata = None result_metadata_id = None + column_encryption_policy = None routing_key_indexes = None _routing_key_index_set = None serial_consistency_level = None # TODO never used? def __init__(self, column_metadata, query_id, routing_key_indexes, query, - keyspace, protocol_version, result_metadata, result_metadata_id): + keyspace, protocol_version, result_metadata, result_metadata_id, + column_encryption_policy=None): self.column_metadata = column_metadata self.query_id = query_id self.routing_key_indexes = routing_key_indexes @@ -456,14 +459,17 @@ def __init__(self, column_metadata, query_id, routing_key_indexes, query, self.protocol_version = protocol_version self.result_metadata = result_metadata self.result_metadata_id = result_metadata_id + self.column_encryption_policy = column_encryption_policy self.is_idempotent = False @classmethod def from_message(cls, query_id, column_metadata, pk_indexes, cluster_metadata, query, prepared_keyspace, protocol_version, result_metadata, - result_metadata_id): + result_metadata_id, column_encryption_policy=None): if not column_metadata: - return PreparedStatement(column_metadata, query_id, None, query, prepared_keyspace, protocol_version, result_metadata, result_metadata_id) + return PreparedStatement(column_metadata, query_id, None, + query, prepared_keyspace, protocol_version, result_metadata, + result_metadata_id, column_encryption_policy) if pk_indexes: routing_key_indexes = pk_indexes @@ -489,7 +495,7 @@ def from_message(cls, query_id, column_metadata, pk_indexes, cluster_metadata, return PreparedStatement(column_metadata, query_id, routing_key_indexes, query, prepared_keyspace, protocol_version, result_metadata, - result_metadata_id) + result_metadata_id, column_encryption_policy) def bind(self, values): """ @@ -577,6 +583,7 @@ def bind(self, values): values = () proto_version = self.prepared_statement.protocol_version col_meta = self.prepared_statement.column_metadata + ce_policy = self.prepared_statement.column_encryption_policy # special case for binding dicts if isinstance(values, dict): @@ -623,7 +630,13 @@ def bind(self, values): raise ValueError("Attempt to bind UNSET_VALUE while using unsuitable protocol version (%d < 4)" % proto_version) else: try: - self.values.append(col_spec.type.serialize(value, proto_version)) + col_desc = ColDesc(col_spec.keyspace_name, col_spec.table_name, col_spec.name) + uses_ce = ce_policy and ce_policy.contains_column(col_desc) + col_type = ce_policy.column_type(col_desc) if uses_ce else col_spec.type + col_bytes = col_type.serialize(value, proto_version) + if uses_ce: + col_bytes = ce_policy.encrypt(col_desc, col_bytes) + self.values.append(col_bytes) except (TypeError, struct.error) as exc: actual_type = type(value) message = ('Received an argument of invalid type for column "%s". ' diff --git a/cassandra/row_parser.pyx b/cassandra/row_parser.pyx index 3a4b2f4604..88277a4593 100644 --- a/cassandra/row_parser.pyx +++ b/cassandra/row_parser.pyx @@ -13,13 +13,14 @@ # limitations under the License. from cassandra.parsing cimport ParseDesc, ColumnParser +from cassandra.policies import ColDesc from cassandra.obj_parser import TupleRowParser from cassandra.deserializers import make_deserializers include "ioutils.pyx" def make_recv_results_rows(ColumnParser colparser): - def recv_results_rows(self, f, int protocol_version, user_type_map, result_metadata): + def recv_results_rows(self, f, int protocol_version, user_type_map, result_metadata, column_encryption_policy): """ Parse protocol data given as a BytesIO f into a set of columns (e.g. list of tuples) This is used as the recv_results_rows method of (Fast)ResultMessage @@ -28,11 +29,12 @@ def make_recv_results_rows(ColumnParser colparser): column_metadata = self.column_metadata or result_metadata - self.column_names = [c[2] for c in column_metadata] - self.column_types = [c[3] for c in column_metadata] + self.column_names = [md[2] for md in column_metadata] + self.column_types = [md[3] for md in column_metadata] - desc = ParseDesc(self.column_names, self.column_types, make_deserializers(self.column_types), - protocol_version) + desc = ParseDesc(self.column_names, self.column_types, column_encryption_policy, + [ColDesc(md[0], md[1], md[2]) for md in column_metadata], + make_deserializers(self.column_types), protocol_version) reader = BytesIOReader(f.read()) try: self.parsed_rows = colparser.parse_rows(reader, desc) diff --git a/docs/column_encryption.rst b/docs/column_encryption.rst new file mode 100644 index 0000000000..4d2a6c2d91 --- /dev/null +++ b/docs/column_encryption.rst @@ -0,0 +1,92 @@ +Column Encryption +================= + +Overview +-------- +Support for client-side encryption of data was added in version 3.27.0 of the Python driver. When using +this feature data will be encrypted on-the-fly according to a specified :class:`~.ColumnEncryptionPolicy` +instance. This policy is also used to decrypt data in returned rows. If a prepared statement is used +this decryption is transparent to the user; retrieved data will be decrypted and converted into the original +type (according to definitions in the encryption policy). Support for simple (i.e. non-prepared) queries is +also available, although in this case values must be manually encrypted and/or decrypted. The +:class:`~.ColumnEncryptionPolicy` instance provides methods to assist with these operations. + +Client-side encryption and decryption should work against all versions of Cassandra and DSE. It does not +utilize any server-side functionality to do its work. + +Configuration +------------- +Client-side encryption is enabled by creating an instance of a subclass of :class:`~.ColumnEncryptionPolicy` +and adding information about columns to be encrypted to it. This policy is then supplied to :class:`~.Cluster` +when it's created. + +.. code-block:: python + import os + + from cassandra.policies import ColDesc, AES256ColumnEncryptionPolicy, AES256_KEY_SIZE_BYTES + + key = os.urandom(AES256_KEY_SIZE_BYTES) + cl_policy = AES256ColumnEncryptionPolicy() + col_desc = ColDesc('ks1','table1','column1') + cql_type = "int" + cl_policy.add_column(col_desc, key, cql_type) + cluster = Cluster(column_encryption_policy=cl_policy) + +:class:`~.AES256ColumnEncryptionPolicy` is a subclass of :class:`~.ColumnEncryptionPolicy` which provides +encryption and decryption via AES-256. This class is currently the only available column encryption policy +implementation, although users can certainly implement their own by subclassing :class:`~.ColumnEncryptionPolicy`. + +:class:`~.ColDesc` is a named tuple which uniquely identifies a column in a given keyspace and table. When we +have this tuple, the encryption key and the CQL type contained by this column we can add the column to the policy +using :func:`~.ColumnEncryptionPolicy.add_column`. Once we have added all column definitions to the policy we +pass it along to the cluster. + +The CQL type for the column only has meaning at the client; it is never sent to Cassandra. The encryption key +is also never sent to the server; all the server ever sees are random bytes reflecting the encrypted data. As a +result all columns containing client-side encrypted values should be declared with the CQL type "blob" at the +Cassandra server. + +Usage +----- + +Encryption +^^^^^^^^^^ +Client-side encryption shines most when used with prepared statements. A prepared statement is aware of information +about the columns in the query it was built from and we can use this information to transparently encrypt any +supplied parameters. For example, we can create a prepared statement to insert a value into column1 (as defined above) +by executing the following code after creating a :class:`~.Cluster` in the manner described above: + +.. code-block:: python + session = cluster.connect() + prepared = session.prepare("insert into ks1.table1 (column1) values (?)") + session.execute(prepared, (1000,)) + +Our encryption policy will detect that "column1" is an encrypted column and take appropriate action. + +As mentioned above client-side encryption can also be used with simple queries, although such use cases are +certainly not transparent. :class:`~.ColumnEncryptionPolicy` provides a helper named +:func:`~.ColumnEncryptionPolicy.encode_and_encrypt` which will convert an input value into bytes using the +standard serialization methods employed by the driver. The result is then encrypted according to the configuration +of the policy. Using this approach the example above could be implemented along the lines of the following: + +.. code-block:: python + session = cluster.connect() + session.execute("insert into ks1.table1 (column1) values (%s)",(cl_policy.encode_and_encrypt(col_desc, 1000),)) + +Decryption +^^^^^^^^^^ +Decryption of values returned from the server is always transparent. Whether we're executing a simple or prepared +statement encrypted columns will be decrypted automatically and made available via rows just like any other +result. + +Limitations +----------- +:class:`~.AES256ColumnEncryptionPolicy` uses the implementation of AES-256 provided by the +`cryptography `_ module. Any limitations of this module should be considered +when deploying client-side encryption. Note specifically that a Rust compiler is required for modern versions +of the cryptography package, although wheels exist for many common platforms. + +Client-side encryption has been implemented for both the default Cython and pure Python row processing logic. +This functionality has not yet been ported to the NumPy Cython implementation. We have reason to believe the +NumPy processing works reasonably well on Python 3.7 but fails for Python 3.8. We hope to address this discrepancy +in a future release. \ No newline at end of file diff --git a/docs/index.rst b/docs/index.rst index 005d02bf84..6f34f249fb 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -50,6 +50,9 @@ Contents :doc:`cloud` A guide to connecting to Datastax Astra +:doc:`column_encryption` + Transparent client-side per-column encryption and decryption + :doc:`geo_types` Working with DSE geometry types diff --git a/requirements.txt b/requirements.txt index f784fba1b9..44356365ce 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,4 @@ +cryptography >= 35.0 geomet>=0.1,<0.3 six >=1.9 futures <=2.2.0 diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index 7443ce0748..60c5fb8969 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -261,7 +261,7 @@ class CustomResultMessageRaw(ResultMessage): my_type_codes[0xc] = UUIDType type_codes = my_type_codes - def recv_results_rows(self, f, protocol_version, user_type_map, result_metadata): + def recv_results_rows(self, f, protocol_version, user_type_map, result_metadata, column_encryption_policy): self.recv_results_metadata(f, user_type_map) column_metadata = self.column_metadata or result_metadata rowcount = read_int(f) @@ -290,7 +290,7 @@ class CustomResultMessageTracked(ResultMessage): type_codes = my_type_codes checked_rev_row_set = set() - def recv_results_rows(self, f, protocol_version, user_type_map, result_metadata): + def recv_results_rows(self, f, protocol_version, user_type_map, result_metadata, column_encryption_policy): self.recv_results_metadata(f, user_type_map) column_metadata = self.column_metadata or result_metadata rowcount = read_int(f) diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index 46e91918ac..8f46306236 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -12,11 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +from decimal import Decimal +import os +import random import unittest from cassandra.cluster import ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.policies import HostFilterPolicy, RoundRobinPolicy, SimpleConvictionPolicy, \ - WhiteListRoundRobinPolicy + WhiteListRoundRobinPolicy, ColDesc, AES256ColumnEncryptionPolicy, AES256_KEY_SIZE_BYTES from cassandra.pool import Host from cassandra.connection import DefaultEndPoint @@ -90,3 +93,71 @@ def test_only_connects_to_subset(self): queried_hosts.update(response.response_future.attempted_hosts) queried_hosts = set(host.address for host in queried_hosts) self.assertEqual(queried_hosts, only_connect_hosts) + +class ColumnEncryptionPolicyTest(unittest.TestCase): + + def _recreate_keyspace(self, session): + session.execute("drop keyspace if exists foo") + session.execute("CREATE KEYSPACE foo WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}") + session.execute("CREATE TABLE foo.bar(encrypted blob, unencrypted int, primary key(unencrypted))") + + def test_end_to_end_prepared(self): + + # We only currently perform testing on a single type/expected value pair since CLE functionality is essentially + # independent of the underlying type. We intercept data after it's been encoded when it's going out and before it's + # encoded when coming back; the actual types of the data involved don't impact us. + expected = 12345 + expected_type = "int" + + key = os.urandom(AES256_KEY_SIZE_BYTES) + cl_policy = AES256ColumnEncryptionPolicy() + col_desc = ColDesc('foo','bar','encrypted') + cl_policy.add_column(col_desc, key, expected_type) + + cluster = TestCluster(column_encryption_policy=cl_policy) + session = cluster.connect() + self._recreate_keyspace(session) + + prepared = session.prepare("insert into foo.bar (encrypted, unencrypted) values (?,?)") + session.execute(prepared, (expected,expected)) + + # A straight select from the database will now return the decrypted bits. We select both encrypted and unencrypted + # values here to confirm that we don't interfere with regular processing of unencrypted vals. + (encrypted,unencrypted) = session.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() + self.assertEquals(expected, encrypted) + self.assertEquals(expected, unencrypted) + + # Confirm the same behaviour from a subsequent prepared statement as well + prepared = session.prepare("select encrypted, unencrypted from foo.bar where unencrypted = ? allow filtering") + (encrypted,unencrypted) = session.execute(prepared, [expected]).one() + self.assertEquals(expected, encrypted) + self.assertEquals(expected, unencrypted) + + def test_end_to_end_simple(self): + + expected = 67890 + expected_type = "int" + + key = os.urandom(AES256_KEY_SIZE_BYTES) + cl_policy = AES256ColumnEncryptionPolicy() + col_desc = ColDesc('foo','bar','encrypted') + cl_policy.add_column(col_desc, key, expected_type) + + cluster = TestCluster(column_encryption_policy=cl_policy) + session = cluster.connect() + self._recreate_keyspace(session) + + # Use encode_and_encrypt helper function to populate date + session.execute("insert into foo.bar (encrypted, unencrypted) values (%s,%s)",(cl_policy.encode_and_encrypt(col_desc, expected), expected)) + + # A straight select from the database will now return the decrypted bits. We select both encrypted and unencrypted + # values here to confirm that we don't interfere with regular processing of unencrypted vals. + (encrypted,unencrypted) = session.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() + self.assertEquals(expected, encrypted) + self.assertEquals(expected, unencrypted) + + # Confirm the same behaviour from a subsequent prepared statement as well + prepared = session.prepare("select encrypted, unencrypted from foo.bar where unencrypted = ? allow filtering") + (encrypted,unencrypted) = session.execute(prepared, [expected]).one() + self.assertEquals(expected, encrypted) + self.assertEquals(expected, unencrypted) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index a6c63dcfdc..451d5c50c9 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -16,6 +16,7 @@ from itertools import islice, cycle from mock import Mock, patch, call +import os from random import randint import six from six.moves._thread import LockType @@ -25,6 +26,8 @@ from cassandra import ConsistencyLevel from cassandra.cluster import Cluster +from cassandra.connection import DefaultEndPoint +from cassandra.cqltypes import BytesType from cassandra.metadata import Metadata from cassandra.policies import (RoundRobinPolicy, WhiteListRoundRobinPolicy, DCAwareRoundRobinPolicy, TokenAwarePolicy, SimpleConvictionPolicy, @@ -32,9 +35,10 @@ RetryPolicy, WriteType, DowngradingConsistencyRetryPolicy, ConstantReconnectionPolicy, LoadBalancingPolicy, ConvictionPolicy, ReconnectionPolicy, FallthroughRetryPolicy, - IdentityTranslator, EC2MultiRegionTranslator, HostFilterPolicy) + IdentityTranslator, EC2MultiRegionTranslator, HostFilterPolicy, + AES256ColumnEncryptionPolicy, ColDesc, + AES256_BLOCK_SIZE_BYTES, AES256_KEY_SIZE_BYTES) from cassandra.pool import Host -from cassandra.connection import DefaultEndPoint from cassandra.query import Statement from six.moves import xrange @@ -1500,3 +1504,131 @@ def test_create_whitelist(self): self.assertEqual(set(query_plan), {Host(DefaultEndPoint("127.0.0.1"), SimpleConvictionPolicy), Host(DefaultEndPoint("127.0.0.4"), SimpleConvictionPolicy)}) +class AES256ColumnEncryptionPolicyTest(unittest.TestCase): + + def _random_block(self): + return os.urandom(AES256_BLOCK_SIZE_BYTES) + + def _random_key(self): + return os.urandom(AES256_KEY_SIZE_BYTES) + + def _test_round_trip(self, bytes): + coldesc = ColDesc('ks1','table1','col1') + policy = AES256ColumnEncryptionPolicy() + policy.add_column(coldesc, self._random_key(), "blob") + encrypted_bytes = policy.encrypt(coldesc, bytes) + self.assertEqual(bytes, policy.decrypt(coldesc, encrypted_bytes)) + + def test_no_padding_necessary(self): + self._test_round_trip(self._random_block()) + + def test_some_padding_required(self): + for byte_size in range(1,AES256_BLOCK_SIZE_BYTES - 1): + bytes = os.urandom(byte_size) + self._test_round_trip(bytes) + for byte_size in range(AES256_BLOCK_SIZE_BYTES + 1,(2 * AES256_BLOCK_SIZE_BYTES) - 1): + bytes = os.urandom(byte_size) + self._test_round_trip(bytes) + + def test_add_column_invalid_key_size_raises(self): + coldesc = ColDesc('ks1','table1','col1') + policy = AES256ColumnEncryptionPolicy() + for key_size in range(1,AES256_KEY_SIZE_BYTES - 1): + with self.assertRaises(ValueError): + policy.add_column(coldesc, os.urandom(key_size), "blob") + for key_size in range(AES256_KEY_SIZE_BYTES + 1,(2 * AES256_KEY_SIZE_BYTES) - 1): + with self.assertRaises(ValueError): + policy.add_column(coldesc, os.urandom(key_size), "blob") + + def test_add_column_null_coldesc_raises(self): + with self.assertRaises(ValueError): + policy = AES256ColumnEncryptionPolicy() + policy.add_column(None, self._random_block(), "blob") + + def test_add_column_null_key_raises(self): + with self.assertRaises(ValueError): + policy = AES256ColumnEncryptionPolicy() + coldesc = ColDesc('ks1','table1','col1') + policy.add_column(coldesc, None, "blob") + + def test_add_column_null_type_raises(self): + with self.assertRaises(ValueError): + policy = AES256ColumnEncryptionPolicy() + coldesc = ColDesc('ks1','table1','col1') + policy.add_column(coldesc, self._random_block(), None) + + def test_add_column_unknown_type_raises(self): + with self.assertRaises(ValueError): + policy = AES256ColumnEncryptionPolicy() + coldesc = ColDesc('ks1','table1','col1') + policy.add_column(coldesc, self._random_block(), "foobar") + + def test_encode_and_encrypt_null_coldesc_raises(self): + with self.assertRaises(ValueError): + policy = AES256ColumnEncryptionPolicy() + coldesc = ColDesc('ks1','table1','col1') + policy.add_column(coldesc, self._random_key(), "blob") + policy.encode_and_encrypt(None, self._random_block()) + + def test_encode_and_encrypt_null_obj_raises(self): + with self.assertRaises(ValueError): + policy = AES256ColumnEncryptionPolicy() + coldesc = ColDesc('ks1','table1','col1') + policy.add_column(coldesc, self._random_key(), "blob") + policy.encode_and_encrypt(coldesc, None) + + def test_encode_and_encrypt_unknown_coldesc_raises(self): + with self.assertRaises(ValueError): + policy = AES256ColumnEncryptionPolicy() + coldesc = ColDesc('ks1','table1','col1') + policy.add_column(coldesc, self._random_key(), "blob") + policy.encode_and_encrypt(ColDesc('ks2','table2','col2'), self._random_block()) + + def test_contains_column(self): + coldesc = ColDesc('ks1','table1','col1') + policy = AES256ColumnEncryptionPolicy() + policy.add_column(coldesc, self._random_key(), "blob") + self.assertTrue(policy.contains_column(coldesc)) + self.assertFalse(policy.contains_column(ColDesc('ks2','table1','col1'))) + self.assertFalse(policy.contains_column(ColDesc('ks1','table2','col1'))) + self.assertFalse(policy.contains_column(ColDesc('ks1','table1','col2'))) + self.assertFalse(policy.contains_column(ColDesc('ks2','table2','col2'))) + + def test_encrypt_unknown_column(self): + with self.assertRaises(ValueError): + policy = AES256ColumnEncryptionPolicy() + coldesc = ColDesc('ks1','table1','col1') + policy.add_column(coldesc, self._random_key(), "blob") + policy.encrypt(ColDesc('ks2','table2','col2'), self._random_block()) + + def test_decrypt_unknown_column(self): + policy = AES256ColumnEncryptionPolicy() + coldesc = ColDesc('ks1','table1','col1') + policy.add_column(coldesc, self._random_key(), "blob") + encrypted_bytes = policy.encrypt(coldesc, self._random_block()) + with self.assertRaises(ValueError): + policy.decrypt(ColDesc('ks2','table2','col2'), encrypted_bytes) + + def test_cache_info(self): + coldesc1 = ColDesc('ks1','table1','col1') + coldesc2 = ColDesc('ks2','table2','col2') + coldesc3 = ColDesc('ks3','table3','col3') + policy = AES256ColumnEncryptionPolicy() + for coldesc in [coldesc1, coldesc2, coldesc3]: + policy.add_column(coldesc, self._random_key(), "blob") + + # First run for this coldesc should be a miss, everything else should be a cache hit + for _ in range(10): + policy.encrypt(coldesc1, self._random_block()) + cache_info = policy.cache_info() + self.assertEqual(cache_info.hits, 9) + self.assertEqual(cache_info.misses, 1) + self.assertEqual(cache_info.maxsize, 128) + + # Important note: we're measuring the size of the cache of ciphers, NOT stored + # keys. We won't have a cipher here until we actually encrypt something + self.assertEqual(cache_info.currsize, 1) + policy.encrypt(coldesc2, self._random_block()) + self.assertEqual(policy.cache_info().currsize, 2) + policy.encrypt(coldesc3, self._random_block()) + self.assertEqual(policy.cache_info().currsize, 3) diff --git a/tox.ini b/tox.ini index 6d94e11247..4aae7a0140 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py{27,35,36,37,38},pypy +envlist = py{37,38},pypy [base] deps = nose @@ -13,7 +13,8 @@ deps = nose kerberos futurist greenlet>=0.4.14,<0.4.17 -lz4_dependency = py27,py35,py36,py37,py38: lz4 + cryptography>=35.0 +lz4_dependency = py37,py38: lz4 [testenv] deps = {[base]deps} From ad400562efe6ffde9d6b8d4e7a4255e7751015f2 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Mon, 1 May 2023 11:01:35 -0500 Subject: [PATCH 1905/2005] Release 3.27: changelog & version --- CHANGELOG.rst | 8 ++++++++ cassandra/__init__.py | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 544585e1ce..432998869e 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,11 @@ +3.27.0 +====== +May 1, 2023 + +Features +-------- +* Add support for client-side encryption (PYTHON-1341) + 3.26.0 ====== March 13, 2023 diff --git a/cassandra/__init__.py b/cassandra/__init__.py index e14f20c6ed..1573abdf00 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 26, 0) +__version_info__ = (3, 27, 0) __version__ = '.'.join(map(str, __version_info__)) From 5646fb0ff5e95c2eea7998f96a1679d996ca88ac Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Mon, 1 May 2023 15:08:25 -0500 Subject: [PATCH 1906/2005] Include docs for 3.27.0 (#1153) --- docs.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs.yaml b/docs.yaml index c95a63d5a9..92d68b3013 100644 --- a/docs.yaml +++ b/docs.yaml @@ -22,6 +22,8 @@ sections: # build extensions like libev CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: + - name: '3.27' + ref: ad40056 - name: '3.26' ref: f1e9126 - name: '3.25' From 1767fdbfefc97fa2474461c391bc387c7cf0fdc2 Mon Sep 17 00:00:00 2001 From: Jamie Gillenwater Date: Mon, 1 May 2023 16:45:09 -0400 Subject: [PATCH 1907/2005] update RH nav order (#1154) * update RH nav order * add line break * add api --- docs/.nav | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/docs/.nav b/docs/.nav index 375f058817..79f3029073 100644 --- a/docs/.nav +++ b/docs/.nav @@ -3,10 +3,6 @@ getting_started execution_profiles lwt object_mapper -geo_types -graph -graph_fluent -classic_graph performance query_paging security @@ -14,5 +10,12 @@ upgrading user_defined_types dates_and_times cloud +column_encryption +geo_types +graph +classic_graph +graph_fluent +CHANGELOG faq api + From 7556a2da46aa48cbdac7ea266970e7dfc426caa6 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Mon, 1 May 2023 15:46:22 -0500 Subject: [PATCH 1908/2005] Update docs.yaml to point to latest 3.27.0 docs --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index 92d68b3013..0529caddbe 100644 --- a/docs.yaml +++ b/docs.yaml @@ -23,7 +23,7 @@ sections: CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - name: '3.27' - ref: ad40056 + ref: 1767fdb - name: '3.26' ref: f1e9126 - name: '3.25' From 78b1e2bde47fdef8ffe967fa8b06371872ba7c2a Mon Sep 17 00:00:00 2001 From: Jamie Gillenwater Date: Mon, 1 May 2023 17:04:53 -0400 Subject: [PATCH 1909/2005] remove future plans (#1155) --- docs/column_encryption.rst | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/column_encryption.rst b/docs/column_encryption.rst index 4d2a6c2d91..289f9cd62b 100644 --- a/docs/column_encryption.rst +++ b/docs/column_encryption.rst @@ -87,6 +87,5 @@ when deploying client-side encryption. Note specifically that a Rust compiler i of the cryptography package, although wheels exist for many common platforms. Client-side encryption has been implemented for both the default Cython and pure Python row processing logic. -This functionality has not yet been ported to the NumPy Cython implementation. We have reason to believe the -NumPy processing works reasonably well on Python 3.7 but fails for Python 3.8. We hope to address this discrepancy -in a future release. \ No newline at end of file +This functionality has not yet been ported to the NumPy Cython implementation. During testing, +the NumPy processing works on Python 3.7 but fails for Python 3.8. \ No newline at end of file From e98c26eb959a8bdbd14a0ecdf40bc05e4f047781 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Mon, 1 May 2023 16:12:33 -0500 Subject: [PATCH 1910/2005] Update docs.yaml to point to latest 3.27.0 docs, take two --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index 0529caddbe..d7226d5e69 100644 --- a/docs.yaml +++ b/docs.yaml @@ -23,7 +23,7 @@ sections: CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - name: '3.27' - ref: 1767fdb + ref: 78b1e2b - name: '3.26' ref: f1e9126 - name: '3.25' From d911621d365baa6c637dedb70eed487a8a18f78c Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Mon, 1 May 2023 21:18:34 -0500 Subject: [PATCH 1911/2005] Missed dependency on cryptography in setup.py --- setup.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 056469aca6..ba7cd92f20 100644 --- a/setup.py +++ b/setup.py @@ -402,7 +402,8 @@ def run_setup(extensions): sys.stderr.write("Bypassing Cython setup requirement\n") dependencies = ['six >=1.9', - 'geomet>=0.1,<0.3'] + 'geomet>=0.1,<0.3', + 'cryptography>=35.0'] _EXTRAS_REQUIRE = { 'graph': ['gremlinpython==3.4.6'] From 078e2f616176c2a666852179c9e07b10a488ca9c Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Tue, 9 May 2023 09:34:28 -0500 Subject: [PATCH 1912/2005] Remove different build matrix selection for develop branches (#1138) --- Jenkinsfile | 68 ++++++++++++++++++++--------------------------------- 1 file changed, 25 insertions(+), 43 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index f3b5d0718b..bd04b0fa29 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -33,31 +33,35 @@ slack = new Slack() // // Smoke tests are CI-friendly test configuration. Currently-supported Python version + modern C*/DSE instances. // We also avoid cython since it's tested as part of the nightlies. +DEFAULT_CASSANDRA = ['2.1', '2.2', '3.0', '3.11', '4.0'] +DEFAULT_DSE = ['dse-5.0.15', 'dse-5.1.35', 'dse-6.0.18', 'dse-6.7.17', 'dse-6.8.30'] +DEFAULT_RUNTIME = ['2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'] +DEFAULT_CYTHON = ["True", "False"] matrices = [ "FULL": [ - "SERVER": ['2.1', '2.2', '3.0', '3.11', '4.0', 'dse-5.0.15', 'dse-5.1.35', 'dse-6.0.18', 'dse-6.7.17', 'dse-6.8.30'], - "RUNTIME": ['2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], - "CYTHON": ["True", "False"] + "SERVER": DEFAULT_CASSANDRA + DEFAULT_DSE, + "RUNTIME": DEFAULT_RUNTIME, + "CYTHON": DEFAULT_CYTHON ], "DEVELOP": [ "SERVER": ['2.1', '3.11', 'dse-6.8.30'], "RUNTIME": ['2.7.18', '3.6.10'], - "CYTHON": ["True", "False"] + "CYTHON": DEFAULT_CYTHON ], "CASSANDRA": [ - "SERVER": ['2.1', '2.2', '3.0', '3.11', '4.0'], - "RUNTIME": ['2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], - "CYTHON": ["True", "False"] + "SERVER": DEFAULT_CASSANDRA, + "RUNTIME": DEFAULT_RUNTIME, + "CYTHON": DEFAULT_CYTHON ], "DSE": [ - "SERVER": ['dse-5.0.15', 'dse-5.1.35', 'dse-6.0.18', 'dse-6.7.17', 'dse-6.8.30'], - "RUNTIME": ['2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], - "CYTHON": ["True", "False"] + "SERVER": DEFAULT_DSE, + "RUNTIME": DEFAULT_RUNTIME, + "CYTHON": DEFAULT_CYTHON ], "SMOKE": [ - "SERVER": ['3.11', '4.0', 'dse-6.8.30'], - "RUNTIME": ['3.7.7', '3.8.3'], - "CYTHON": ["True", "False"] + "SERVER": DEFAULT_CASSANDRA.takeRight(2) + DEFAULT_DSE.takeRight(1), + "RUNTIME": DEFAULT_RUNTIME.takeRight(2), + "CYTHON": ["False"] ] ] @@ -84,24 +88,13 @@ def getBuildContext() { Based on schedule and parameters, configure the build context and env vars. */ - def profile = "${params.PROFILE}" + def PROFILE = "${params.PROFILE}" def EVENT_LOOP = "${params.EVENT_LOOP.toLowerCase()}" - matrixType = "SMOKE" - developBranchPattern = ~"((dev|long)-)?python-.*" - if (developBranchPattern.matcher(env.BRANCH_NAME).matches()) { - matrixType = "DEVELOP" - if (env.BRANCH_NAME.contains("long")) { - profile = "FULL" - } - } + matrixType = params.MATRIX != "DEFAULT" ? params.MATRIX : "SMOKE" + matrix = matrices[matrixType].clone() // Check if parameters were set explicitly - if (params.MATRIX != "DEFAULT") { - matrixType = params.MATRIX - } - - matrix = matrices[matrixType].clone() if (params.CYTHON != "DEFAULT") { matrix["CYTHON"] = [params.CYTHON] } @@ -121,7 +114,7 @@ def getBuildContext() { context = [ vars: [ - "PROFILE=${profile}", + "PROFILE=${PROFILE}", "EVENT_LOOP=${EVENT_LOOP}" ], matrix: matrix @@ -503,22 +496,11 @@ pipeline { ''') choice( name: 'PYTHON_VERSION', - choices: ['DEFAULT', '2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], + choices: ['DEFAULT'] + DEFAULT_RUNTIME, description: 'Python runtime version. Default to the build context.') choice( name: 'SERVER_VERSION', - choices: ['DEFAULT', - '2.1', // Legacy Apache CassandraⓇ - '2.2', // Legacy Apache CassandraⓇ - '3.0', // Previous Apache CassandraⓇ - '3.11', // Current Apache CassandraⓇ - '4.0', // Development Apache CassandraⓇ - 'dse-5.0.15', // Long Term Support DataStax Enterprise - 'dse-5.1.35', // Legacy DataStax Enterprise - 'dse-6.0.18', // Previous DataStax Enterprise - 'dse-6.7.17', // Previous DataStax Enterprise - 'dse-6.8.30', // Current DataStax Enterprise - ], + choices: ['DEFAULT'] + DEFAULT_CASSANDRA + DEFAULT_DSE, description: '''Apache CassandraⓇ and DataStax Enterprise server version to use for adhoc BUILD-AND-EXECUTE-TESTS ONLY! @@ -549,7 +531,7 @@ pipeline { - + @@ -574,7 +556,7 @@ pipeline {
4.0Apache CassandraⓇ v4.x (CURRENTLY UNDER DEVELOPMENT)Apache CassandraⓇ v4.0.x
dse-5.0.15
''') choice( name: 'CYTHON', - choices: ['DEFAULT', 'True', 'False'], + choices: ['DEFAULT'] + DEFAULT_CYTHON, description: '''

Flag to determine if Cython should be enabled

From 9ed9894a4cd9b1c2338a512ce13658b8675b0638 Mon Sep 17 00:00:00 2001 From: Emelia <105240296+emeliawilkinson24@users.noreply.github.com> Date: Tue, 9 May 2023 10:38:02 -0400 Subject: [PATCH 1913/2005] DOC-2813 (#1145) Added error handling blog reference. --- README.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.rst b/README.rst index 197b698aa0..e140371f07 100644 --- a/README.rst +++ b/README.rst @@ -57,6 +57,10 @@ Contributing ------------ See `CONTRIBUTING.md `_. +Error Handling +------------ +While originally written for the Java driver, users may reference the `Cassandra error handling done right blog `_ for resolving error handling scenarios with Apache Cassandra. + Reporting Problems ------------------ Please report any bugs and make any feature requests on the From 1092fbd8812415deae0845ae40f0936f326c588c Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Fri, 12 May 2023 09:54:04 -0500 Subject: [PATCH 1914/2005] DOC-3278 Update comment for retry policy (#1158) --- cassandra/policies.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/policies.py b/cassandra/policies.py index 26b9aa4c5a..36063abafe 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -873,7 +873,7 @@ def on_request_error(self, query, consistency, error, retry_num): `retry_num` counts how many times the operation has been retried, so the first time this method is called, `retry_num` will be 0. - The default, it triggers a retry on the next host in the query plan + By default, it triggers a retry on the next host in the query plan with the same consistency level. """ # TODO revisit this for the next major From 0002e9126f66363220421b85219ae5183f22dd12 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Fri, 19 May 2023 12:25:32 -0500 Subject: [PATCH 1915/2005] Fix for rendering of code blocks in CLE documentation (#1159) --- docs/column_encryption.rst | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/column_encryption.rst b/docs/column_encryption.rst index 289f9cd62b..1392972fa6 100644 --- a/docs/column_encryption.rst +++ b/docs/column_encryption.rst @@ -21,6 +21,7 @@ and adding information about columns to be encrypted to it. This policy is then when it's created. .. code-block:: python + import os from cassandra.policies import ColDesc, AES256ColumnEncryptionPolicy, AES256_KEY_SIZE_BYTES @@ -57,6 +58,7 @@ supplied parameters. For example, we can create a prepared statement to insert by executing the following code after creating a :class:`~.Cluster` in the manner described above: .. code-block:: python + session = cluster.connect() prepared = session.prepare("insert into ks1.table1 (column1) values (?)") session.execute(prepared, (1000,)) @@ -70,6 +72,7 @@ standard serialization methods employed by the driver. The result is then encry of the policy. Using this approach the example above could be implemented along the lines of the following: .. code-block:: python + session = cluster.connect() session.execute("insert into ks1.table1 (column1) values (%s)",(cl_policy.encode_and_encrypt(col_desc, 1000),)) @@ -88,4 +91,4 @@ of the cryptography package, although wheels exist for many common platforms. Client-side encryption has been implemented for both the default Cython and pure Python row processing logic. This functionality has not yet been ported to the NumPy Cython implementation. During testing, -the NumPy processing works on Python 3.7 but fails for Python 3.8. \ No newline at end of file +the NumPy processing works on Python 3.7 but fails for Python 3.8. From 7ba87616b8de2110f0a678d7b3d93476391b9208 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Thu, 18 May 2023 04:40:50 -0500 Subject: [PATCH 1916/2005] Update docs.yaml to include recent fixes to CLE docs --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index d7226d5e69..77c738b4f4 100644 --- a/docs.yaml +++ b/docs.yaml @@ -23,7 +23,7 @@ sections: CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - name: '3.27' - ref: 78b1e2b + ref: 0002e912 - name: '3.26' ref: f1e9126 - name: '3.25' From 6894b028ffda01a63fad1deebe0b5300349d1611 Mon Sep 17 00:00:00 2001 From: Brad Schoening <5796692+bschoening@users.noreply.github.com> Date: Tue, 23 May 2023 17:30:35 -0400 Subject: [PATCH 1917/2005] remove unnecessary import __future__ (#1156) --- cassandra/cluster.py | 1 - cassandra/connection.py | 1 - cassandra/cqlengine/functions.py | 1 - cassandra/cqltypes.py | 1 - cassandra/protocol.py | 1 - cassandra/util.py | 1 - 6 files changed, 6 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 6b0d495013..13af920e87 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -16,7 +16,6 @@ This module houses the main classes you will interact with, :class:`.Cluster` and :class:`.Session`. """ -from __future__ import absolute_import import atexit from binascii import hexlify diff --git a/cassandra/connection.py b/cassandra/connection.py index 0869584663..2532342d67 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import absolute_import # to enable import io from stdlib from collections import defaultdict, deque import errno from functools import wraps, partial, total_ordering diff --git a/cassandra/cqlengine/functions.py b/cassandra/cqlengine/functions.py index 5cb0f673d1..a2495c010d 100644 --- a/cassandra/cqlengine/functions.py +++ b/cassandra/cqlengine/functions.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import division from datetime import datetime from cassandra.cqlengine import UnicodeMixin, ValidationError diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 7946a63af8..65a863b6b5 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -27,7 +27,6 @@ # for example), these classes would be a good place to tack on # .from_cql_literal() and .as_cql_literal() classmethods (or whatever). -from __future__ import absolute_import # to enable import io from stdlib import ast from binascii import unhexlify import calendar diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 5e3610811e..39c91a0e5b 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import absolute_import # to enable import io from stdlib from collections import namedtuple import logging import socket diff --git a/cassandra/util.py b/cassandra/util.py index dd5c58b01d..60490e5460 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import with_statement import calendar import datetime from functools import total_ordering From 64aa5ba16bb2d86cf70925112b4c458434981ea1 Mon Sep 17 00:00:00 2001 From: Lukas Elmer Date: Tue, 23 May 2023 23:34:41 +0200 Subject: [PATCH 1918/2005] docs: convert print statement to function in docs (#1157) --- cassandra/cluster.py | 4 ++-- cassandra/cqlengine/query.py | 12 ++++++------ cassandra/datastax/graph/fluent/__init__.py | 2 +- cassandra/query.py | 10 +++++----- docs/api/cassandra/cqlengine/models.rst | 4 ++-- docs/cqlengine/connections.rst | 2 +- docs/cqlengine/models.rst | 2 +- docs/execution_profiles.rst | 14 +++++++------- docs/faq.rst | 4 ++-- docs/getting_started.rst | 12 ++++++------ docs/graph_fluent.rst | 10 +++++----- docs/object_mapper.rst | 4 ++-- 12 files changed, 40 insertions(+), 40 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 13af920e87..d55547d559 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -989,7 +989,7 @@ def default_retry_policy(self, policy): cloud = None """ A dict of the cloud configuration. Example:: - + { # path to the secure connect bundle 'secure_connect_bundle': '/path/to/secure-connect-dbname.zip', @@ -1450,7 +1450,7 @@ def __init__(self, street, zipcode): # results will include Address instances results = session.execute("SELECT * FROM users") row = results[0] - print row.id, row.location.street, row.location.zipcode + print(row.id, row.location.street, row.location.zipcode) """ if self.protocol_version < 3: diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 11f664ec02..1978d319f4 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -286,15 +286,15 @@ class ContextQuery(object): with ContextQuery(Automobile, keyspace='test2') as A: A.objects.create(manufacturer='honda', year=2008, model='civic') - print len(A.objects.all()) # 1 result + print(len(A.objects.all())) # 1 result with ContextQuery(Automobile, keyspace='test4') as A: - print len(A.objects.all()) # 0 result + print(len(A.objects.all())) # 0 result # Multiple models with ContextQuery(Automobile, Automobile2, connection='cluster2') as (A, A2): - print len(A.objects.all()) - print len(A2.objects.all()) + print(len(A.objects.all())) + print(len(A2.objects.all())) """ @@ -809,11 +809,11 @@ class Comment(Model): print("Normal") for comment in Comment.objects(photo_id=u): - print comment.comment_id + print(comment.comment_id) print("Reversed") for comment in Comment.objects(photo_id=u).order_by("-comment_id"): - print comment.comment_id + print(comment.comment_id) """ if len(colnames) == 0: clone = copy.deepcopy(self) diff --git a/cassandra/datastax/graph/fluent/__init__.py b/cassandra/datastax/graph/fluent/__init__.py index 44a0d136e0..92f148721e 100644 --- a/cassandra/datastax/graph/fluent/__init__.py +++ b/cassandra/datastax/graph/fluent/__init__.py @@ -257,7 +257,7 @@ def traversal_source(session=None, graph_name=None, execution_profile=EXEC_PROFI session = c.connect() g = DseGraph.traversal_source(session, 'my_graph') - print g.V().valueMap().toList() + print(g.V().valueMap().toList()) """ diff --git a/cassandra/query.py b/cassandra/query.py index dc72d7cbcd..7e4efc2511 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -77,7 +77,7 @@ def tuple_factory(colnames, rows): >>> session = cluster.connect('mykeyspace') >>> session.row_factory = tuple_factory >>> rows = session.execute("SELECT name, age FROM users LIMIT 1") - >>> print rows[0] + >>> print(rows[0]) ('Bob', 42) .. versionchanged:: 2.0.0 @@ -133,16 +133,16 @@ def named_tuple_factory(colnames, rows): >>> user = rows[0] >>> # you can access field by their name: - >>> print "name: %s, age: %d" % (user.name, user.age) + >>> print("name: %s, age: %d" % (user.name, user.age)) name: Bob, age: 42 >>> # or you can access fields by their position (like a tuple) >>> name, age = user - >>> print "name: %s, age: %d" % (name, age) + >>> print("name: %s, age: %d" % (name, age)) name: Bob, age: 42 >>> name = user[0] >>> age = user[1] - >>> print "name: %s, age: %d" % (name, age) + >>> print("name: %s, age: %d" % (name, age)) name: Bob, age: 42 .. versionchanged:: 2.0.0 @@ -188,7 +188,7 @@ def dict_factory(colnames, rows): >>> session = cluster.connect('mykeyspace') >>> session.row_factory = dict_factory >>> rows = session.execute("SELECT name, age FROM users LIMIT 1") - >>> print rows[0] + >>> print(rows[0]) {u'age': 42, u'name': u'Bob'} .. versionchanged:: 2.0.0 diff --git a/docs/api/cassandra/cqlengine/models.rst b/docs/api/cassandra/cqlengine/models.rst index 60b1471184..ee689a2b48 100644 --- a/docs/api/cassandra/cqlengine/models.rst +++ b/docs/api/cassandra/cqlengine/models.rst @@ -103,7 +103,7 @@ Model TestIfNotExistsModel.if_not_exists().create(id=id, count=9, text='111111111111') except LWTException as e: # handle failure case - print e.existing # dict containing LWT result fields + print(e.existing # dict containing LWT result fields) This method is supported on Cassandra 2.0 or later. @@ -144,7 +144,7 @@ Model t.iff(count=5).update('other text') except LWTException as e: # handle failure case - print e.existing # existing object + print(e.existing # existing object) .. automethod:: get diff --git a/docs/cqlengine/connections.rst b/docs/cqlengine/connections.rst index 03ade27521..fd44303514 100644 --- a/docs/cqlengine/connections.rst +++ b/docs/cqlengine/connections.rst @@ -99,7 +99,7 @@ You can specify a default connection per model: year = columns.Integer(primary_key=True) model = columns.Text(primary_key=True) - print len(Automobile.objects.all()) # executed on the connection 'cluster2' + print(len(Automobile.objects.all())) # executed on the connection 'cluster2' QuerySet and model instance --------------------------- diff --git a/docs/cqlengine/models.rst b/docs/cqlengine/models.rst index c0ba390119..719513f4a9 100644 --- a/docs/cqlengine/models.rst +++ b/docs/cqlengine/models.rst @@ -201,7 +201,7 @@ are only created, presisted, and queried via table Models. A short example to in users.create(name="Joe", addr=address(street="Easy St.", zipcode=99999)) user = users.objects(name="Joe")[0] - print user.name, user.addr + print(user.name, user.addr) # Joe address(street=u'Easy St.', zipcode=99999) UDTs are modeled by inheriting :class:`~.usertype.UserType`, and setting column type attributes. Types are then used in defining diff --git a/docs/execution_profiles.rst b/docs/execution_profiles.rst index 7be1a85e3f..0965d77f3d 100644 --- a/docs/execution_profiles.rst +++ b/docs/execution_profiles.rst @@ -43,7 +43,7 @@ Default session = cluster.connect() local_query = 'SELECT rpc_address FROM system.local' for _ in cluster.metadata.all_hosts(): - print session.execute(local_query)[0] + print(session.execute(local_query)[0]) .. parsed-literal:: @@ -69,7 +69,7 @@ Initializing cluster with profiles profiles = {'node1': node1_profile, 'node2': node2_profile} session = Cluster(execution_profiles=profiles).connect() for _ in cluster.metadata.all_hosts(): - print session.execute(local_query, execution_profile='node1')[0] + print(session.execute(local_query, execution_profile='node1')[0]) .. parsed-literal:: @@ -81,7 +81,7 @@ Initializing cluster with profiles .. code:: python for _ in cluster.metadata.all_hosts(): - print session.execute(local_query, execution_profile='node2')[0] + print(session.execute(local_query, execution_profile='node2')[0]) .. parsed-literal:: @@ -93,7 +93,7 @@ Initializing cluster with profiles .. code:: python for _ in cluster.metadata.all_hosts(): - print session.execute(local_query)[0] + print(session.execute(local_query)[0]) .. parsed-literal:: @@ -123,7 +123,7 @@ New profiles can be added constructing from scratch, or deriving from default: cluster.add_execution_profile(node1_profile, locked_execution) for _ in cluster.metadata.all_hosts(): - print session.execute(local_query, execution_profile=node1_profile)[0] + print(session.execute(local_query, execution_profile=node1_profile)[0]) .. parsed-literal:: @@ -144,8 +144,8 @@ We also have the ability to pass profile instances to be used for execution, but tmp = session.execution_profile_clone_update('node1', request_timeout=100, row_factory=tuple_factory) - print session.execute(local_query, execution_profile=tmp)[0] - print session.execute(local_query, execution_profile='node1')[0] + print(session.execute(local_query, execution_profile=tmp)[0]) + print(session.execute(local_query, execution_profile='node1')[0]) .. parsed-literal:: diff --git a/docs/faq.rst b/docs/faq.rst index 56cb648a24..194d5520e8 100644 --- a/docs/faq.rst +++ b/docs/faq.rst @@ -44,7 +44,7 @@ Since tracing is done asynchronously to the request, this method polls until the >>> result = future.result() >>> trace = future.get_query_trace() >>> for e in trace.events: - >>> print e.source_elapsed, e.description + >>> print(e.source_elapsed, e.description) 0:00:00.000077 Parsing select * from system.local 0:00:00.000153 Preparing statement @@ -67,7 +67,7 @@ With prepared statements, the replicas are obtained by ``routing_key``, based on >>> bound = prepared.bind((1,)) >>> replicas = cluster.metadata.get_replicas(bound.keyspace, bound.routing_key) >>> for h in replicas: - >>> print h.address + >>> print(h.address) 127.0.0.1 127.0.0.2 diff --git a/docs/getting_started.rst b/docs/getting_started.rst index ce31ca5d6f..432e42ec4f 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -142,7 +142,7 @@ way to execute a query is to use :meth:`~.Session.execute()`: rows = session.execute('SELECT name, age, email FROM users') for user_row in rows: - print user_row.name, user_row.age, user_row.email + print(user_row.name, user_row.age, user_row.email) This will transparently pick a Cassandra node to execute the query against and handle any retries that are necessary if the operation fails. @@ -158,19 +158,19 @@ examples are equivalent: rows = session.execute('SELECT name, age, email FROM users') for row in rows: - print row.name, row.age, row.email + print(row.name, row.age, row.email) .. code-block:: python rows = session.execute('SELECT name, age, email FROM users') for (name, age, email) in rows: - print name, age, email + print(name, age, email) .. code-block:: python rows = session.execute('SELECT name, age, email FROM users') for row in rows: - print row[0], row[1], row[2] + print(row[0], row[1], row[2]) If you prefer another result format, such as a ``dict`` per row, you can change the :attr:`~.Session.row_factory` attribute. @@ -358,7 +358,7 @@ For example: try: rows = future.result() user = rows[0] - print user.name, user.age + print(user.name, user.age) except ReadTimeout: log.exception("Query timed out:") @@ -375,7 +375,7 @@ This works well for executing many queries concurrently: # wait for them to complete and use the results for future in futures: rows = future.result() - print rows[0].name + print(rows[0].name) Alternatively, instead of calling :meth:`~.ResponseFuture.result()`, you can attach callback and errback functions through the diff --git a/docs/graph_fluent.rst b/docs/graph_fluent.rst index 03cf8d36c0..8d5ad5377d 100644 --- a/docs/graph_fluent.rst +++ b/docs/graph_fluent.rst @@ -83,7 +83,7 @@ to accomplish this configuration: session = cluster.connect() g = DseGraph.traversal_source(session) # Build the GraphTraversalSource - print g.V().toList() # Traverse the Graph + print(g.V().toList()) # Traverse the Graph Note that the execution profile created with :meth:`DseGraph.create_execution_profile <.datastax.graph.fluent.DseGraph.create_execution_profile>` cannot be used for any groovy string queries. @@ -231,11 +231,11 @@ Batch Queries DSE Graph supports batch queries using a :class:`TraversalBatch <.datastax.graph.fluent.query.TraversalBatch>` object instantiated with :meth:`DseGraph.batch <.datastax.graph.fluent.DseGraph.batch>`. A :class:`TraversalBatch <.datastax.graph.fluent.query.TraversalBatch>` allows -you to execute multiple graph traversals in a single atomic transaction. A -traversal batch is executed with :meth:`.Session.execute_graph` or using -:meth:`TraversalBatch.execute <.datastax.graph.fluent.query.TraversalBatch.execute>` if bounded to a DSE session. +you to execute multiple graph traversals in a single atomic transaction. A +traversal batch is executed with :meth:`.Session.execute_graph` or using +:meth:`TraversalBatch.execute <.datastax.graph.fluent.query.TraversalBatch.execute>` if bounded to a DSE session. -Either way you choose to execute the traversal batch, you need to configure +Either way you choose to execute the traversal batch, you need to configure the execution profile accordingly. Here is a example:: from cassandra.cluster import Cluster diff --git a/docs/object_mapper.rst b/docs/object_mapper.rst index 50d3cbf320..4366c0ad52 100644 --- a/docs/object_mapper.rst +++ b/docs/object_mapper.rst @@ -87,7 +87,7 @@ Getting Started >>> q.count() 4 >>> for instance in q: - >>> print instance.description + >>> print(instance.description) example5 example6 example7 @@ -101,5 +101,5 @@ Getting Started >>> q2.count() 1 >>> for instance in q2: - >>> print instance.description + >>> print(instance.description) example5 From f46581ebc7c98f80beb297522df4e2a07a67383a Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Wed, 24 May 2023 12:06:18 -0500 Subject: [PATCH 1919/2005] Revert "remove unnecessary import __future__ (#1156)" This reverts commit 6894b028ffda01a63fad1deebe0b5300349d1611. --- cassandra/cluster.py | 1 + cassandra/connection.py | 1 + cassandra/cqlengine/functions.py | 1 + cassandra/cqltypes.py | 1 + cassandra/protocol.py | 1 + cassandra/util.py | 1 + 6 files changed, 6 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index d55547d559..12b00c42db 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -16,6 +16,7 @@ This module houses the main classes you will interact with, :class:`.Cluster` and :class:`.Session`. """ +from __future__ import absolute_import import atexit from binascii import hexlify diff --git a/cassandra/connection.py b/cassandra/connection.py index 2532342d67..0869584663 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import absolute_import # to enable import io from stdlib from collections import defaultdict, deque import errno from functools import wraps, partial, total_ordering diff --git a/cassandra/cqlengine/functions.py b/cassandra/cqlengine/functions.py index a2495c010d..5cb0f673d1 100644 --- a/cassandra/cqlengine/functions.py +++ b/cassandra/cqlengine/functions.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import division from datetime import datetime from cassandra.cqlengine import UnicodeMixin, ValidationError diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 65a863b6b5..7946a63af8 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -27,6 +27,7 @@ # for example), these classes would be a good place to tack on # .from_cql_literal() and .as_cql_literal() classmethods (or whatever). +from __future__ import absolute_import # to enable import io from stdlib import ast from binascii import unhexlify import calendar diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 39c91a0e5b..5e3610811e 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import absolute_import # to enable import io from stdlib from collections import namedtuple import logging import socket diff --git a/cassandra/util.py b/cassandra/util.py index 60490e5460..dd5c58b01d 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import with_statement import calendar import datetime from functools import total_ordering From d8431d4bf1b19742ca6552a0ddd006038d497e62 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Th=C3=A9o=20Mathieu?= Date: Wed, 24 May 2023 22:43:06 +0200 Subject: [PATCH 1920/2005] Don't fail when inserting UDTs with prepared queries with some missing fields (#1151) --- cassandra/cqltypes.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 7946a63af8..8167b3b894 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -1026,7 +1026,9 @@ def serialize_safe(cls, val, protocol_version): try: item = val[i] except TypeError: - item = getattr(val, fieldname) + item = getattr(val, fieldname, None) + if item is None and not hasattr(val, fieldname): + log.warning(f"field {fieldname} is part of the UDT {cls.typename} but is not present in the value {val}") if item is not None: packed_item = subtype.to_binary(item, proto_version) From a1e2d3d16ce9baf4eb4055404e1f4129495cff57 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Wed, 24 May 2023 15:48:05 -0500 Subject: [PATCH 1921/2005] PYTHON-1343 Use Cython for smoke builds (#1162) --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index bd04b0fa29..4d872f6927 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -61,7 +61,7 @@ matrices = [ "SMOKE": [ "SERVER": DEFAULT_CASSANDRA.takeRight(2) + DEFAULT_DSE.takeRight(1), "RUNTIME": DEFAULT_RUNTIME.takeRight(2), - "CYTHON": ["False"] + "CYTHON": ["True"] ] ] From 863e690ae256a4b774590ff77180af02b4fe02ba Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Wed, 31 May 2023 12:56:14 -0500 Subject: [PATCH 1922/2005] Jenkinsfile cleanup (#1163) --- Jenkinsfile | 31 ++++++++++--------------------- 1 file changed, 10 insertions(+), 21 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 4d872f6927..e6c2d9700f 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -13,9 +13,10 @@ Test Profiles: Matrix Types: Full: All server versions, python runtimes tested with and without Cython. - Develop: Smaller matrix for dev purpose. Cassandra: All cassandra server versions. Dse: All dse server versions. + Smoke: CI-friendly configurations. Currently-supported Python version + modern Cassandra/DSE instances. + We also avoid cython since it's tested as part of the nightlies Parameters: @@ -29,13 +30,9 @@ import com.datastax.jenkins.drivers.python.Slack slack = new Slack() -// Define our predefined matrices -// -// Smoke tests are CI-friendly test configuration. Currently-supported Python version + modern C*/DSE instances. -// We also avoid cython since it's tested as part of the nightlies. DEFAULT_CASSANDRA = ['2.1', '2.2', '3.0', '3.11', '4.0'] DEFAULT_DSE = ['dse-5.0.15', 'dse-5.1.35', 'dse-6.0.18', 'dse-6.7.17', 'dse-6.8.30'] -DEFAULT_RUNTIME = ['2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'] +DEFAULT_RUNTIME = ['3.7.7', '3.8.3'] DEFAULT_CYTHON = ["True", "False"] matrices = [ "FULL": [ @@ -43,11 +40,6 @@ matrices = [ "RUNTIME": DEFAULT_RUNTIME, "CYTHON": DEFAULT_CYTHON ], - "DEVELOP": [ - "SERVER": ['2.1', '3.11', 'dse-6.8.30'], - "RUNTIME": ['2.7.18', '3.6.10'], - "CYTHON": DEFAULT_CYTHON - ], "CASSANDRA": [ "SERVER": DEFAULT_CASSANDRA, "RUNTIME": DEFAULT_RUNTIME, @@ -394,8 +386,9 @@ def describeBuild(buildContext) { } } -def scheduleTriggerJobName() { - "drivers/python/oss/master/disabled" +// branch pattern for cron +def branchPatternCron() { + ~"(master)" } pipeline { @@ -460,7 +453,7 @@ pipeline {
''') choice( name: 'MATRIX', - choices: ['DEFAULT', 'SMOKE', 'FULL', 'DEVELOP', 'CASSANDRA', 'DSE'], + choices: ['DEFAULT', 'SMOKE', 'FULL', 'CASSANDRA', 'DSE'], description: '''

The matrix for the build.

@@ -481,10 +474,6 @@ pipeline { - - - - @@ -629,10 +618,10 @@ pipeline { } triggers { - parameterizedCron((scheduleTriggerJobName() == env.JOB_NAME) ? """ + parameterizedCron(branchPatternCron().matcher(env.BRANCH_NAME).matches() ? """ # Every weeknight (Monday - Friday) around 4:00 AM - # These schedules will run with and without Cython enabled for Python v2.7.18 and v3.5.9 - H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.18 3.5.9;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1.35 dse-6.0.18 dse-6.7.17 + # These schedules will run with and without Cython enabled for Python 3.7.7 and 3.8.3 + H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.7.7 3.8.3;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1.35 dse-6.0.18 dse-6.7.17 """ : "") } From 5ea3f4b2b6258118312bec6c43f10729165ed7d5 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Thu, 1 Jun 2023 15:34:15 -0500 Subject: [PATCH 1923/2005] PYTHON-1351 Convert cryptography to an optional dependency (#1164) --- cassandra/column_encryption/_policies.py | 126 +++++++++++++++ cassandra/column_encryption/policies.py | 20 +++ cassandra/policies.py | 108 +------------ docs/column_encryption.rst | 3 +- docs/installation.rst | 27 +++- requirements.txt | 1 - setup.py | 9 +- test-datastax-requirements.txt | 1 + .../column_encryption/test_policies.py | 94 +++++++++++ tests/integration/standard/test_policies.py | 75 +-------- tests/unit/column_encryption/test_policies.py | 149 ++++++++++++++++++ tests/unit/test_policies.py | 135 +--------------- 12 files changed, 424 insertions(+), 324 deletions(-) create mode 100644 cassandra/column_encryption/_policies.py create mode 100644 cassandra/column_encryption/policies.py create mode 100644 tests/integration/standard/column_encryption/test_policies.py create mode 100644 tests/unit/column_encryption/test_policies.py diff --git a/cassandra/column_encryption/_policies.py b/cassandra/column_encryption/_policies.py new file mode 100644 index 0000000000..e049ba2d22 --- /dev/null +++ b/cassandra/column_encryption/_policies.py @@ -0,0 +1,126 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import namedtuple +from functools import lru_cache + +import logging +import os + +log = logging.getLogger(__name__) + +from cassandra.cqltypes import _cqltypes +from cassandra.policies import ColumnEncryptionPolicy + +from cryptography.hazmat.primitives import padding +from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes + +AES256_BLOCK_SIZE = 128 +AES256_BLOCK_SIZE_BYTES = int(AES256_BLOCK_SIZE / 8) +AES256_KEY_SIZE = 256 +AES256_KEY_SIZE_BYTES = int(AES256_KEY_SIZE / 8) + +ColData = namedtuple('ColData', ['key','type']) + +class AES256ColumnEncryptionPolicy(ColumnEncryptionPolicy): + + # CBC uses an IV that's the same size as the block size + # + # TODO: Need to find some way to expose mode options + # (CBC etc.) without leaking classes from the underlying + # impl here + def __init__(self, mode = modes.CBC, iv = os.urandom(AES256_BLOCK_SIZE_BYTES)): + + self.mode = mode + self.iv = iv + + # ColData for a given ColDesc is always preserved. We only create a Cipher + # when there's an actual need to for a given ColDesc + self.coldata = {} + self.ciphers = {} + + def encrypt(self, coldesc, obj_bytes): + + # AES256 has a 128-bit block size so if the input bytes don't align perfectly on + # those blocks we have to pad them. There's plenty of room for optimization here: + # + # * Instances of the PKCS7 padder should be managed in a bounded pool + # * It would be nice if we could get a flag from encrypted data to indicate + # whether it was padded or not + # * Might be able to make this happen with a leading block of flags in encrypted data + padder = padding.PKCS7(AES256_BLOCK_SIZE).padder() + padded_bytes = padder.update(obj_bytes) + padder.finalize() + + cipher = self._get_cipher(coldesc) + encryptor = cipher.encryptor() + return encryptor.update(padded_bytes) + encryptor.finalize() + + def decrypt(self, coldesc, encrypted_bytes): + + cipher = self._get_cipher(coldesc) + decryptor = cipher.decryptor() + padded_bytes = decryptor.update(encrypted_bytes) + decryptor.finalize() + + unpadder = padding.PKCS7(AES256_BLOCK_SIZE).unpadder() + return unpadder.update(padded_bytes) + unpadder.finalize() + + def add_column(self, coldesc, key, type): + + if not coldesc: + raise ValueError("ColDesc supplied to add_column cannot be None") + if not key: + raise ValueError("Key supplied to add_column cannot be None") + if not type: + raise ValueError("Type supplied to add_column cannot be None") + if type not in _cqltypes.keys(): + raise ValueError("Type %s is not a supported type".format(type)) + if not len(key) == AES256_KEY_SIZE_BYTES: + raise ValueError("AES256 column encryption policy expects a 256-bit encryption key") + self.coldata[coldesc] = ColData(key, _cqltypes[type]) + + def contains_column(self, coldesc): + return coldesc in self.coldata + + def encode_and_encrypt(self, coldesc, obj): + if not coldesc: + raise ValueError("ColDesc supplied to encode_and_encrypt cannot be None") + if not obj: + raise ValueError("Object supplied to encode_and_encrypt cannot be None") + coldata = self.coldata.get(coldesc) + if not coldata: + raise ValueError("Could not find ColData for ColDesc %s".format(coldesc)) + return self.encrypt(coldesc, coldata.type.serialize(obj, None)) + + def cache_info(self): + return AES256ColumnEncryptionPolicy._build_cipher.cache_info() + + def column_type(self, coldesc): + return self.coldata[coldesc].type + + def _get_cipher(self, coldesc): + """ + Access relevant state from this instance necessary to create a Cipher and then get one, + hopefully returning a cached instance if we've already done so (and it hasn't been evicted) + """ + + try: + coldata = self.coldata[coldesc] + return AES256ColumnEncryptionPolicy._build_cipher(coldata.key, self.mode, self.iv) + except KeyError: + raise ValueError("Could not find column {}".format(coldesc)) + + # Explicitly use a class method here to avoid caching self + @lru_cache(maxsize=128) + def _build_cipher(key, mode, iv): + return Cipher(algorithms.AES256(key), mode(iv)) diff --git a/cassandra/column_encryption/policies.py b/cassandra/column_encryption/policies.py new file mode 100644 index 0000000000..770084bd48 --- /dev/null +++ b/cassandra/column_encryption/policies.py @@ -0,0 +1,20 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import cryptography + from cassandra.column_encryption._policies import * +except ImportError: + # Cryptography is not installed + pass diff --git a/cassandra/policies.py b/cassandra/policies.py index 36063abafe..c60e558465 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -16,30 +16,22 @@ from functools import lru_cache from itertools import islice, cycle, groupby, repeat import logging -import os from random import randint, shuffle from threading import Lock import socket import warnings -from cryptography.hazmat.primitives import padding -from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes +log = logging.getLogger(__name__) from cassandra import WriteType as WT -from cassandra.cqltypes import _cqltypes - # This is done this way because WriteType was originally # defined here and in order not to break the API. # It may removed in the next mayor. WriteType = WT - from cassandra import ConsistencyLevel, OperationTimedOut -log = logging.getLogger(__name__) - - class HostDistance(object): """ A measure of how "distant" a node is from the client, which @@ -1192,7 +1184,6 @@ def _rethrow(self, *args, **kwargs): ColDesc = namedtuple('ColDesc', ['ks', 'table', 'col']) -ColData = namedtuple('ColData', ['key','type']) class ColumnEncryptionPolicy(object): """ @@ -1249,100 +1240,3 @@ def encode_and_encrypt(self, coldesc, obj): statements. """ raise NotImplementedError() - -AES256_BLOCK_SIZE = 128 -AES256_BLOCK_SIZE_BYTES = int(AES256_BLOCK_SIZE / 8) -AES256_KEY_SIZE = 256 -AES256_KEY_SIZE_BYTES = int(AES256_KEY_SIZE / 8) - -class AES256ColumnEncryptionPolicy(ColumnEncryptionPolicy): - - # CBC uses an IV that's the same size as the block size - # - # TODO: Need to find some way to expose mode options - # (CBC etc.) without leaking classes from the underlying - # impl here - def __init__(self, mode = modes.CBC, iv = os.urandom(AES256_BLOCK_SIZE_BYTES)): - - self.mode = mode - self.iv = iv - - # ColData for a given ColDesc is always preserved. We only create a Cipher - # when there's an actual need to for a given ColDesc - self.coldata = {} - self.ciphers = {} - - def encrypt(self, coldesc, obj_bytes): - - # AES256 has a 128-bit block size so if the input bytes don't align perfectly on - # those blocks we have to pad them. There's plenty of room for optimization here: - # - # * Instances of the PKCS7 padder should be managed in a bounded pool - # * It would be nice if we could get a flag from encrypted data to indicate - # whether it was padded or not - # * Might be able to make this happen with a leading block of flags in encrypted data - padder = padding.PKCS7(AES256_BLOCK_SIZE).padder() - padded_bytes = padder.update(obj_bytes) + padder.finalize() - - cipher = self._get_cipher(coldesc) - encryptor = cipher.encryptor() - return encryptor.update(padded_bytes) + encryptor.finalize() - - def decrypt(self, coldesc, encrypted_bytes): - - cipher = self._get_cipher(coldesc) - decryptor = cipher.decryptor() - padded_bytes = decryptor.update(encrypted_bytes) + decryptor.finalize() - - unpadder = padding.PKCS7(AES256_BLOCK_SIZE).unpadder() - return unpadder.update(padded_bytes) + unpadder.finalize() - - def add_column(self, coldesc, key, type): - - if not coldesc: - raise ValueError("ColDesc supplied to add_column cannot be None") - if not key: - raise ValueError("Key supplied to add_column cannot be None") - if not type: - raise ValueError("Type supplied to add_column cannot be None") - if type not in _cqltypes.keys(): - raise ValueError("Type %s is not a supported type".format(type)) - if not len(key) == AES256_KEY_SIZE_BYTES: - raise ValueError("AES256 column encryption policy expects a 256-bit encryption key") - self.coldata[coldesc] = ColData(key, _cqltypes[type]) - - def contains_column(self, coldesc): - return coldesc in self.coldata - - def encode_and_encrypt(self, coldesc, obj): - if not coldesc: - raise ValueError("ColDesc supplied to encode_and_encrypt cannot be None") - if not obj: - raise ValueError("Object supplied to encode_and_encrypt cannot be None") - coldata = self.coldata.get(coldesc) - if not coldata: - raise ValueError("Could not find ColData for ColDesc %s".format(coldesc)) - return self.encrypt(coldesc, coldata.type.serialize(obj, None)) - - def cache_info(self): - return AES256ColumnEncryptionPolicy._build_cipher.cache_info() - - def column_type(self, coldesc): - return self.coldata[coldesc].type - - def _get_cipher(self, coldesc): - """ - Access relevant state from this instance necessary to create a Cipher and then get one, - hopefully returning a cached instance if we've already done so (and it hasn't been evicted) - """ - - try: - coldata = self.coldata[coldesc] - return AES256ColumnEncryptionPolicy._build_cipher(coldata.key, self.mode, self.iv) - except KeyError: - raise ValueError("Could not find column {}".format(coldesc)) - - # Explicitly use a class method here to avoid caching self - @lru_cache(maxsize=128) - def _build_cipher(key, mode, iv): - return Cipher(algorithms.AES256(key), mode(iv)) diff --git a/docs/column_encryption.rst b/docs/column_encryption.rst index 1392972fa6..5cfb736c1f 100644 --- a/docs/column_encryption.rst +++ b/docs/column_encryption.rst @@ -24,7 +24,8 @@ when it's created. import os - from cassandra.policies import ColDesc, AES256ColumnEncryptionPolicy, AES256_KEY_SIZE_BYTES + from cassandra.policies import ColDesc + from cassandra.column_encryption.policies import AES256ColumnEncryptionPolicy, AES256_KEY_SIZE_BYTES key = os.urandom(AES256_KEY_SIZE_BYTES) cl_policy = AES256ColumnEncryptionPolicy() diff --git a/docs/installation.rst b/docs/installation.rst index 3855383602..bea6d6ece5 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -3,7 +3,7 @@ Installation Supported Platforms ------------------- -Python 2.7, 3.5, 3.6, 3.7 and 3.8 are supported. Both CPython (the standard Python +Python 3.7 and 3.8 are supported. Both CPython (the standard Python implementation) and `PyPy `_ are supported and tested. Linux, OSX, and Windows are supported. @@ -26,7 +26,7 @@ To check if the installation was successful, you can run:: python -c 'import cassandra; print cassandra.__version__' -It should print something like "3.22.0". +It should print something like "3.27.0". .. _installation-datastax-graph: @@ -34,7 +34,7 @@ It should print something like "3.22.0". --------------------------- The driver provides an optional fluent graph API that depends on Apache TinkerPop (gremlinpython). It is not installed by default. To be able to build Gremlin traversals, you need to install -the `graph` requirements:: +the `graph` extra:: pip install cassandra-driver[graph] @@ -67,6 +67,27 @@ support this:: pip install scales +*Optional:* Column-Level Encryption (CLE) Support +-------------------------------------------------- +The driver has built-in support for client-side encryption and +decryption of data. For more, see :doc:`column_encryption`. + +CLE depends on the Python `cryptography `_ module. +When installing Python driver 3.27.0. the `cryptography` module is +also downloaded and installed. +If you are using Python driver 3.28.0 or later and want to use CLE, you must +install the `cryptography `_ module. + +You can install this module along with the driver by specifying the `cle` extra:: + + pip install cassandra-driver[cle] + +Alternatively, you can also install the module directly via `pip`:: + + pip install cryptography + +Any version of cryptography >= 35.0 will work for the CLE feature. You can find additional +details at `PYTHON-1351 `_ Speeding Up Installation ^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/requirements.txt b/requirements.txt index 44356365ce..f784fba1b9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,3 @@ -cryptography >= 35.0 geomet>=0.1,<0.3 six >=1.9 futures <=2.2.0 diff --git a/setup.py b/setup.py index ba7cd92f20..30ce602c3e 100644 --- a/setup.py +++ b/setup.py @@ -402,11 +402,11 @@ def run_setup(extensions): sys.stderr.write("Bypassing Cython setup requirement\n") dependencies = ['six >=1.9', - 'geomet>=0.1,<0.3', - 'cryptography>=35.0'] + 'geomet>=0.1,<0.3'] _EXTRAS_REQUIRE = { - 'graph': ['gremlinpython==3.4.6'] + 'graph': ['gremlinpython==3.4.6'], + 'cle': ['cryptography>=35.0'] } setup( @@ -424,7 +424,8 @@ def run_setup(extensions): packages=[ 'cassandra', 'cassandra.io', 'cassandra.cqlengine', 'cassandra.graph', 'cassandra.datastax', 'cassandra.datastax.insights', 'cassandra.datastax.graph', - 'cassandra.datastax.graph.fluent', 'cassandra.datastax.cloud' + 'cassandra.datastax.graph.fluent', 'cassandra.datastax.cloud', + "cassandra.column_encryption" ], keywords='cassandra,cql,orm,dse,graph', include_package_data=True, diff --git a/test-datastax-requirements.txt b/test-datastax-requirements.txt index 3a47b8de16..038a8b571d 100644 --- a/test-datastax-requirements.txt +++ b/test-datastax-requirements.txt @@ -1,3 +1,4 @@ -r test-requirements.txt kerberos gremlinpython==3.4.6 +cryptography >= 35.0 diff --git a/tests/integration/standard/column_encryption/test_policies.py b/tests/integration/standard/column_encryption/test_policies.py new file mode 100644 index 0000000000..87bfde3c31 --- /dev/null +++ b/tests/integration/standard/column_encryption/test_policies.py @@ -0,0 +1,94 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import unittest + +from tests.integration import use_singledc, TestCluster + +from cassandra.policies import ColDesc + +from cassandra.column_encryption.policies import AES256ColumnEncryptionPolicy, \ + AES256_KEY_SIZE_BYTES + +def setup_module(): + use_singledc() + +class ColumnEncryptionPolicyTest(unittest.TestCase): + + def _recreate_keyspace(self, session): + session.execute("drop keyspace if exists foo") + session.execute("CREATE KEYSPACE foo WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}") + session.execute("CREATE TABLE foo.bar(encrypted blob, unencrypted int, primary key(unencrypted))") + + def test_end_to_end_prepared(self): + + # We only currently perform testing on a single type/expected value pair since CLE functionality is essentially + # independent of the underlying type. We intercept data after it's been encoded when it's going out and before it's + # encoded when coming back; the actual types of the data involved don't impact us. + expected = 12345 + expected_type = "int" + + key = os.urandom(AES256_KEY_SIZE_BYTES) + cl_policy = AES256ColumnEncryptionPolicy() + col_desc = ColDesc('foo','bar','encrypted') + cl_policy.add_column(col_desc, key, expected_type) + + cluster = TestCluster(column_encryption_policy=cl_policy) + session = cluster.connect() + self._recreate_keyspace(session) + + prepared = session.prepare("insert into foo.bar (encrypted, unencrypted) values (?,?)") + session.execute(prepared, (expected,expected)) + + # A straight select from the database will now return the decrypted bits. We select both encrypted and unencrypted + # values here to confirm that we don't interfere with regular processing of unencrypted vals. + (encrypted,unencrypted) = session.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() + self.assertEquals(expected, encrypted) + self.assertEquals(expected, unencrypted) + + # Confirm the same behaviour from a subsequent prepared statement as well + prepared = session.prepare("select encrypted, unencrypted from foo.bar where unencrypted = ? allow filtering") + (encrypted,unencrypted) = session.execute(prepared, [expected]).one() + self.assertEquals(expected, encrypted) + self.assertEquals(expected, unencrypted) + + def test_end_to_end_simple(self): + + expected = 67890 + expected_type = "int" + + key = os.urandom(AES256_KEY_SIZE_BYTES) + cl_policy = AES256ColumnEncryptionPolicy() + col_desc = ColDesc('foo','bar','encrypted') + cl_policy.add_column(col_desc, key, expected_type) + + cluster = TestCluster(column_encryption_policy=cl_policy) + session = cluster.connect() + self._recreate_keyspace(session) + + # Use encode_and_encrypt helper function to populate date + session.execute("insert into foo.bar (encrypted, unencrypted) values (%s,%s)",(cl_policy.encode_and_encrypt(col_desc, expected), expected)) + + # A straight select from the database will now return the decrypted bits. We select both encrypted and unencrypted + # values here to confirm that we don't interfere with regular processing of unencrypted vals. + (encrypted,unencrypted) = session.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() + self.assertEquals(expected, encrypted) + self.assertEquals(expected, unencrypted) + + # Confirm the same behaviour from a subsequent prepared statement as well + prepared = session.prepare("select encrypted, unencrypted from foo.bar where unencrypted = ? allow filtering") + (encrypted,unencrypted) = session.execute(prepared, [expected]).one() + self.assertEquals(expected, encrypted) + self.assertEquals(expected, unencrypted) diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index 8f46306236..696bc46856 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -12,14 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from decimal import Decimal -import os -import random import unittest from cassandra.cluster import ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.policies import HostFilterPolicy, RoundRobinPolicy, SimpleConvictionPolicy, \ - WhiteListRoundRobinPolicy, ColDesc, AES256ColumnEncryptionPolicy, AES256_KEY_SIZE_BYTES + WhiteListRoundRobinPolicy from cassandra.pool import Host from cassandra.connection import DefaultEndPoint @@ -27,11 +24,9 @@ from concurrent.futures import wait as wait_futures - def setup_module(): use_singledc() - class HostFilterPolicyTests(unittest.TestCase): def test_predicate_changes(self): @@ -93,71 +88,3 @@ def test_only_connects_to_subset(self): queried_hosts.update(response.response_future.attempted_hosts) queried_hosts = set(host.address for host in queried_hosts) self.assertEqual(queried_hosts, only_connect_hosts) - -class ColumnEncryptionPolicyTest(unittest.TestCase): - - def _recreate_keyspace(self, session): - session.execute("drop keyspace if exists foo") - session.execute("CREATE KEYSPACE foo WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}") - session.execute("CREATE TABLE foo.bar(encrypted blob, unencrypted int, primary key(unencrypted))") - - def test_end_to_end_prepared(self): - - # We only currently perform testing on a single type/expected value pair since CLE functionality is essentially - # independent of the underlying type. We intercept data after it's been encoded when it's going out and before it's - # encoded when coming back; the actual types of the data involved don't impact us. - expected = 12345 - expected_type = "int" - - key = os.urandom(AES256_KEY_SIZE_BYTES) - cl_policy = AES256ColumnEncryptionPolicy() - col_desc = ColDesc('foo','bar','encrypted') - cl_policy.add_column(col_desc, key, expected_type) - - cluster = TestCluster(column_encryption_policy=cl_policy) - session = cluster.connect() - self._recreate_keyspace(session) - - prepared = session.prepare("insert into foo.bar (encrypted, unencrypted) values (?,?)") - session.execute(prepared, (expected,expected)) - - # A straight select from the database will now return the decrypted bits. We select both encrypted and unencrypted - # values here to confirm that we don't interfere with regular processing of unencrypted vals. - (encrypted,unencrypted) = session.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() - self.assertEquals(expected, encrypted) - self.assertEquals(expected, unencrypted) - - # Confirm the same behaviour from a subsequent prepared statement as well - prepared = session.prepare("select encrypted, unencrypted from foo.bar where unencrypted = ? allow filtering") - (encrypted,unencrypted) = session.execute(prepared, [expected]).one() - self.assertEquals(expected, encrypted) - self.assertEquals(expected, unencrypted) - - def test_end_to_end_simple(self): - - expected = 67890 - expected_type = "int" - - key = os.urandom(AES256_KEY_SIZE_BYTES) - cl_policy = AES256ColumnEncryptionPolicy() - col_desc = ColDesc('foo','bar','encrypted') - cl_policy.add_column(col_desc, key, expected_type) - - cluster = TestCluster(column_encryption_policy=cl_policy) - session = cluster.connect() - self._recreate_keyspace(session) - - # Use encode_and_encrypt helper function to populate date - session.execute("insert into foo.bar (encrypted, unencrypted) values (%s,%s)",(cl_policy.encode_and_encrypt(col_desc, expected), expected)) - - # A straight select from the database will now return the decrypted bits. We select both encrypted and unencrypted - # values here to confirm that we don't interfere with regular processing of unencrypted vals. - (encrypted,unencrypted) = session.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() - self.assertEquals(expected, encrypted) - self.assertEquals(expected, unencrypted) - - # Confirm the same behaviour from a subsequent prepared statement as well - prepared = session.prepare("select encrypted, unencrypted from foo.bar where unencrypted = ? allow filtering") - (encrypted,unencrypted) = session.execute(prepared, [expected]).one() - self.assertEquals(expected, encrypted) - self.assertEquals(expected, unencrypted) diff --git a/tests/unit/column_encryption/test_policies.py b/tests/unit/column_encryption/test_policies.py new file mode 100644 index 0000000000..f6b06a3ade --- /dev/null +++ b/tests/unit/column_encryption/test_policies.py @@ -0,0 +1,149 @@ +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import unittest + +from cassandra.policies import ColDesc +from cassandra.column_encryption.policies import AES256ColumnEncryptionPolicy, \ + AES256_BLOCK_SIZE_BYTES, AES256_KEY_SIZE_BYTES + +class AES256ColumnEncryptionPolicyTest(unittest.TestCase): + + def _random_block(self): + return os.urandom(AES256_BLOCK_SIZE_BYTES) + + def _random_key(self): + return os.urandom(AES256_KEY_SIZE_BYTES) + + def _test_round_trip(self, bytes): + coldesc = ColDesc('ks1','table1','col1') + policy = AES256ColumnEncryptionPolicy() + policy.add_column(coldesc, self._random_key(), "blob") + encrypted_bytes = policy.encrypt(coldesc, bytes) + self.assertEqual(bytes, policy.decrypt(coldesc, encrypted_bytes)) + + def test_no_padding_necessary(self): + self._test_round_trip(self._random_block()) + + def test_some_padding_required(self): + for byte_size in range(1,AES256_BLOCK_SIZE_BYTES - 1): + bytes = os.urandom(byte_size) + self._test_round_trip(bytes) + for byte_size in range(AES256_BLOCK_SIZE_BYTES + 1,(2 * AES256_BLOCK_SIZE_BYTES) - 1): + bytes = os.urandom(byte_size) + self._test_round_trip(bytes) + + def test_add_column_invalid_key_size_raises(self): + coldesc = ColDesc('ks1','table1','col1') + policy = AES256ColumnEncryptionPolicy() + for key_size in range(1,AES256_KEY_SIZE_BYTES - 1): + with self.assertRaises(ValueError): + policy.add_column(coldesc, os.urandom(key_size), "blob") + for key_size in range(AES256_KEY_SIZE_BYTES + 1,(2 * AES256_KEY_SIZE_BYTES) - 1): + with self.assertRaises(ValueError): + policy.add_column(coldesc, os.urandom(key_size), "blob") + + def test_add_column_null_coldesc_raises(self): + with self.assertRaises(ValueError): + policy = AES256ColumnEncryptionPolicy() + policy.add_column(None, self._random_block(), "blob") + + def test_add_column_null_key_raises(self): + with self.assertRaises(ValueError): + policy = AES256ColumnEncryptionPolicy() + coldesc = ColDesc('ks1','table1','col1') + policy.add_column(coldesc, None, "blob") + + def test_add_column_null_type_raises(self): + with self.assertRaises(ValueError): + policy = AES256ColumnEncryptionPolicy() + coldesc = ColDesc('ks1','table1','col1') + policy.add_column(coldesc, self._random_block(), None) + + def test_add_column_unknown_type_raises(self): + with self.assertRaises(ValueError): + policy = AES256ColumnEncryptionPolicy() + coldesc = ColDesc('ks1','table1','col1') + policy.add_column(coldesc, self._random_block(), "foobar") + + def test_encode_and_encrypt_null_coldesc_raises(self): + with self.assertRaises(ValueError): + policy = AES256ColumnEncryptionPolicy() + coldesc = ColDesc('ks1','table1','col1') + policy.add_column(coldesc, self._random_key(), "blob") + policy.encode_and_encrypt(None, self._random_block()) + + def test_encode_and_encrypt_null_obj_raises(self): + with self.assertRaises(ValueError): + policy = AES256ColumnEncryptionPolicy() + coldesc = ColDesc('ks1','table1','col1') + policy.add_column(coldesc, self._random_key(), "blob") + policy.encode_and_encrypt(coldesc, None) + + def test_encode_and_encrypt_unknown_coldesc_raises(self): + with self.assertRaises(ValueError): + policy = AES256ColumnEncryptionPolicy() + coldesc = ColDesc('ks1','table1','col1') + policy.add_column(coldesc, self._random_key(), "blob") + policy.encode_and_encrypt(ColDesc('ks2','table2','col2'), self._random_block()) + + def test_contains_column(self): + coldesc = ColDesc('ks1','table1','col1') + policy = AES256ColumnEncryptionPolicy() + policy.add_column(coldesc, self._random_key(), "blob") + self.assertTrue(policy.contains_column(coldesc)) + self.assertFalse(policy.contains_column(ColDesc('ks2','table1','col1'))) + self.assertFalse(policy.contains_column(ColDesc('ks1','table2','col1'))) + self.assertFalse(policy.contains_column(ColDesc('ks1','table1','col2'))) + self.assertFalse(policy.contains_column(ColDesc('ks2','table2','col2'))) + + def test_encrypt_unknown_column(self): + with self.assertRaises(ValueError): + policy = AES256ColumnEncryptionPolicy() + coldesc = ColDesc('ks1','table1','col1') + policy.add_column(coldesc, self._random_key(), "blob") + policy.encrypt(ColDesc('ks2','table2','col2'), self._random_block()) + + def test_decrypt_unknown_column(self): + policy = AES256ColumnEncryptionPolicy() + coldesc = ColDesc('ks1','table1','col1') + policy.add_column(coldesc, self._random_key(), "blob") + encrypted_bytes = policy.encrypt(coldesc, self._random_block()) + with self.assertRaises(ValueError): + policy.decrypt(ColDesc('ks2','table2','col2'), encrypted_bytes) + + def test_cache_info(self): + coldesc1 = ColDesc('ks1','table1','col1') + coldesc2 = ColDesc('ks2','table2','col2') + coldesc3 = ColDesc('ks3','table3','col3') + policy = AES256ColumnEncryptionPolicy() + for coldesc in [coldesc1, coldesc2, coldesc3]: + policy.add_column(coldesc, self._random_key(), "blob") + + # First run for this coldesc should be a miss, everything else should be a cache hit + for _ in range(10): + policy.encrypt(coldesc1, self._random_block()) + cache_info = policy.cache_info() + self.assertEqual(cache_info.hits, 9) + self.assertEqual(cache_info.misses, 1) + self.assertEqual(cache_info.maxsize, 128) + + # Important note: we're measuring the size of the cache of ciphers, NOT stored + # keys. We won't have a cipher here until we actually encrypt something + self.assertEqual(cache_info.currsize, 1) + policy.encrypt(coldesc2, self._random_block()) + self.assertEqual(policy.cache_info().currsize, 2) + policy.encrypt(coldesc3, self._random_block()) + self.assertEqual(policy.cache_info().currsize, 3) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 451d5c50c9..ec004ca9fe 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -16,7 +16,6 @@ from itertools import islice, cycle from mock import Mock, patch, call -import os from random import randint import six from six.moves._thread import LockType @@ -27,7 +26,6 @@ from cassandra import ConsistencyLevel from cassandra.cluster import Cluster from cassandra.connection import DefaultEndPoint -from cassandra.cqltypes import BytesType from cassandra.metadata import Metadata from cassandra.policies import (RoundRobinPolicy, WhiteListRoundRobinPolicy, DCAwareRoundRobinPolicy, TokenAwarePolicy, SimpleConvictionPolicy, @@ -35,9 +33,7 @@ RetryPolicy, WriteType, DowngradingConsistencyRetryPolicy, ConstantReconnectionPolicy, LoadBalancingPolicy, ConvictionPolicy, ReconnectionPolicy, FallthroughRetryPolicy, - IdentityTranslator, EC2MultiRegionTranslator, HostFilterPolicy, - AES256ColumnEncryptionPolicy, ColDesc, - AES256_BLOCK_SIZE_BYTES, AES256_KEY_SIZE_BYTES) + IdentityTranslator, EC2MultiRegionTranslator, HostFilterPolicy) from cassandra.pool import Host from cassandra.query import Statement @@ -1503,132 +1499,3 @@ def test_create_whitelist(self): # Only the filtered replicas should be allowed self.assertEqual(set(query_plan), {Host(DefaultEndPoint("127.0.0.1"), SimpleConvictionPolicy), Host(DefaultEndPoint("127.0.0.4"), SimpleConvictionPolicy)}) - -class AES256ColumnEncryptionPolicyTest(unittest.TestCase): - - def _random_block(self): - return os.urandom(AES256_BLOCK_SIZE_BYTES) - - def _random_key(self): - return os.urandom(AES256_KEY_SIZE_BYTES) - - def _test_round_trip(self, bytes): - coldesc = ColDesc('ks1','table1','col1') - policy = AES256ColumnEncryptionPolicy() - policy.add_column(coldesc, self._random_key(), "blob") - encrypted_bytes = policy.encrypt(coldesc, bytes) - self.assertEqual(bytes, policy.decrypt(coldesc, encrypted_bytes)) - - def test_no_padding_necessary(self): - self._test_round_trip(self._random_block()) - - def test_some_padding_required(self): - for byte_size in range(1,AES256_BLOCK_SIZE_BYTES - 1): - bytes = os.urandom(byte_size) - self._test_round_trip(bytes) - for byte_size in range(AES256_BLOCK_SIZE_BYTES + 1,(2 * AES256_BLOCK_SIZE_BYTES) - 1): - bytes = os.urandom(byte_size) - self._test_round_trip(bytes) - - def test_add_column_invalid_key_size_raises(self): - coldesc = ColDesc('ks1','table1','col1') - policy = AES256ColumnEncryptionPolicy() - for key_size in range(1,AES256_KEY_SIZE_BYTES - 1): - with self.assertRaises(ValueError): - policy.add_column(coldesc, os.urandom(key_size), "blob") - for key_size in range(AES256_KEY_SIZE_BYTES + 1,(2 * AES256_KEY_SIZE_BYTES) - 1): - with self.assertRaises(ValueError): - policy.add_column(coldesc, os.urandom(key_size), "blob") - - def test_add_column_null_coldesc_raises(self): - with self.assertRaises(ValueError): - policy = AES256ColumnEncryptionPolicy() - policy.add_column(None, self._random_block(), "blob") - - def test_add_column_null_key_raises(self): - with self.assertRaises(ValueError): - policy = AES256ColumnEncryptionPolicy() - coldesc = ColDesc('ks1','table1','col1') - policy.add_column(coldesc, None, "blob") - - def test_add_column_null_type_raises(self): - with self.assertRaises(ValueError): - policy = AES256ColumnEncryptionPolicy() - coldesc = ColDesc('ks1','table1','col1') - policy.add_column(coldesc, self._random_block(), None) - - def test_add_column_unknown_type_raises(self): - with self.assertRaises(ValueError): - policy = AES256ColumnEncryptionPolicy() - coldesc = ColDesc('ks1','table1','col1') - policy.add_column(coldesc, self._random_block(), "foobar") - - def test_encode_and_encrypt_null_coldesc_raises(self): - with self.assertRaises(ValueError): - policy = AES256ColumnEncryptionPolicy() - coldesc = ColDesc('ks1','table1','col1') - policy.add_column(coldesc, self._random_key(), "blob") - policy.encode_and_encrypt(None, self._random_block()) - - def test_encode_and_encrypt_null_obj_raises(self): - with self.assertRaises(ValueError): - policy = AES256ColumnEncryptionPolicy() - coldesc = ColDesc('ks1','table1','col1') - policy.add_column(coldesc, self._random_key(), "blob") - policy.encode_and_encrypt(coldesc, None) - - def test_encode_and_encrypt_unknown_coldesc_raises(self): - with self.assertRaises(ValueError): - policy = AES256ColumnEncryptionPolicy() - coldesc = ColDesc('ks1','table1','col1') - policy.add_column(coldesc, self._random_key(), "blob") - policy.encode_and_encrypt(ColDesc('ks2','table2','col2'), self._random_block()) - - def test_contains_column(self): - coldesc = ColDesc('ks1','table1','col1') - policy = AES256ColumnEncryptionPolicy() - policy.add_column(coldesc, self._random_key(), "blob") - self.assertTrue(policy.contains_column(coldesc)) - self.assertFalse(policy.contains_column(ColDesc('ks2','table1','col1'))) - self.assertFalse(policy.contains_column(ColDesc('ks1','table2','col1'))) - self.assertFalse(policy.contains_column(ColDesc('ks1','table1','col2'))) - self.assertFalse(policy.contains_column(ColDesc('ks2','table2','col2'))) - - def test_encrypt_unknown_column(self): - with self.assertRaises(ValueError): - policy = AES256ColumnEncryptionPolicy() - coldesc = ColDesc('ks1','table1','col1') - policy.add_column(coldesc, self._random_key(), "blob") - policy.encrypt(ColDesc('ks2','table2','col2'), self._random_block()) - - def test_decrypt_unknown_column(self): - policy = AES256ColumnEncryptionPolicy() - coldesc = ColDesc('ks1','table1','col1') - policy.add_column(coldesc, self._random_key(), "blob") - encrypted_bytes = policy.encrypt(coldesc, self._random_block()) - with self.assertRaises(ValueError): - policy.decrypt(ColDesc('ks2','table2','col2'), encrypted_bytes) - - def test_cache_info(self): - coldesc1 = ColDesc('ks1','table1','col1') - coldesc2 = ColDesc('ks2','table2','col2') - coldesc3 = ColDesc('ks3','table3','col3') - policy = AES256ColumnEncryptionPolicy() - for coldesc in [coldesc1, coldesc2, coldesc3]: - policy.add_column(coldesc, self._random_key(), "blob") - - # First run for this coldesc should be a miss, everything else should be a cache hit - for _ in range(10): - policy.encrypt(coldesc1, self._random_block()) - cache_info = policy.cache_info() - self.assertEqual(cache_info.hits, 9) - self.assertEqual(cache_info.misses, 1) - self.assertEqual(cache_info.maxsize, 128) - - # Important note: we're measuring the size of the cache of ciphers, NOT stored - # keys. We won't have a cipher here until we actually encrypt something - self.assertEqual(cache_info.currsize, 1) - policy.encrypt(coldesc2, self._random_block()) - self.assertEqual(policy.cache_info().currsize, 2) - policy.encrypt(coldesc3, self._random_block()) - self.assertEqual(policy.cache_info().currsize, 3) From 531a6c767aaaeb612ad9412ac3ff268e117aad2f Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Fri, 2 Jun 2023 10:52:01 -0500 Subject: [PATCH 1924/2005] PYTHON-1350 Store IV along with encrypted text when using column-level encryption (#1160) --- cassandra/column_encryption/_policies.py | 45 +++++++----- .../column_encryption/test_policies.py | 71 +++++++++++++++---- tests/unit/column_encryption/test_policies.py | 20 ++++++ 3 files changed, 105 insertions(+), 31 deletions(-) diff --git a/cassandra/column_encryption/_policies.py b/cassandra/column_encryption/_policies.py index e049ba2d22..ef8097bfbd 100644 --- a/cassandra/column_encryption/_policies.py +++ b/cassandra/column_encryption/_policies.py @@ -35,15 +35,27 @@ class AES256ColumnEncryptionPolicy(ColumnEncryptionPolicy): - # CBC uses an IV that's the same size as the block size - # - # TODO: Need to find some way to expose mode options - # (CBC etc.) without leaking classes from the underlying - # impl here - def __init__(self, mode = modes.CBC, iv = os.urandom(AES256_BLOCK_SIZE_BYTES)): - - self.mode = mode + # Fix block cipher mode for now. IV size is a function of block cipher used + # so fixing this avoids (possibly unnecessary) validation logic here. + mode = modes.CBC + + # "iv" param here expects a bytearray that's the same size as the block + # size for AES-256 (128 bits or 16 bytes). If none is provided a new one + # will be randomly generated, but in this case the IV should be recorded and + # preserved or else you will not be able to decrypt any data encrypted by this + # policy. + def __init__(self, iv=None): + + # CBC uses an IV that's the same size as the block size + # + # Avoid defining IV with a default arg in order to stay away from + # any issues around the caching of default args self.iv = iv + if self.iv: + if not len(self.iv) == AES256_BLOCK_SIZE_BYTES: + raise ValueError("This policy uses AES-256 with CBC mode and therefore expects a 128-bit initialization vector") + else: + self.iv = os.urandom(AES256_BLOCK_SIZE_BYTES) # ColData for a given ColDesc is always preserved. We only create a Cipher # when there's an actual need to for a given ColDesc @@ -64,11 +76,13 @@ def encrypt(self, coldesc, obj_bytes): cipher = self._get_cipher(coldesc) encryptor = cipher.encryptor() - return encryptor.update(padded_bytes) + encryptor.finalize() + return self.iv + encryptor.update(padded_bytes) + encryptor.finalize() - def decrypt(self, coldesc, encrypted_bytes): + def decrypt(self, coldesc, bytes): - cipher = self._get_cipher(coldesc) + iv = bytes[:AES256_BLOCK_SIZE_BYTES] + encrypted_bytes = bytes[AES256_BLOCK_SIZE_BYTES:] + cipher = self._get_cipher(coldesc, iv=iv) decryptor = cipher.decryptor() padded_bytes = decryptor.update(encrypted_bytes) + decryptor.finalize() @@ -108,19 +122,18 @@ def cache_info(self): def column_type(self, coldesc): return self.coldata[coldesc].type - def _get_cipher(self, coldesc): + def _get_cipher(self, coldesc, iv=None): """ Access relevant state from this instance necessary to create a Cipher and then get one, hopefully returning a cached instance if we've already done so (and it hasn't been evicted) """ - try: coldata = self.coldata[coldesc] - return AES256ColumnEncryptionPolicy._build_cipher(coldata.key, self.mode, self.iv) + return AES256ColumnEncryptionPolicy._build_cipher(coldata.key, iv or self.iv) except KeyError: raise ValueError("Could not find column {}".format(coldesc)) # Explicitly use a class method here to avoid caching self @lru_cache(maxsize=128) - def _build_cipher(key, mode, iv): - return Cipher(algorithms.AES256(key), mode(iv)) + def _build_cipher(key, iv): + return Cipher(algorithms.AES256(key), AES256ColumnEncryptionPolicy.mode(iv)) diff --git a/tests/integration/standard/column_encryption/test_policies.py b/tests/integration/standard/column_encryption/test_policies.py index 87bfde3c31..bb84c0352c 100644 --- a/tests/integration/standard/column_encryption/test_policies.py +++ b/tests/integration/standard/column_encryption/test_policies.py @@ -20,7 +20,7 @@ from cassandra.policies import ColDesc from cassandra.column_encryption.policies import AES256ColumnEncryptionPolicy, \ - AES256_KEY_SIZE_BYTES + AES256_KEY_SIZE_BYTES, AES256_BLOCK_SIZE_BYTES def setup_module(): use_singledc() @@ -32,25 +32,28 @@ def _recreate_keyspace(self, session): session.execute("CREATE KEYSPACE foo WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}") session.execute("CREATE TABLE foo.bar(encrypted blob, unencrypted int, primary key(unencrypted))") + def _create_policy(self, key, iv = None): + cl_policy = AES256ColumnEncryptionPolicy() + col_desc = ColDesc('foo','bar','encrypted') + cl_policy.add_column(col_desc, key, "int") + return (col_desc, cl_policy) + def test_end_to_end_prepared(self): # We only currently perform testing on a single type/expected value pair since CLE functionality is essentially # independent of the underlying type. We intercept data after it's been encoded when it's going out and before it's # encoded when coming back; the actual types of the data involved don't impact us. - expected = 12345 - expected_type = "int" + expected = 0 key = os.urandom(AES256_KEY_SIZE_BYTES) - cl_policy = AES256ColumnEncryptionPolicy() - col_desc = ColDesc('foo','bar','encrypted') - cl_policy.add_column(col_desc, key, expected_type) - + (_, cl_policy) = self._create_policy(key) cluster = TestCluster(column_encryption_policy=cl_policy) session = cluster.connect() self._recreate_keyspace(session) prepared = session.prepare("insert into foo.bar (encrypted, unencrypted) values (?,?)") - session.execute(prepared, (expected,expected)) + for i in range(100): + session.execute(prepared, (i, i)) # A straight select from the database will now return the decrypted bits. We select both encrypted and unencrypted # values here to confirm that we don't interfere with regular processing of unencrypted vals. @@ -66,20 +69,19 @@ def test_end_to_end_prepared(self): def test_end_to_end_simple(self): - expected = 67890 - expected_type = "int" + expected = 1 key = os.urandom(AES256_KEY_SIZE_BYTES) - cl_policy = AES256ColumnEncryptionPolicy() - col_desc = ColDesc('foo','bar','encrypted') - cl_policy.add_column(col_desc, key, expected_type) - + (col_desc, cl_policy) = self._create_policy(key) cluster = TestCluster(column_encryption_policy=cl_policy) session = cluster.connect() self._recreate_keyspace(session) # Use encode_and_encrypt helper function to populate date - session.execute("insert into foo.bar (encrypted, unencrypted) values (%s,%s)",(cl_policy.encode_and_encrypt(col_desc, expected), expected)) + for i in range(1,100): + self.assertIsNotNone(i) + encrypted = cl_policy.encode_and_encrypt(col_desc, i) + session.execute("insert into foo.bar (encrypted, unencrypted) values (%s,%s)", (encrypted, i)) # A straight select from the database will now return the decrypted bits. We select both encrypted and unencrypted # values here to confirm that we don't interfere with regular processing of unencrypted vals. @@ -92,3 +94,42 @@ def test_end_to_end_simple(self): (encrypted,unencrypted) = session.execute(prepared, [expected]).one() self.assertEquals(expected, encrypted) self.assertEquals(expected, unencrypted) + + def test_end_to_end_different_cle_contexts(self): + + expected = 2 + + key = os.urandom(AES256_KEY_SIZE_BYTES) + + # Simulate the creation of two AES256 policies at two different times. Python caches + # default param args at function definition time so a single value will be used any time + # the default val is used. Upshot is that within the same test we'll always have the same + # IV if we rely on the default args, so manually introduce some variation here to simulate + # what actually happens if you have two distinct sessions created at two different times. + iv1 = os.urandom(AES256_BLOCK_SIZE_BYTES) + (col_desc1, cl_policy1) = self._create_policy(key, iv=iv1) + cluster1 = TestCluster(column_encryption_policy=cl_policy1) + session1 = cluster1.connect() + self._recreate_keyspace(session1) + + # Use encode_and_encrypt helper function to populate date + for i in range(1,100): + self.assertIsNotNone(i) + encrypted = cl_policy1.encode_and_encrypt(col_desc1, i) + session1.execute("insert into foo.bar (encrypted, unencrypted) values (%s,%s)", (encrypted, i)) + session1.shutdown() + cluster1.shutdown() + + # Explicitly clear the class-level cache here; we're trying to simulate a second connection from a completely new process and + # that would entail not re-using any cached ciphers + AES256ColumnEncryptionPolicy._build_cipher.cache_clear() + cache_info = cl_policy1.cache_info() + self.assertEqual(cache_info.currsize, 0) + + iv2 = os.urandom(AES256_BLOCK_SIZE_BYTES) + (_, cl_policy2) = self._create_policy(key, iv=iv2) + cluster2 = TestCluster(column_encryption_policy=cl_policy2) + session2 = cluster2.connect() + (encrypted,unencrypted) = session2.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() + self.assertEquals(expected, encrypted) + self.assertEquals(expected, unencrypted) diff --git a/tests/unit/column_encryption/test_policies.py b/tests/unit/column_encryption/test_policies.py index f6b06a3ade..38136c69d4 100644 --- a/tests/unit/column_encryption/test_policies.py +++ b/tests/unit/column_encryption/test_policies.py @@ -55,6 +55,23 @@ def test_add_column_invalid_key_size_raises(self): with self.assertRaises(ValueError): policy.add_column(coldesc, os.urandom(key_size), "blob") + def test_add_column_invalid_iv_size_raises(self): + def test_iv_size(iv_size): + policy = AES256ColumnEncryptionPolicy(iv = os.urandom(iv_size)) + policy.add_column(coldesc, os.urandom(AES256_KEY_SIZE_BYTES), "blob") + policy.encrypt(coldesc, os.urandom(128)) + + coldesc = ColDesc('ks1','table1','col1') + for iv_size in range(1,AES256_BLOCK_SIZE_BYTES - 1): + with self.assertRaises(ValueError): + test_iv_size(iv_size) + for iv_size in range(AES256_BLOCK_SIZE_BYTES + 1,(2 * AES256_BLOCK_SIZE_BYTES) - 1): + with self.assertRaises(ValueError): + test_iv_size(iv_size) + + # Finally, confirm that the expected IV size has no issue + test_iv_size(AES256_BLOCK_SIZE_BYTES) + def test_add_column_null_coldesc_raises(self): with self.assertRaises(ValueError): policy = AES256ColumnEncryptionPolicy() @@ -125,6 +142,9 @@ def test_decrypt_unknown_column(self): policy.decrypt(ColDesc('ks2','table2','col2'), encrypted_bytes) def test_cache_info(self): + # Exclude any interference from tests above + AES256ColumnEncryptionPolicy._build_cipher.cache_clear() + coldesc1 = ColDesc('ks1','table1','col1') coldesc2 = ColDesc('ks2','table2','col2') coldesc3 = ColDesc('ks3','table3','col3') From 356d150358723607be1d9956a9bef684cd9cff0e Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Fri, 2 Jun 2023 14:57:09 -0500 Subject: [PATCH 1925/2005] PYTHON-1356 Create session-specific protocol handlers to contain session-specific CLE policies (#1165) --- cassandra/cluster.py | 15 +++++--- .../column_encryption/test_policies.py | 37 ++++++++++++++++++- 2 files changed, 45 insertions(+), 7 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 12b00c42db..57fc2d4e8c 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2545,12 +2545,6 @@ def __init__(self, cluster, hosts, keyspace=None): self.encoder = Encoder() - if self.cluster.column_encryption_policy is not None: - try: - self.client_protocol_handler.column_encryption_policy = self.cluster.column_encryption_policy - except AttributeError: - log.info("Unable to set column encryption policy for session") - # create connection pools in parallel self._initial_connect_futures = set() for host in hosts: @@ -2571,6 +2565,15 @@ def __init__(self, cluster, hosts, keyspace=None): self.session_id = uuid.uuid4() self._graph_paging_available = self._check_graph_paging_available() + if self.cluster.column_encryption_policy is not None: + try: + self.client_protocol_handler = type( + str(self.session_id) + "-ProtocolHandler", + (ProtocolHandler,), + {"column_encryption_policy": self.cluster.column_encryption_policy}) + except AttributeError: + log.info("Unable to set column encryption policy for session") + if self.cluster.monitor_reporting_enabled: cc_host = self.cluster.get_control_connection_host() valid_insights_version = (cc_host and version_supports_insights(cc_host.dse_version)) diff --git a/tests/integration/standard/column_encryption/test_policies.py b/tests/integration/standard/column_encryption/test_policies.py index bb84c0352c..dea6b6d39e 100644 --- a/tests/integration/standard/column_encryption/test_policies.py +++ b/tests/integration/standard/column_encryption/test_policies.py @@ -95,7 +95,11 @@ def test_end_to_end_simple(self): self.assertEquals(expected, encrypted) self.assertEquals(expected, unencrypted) - def test_end_to_end_different_cle_contexts(self): + def test_end_to_end_different_cle_contexts_different_ivs(self): + """ + Test to validate PYTHON-1350. We should be able to decode the data from two different contexts (with two different IVs) + since the IV used to decrypt the data is actually now stored with the data. + """ expected = 2 @@ -133,3 +137,34 @@ def test_end_to_end_different_cle_contexts(self): (encrypted,unencrypted) = session2.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() self.assertEquals(expected, encrypted) self.assertEquals(expected, unencrypted) + + def test_end_to_end_different_cle_contexts_different_policies(self): + """ + Test to validate PYTHON-1356. Class variables used to pass CLE policy down to protocol handler shouldn't persist. + """ + + expected = 3 + + key = os.urandom(AES256_KEY_SIZE_BYTES) + (col_desc, cl_policy) = self._create_policy(key) + cluster = TestCluster(column_encryption_policy=cl_policy) + session = cluster.connect() + self._recreate_keyspace(session) + + # Use encode_and_encrypt helper function to populate date + session.execute("insert into foo.bar (encrypted, unencrypted) values (%s,%s)",(cl_policy.encode_and_encrypt(col_desc, expected), expected)) + + # We now open a new session _without_ the CLE policy specified. We should _not_ be able to read decrypted bits from this session. + cluster2 = TestCluster() + session2 = cluster2.connect() + + # A straight select from the database will now return the decrypted bits. We select both encrypted and unencrypted + # values here to confirm that we don't interfere with regular processing of unencrypted vals. + (encrypted,unencrypted) = session2.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() + self.assertEquals(cl_policy.encode_and_encrypt(col_desc, expected), encrypted) + self.assertEquals(expected, unencrypted) + + # Confirm the same behaviour from a subsequent prepared statement as well + prepared = session2.prepare("select encrypted, unencrypted from foo.bar where unencrypted = ? allow filtering") + (encrypted,unencrypted) = session2.execute(prepared, [expected]).one() + self.assertEquals(cl_policy.encode_and_encrypt(col_desc, expected), encrypted) From 910f02820c7a9e0e7b46fc9fbda1ee9a0e051909 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Mon, 5 Jun 2023 09:23:25 -0500 Subject: [PATCH 1926/2005] CONN-38 Notes for 3.27.0 on PYTHON-1350 (#1166) --- docs/column_encryption.rst | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/docs/column_encryption.rst b/docs/column_encryption.rst index 5cfb736c1f..e18b9286ed 100644 --- a/docs/column_encryption.rst +++ b/docs/column_encryption.rst @@ -14,6 +14,20 @@ also available, although in this case values must be manually encrypted and/or d Client-side encryption and decryption should work against all versions of Cassandra and DSE. It does not utilize any server-side functionality to do its work. +WARNING: Consider upgrading to 3.28.0 or later +------------------------------------------------ +There is a significant issue with the column encryption functionality in Python driver 3.27.0. +To be able to decrypt your data, you must preserve the cipher initialization vector (IV) used by +the :class:`~.AES256ColumnEncryptionPolicy` when your data was written. +To decrypt your data, you must supply this IV when creating a policy to read this data. +If you do not supply this IV in the policy to read this data, you will **NOT BE ABLE TO DECRYPT YOUR DATA**. +See +`PYTHON-1350 `_ for more detail. + +DataStax recommends upgrading to Python driver 3.28.0 or later to avoid this issue. 3.28.0 or later manages the IV automatically. +Because of this change in functionality, any encrypted data written in 3.27.0 will **NOT** be readable by 3.28.0 or later. +After upgrading to Python driver 3.28.0 or later, it is critical that you re-encrypt your data with the new driver version. + Configuration ------------- Client-side encryption is enabled by creating an instance of a subclass of :class:`~.ColumnEncryptionPolicy` From 643d3a631b3f15603942af42b47228e0e2d7f193 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Mon, 5 Jun 2023 09:25:27 -0500 Subject: [PATCH 1927/2005] Update docs.yaml to point to most recent 3.27.0 docs changes --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index 77c738b4f4..eb94f74590 100644 --- a/docs.yaml +++ b/docs.yaml @@ -23,7 +23,7 @@ sections: CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - name: '3.27' - ref: 0002e912 + ref: 910f0282 - name: '3.26' ref: f1e9126 - name: '3.25' From 2db3728a4791ef08e41bdd2631b34ade0cd2dd8b Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Mon, 5 Jun 2023 09:36:50 -0500 Subject: [PATCH 1928/2005] PYTHON-1352 Add vector type, codec + support for parsing CQL type (#1161) --- cassandra/__init__.py | 2 +- cassandra/cqltypes.py | 37 ++++++++++++++++++++++++++++++++++--- tests/unit/test_types.py | 22 +++++++++++++++++++++- 3 files changed, 56 insertions(+), 5 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 1573abdf00..ca15e93602 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 27, 0) +__version_info__ = (3, 28, 0b1) __version__ = '.'.join(map(str, __version_info__)) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 8167b3b894..6cc89aafbb 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -235,13 +235,15 @@ def parse_casstype_args(typestring): else: names.append(None) - ctype = lookup_casstype_simple(tok) + try: + ctype = int(tok) + except ValueError: + ctype = lookup_casstype_simple(tok) types.append(ctype) # return the first (outer) type, which will have all parameters applied return args[0][0][0] - def lookup_casstype(casstype): """ Given a Cassandra type as a string (possibly including parameters), hand @@ -259,6 +261,7 @@ def lookup_casstype(casstype): try: return parse_casstype_args(casstype) except (ValueError, AssertionError, IndexError) as e: + log.debug("Exception in parse_casstype_args: %s" % e) raise ValueError("Don't know how to parse type string %r: %s" % (casstype, e)) @@ -296,7 +299,7 @@ class _CassandraType(object): """ def __repr__(self): - return '<%s( %r )>' % (self.cql_parameterized_type(), self.val) + return '<%s>' % (self.cql_parameterized_type()) @classmethod def from_binary(cls, byts, protocol_version): @@ -1423,3 +1426,31 @@ def serialize(cls, v, protocol_version): buf.write(int8_pack(cls._encode_precision(bound.precision))) return buf.getvalue() + +class VectorType(_CassandraType): + typename = 'org.apache.cassandra.db.marshal.VectorType' + vector_size = 0 + subtype = None + + @classmethod + def apply_parameters(cls, params, names): + assert len(params) == 2 + subtype = lookup_casstype(params[0]) + vsize = params[1] + return type('%s(%s)' % (cls.cass_parameterized_type_with([]), vsize), (cls,), {'vector_size': vsize, 'subtype': subtype}) + + @classmethod + def deserialize(cls, byts, protocol_version): + indexes = (4 * x for x in range(0, cls.vector_size)) + return [cls.subtype.deserialize(byts[idx:idx + 4], protocol_version) for idx in indexes] + + @classmethod + def serialize(cls, v, protocol_version): + buf = io.BytesIO() + for item in v: + buf.write(cls.subtype.serialize(item, protocol_version)) + return buf.getvalue() + + @classmethod + def cql_parameterized_type(cls): + return "%s<%s, %s>" % (cls.typename, cls.subtype.typename, cls.vector_size) diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index af3b327ef8..e85f5dbe67 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -27,7 +27,8 @@ EmptyValue, LongType, SetType, UTF8Type, cql_typename, int8_pack, int64_pack, lookup_casstype, lookup_casstype_simple, parse_casstype_args, - int32_pack, Int32Type, ListType, MapType + int32_pack, Int32Type, ListType, MapType, VectorType, + FloatType ) from cassandra.encoder import cql_quote from cassandra.pool import Host @@ -190,6 +191,12 @@ class BarType(FooType): self.assertEqual(UTF8Type, ctype.subtypes[2]) self.assertEqual([b'city', None, b'zip'], ctype.names) + def test_parse_casstype_vector(self): + ctype = parse_casstype_args("org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.FloatType, 3)") + self.assertTrue(issubclass(ctype, VectorType)) + self.assertEqual(3, ctype.vector_size) + self.assertEqual(FloatType, ctype.subtype) + def test_empty_value(self): self.assertEqual(str(EmptyValue()), 'EMPTY') @@ -303,6 +310,19 @@ def test_cql_quote(self): self.assertEqual(cql_quote('test'), "'test'") self.assertEqual(cql_quote(0), '0') + def test_vector_round_trip(self): + base = [3.4, 2.9, 41.6, 12.0] + ctype = parse_casstype_args("org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.FloatType, 4)") + base_bytes = ctype.serialize(base, 0) + self.assertEqual(16, len(base_bytes)) + result = ctype.deserialize(base_bytes, 0) + self.assertEqual(len(base), len(result)) + for idx in range(0,len(base)): + self.assertAlmostEqual(base[idx], result[idx], places=5) + + def test_vector_cql_parameterized_type(self): + ctype = parse_casstype_args("org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.FloatType, 4)") + self.assertEqual(ctype.cql_parameterized_type(), "org.apache.cassandra.db.marshal.VectorType") ZERO = datetime.timedelta(0) From e3400a1e1eeb34fc1162fd601bf667cf802761ab Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Mon, 5 Jun 2023 10:08:34 -0500 Subject: [PATCH 1929/2005] Release 3.28.0: changelog & version --- CHANGELOG.rst | 22 ++++++++++++++++++++++ cassandra/__init__.py | 2 +- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 432998869e..472881dbc5 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,25 @@ +3.28.0 +====== +June 5, 2023 + +Features +-------- +* Add support for vector type (PYTHON-1352) +* Cryptography module is now an optional dependency (PYTHON-1351) + +Bug Fixes +--------- +* Store IV along with encrypted text when using column-level encryption (PYTHON-1350) +* Create session-specific protocol handlers to contain session-specific CLE policies (PYTHON-1356) + +Others +------ +* Use Cython for smoke builds (PYTHON-1343) +* Don't fail when inserting UDTs with prepared queries with some missing fields (PR 1151) +* Convert print statement to function in docs (PR 1157) +* Update comment for retry policy (DOC-3278) +* Added error handling blog reference (DOC-2813) + 3.27.0 ====== May 1, 2023 diff --git a/cassandra/__init__.py b/cassandra/__init__.py index ca15e93602..b048bd9358 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 28, 0b1) +__version_info__ = (3, 28, 0) __version__ = '.'.join(map(str, __version_info__)) From 7ccf5026d870072af548f29cbfc15873f51a2935 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Mon, 5 Jun 2023 23:55:41 -0500 Subject: [PATCH 1930/2005] Fixed non-valid rst in README --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index e140371f07..47483f3881 100644 --- a/README.rst +++ b/README.rst @@ -58,7 +58,7 @@ Contributing See `CONTRIBUTING.md `_. Error Handling ------------- +-------------- While originally written for the Java driver, users may reference the `Cassandra error handling done right blog `_ for resolving error handling scenarios with Apache Cassandra. Reporting Problems From 50c93e907ae53a26e105c5203fa5aa04741d116c Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Tue, 6 Jun 2023 10:04:22 -0500 Subject: [PATCH 1931/2005] Include docs for 3.28.0 --- docs.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs.yaml b/docs.yaml index eb94f74590..2daac0608b 100644 --- a/docs.yaml +++ b/docs.yaml @@ -22,6 +22,8 @@ sections: # build extensions like libev CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: + - name: '3.28' + ref: 7ccf5026 - name: '3.27' ref: 910f0282 - name: '3.26' From 4325afb6c652fc1ccbf09f1e1aa893da2b0f370c Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Tue, 6 Jun 2023 16:21:40 -0500 Subject: [PATCH 1932/2005] CONN-38 Notes for 3.28.0 on PYTHON-1350 (#1167) Co-authored-by: Jamie Gillenwater --- docs/column_encryption.rst | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/docs/column_encryption.rst b/docs/column_encryption.rst index e18b9286ed..ab67ef16d0 100644 --- a/docs/column_encryption.rst +++ b/docs/column_encryption.rst @@ -14,19 +14,11 @@ also available, although in this case values must be manually encrypted and/or d Client-side encryption and decryption should work against all versions of Cassandra and DSE. It does not utilize any server-side functionality to do its work. -WARNING: Consider upgrading to 3.28.0 or later +WARNING: Encryption format changes in 3.28.0 ------------------------------------------------ -There is a significant issue with the column encryption functionality in Python driver 3.27.0. -To be able to decrypt your data, you must preserve the cipher initialization vector (IV) used by -the :class:`~.AES256ColumnEncryptionPolicy` when your data was written. -To decrypt your data, you must supply this IV when creating a policy to read this data. -If you do not supply this IV in the policy to read this data, you will **NOT BE ABLE TO DECRYPT YOUR DATA**. -See -`PYTHON-1350 `_ for more detail. - -DataStax recommends upgrading to Python driver 3.28.0 or later to avoid this issue. 3.28.0 or later manages the IV automatically. -Because of this change in functionality, any encrypted data written in 3.27.0 will **NOT** be readable by 3.28.0 or later. -After upgrading to Python driver 3.28.0 or later, it is critical that you re-encrypt your data with the new driver version. +Python driver 3.28.0 introduces a new encryption format for data written by :class:`~.AES256ColumnEncryptionPolicy`. +As a result, any encrypted data written by Python driver 3.27.0 will **NOT** be readable. +If you upgraded from 3.27.0, you should re-encrypt your data with 3.28.0. Configuration ------------- From 8c41066330eb04c34eff57153ab2eda810844d5f Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Tue, 6 Jun 2023 16:24:28 -0500 Subject: [PATCH 1933/2005] Update docs.yaml to point to most recent 3.28.0 docs changes --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index 2daac0608b..c34324e170 100644 --- a/docs.yaml +++ b/docs.yaml @@ -23,7 +23,7 @@ sections: CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - name: '3.28' - ref: 7ccf5026 + ref: 4325afb6 - name: '3.27' ref: 910f0282 - name: '3.26' From 8ba0a5ccd71b66c77ee58994ed9da9ea34ff4cbe Mon Sep 17 00:00:00 2001 From: weideng1 Date: Mon, 28 Aug 2023 15:42:30 -0600 Subject: [PATCH 1934/2005] Jenkins using new python versions in the matrix (#1174) * update Jenkins matrix to use available python versions in the latest runner image * Use earliest and latest supported python runtimes for smoke test * Remove C* and DSE versions that are EOL'ed * make ccm-private work with python 3.10+ * use pynose to replace nosetests so that it can run under Python 3.10+, before PYTHON-1297 gets merged --- Jenkinsfile | 69 +++++++++++++++++++++-------------------------------- 1 file changed, 27 insertions(+), 42 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index e6c2d9700f..a6d19fb5ef 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -30,9 +30,9 @@ import com.datastax.jenkins.drivers.python.Slack slack = new Slack() -DEFAULT_CASSANDRA = ['2.1', '2.2', '3.0', '3.11', '4.0'] -DEFAULT_DSE = ['dse-5.0.15', 'dse-5.1.35', 'dse-6.0.18', 'dse-6.7.17', 'dse-6.8.30'] -DEFAULT_RUNTIME = ['3.7.7', '3.8.3'] +DEFAULT_CASSANDRA = ['3.0', '3.11', '4.0'] +DEFAULT_DSE = ['dse-5.1.35', 'dse-6.8.30'] +DEFAULT_RUNTIME = ['3.8.16', '3.9.16', '3.10.11', '3.11.3'] DEFAULT_CYTHON = ["True", "False"] matrices = [ "FULL": [ @@ -52,7 +52,7 @@ matrices = [ ], "SMOKE": [ "SERVER": DEFAULT_CASSANDRA.takeRight(2) + DEFAULT_DSE.takeRight(1), - "RUNTIME": DEFAULT_RUNTIME.takeRight(2), + "RUNTIME": DEFAULT_RUNTIME.take(1) + DEFAULT_RUNTIME.takeRight(1), "CYTHON": ["True"] ] ] @@ -167,6 +167,11 @@ def initializeEnvironment() { sudo apt-get install socat pip install --upgrade pip pip install -U setuptools + + # install a version of pyyaml<6.0 compatible with ccm-3.1.5 as of Aug 2023 + # this works around the python-3.10+ compatibility problem as described in DSP-23524 + pip install wheel + pip install "Cython<3.0" "pyyaml<6.0" --no-build-isolation pip install ${HOME}/ccm ''' @@ -186,7 +191,7 @@ def initializeEnvironment() { } sh label: 'Install unit test modules', script: '''#!/bin/bash -lex - pip install nose-ignore-docstring nose-exclude service_identity + pip install pynose nose-ignore-docstring nose-exclude service_identity ''' if (env.CYTHON_ENABLED == 'True') { @@ -247,9 +252,9 @@ def executeStandardTests() { . ${HOME}/environment.txt set +o allexport - EVENT_LOOP=${EVENT_LOOP} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml tests/unit/ || true - EVENT_LOOP=eventlet VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || true - EVENT_LOOP=gevent VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || true + EVENT_LOOP=${EVENT_LOOP} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml tests/unit/ || true + EVENT_LOOP=eventlet VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || true + EVENT_LOOP=gevent VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || true ''' sh label: 'Execute Simulacron integration tests', script: '''#!/bin/bash -lex @@ -259,13 +264,13 @@ def executeStandardTests() { set +o allexport SIMULACRON_JAR="${HOME}/simulacron.jar" - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true # Run backpressure tests separately to avoid memory issue - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_1_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_paused_connections || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_2_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_queued_requests_timeout || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_3_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_cluster_busy || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_4_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_node_busy || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_1_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_paused_connections || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_2_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_queued_requests_timeout || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_3_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_cluster_busy || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_4_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_node_busy || true ''' sh label: 'Execute CQL engine integration tests', script: '''#!/bin/bash -lex @@ -274,7 +279,7 @@ def executeStandardTests() { . ${HOME}/environment.txt set +o allexport - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true ''' sh label: 'Execute Apache CassandraⓇ integration tests', script: '''#!/bin/bash -lex @@ -283,7 +288,7 @@ def executeStandardTests() { . ${HOME}/environment.txt set +o allexport - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true ''' if (env.CASSANDRA_VERSION.split('-')[0] == 'dse' && env.CASSANDRA_VERSION.split('-')[1] != '4.8') { @@ -293,7 +298,7 @@ def executeStandardTests() { . ${HOME}/environment.txt set +o allexport - EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} DSE_VERSION=${DSE_VERSION} ADS_HOME="${HOME}/" VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=dse_results.xml tests/integration/advanced/ || true + EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} DSE_VERSION=${DSE_VERSION} ADS_HOME="${HOME}/" VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=dse_results.xml tests/integration/advanced/ || true ''' } @@ -303,7 +308,7 @@ def executeStandardTests() { . ${HOME}/environment.txt set +o allexport - EVENT_LOOP=${EVENT_LOOP} CLOUD_PROXY_PATH="${HOME}/proxy/" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=advanced_results.xml tests/integration/cloud/ || true + EVENT_LOOP=${EVENT_LOOP} CLOUD_PROXY_PATH="${HOME}/proxy/" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=advanced_results.xml tests/integration/cloud/ || true ''' if (env.PROFILE == 'FULL') { @@ -313,7 +318,7 @@ def executeStandardTests() { . ${HOME}/environment.txt set +o allexport - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true ''' } } @@ -325,7 +330,7 @@ def executeDseSmokeTests() { . ${HOME}/environment.txt set +o allexport - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} DSE_VERSION=${DSE_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/test_dse.py || true + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} DSE_VERSION=${DSE_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/test_dse.py || true ''' } @@ -346,7 +351,7 @@ def executeEventLoopTests() { "tests/integration/simulacron/test_endpoint.py" "tests/integration/long/test_ssl.py" ) - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml ${EVENT_LOOP_TESTS[@]} || true + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml ${EVENT_LOOP_TESTS[@]} || true ''' } @@ -502,14 +507,6 @@ pipeline { - - - - - - - - @@ -522,22 +519,10 @@ pipeline { - - - - - - - - - - - - @@ -620,8 +605,8 @@ pipeline { triggers { parameterizedCron(branchPatternCron().matcher(env.BRANCH_NAME).matches() ? """ # Every weeknight (Monday - Friday) around 4:00 AM - # These schedules will run with and without Cython enabled for Python 3.7.7 and 3.8.3 - H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.7.7 3.8.3;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1.35 dse-6.0.18 dse-6.7.17 + # These schedules will run with and without Cython enabled for Python 3.8.16 and 3.11.3 + H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.8.16 3.11.3;CI_SCHEDULE_SERVER_VERSION=3.11 4.0 dse-5.1.35 dse-6.8.30 """ : "") } From e24ffe451dcdb67266d986c46d492f759961be89 Mon Sep 17 00:00:00 2001 From: Emelia <105240296+emeliawilkinson24@users.noreply.github.com> Date: Fri, 22 Sep 2023 12:28:50 -0400 Subject: [PATCH 1935/2005] Update redirects in docs.yaml (#1178) --- docs.yaml | 46 ++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 42 insertions(+), 4 deletions(-) diff --git a/docs.yaml b/docs.yaml index c34324e170..7dde5a0299 100644 --- a/docs.yaml +++ b/docs.yaml @@ -73,9 +73,47 @@ versions: redirects: - \A\/(.*)/\Z: /\1.html rewrites: - - search: cassandra.apache.org/doc/cql3/CQL.html - replace: cassandra.apache.org/doc/cql3/CQL-3.0.html - - search: http://www.datastax.com/documentation/cql/3.1/ - replace: https://docs.datastax.com/en/archived/cql/3.1/ - search: http://www.datastax.com/docs/1.2/cql_cli/cql/BATCH replace: https://docs.datastax.com/en/dse/6.7/cql/cql/cql_reference/cql_commands/cqlBatch.html + - search: http://www.datastax.com/documentation/cql/3.1/ + replace: https://docs.datastax.com/en/archived/cql/3.1/ + - search: 'https://community.datastax.com' + replace: 'https://www.datastax.com/dev/community' + - search: 'https://docs.datastax.com/en/astra/aws/doc/index.html' + replace: 'https://docs.datastax.com/en/astra-serverless/docs/connect/drivers/connect-python.html' + - search: 'http://cassandra.apache.org/doc/cql3/CQL.html#timeuuidFun' + replace: 'https://cassandra.apache.org/doc/3.11/cassandra/cql/functions.html#timeuuid-functions' + - search: 'http://cassandra.apache.org/doc/cql3/CQL.html#tokenFun' + replace: 'https://cassandra.apache.org/doc/3.11/cassandra/cql/functions.html#token' + - search: 'http://cassandra.apache.org/doc/cql3/CQL.html#collections' + replace: 'https://cassandra.apache.org/doc/3.11/cassandra/cql/types.html#collections' + - search: 'http://cassandra.apache.org/doc/cql3/CQL.html#batchStmt' + replace: 'https://cassandra.apache.org/doc/3.11/cassandra/cql/dml.html#batch_statement' + - search: 'http://cassandra.apache.org/doc/cql3/CQL-3.0.html#timeuuidFun' + replace: 'https://cassandra.apache.org/doc/3.11/cassandra/cql/functions.html#timeuuid-functions' + - search: 'http://cassandra.apache.org/doc/cql3/CQL-3.0.html#tokenFun' + replace: 'https://cassandra.apache.org/doc/3.11/cassandra/cql/functions.html#token' + - search: 'http://cassandra.apache.org/doc/cql3/CQL-3.0.html#collections' + replace: 'https://cassandra.apache.org/doc/3.11/cassandra/cql/types.html#collections' + - search: 'http://cassandra.apache.org/doc/cql3/CQL-3.0.html#batchStmt' + replace: 'https://cassandra.apache.org/doc/3.11/cassandra/cql/dml.html#batch_statement' +checks: + external_links: + exclude: + - 'https://twitter.com/dsJavaDriver' + - 'https://twitter.com/datastaxeng' + - 'https://twitter.com/datastax' + - 'https://projectreactor.io' + - 'https://docs.datastax.com/en/drivers/java/4.[0-9]+/com/datastax/oss/driver/internal/' + - 'http://www.planetcassandra.org/blog/user-defined-functions-in-cassandra-3-0/' + - 'http://www.planetcassandra.org/making-the-change-from-thrift-to-cql/' + - 'https://academy.datastax.com/slack' + - 'https://community.datastax.com/index.html' + - 'https://micrometer.io/docs' + - 'http://datastax.github.io/java-driver/features/shaded_jar/' + - 'http://aka.ms/vcpython27' + internal_links: + exclude: + - 'netty_pipeline/' + - '../core/' + - '%5Bguava%20eviction%5D' From 375da0002651a012bee8aac1d069ad0af3b4c35e Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Wed, 11 Oct 2023 15:27:53 -0500 Subject: [PATCH 1936/2005] Add Jenkins support for Python 3.12.0 (#1180) --- Jenkinsfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index a6d19fb5ef..7e4a3c4761 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -32,7 +32,7 @@ slack = new Slack() DEFAULT_CASSANDRA = ['3.0', '3.11', '4.0'] DEFAULT_DSE = ['dse-5.1.35', 'dse-6.8.30'] -DEFAULT_RUNTIME = ['3.8.16', '3.9.16', '3.10.11', '3.11.3'] +DEFAULT_RUNTIME = ['3.8.16', '3.9.16', '3.10.11', '3.11.3', '3.12.0'] DEFAULT_CYTHON = ["True", "False"] matrices = [ "FULL": [ @@ -605,8 +605,8 @@ pipeline { triggers { parameterizedCron(branchPatternCron().matcher(env.BRANCH_NAME).matches() ? """ # Every weeknight (Monday - Friday) around 4:00 AM - # These schedules will run with and without Cython enabled for Python 3.8.16 and 3.11.3 - H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.8.16 3.11.3;CI_SCHEDULE_SERVER_VERSION=3.11 4.0 dse-5.1.35 dse-6.8.30 + # These schedules will run with and without Cython enabled for Python 3.8.16 and 3.12.0 + H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.8.16 3.12.0;CI_SCHEDULE_SERVER_VERSION=3.11 4.0 dse-5.1.35 dse-6.8.30 """ : "") } From a7ab8cc76f41b511aeada7147fd423833c04e9be Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Wed, 11 Oct 2023 23:10:51 -0500 Subject: [PATCH 1937/2005] PYTHON-1364 Fix ssl.wrap_socket errors (from eventlet) for Python 3.12 (#1181) --- cassandra/cluster.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 57fc2d4e8c..00240186c0 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -99,7 +99,11 @@ try: from cassandra.io.eventletreactor import EventletConnection -except ImportError: +# PYTHON-1364 +# +# At the moment eventlet initialization is chucking AttributeErrors due to it's dependence on pyOpenSSL +# and some changes in Python 3.12 which have some knock-on effects there. +except (ImportError, AttributeError): EventletConnection = None try: @@ -113,8 +117,12 @@ def _is_eventlet_monkey_patched(): if 'eventlet.patcher' not in sys.modules: return False - import eventlet.patcher - return eventlet.patcher.is_monkey_patched('socket') + try: + import eventlet.patcher + return eventlet.patcher.is_monkey_patched('socket') + # Another case related to PYTHON-1364 + except AttributeError: + return False def _is_gevent_monkey_patched(): From e9136f41db04a5347877b948837b4c4172b51e14 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Fri, 13 Oct 2023 17:15:03 -0500 Subject: [PATCH 1938/2005] PYTHON-1313 Fix asyncio removals in Python 3.10 (#1179) --- cassandra/io/asyncioreactor.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/cassandra/io/asyncioreactor.py b/cassandra/io/asyncioreactor.py index ab0e90ae09..95f92e26e0 100644 --- a/cassandra/io/asyncioreactor.py +++ b/cassandra/io/asyncioreactor.py @@ -41,13 +41,12 @@ def end(self): def __init__(self, timeout, callback, loop): delayed = self._call_delayed_coro(timeout=timeout, - callback=callback, - loop=loop) + callback=callback) self._handle = asyncio.run_coroutine_threadsafe(delayed, loop=loop) @staticmethod - async def _call_delayed_coro(timeout, callback, loop): - await asyncio.sleep(timeout, loop=loop) + async def _call_delayed_coro(timeout, callback): + await asyncio.sleep(timeout) return callback() def __lt__(self, other): @@ -90,8 +89,8 @@ def __init__(self, *args, **kwargs): self._connect_socket() self._socket.setblocking(0) - self._write_queue = asyncio.Queue(loop=self._loop) - self._write_queue_lock = asyncio.Lock(loop=self._loop) + self._write_queue = asyncio.Queue() + self._write_queue_lock = asyncio.Lock() # see initialize_reactor -- loop is running in a separate thread, so we # have to use a threadsafe call From d0e639b0e5c6e4d763169a18bf2972ce55cec385 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Fri, 27 Oct 2023 21:22:57 +0200 Subject: [PATCH 1939/2005] Remove some remnants of Python2 (#1172) Co-authored-by: vgali7 --- .gitignore | 3 + CONTRIBUTING.rst | 1 - appveyor.yml | 5 +- benchmarks/callback_full_pipeline.py | 1 - benchmarks/future_batches.py | 2 +- benchmarks/future_full_pipeline.py | 2 +- benchmarks/sync.py | 1 - cassandra/auth.py | 16 +- cassandra/cluster.py | 33 +- cassandra/compat.py | 20 -- cassandra/concurrent.py | 26 +- cassandra/connection.py | 14 +- cassandra/cqlengine/__init__.py | 8 +- cassandra/cqlengine/columns.py | 7 +- cassandra/cqlengine/connection.py | 3 +- cassandra/cqlengine/management.py | 5 +- cassandra/cqlengine/models.py | 8 +- cassandra/cqlengine/operators.py | 4 +- cassandra/cqlengine/query.py | 25 +- cassandra/cqlengine/statements.py | 25 +- cassandra/cqlengine/usertype.py | 10 +- cassandra/cqltypes.py | 100 +++--- cassandra/cython_marshal.pyx | 16 +- cassandra/datastax/cloud/__init__.py | 11 +- cassandra/datastax/graph/fluent/_query.py | 3 +- .../datastax/graph/fluent/_serializers.py | 12 +- cassandra/datastax/graph/graphson.py | 53 ++-- cassandra/datastax/graph/query.py | 12 +- cassandra/datastax/insights/registry.py | 3 +- cassandra/datastax/insights/reporter.py | 7 +- cassandra/datastax/insights/serializers.py | 6 +- cassandra/deserializers.pyx | 4 - cassandra/encoder.py | 68 ++--- cassandra/io/asyncorereactor.py | 1 - cassandra/io/eventletreactor.py | 4 +- cassandra/io/geventreactor.py | 1 - cassandra/io/libevreactor.py | 1 - cassandra/marshal.py | 42 +-- cassandra/metadata.py | 32 +- cassandra/murmur3.py | 1 - cassandra/protocol.py | 14 +- cassandra/query.py | 8 +- cassandra/scylla/cloud.py | 0 cassandra/segment.py | 4 - cassandra/util.py | 288 ++---------------- docs/installation.rst | 8 +- .../execute_async_with_queue.py | 2 +- requirements.txt | 6 - setup.py | 3 +- tests/integration/__init__.py | 4 +- tests/integration/advanced/__init__.py | 2 +- tests/integration/advanced/graph/__init__.py | 23 +- .../advanced/graph/fluent/__init__.py | 11 +- .../advanced/graph/fluent/test_graph.py | 6 +- .../integration/advanced/graph/test_graph.py | 1 - .../advanced/graph/test_graph_datatype.py | 13 +- .../advanced/graph/test_graph_query.py | 7 +- .../integration/advanced/test_cont_paging.py | 1 - tests/integration/cloud/test_cloud.py | 8 +- .../columns/test_container_columns.py | 3 +- .../cqlengine/columns/test_value_io.py | 9 +- .../management/test_compaction_settings.py | 3 +- .../cqlengine/management/test_management.py | 1 - .../model/test_class_construction.py | 1 - .../operators/test_where_operators.py | 20 +- .../statements/test_base_statement.py | 3 +- .../statements/test_delete_statement.py | 21 +- .../statements/test_insert_statement.py | 8 +- .../statements/test_select_statement.py | 29 +- .../statements/test_update_statement.py | 13 +- .../cqlengine/statements/test_where_clause.py | 3 +- .../integration/cqlengine/test_batch_query.py | 3 - .../cqlengine/test_lwt_conditional.py | 3 +- tests/integration/datatype_utils.py | 11 +- tests/integration/long/test_ipv6.py | 1 - .../integration/simulacron/test_connection.py | 1 - tests/integration/simulacron/utils.py | 2 +- .../standard/test_authentication.py | 1 - .../standard/test_client_warnings.py | 1 - tests/integration/standard/test_concurrent.py | 2 - tests/integration/standard/test_connection.py | 1 - .../standard/test_custom_payload.py | 8 +- .../standard/test_custom_protocol_handler.py | 3 +- tests/integration/standard/test_metadata.py | 17 +- tests/integration/standard/test_query.py | 5 +- .../integration/standard/test_query_paging.py | 1 - .../standard/test_single_interface.py | 4 +- tests/integration/standard/test_types.py | 33 +- tests/integration/standard/test_udts.py | 7 +- tests/unit/advanced/cloud/test_cloud.py | 4 +- tests/unit/advanced/test_graph.py | 22 +- tests/unit/cqlengine/test_connection.py | 2 - tests/unit/io/utils.py | 15 +- tests/unit/test_auth.py | 5 +- tests/unit/test_cluster.py | 3 +- tests/unit/test_concurrent.py | 2 +- tests/unit/test_connection.py | 7 +- tests/unit/test_control_connection.py | 4 +- tests/unit/test_metadata.py | 23 +- tests/unit/test_orderedmap.py | 7 +- tests/unit/test_parameter_binding.py | 7 +- tests/unit/test_policies.py | 15 +- tests/unit/test_protocol.py | 1 - tests/unit/test_query.py | 4 +- tests/unit/test_response_future.py | 1 - tests/unit/test_segment.py | 34 +-- tests/unit/test_timestamps.py | 6 +- tests/unit/test_types.py | 8 +- tox.ini | 1 - 109 files changed, 410 insertions(+), 984 deletions(-) delete mode 100644 cassandra/compat.py create mode 100644 cassandra/scylla/cloud.py diff --git a/.gitignore b/.gitignore index 5c9cbec957..30ff731f85 100644 --- a/.gitignore +++ b/.gitignore @@ -42,3 +42,6 @@ tests/unit/cython/bytesio_testhelper.c #iPython *.ipynb +venv +docs/venv +.eggs \ No newline at end of file diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index cdd742c063..e5da81d74f 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -26,7 +26,6 @@ To protect the community, all contributors are required to `sign the DataStax Co Design and Implementation Guidelines ------------------------------------ -- We support Python 2.7+, so any changes must work in any of these runtimes (we use ``six``, ``futures``, and some internal backports for compatability) - We have integrations (notably Cassandra cqlsh) that require pure Python and minimal external dependencies. We try to avoid new external dependencies. Where compiled extensions are concerned, there should always be a pure Python fallback implementation. - This project follows `semantic versioning `_, so breaking API changes will only be introduced in major versions. - Legacy ``cqlengine`` has varying degrees of overreaching client-side validation. Going forward, we will avoid client validation where server feedback is adequate and not overly expensive. diff --git a/appveyor.yml b/appveyor.yml index d1daaa6ec6..f8a3fd7660 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -1,9 +1,6 @@ environment: matrix: - - PYTHON: "C:\\Python27-x64" - cassandra_version: 3.11.2 - ci_type: standard - - PYTHON: "C:\\Python35-x64" + - PYTHON: "C:\\Python37-x64" cassandra_version: 3.11.2 ci_type: standard os: Visual Studio 2015 diff --git a/benchmarks/callback_full_pipeline.py b/benchmarks/callback_full_pipeline.py index e3ecfe3be5..a4a4c33315 100644 --- a/benchmarks/callback_full_pipeline.py +++ b/benchmarks/callback_full_pipeline.py @@ -18,7 +18,6 @@ from threading import Event from base import benchmark, BenchmarkThread -from six.moves import range log = logging.getLogger(__name__) diff --git a/benchmarks/future_batches.py b/benchmarks/future_batches.py index 8cd915ebab..de4484e617 100644 --- a/benchmarks/future_batches.py +++ b/benchmarks/future_batches.py @@ -14,7 +14,7 @@ import logging from base import benchmark, BenchmarkThread -from six.moves import queue +import queue log = logging.getLogger(__name__) diff --git a/benchmarks/future_full_pipeline.py b/benchmarks/future_full_pipeline.py index 9a9fcfcd50..901573c18e 100644 --- a/benchmarks/future_full_pipeline.py +++ b/benchmarks/future_full_pipeline.py @@ -14,7 +14,7 @@ import logging from base import benchmark, BenchmarkThread -from six.moves import queue +import queue log = logging.getLogger(__name__) diff --git a/benchmarks/sync.py b/benchmarks/sync.py index f2a45fcd7d..96e744f700 100644 --- a/benchmarks/sync.py +++ b/benchmarks/sync.py @@ -13,7 +13,6 @@ # limitations under the License. from base import benchmark, BenchmarkThread -from six.moves import range class Runner(BenchmarkThread): diff --git a/cassandra/auth.py b/cassandra/auth.py index 3d2f751ac0..10200aa387 100644 --- a/cassandra/auth.py +++ b/cassandra/auth.py @@ -32,8 +32,6 @@ except ImportError: SASLClient = None -import six - log = logging.getLogger(__name__) # Custom payload keys related to DSE Unified Auth @@ -270,15 +268,15 @@ def __init__(self, username, password): self.password = password def get_mechanism(self): - return six.b("PLAIN") + return b"PLAIN" def get_initial_challenge(self): - return six.b("PLAIN-START") + return b"PLAIN-START" def evaluate_challenge(self, challenge): - if challenge == six.b('PLAIN-START'): + if challenge == b'PLAIN-START': data = "\x00%s\x00%s" % (self.username, self.password) - return data if six.PY2 else data.encode() + return data.encode() raise Exception('Did not receive a valid challenge response from server') @@ -297,13 +295,13 @@ def __init__(self, host, service, qops, properties): self.sasl = SASLClient(host, service, 'GSSAPI', qops=qops, **properties) def get_mechanism(self): - return six.b("GSSAPI") + return b"GSSAPI" def get_initial_challenge(self): - return six.b("GSSAPI-START") + return b"GSSAPI-START" def evaluate_challenge(self, challenge): - if challenge == six.b('GSSAPI-START'): + if challenge == b'GSSAPI-START': return self.sasl.process() else: return self.sasl.process(challenge) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 00240186c0..6514838050 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -21,6 +21,7 @@ import atexit from binascii import hexlify from collections import defaultdict +from collections.abc import Mapping from concurrent.futures import ThreadPoolExecutor, FIRST_COMPLETED, wait as wait_futures from copy import copy from functools import partial, wraps @@ -29,8 +30,8 @@ import logging from warnings import warn from random import random -import six -from six.moves import filter, range, queue as Queue +import re +import queue import socket import sys import time @@ -79,7 +80,6 @@ HostTargetingStatement) from cassandra.marshal import int64_pack from cassandra.timestamps import MonotonicTimestampGenerator -from cassandra.compat import Mapping from cassandra.util import _resolve_contact_points_to_string_map, Version from cassandra.datastax.insights.reporter import MonitorReporter @@ -111,9 +111,6 @@ except ImportError: from cassandra.util import WeakSet # NOQA -if six.PY3: - long = int - def _is_eventlet_monkey_patched(): if 'eventlet.patcher' not in sys.modules: return False @@ -1158,7 +1155,7 @@ def __init__(self, else: self._contact_points_explicit = True - if isinstance(contact_points, six.string_types): + if isinstance(contact_points, str): raise TypeError("contact_points should not be a string, it should be a sequence (e.g. list) of strings") if None in contact_points: @@ -1793,8 +1790,8 @@ def _new_session(self, keyspace): return session def _session_register_user_types(self, session): - for keyspace, type_map in six.iteritems(self._user_types): - for udt_name, klass in six.iteritems(type_map): + for keyspace, type_map in self._user_types.items(): + for udt_name, klass in type_map.items(): session.user_type_registered(keyspace, udt_name, klass) def _cleanup_failed_on_up_handling(self, host): @@ -2683,7 +2680,7 @@ def execute_async(self, query, parameters=None, trace=False, custom_payload=None """ custom_payload = custom_payload if custom_payload else {} if execute_as: - custom_payload[_proxy_execute_key] = six.b(execute_as) + custom_payload[_proxy_execute_key] = execute_as.encode() future = self._create_response_future( query, parameters, trace, custom_payload, timeout, @@ -2747,8 +2744,8 @@ def execute_graph_async(self, query, parameters=None, trace=False, execution_pro custom_payload = execution_profile.graph_options.get_options_map() if execute_as: - custom_payload[_proxy_execute_key] = six.b(execute_as) - custom_payload[_request_timeout_key] = int64_pack(long(execution_profile.request_timeout * 1000)) + custom_payload[_proxy_execute_key] = execute_as.encode() + custom_payload[_request_timeout_key] = int64_pack(int(execution_profile.request_timeout * 1000)) future = self._create_response_future(query, parameters=None, trace=trace, custom_payload=custom_payload, timeout=_NOT_SET, execution_profile=execution_profile) @@ -2885,7 +2882,7 @@ def _create_response_future(self, query, parameters, trace, custom_payload, prepared_statement = None - if isinstance(query, six.string_types): + if isinstance(query, str): query = SimpleStatement(query) elif isinstance(query, PreparedStatement): query = query.bind(parameters) @@ -3353,10 +3350,6 @@ def user_type_registered(self, keyspace, user_type, klass): 'User type %s does not exist in keyspace %s' % (user_type, keyspace)) field_names = type_meta.field_names - if six.PY2: - # go from unicode to string to avoid decode errors from implicit - # decode when formatting non-ascii values - field_names = [fn.encode('utf-8') for fn in field_names] def encode(val): return '{ %s }' % ' , '.join('%s : %s' % ( @@ -4035,7 +4028,7 @@ def _get_schema_mismatches(self, peers_result, local_result, local_address): log.debug("[control connection] Schemas match") return None - return dict((version, list(nodes)) for version, nodes in six.iteritems(versions)) + return dict((version, list(nodes)) for version, nodes in versions.items()) def _get_peers_query(self, peers_query_type, connection=None): """ @@ -4155,7 +4148,7 @@ class _Scheduler(Thread): is_shutdown = False def __init__(self, executor): - self._queue = Queue.PriorityQueue() + self._queue = queue.PriorityQueue() self._scheduled_tasks = set() self._count = count() self._executor = executor @@ -4213,7 +4206,7 @@ def run(self): else: self._queue.put_nowait((run_at, i, task)) break - except Queue.Empty: + except queue.Empty: pass time.sleep(0.1) diff --git a/cassandra/compat.py b/cassandra/compat.py deleted file mode 100644 index 83c1b104e5..0000000000 --- a/cassandra/compat.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import six - -if six.PY2: - from collections import Mapping -elif six.PY3: - from collections.abc import Mapping diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index 0228f297fe..fb8f26e1cc 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -16,8 +16,6 @@ from collections import namedtuple from heapq import heappush, heappop from itertools import cycle -import six -from six.moves import xrange, zip from threading import Condition import sys @@ -119,7 +117,7 @@ def execute(self, concurrency, fail_fast): self._current = 0 self._exec_count = 0 with self._condition: - for n in xrange(concurrency): + for n in range(concurrency): if not self._execute_next(): break return self._results() @@ -143,17 +141,13 @@ def _execute(self, idx, statement, params): callback=self._on_success, callback_args=args, errback=self._on_error, errback_args=args) except Exception as exc: - # exc_info with fail_fast to preserve stack trace info when raising on the client thread - # (matches previous behavior -- not sure why we wouldn't want stack trace in the other case) - e = sys.exc_info() if self._fail_fast and six.PY2 else exc - # If we're not failing fast and all executions are raising, there is a chance of recursing # here as subsequent requests are attempted. If we hit this threshold, schedule this result/retry # and let the event loop thread return. if self._exec_depth < self.max_error_recursion: - self._put_result(e, idx, False) + self._put_result(exc, idx, False) else: - self.session.submit(self._put_result, e, idx, False) + self.session.submit(self._put_result, exc, idx, False) self._exec_depth -= 1 def _on_success(self, result, future, idx): @@ -163,14 +157,6 @@ def _on_success(self, result, future, idx): def _on_error(self, result, future, idx): self._put_result(result, idx, False) - @staticmethod - def _raise(exc): - if six.PY2 and isinstance(exc, tuple): - (exc_type, value, traceback) = exc - six.reraise(exc_type, value, traceback) - else: - raise exc - class ConcurrentExecutorGenResults(_ConcurrentExecutor): @@ -190,7 +176,7 @@ def _results(self): try: self._condition.release() if self._fail_fast and not res[0]: - self._raise(res[1]) + raise res[1] yield res finally: self._condition.acquire() @@ -221,9 +207,9 @@ def _results(self): while self._current < self._exec_count: self._condition.wait() if self._exception and self._fail_fast: - self._raise(self._exception) + raise self._exception if self._exception and self._fail_fast: # raise the exception even if there was no wait - self._raise(self._exception) + raise self._exception return [r[1] for r in sorted(self._results_queue)] diff --git a/cassandra/connection.py b/cassandra/connection.py index 0869584663..195c93c889 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -19,8 +19,6 @@ from heapq import heappush, heappop import io import logging -import six -from six.moves import range import socket import struct import sys @@ -33,7 +31,7 @@ if 'gevent.monkey' in sys.modules: from gevent.queue import Queue, Empty else: - from six.moves.queue import Queue, Empty # noqa + from queue import Queue, Empty # noqa from cassandra import ConsistencyLevel, AuthenticationFailed, OperationTimedOut, ProtocolVersion from cassandra.marshal import int32_pack @@ -605,12 +603,6 @@ def wrapper(self, *args, **kwargs): DEFAULT_CQL_VERSION = '3.0.0' -if six.PY3: - def int_from_buf_item(i): - return i -else: - int_from_buf_item = ord - class _ConnectionIOBuffer(object): """ @@ -1122,7 +1114,7 @@ def _read_frame_header(self): buf = self._io_buffer.cql_frame_buffer.getvalue() pos = len(buf) if pos: - version = int_from_buf_item(buf[0]) & PROTOCOL_VERSION_MASK + version = buf[0] & PROTOCOL_VERSION_MASK if version not in ProtocolVersion.SUPPORTED_VERSIONS: raise ProtocolError("This version of the driver does not support protocol version %d" % version) frame_header = frame_header_v3 if version >= 3 else frame_header_v1_v2 @@ -1321,7 +1313,7 @@ def _handle_options_response(self, options_response): remote_supported_compressions) else: compression_type = None - if isinstance(self.compression, six.string_types): + if isinstance(self.compression, str): # the user picked a specific compression type ('snappy' or 'lz4') if self.compression not in remote_supported_compressions: raise ProtocolError( diff --git a/cassandra/cqlengine/__init__.py b/cassandra/cqlengine/__init__.py index e2a952d682..b9466e961b 100644 --- a/cassandra/cqlengine/__init__.py +++ b/cassandra/cqlengine/__init__.py @@ -12,9 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six - - # Caching constants. CACHING_ALL = "ALL" CACHING_KEYS_ONLY = "KEYS_ONLY" @@ -31,7 +28,4 @@ class ValidationError(CQLEngineException): class UnicodeMixin(object): - if six.PY3: - __str__ = lambda x: x.__unicode__() - else: - __str__ = lambda x: six.text_type(x).encode('utf-8') + __str__ = lambda x: x.__unicode__() diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 49116129fc..7c20ec6642 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -15,7 +15,6 @@ from copy import deepcopy, copy from datetime import date, datetime, timedelta import logging -import six from uuid import UUID as _UUID from cassandra import util @@ -327,7 +326,7 @@ class Blob(Column): def to_database(self, value): - if not isinstance(value, (six.binary_type, bytearray)): + if not isinstance(value, (bytes, bytearray)): raise Exception("expecting a binary, got a %s" % type(value)) val = super(Bytes, self).to_database(value) @@ -381,7 +380,7 @@ def __init__(self, min_length=None, max_length=None, **kwargs): def validate(self, value): value = super(Text, self).validate(value) - if not isinstance(value, (six.string_types, bytearray)) and value is not None: + if not isinstance(value, (str, bytearray)) and value is not None: raise ValidationError('{0} {1} is not a string'.format(self.column_name, type(value))) if self.max_length is not None: if value and len(value) > self.max_length: @@ -655,7 +654,7 @@ def validate(self, value): return if isinstance(val, _UUID): return val - if isinstance(val, six.string_types): + if isinstance(val, str): try: return _UUID(val) except ValueError: diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 90e6d90317..588e512a2d 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -14,7 +14,6 @@ from collections import defaultdict import logging -import six import threading from cassandra.cluster import Cluster, _ConfigMode, _NOT_SET, NoHostAvailable, UserTypeDoesNotExist, ConsistencyLevel @@ -346,7 +345,7 @@ def execute(query, params=None, consistency_level=None, timeout=NOT_SET, connect elif isinstance(query, BaseCQLStatement): params = query.get_context() query = SimpleStatement(str(query), consistency_level=consistency_level, fetch_size=query.fetch_size) - elif isinstance(query, six.string_types): + elif isinstance(query, str): query = SimpleStatement(query, consistency_level=consistency_level) log.debug(format_log_context('Query: {}, Params: {}'.format(query.query_string, params), connection=connection)) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 536bde6349..6790a117c7 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -16,7 +16,6 @@ import json import logging import os -import six import warnings from itertools import product @@ -232,7 +231,7 @@ def _sync_table(model, connection=None): except CQLEngineException as ex: # 1.2 doesn't return cf names, so we have to examine the exception # and ignore if it says the column family already exists - if "Cannot add already existing column family" not in six.text_type(ex): + if "Cannot add already existing column family" not in str(ex): raise else: log.debug(format_log_context("sync_table checking existing table %s", keyspace=ks_name, connection=connection), cf_name) @@ -477,7 +476,7 @@ def _update_options(model, connection=None): except KeyError: msg = format_log_context("Invalid table option: '%s'; known options: %s", keyspace=ks_name, connection=connection) raise KeyError(msg % (name, existing_options.keys())) - if isinstance(existing_value, six.string_types): + if isinstance(existing_value, str): if value != existing_value: update_options[name] = value else: diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index b3c7c9e37f..bc00001666 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -14,7 +14,6 @@ import logging import re -import six from warnings import warn from cassandra.cqlengine import CQLEngineException, ValidationError @@ -614,7 +613,7 @@ def __iter__(self): def __getitem__(self, key): """ Returns column's value. """ - if not isinstance(key, six.string_types): + if not isinstance(key, str): raise TypeError if key not in self._columns.keys(): raise KeyError @@ -622,7 +621,7 @@ def __getitem__(self, key): def __setitem__(self, key, val): """ Sets a column's value. """ - if not isinstance(key, six.string_types): + if not isinstance(key, str): raise TypeError if key not in self._columns.keys(): raise KeyError @@ -1042,8 +1041,7 @@ def _transform_column(col_name, col_obj): return klass -@six.add_metaclass(ModelMetaClass) -class Model(BaseModel): +class Model(BaseModel, metaclass=ModelMetaClass): __abstract__ = True """ *Optional.* Indicates that this model is only intended to be used as a base class for other models. diff --git a/cassandra/cqlengine/operators.py b/cassandra/cqlengine/operators.py index bba505583c..2adf51758d 100644 --- a/cassandra/cqlengine/operators.py +++ b/cassandra/cqlengine/operators.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import six from cassandra.cqlengine import UnicodeMixin @@ -44,8 +43,7 @@ def __init__(cls, name, bases, dct): super(OpMapMeta, cls).__init__(name, bases, dct) -@six.add_metaclass(OpMapMeta) -class BaseWhereOperator(BaseQueryOperator): +class BaseWhereOperator(BaseQueryOperator, metaclass=OpMapMeta): """ base operator used for where clauses """ @classmethod def get_operator(cls, symbol): diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 1978d319f4..73f48a5928 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -16,7 +16,6 @@ from datetime import datetime, timedelta from functools import partial import time -import six from warnings import warn from cassandra.query import SimpleStatement, BatchType as CBatchType, BatchStatement @@ -103,29 +102,29 @@ def in_(self, item): used where you'd typically want to use python's `in` operator """ - return WhereClause(six.text_type(self), InOperator(), item) + return WhereClause(str(self), InOperator(), item) def contains_(self, item): """ Returns a CONTAINS operator """ - return WhereClause(six.text_type(self), ContainsOperator(), item) + return WhereClause(str(self), ContainsOperator(), item) def __eq__(self, other): - return WhereClause(six.text_type(self), EqualsOperator(), self._to_database(other)) + return WhereClause(str(self), EqualsOperator(), self._to_database(other)) def __gt__(self, other): - return WhereClause(six.text_type(self), GreaterThanOperator(), self._to_database(other)) + return WhereClause(str(self), GreaterThanOperator(), self._to_database(other)) def __ge__(self, other): - return WhereClause(six.text_type(self), GreaterThanOrEqualOperator(), self._to_database(other)) + return WhereClause(str(self), GreaterThanOrEqualOperator(), self._to_database(other)) def __lt__(self, other): - return WhereClause(six.text_type(self), LessThanOperator(), self._to_database(other)) + return WhereClause(str(self), LessThanOperator(), self._to_database(other)) def __le__(self, other): - return WhereClause(six.text_type(self), LessThanOrEqualOperator(), self._to_database(other)) + return WhereClause(str(self), LessThanOrEqualOperator(), self._to_database(other)) class BatchType(object): @@ -231,7 +230,7 @@ def execute(self): opener = 'BEGIN ' + (str(batch_type) + ' ' if batch_type else '') + ' BATCH' if self.timestamp: - if isinstance(self.timestamp, six.integer_types): + if isinstance(self.timestamp, int): ts = self.timestamp elif isinstance(self.timestamp, (datetime, timedelta)): ts = self.timestamp @@ -407,7 +406,7 @@ def _execute(self, statement): return result def __unicode__(self): - return six.text_type(self._select_query()) + return str(self._select_query()) def __str__(self): return str(self.__unicode__()) @@ -604,7 +603,7 @@ def batch(self, batch_obj): def first(self): try: - return six.next(iter(self)) + return next(iter(self)) except StopIteration: return None @@ -901,7 +900,7 @@ def limit(self, v): if v is None: v = 0 - if not isinstance(v, six.integer_types): + if not isinstance(v, int): raise TypeError if v == self._limit: return self @@ -925,7 +924,7 @@ def fetch_size(self, v): print(user) """ - if not isinstance(v, six.integer_types): + if not isinstance(v, int): raise TypeError if v == self._fetch_size: return self diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index c6ceb16607..d92d0b2452 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -14,8 +14,6 @@ from datetime import datetime, timedelta import time -import six -from six.moves import filter from cassandra.query import FETCH_SIZE_UNSET from cassandra.cqlengine import columns @@ -114,7 +112,7 @@ def __init__(self, field, operator, value, quote_field=True): def __unicode__(self): field = ('"{0}"' if self.quote_field else '{0}').format(self.field) - return u'{0} {1} {2}'.format(field, self.operator, six.text_type(self.query_value)) + return u'{0} {1} {2}'.format(field, self.operator, str(self.query_value)) def __hash__(self): return super(WhereClause, self).__hash__() ^ hash(self.operator) @@ -186,8 +184,7 @@ def __init__(cls, name, bases, dct): super(ContainerUpdateTypeMapMeta, cls).__init__(name, bases, dct) -@six.add_metaclass(ContainerUpdateTypeMapMeta) -class ContainerUpdateClause(AssignmentClause): +class ContainerUpdateClause(AssignmentClause, metaclass=ContainerUpdateTypeMapMeta): def __init__(self, field, value, operation=None, previous=None): super(ContainerUpdateClause, self).__init__(field, value) @@ -563,7 +560,7 @@ def add_conditional_clause(self, clause): self.conditionals.append(clause) def _get_conditionals(self): - return 'IF {0}'.format(' AND '.join([six.text_type(c) for c in self.conditionals])) + return 'IF {0}'.format(' AND '.join([str(c) for c in self.conditionals])) def get_context_size(self): return len(self.get_context()) @@ -584,7 +581,7 @@ def timestamp_normalized(self): if not self.timestamp: return None - if isinstance(self.timestamp, six.integer_types): + if isinstance(self.timestamp, int): return self.timestamp if isinstance(self.timestamp, timedelta): @@ -602,7 +599,7 @@ def __repr__(self): @property def _where(self): - return 'WHERE {0}'.format(' AND '.join([six.text_type(c) for c in self.where_clauses])) + return 'WHERE {0}'.format(' AND '.join([str(c) for c in self.where_clauses])) class SelectStatement(BaseCQLStatement): @@ -629,10 +626,10 @@ def __init__(self, fetch_size=fetch_size ) - self.fields = [fields] if isinstance(fields, six.string_types) else (fields or []) + self.fields = [fields] if isinstance(fields, str) else (fields or []) self.distinct_fields = distinct_fields self.count = count - self.order_by = [order_by] if isinstance(order_by, six.string_types) else order_by + self.order_by = [order_by] if isinstance(order_by, str) else order_by self.limit = limit self.allow_filtering = allow_filtering @@ -653,7 +650,7 @@ def __unicode__(self): qs += [self._where] if self.order_by and not self.count: - qs += ['ORDER BY {0}'.format(', '.join(six.text_type(o) for o in self.order_by))] + qs += ['ORDER BY {0}'.format(', '.join(str(o) for o in self.order_by))] if self.limit: qs += ['LIMIT {0}'.format(self.limit)] @@ -798,7 +795,7 @@ def __unicode__(self): qs += ["USING {0}".format(" AND ".join(using_options))] qs += ['SET'] - qs += [', '.join([six.text_type(c) for c in self.assignments])] + qs += [', '.join([str(c) for c in self.assignments])] if self.where_clauses: qs += [self._where] @@ -849,7 +846,7 @@ def __init__(self, table, fields=None, where=None, timestamp=None, conditionals= conditionals=conditionals ) self.fields = [] - if isinstance(fields, six.string_types): + if isinstance(fields, str): fields = [fields] for field in fields or []: self.add_field(field) @@ -874,7 +871,7 @@ def get_context(self): return ctx def add_field(self, field): - if isinstance(field, six.string_types): + if isinstance(field, str): field = FieldDeleteClause(field) if not isinstance(field, BaseClause): raise StatementException("only instances of AssignmentClause can be added to statements") diff --git a/cassandra/cqlengine/usertype.py b/cassandra/cqlengine/usertype.py index 155068d99e..7fa85f1919 100644 --- a/cassandra/cqlengine/usertype.py +++ b/cassandra/cqlengine/usertype.py @@ -13,7 +13,6 @@ # limitations under the License. import re -import six from cassandra.util import OrderedDict from cassandra.cqlengine import CQLEngineException @@ -72,7 +71,7 @@ def __ne__(self, other): return not self.__eq__(other) def __str__(self): - return "{{{0}}}".format(', '.join("'{0}': {1}".format(k, getattr(self, k)) for k, v in six.iteritems(self._values))) + return "{{{0}}}".format(', '.join("'{0}': {1}".format(k, getattr(self, k)) for k, v in self._values.items())) def has_changed_fields(self): return any(v.changed for v in self._values.values()) @@ -93,14 +92,14 @@ def __getattr__(self, attr): raise AttributeError(attr) def __getitem__(self, key): - if not isinstance(key, six.string_types): + if not isinstance(key, str): raise TypeError if key not in self._fields.keys(): raise KeyError return getattr(self, key) def __setitem__(self, key, val): - if not isinstance(key, six.string_types): + if not isinstance(key, str): raise TypeError if key not in self._fields.keys(): raise KeyError @@ -198,8 +197,7 @@ def _transform_column(field_name, field_obj): return klass -@six.add_metaclass(UserTypeMetaClass) -class UserType(BaseUserType): +class UserType(BaseUserType, metaclass=UserTypeMetaClass): """ This class is used to model User Defined Types. To define a type, declare a class inheriting from this, and assign field types as class attributes: diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 6cc89aafbb..d1d7e888f9 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -39,8 +39,6 @@ import re import socket import time -import six -from six.moves import range import struct import sys from uuid import UUID @@ -54,10 +52,7 @@ from cassandra import util _little_endian_flag = 1 # we always serialize LE -if six.PY3: - import ipaddress - -_ord = ord if six.PY2 else lambda x: x +import ipaddress apache_cassandra_type_prefix = 'org.apache.cassandra.db.marshal.' @@ -66,16 +61,12 @@ log = logging.getLogger(__name__) -if six.PY3: - _number_types = frozenset((int, float)) - long = int +_number_types = frozenset((int, float)) + - def _name_from_hex_string(encoded_name): - bin_str = unhexlify(encoded_name) - return bin_str.decode('ascii') -else: - _number_types = frozenset((int, long, float)) - _name_from_hex_string = unhexlify +def _name_from_hex_string(encoded_name): + bin_str = unhexlify(encoded_name) + return bin_str.decode('ascii') def trim_if_startswith(s, prefix): @@ -279,8 +270,7 @@ def __str__(self): EMPTY = EmptyValue() -@six.add_metaclass(CassandraTypeType) -class _CassandraType(object): +class _CassandraType(object, metaclass=CassandraTypeType): subtypes = () num_subtypes = 0 empty_binary_ok = False @@ -383,8 +373,6 @@ def apply_parameters(cls, subtypes, names=None): raise ValueError("%s types require %d subtypes (%d given)" % (cls.typename, cls.num_subtypes, len(subtypes))) newname = cls.cass_parameterized_type_with(subtypes) - if six.PY2 and isinstance(newname, unicode): - newname = newname.encode('utf-8') return type(newname, (cls,), {'subtypes': subtypes, 'cassname': cls.cassname, 'fieldnames': names}) @classmethod @@ -415,16 +403,10 @@ class _UnrecognizedType(_CassandraType): num_subtypes = 'UNKNOWN' -if six.PY3: - def mkUnrecognizedType(casstypename): - return CassandraTypeType(casstypename, - (_UnrecognizedType,), - {'typename': "'%s'" % casstypename}) -else: - def mkUnrecognizedType(casstypename): # noqa - return CassandraTypeType(casstypename.encode('utf8'), - (_UnrecognizedType,), - {'typename': "'%s'" % casstypename}) +def mkUnrecognizedType(casstypename): + return CassandraTypeType(casstypename, + (_UnrecognizedType,), + {'typename': "'%s'" % casstypename}) class BytesType(_CassandraType): @@ -433,7 +415,7 @@ class BytesType(_CassandraType): @staticmethod def serialize(val, protocol_version): - return six.binary_type(val) + return bytes(val) class DecimalType(_CassandraType): @@ -500,25 +482,20 @@ def serialize(byts, protocol_version): return int8_pack(byts) -if six.PY2: - class AsciiType(_CassandraType): - typename = 'ascii' - empty_binary_ok = True -else: - class AsciiType(_CassandraType): - typename = 'ascii' - empty_binary_ok = True +class AsciiType(_CassandraType): + typename = 'ascii' + empty_binary_ok = True - @staticmethod - def deserialize(byts, protocol_version): - return byts.decode('ascii') + @staticmethod + def deserialize(byts, protocol_version): + return byts.decode('ascii') - @staticmethod - def serialize(var, protocol_version): - try: - return var.encode('ascii') - except UnicodeDecodeError: - return var + @staticmethod + def serialize(var, protocol_version): + try: + return var.encode('ascii') + except UnicodeDecodeError: + return var class FloatType(_CassandraType): @@ -603,7 +580,7 @@ def serialize(addr, protocol_version): # since we've already determined the AF return socket.inet_aton(addr) except: - if six.PY3 and isinstance(addr, (ipaddress.IPv4Address, ipaddress.IPv6Address)): + if isinstance(addr, (ipaddress.IPv4Address, ipaddress.IPv6Address)): return addr.packed raise ValueError("can't interpret %r as an inet address" % (addr,)) @@ -662,7 +639,7 @@ def serialize(v, protocol_version): raise TypeError('DateType arguments must be a datetime, date, or timestamp') timestamp = v - return int64_pack(long(timestamp)) + return int64_pack(int(timestamp)) class TimestampType(DateType): @@ -706,7 +683,7 @@ def serialize(val, protocol_version): try: days = val.days_from_epoch except AttributeError: - if isinstance(val, six.integer_types): + if isinstance(val, int): # the DB wants offset int values, but util.Date init takes days from epoch # here we assume int values are offset, as they would appear in CQL # short circuit to avoid subtracting just to add offset @@ -826,7 +803,7 @@ def deserialize_safe(cls, byts, protocol_version): @classmethod def serialize_safe(cls, items, protocol_version): - if isinstance(items, six.string_types): + if isinstance(items, str): raise TypeError("Received a string for a type that expects a sequence") subtype, = cls.subtypes @@ -900,7 +877,7 @@ def serialize_safe(cls, themap, protocol_version): buf = io.BytesIO() buf.write(pack(len(themap))) try: - items = six.iteritems(themap) + items = themap.items() except AttributeError: raise TypeError("Got a non-map object for a map value") inner_proto = max(3, protocol_version) @@ -975,9 +952,6 @@ class UserType(TupleType): def make_udt_class(cls, keyspace, udt_name, field_names, field_types): assert len(field_names) == len(field_types) - if six.PY2 and isinstance(udt_name, unicode): - udt_name = udt_name.encode('utf-8') - instance = cls._cache.get((keyspace, udt_name)) if not instance or instance.fieldnames != field_names or instance.subtypes != field_types: instance = type(udt_name, (cls,), {'subtypes': field_types, @@ -992,8 +966,6 @@ def make_udt_class(cls, keyspace, udt_name, field_names, field_types): @classmethod def evict_udt_class(cls, keyspace, udt_name): - if six.PY2 and isinstance(udt_name, unicode): - udt_name = udt_name.encode('utf-8') try: del cls._cache[(keyspace, udt_name)] except KeyError: @@ -1150,7 +1122,7 @@ def serialize_safe(cls, val, protocol_version): def is_counter_type(t): - if isinstance(t, six.string_types): + if isinstance(t, str): t = lookup_casstype(t) return issubclass(t, CounterColumnType) @@ -1186,7 +1158,7 @@ def serialize(val, protocol_version): @staticmethod def deserialize(byts, protocol_version): - is_little_endian = bool(_ord(byts[0])) + is_little_endian = bool(byts[0]) point = point_le if is_little_endian else point_be return util.Point(*point.unpack_from(byts, 5)) # ofs = endian byte + int type @@ -1203,7 +1175,7 @@ def serialize(val, protocol_version): @staticmethod def deserialize(byts, protocol_version): - is_little_endian = bool(_ord(byts[0])) + is_little_endian = bool(byts[0]) point = point_le if is_little_endian else point_be coords = ((point.unpack_from(byts, offset) for offset in range(1 + 4 + 4, len(byts), point.size))) # start = endian + int type + int count return util.LineString(coords) @@ -1232,7 +1204,7 @@ def serialize(val, protocol_version): @staticmethod def deserialize(byts, protocol_version): - is_little_endian = bool(_ord(byts[0])) + is_little_endian = bool(byts[0]) if is_little_endian: int_fmt = ' MAX_INT32 or value < MIN_INT32): + if type(value) is int and (value > MAX_INT32 or value < MIN_INT32): return Int64TypeIO return Int32TypeIO @@ -164,9 +158,7 @@ class Int64TypeIO(IntegerTypeIO): @classmethod def deserialize(cls, value, reader=None): - if six.PY3: - return value - return long(value) + return value class FloatTypeIO(GraphSONTypeIO): @@ -274,8 +266,7 @@ class BlobTypeIO(GraphSONTypeIO): @classmethod def serialize(cls, value, writer=None): value = base64.b64encode(value) - if six.PY3: - value = value.decode('utf-8') + value = value.decode('utf-8') return value @classmethod @@ -343,7 +334,7 @@ def deserialize(cls, value, reader=None): raise ValueError('Invalid duration: {0}'.format(value)) duration = {k: float(v) if v is not None else 0 - for k, v in six.iteritems(duration.groupdict())} + for k, v in duration.groupdict().items()} return datetime.timedelta(days=duration['days'], hours=duration['hours'], minutes=duration['minutes'], seconds=duration['seconds']) @@ -512,7 +503,7 @@ class JsonMapTypeIO(GraphSONTypeIO): @classmethod def serialize(cls, value, writer=None): out = {} - for k, v in six.iteritems(value): + for k, v in value.items(): out[k] = writer.serialize(v, writer) return out @@ -528,7 +519,7 @@ class MapTypeIO(GraphSONTypeIO): def definition(cls, value, writer=None): out = OrderedDict([('cqlType', cls.cql_type)]) out['definition'] = [] - for k, v in six.iteritems(value): + for k, v in value.items(): # we just need the first pair to write the def out['definition'].append(writer.definition(k)) out['definition'].append(writer.definition(v)) @@ -538,7 +529,7 @@ def definition(cls, value, writer=None): @classmethod def serialize(cls, value, writer=None): out = [] - for k, v in six.iteritems(value): + for k, v in value.items(): out.append(writer.serialize(k, writer)) out.append(writer.serialize(v, writer)) @@ -841,16 +832,10 @@ class GraphSON1Serializer(_BaseGraphSONSerializer): ]) -if ipaddress: - GraphSON1Serializer.register(ipaddress.IPv4Address, InetTypeIO) - GraphSON1Serializer.register(ipaddress.IPv6Address, InetTypeIO) - -if six.PY2: - GraphSON1Serializer.register(buffer, ByteBufferTypeIO) - GraphSON1Serializer.register(unicode, TextTypeIO) -else: - GraphSON1Serializer.register(memoryview, ByteBufferTypeIO) - GraphSON1Serializer.register(bytes, ByteBufferTypeIO) +GraphSON1Serializer.register(ipaddress.IPv4Address, InetTypeIO) +GraphSON1Serializer.register(ipaddress.IPv6Address, InetTypeIO) +GraphSON1Serializer.register(memoryview, ByteBufferTypeIO) +GraphSON1Serializer.register(bytes, ByteBufferTypeIO) class _BaseGraphSONDeserializer(object): @@ -922,9 +907,7 @@ def deserialize_int(cls, value): @classmethod def deserialize_bigint(cls, value): - if six.PY3: - return cls.deserialize_int(value) - return long(value) + return cls.deserialize_int(value) @classmethod def deserialize_double(cls, value): @@ -1007,8 +990,6 @@ def serialize(self, value, writer=None): GraphSON2Serializer.register(int, IntegerTypeIO) -if six.PY2: - GraphSON2Serializer.register(long, IntegerTypeIO) class GraphSON2Deserializer(_BaseGraphSONDeserializer): @@ -1055,7 +1036,7 @@ def deserialize(self, obj): except KeyError: pass # list and map are treated as normal json objs (could be isolated deserializers) - return {self.deserialize(k): self.deserialize(v) for k, v in six.iteritems(obj)} + return {self.deserialize(k): self.deserialize(v) for k, v in obj.items()} elif isinstance(obj, list): return [self.deserialize(o) for o in obj] else: @@ -1109,7 +1090,7 @@ def get_serializer(self, value): if self.user_types is None: try: user_types = self.context['cluster']._user_types[self.context['graph_name']] - self.user_types = dict(map(reversed, six.iteritems(user_types))) + self.user_types = dict(map(reversed, user_types.items())) except KeyError: self.user_types = {} diff --git a/cassandra/datastax/graph/query.py b/cassandra/datastax/graph/query.py index 7c0e265dbf..866df7a94c 100644 --- a/cassandra/datastax/graph/query.py +++ b/cassandra/datastax/graph/query.py @@ -15,8 +15,6 @@ import json from warnings import warn -import six - from cassandra import ConsistencyLevel from cassandra.query import Statement, SimpleStatement from cassandra.datastax.graph.types import Vertex, Edge, Path, VertexProperty @@ -77,7 +75,7 @@ def __init__(self, **kwargs): self._graph_options = {} kwargs.setdefault('graph_source', 'g') kwargs.setdefault('graph_language', GraphOptions.DEFAULT_GRAPH_LANGUAGE) - for attr, value in six.iteritems(kwargs): + for attr, value in kwargs.items(): if attr not in _graph_option_names: warn("Unknown keyword argument received for GraphOptions: {0}".format(attr)) setattr(self, attr, value) @@ -103,7 +101,7 @@ def get_options_map(self, other_options=None): for cl in ('graph-write-consistency', 'graph-read-consistency'): cl_enum = options.get(cl) if cl_enum is not None: - options[cl] = six.b(ConsistencyLevel.value_to_name[cl_enum]) + options[cl] = ConsistencyLevel.value_to_name[cl_enum].encode() return options def set_source_default(self): @@ -157,8 +155,8 @@ def get(self, key=opt[2]): def set(self, value, key=opt[2]): if value is not None: # normalize text here so it doesn't have to be done every time we get options map - if isinstance(value, six.text_type) and not isinstance(value, six.binary_type): - value = six.b(value) + if isinstance(value, str): + value = value.encode() self._graph_options[key] = value else: self._graph_options.pop(key, None) @@ -278,7 +276,7 @@ def __getattr__(self, attr): raise AttributeError("Result has no top-level attribute %r" % (attr,)) def __getitem__(self, item): - if isinstance(self.value, dict) and isinstance(item, six.string_types): + if isinstance(self.value, dict) and isinstance(item, str): return self.value[item] elif isinstance(self.value, list) and isinstance(item, int): return self.value[item] diff --git a/cassandra/datastax/insights/registry.py b/cassandra/datastax/insights/registry.py index 3dd1d255ae..03daebd86e 100644 --- a/cassandra/datastax/insights/registry.py +++ b/cassandra/datastax/insights/registry.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six from collections import OrderedDict from warnings import warn @@ -59,7 +58,7 @@ def _get_serializer(self, cls): try: return self._mapping_dict[cls] except KeyError: - for registered_cls, serializer in six.iteritems(self._mapping_dict): + for registered_cls, serializer in self._mapping_dict.items(): if issubclass(cls, registered_cls): return self._mapping_dict[registered_cls] raise ValueError diff --git a/cassandra/datastax/insights/reporter.py b/cassandra/datastax/insights/reporter.py index b05a88deb0..83205fc458 100644 --- a/cassandra/datastax/insights/reporter.py +++ b/cassandra/datastax/insights/reporter.py @@ -24,7 +24,6 @@ import sys from threading import Event, Thread import time -import six from cassandra.policies import HostDistance from cassandra.util import ms_timestamp_from_datetime @@ -199,9 +198,9 @@ def _get_startup_data(self): }, 'platformInfo': { 'os': { - 'name': uname_info.system if six.PY3 else uname_info[0], - 'version': uname_info.release if six.PY3 else uname_info[2], - 'arch': uname_info.machine if six.PY3 else uname_info[4] + 'name': uname_info.system, + 'version': uname_info.release, + 'arch': uname_info.machine }, 'cpus': { 'length': multiprocessing.cpu_count(), diff --git a/cassandra/datastax/insights/serializers.py b/cassandra/datastax/insights/serializers.py index aec4467a6a..289c165e8a 100644 --- a/cassandra/datastax/insights/serializers.py +++ b/cassandra/datastax/insights/serializers.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six - def initialize_registry(insights_registry): # This will be called from the cluster module, so we put all this behavior @@ -203,8 +201,8 @@ def graph_options_insights_serializer(options): 'language': options.graph_language, 'graphProtocol': options.graph_protocol } - updates = {k: v.decode('utf-8') for k, v in six.iteritems(rv) - if isinstance(v, six.binary_type)} + updates = {k: v.decode('utf-8') for k, v in rv.items() + if isinstance(v, bytes)} rv.update(updates) return rv diff --git a/cassandra/deserializers.pyx b/cassandra/deserializers.pyx index 7de6949099..7c256674b0 100644 --- a/cassandra/deserializers.pyx +++ b/cassandra/deserializers.pyx @@ -29,8 +29,6 @@ from uuid import UUID from cassandra import cqltypes from cassandra import util -cdef bint PY2 = six.PY2 - cdef class Deserializer: """Cython-based deserializer class for a cqltype""" @@ -90,8 +88,6 @@ cdef class DesAsciiType(Deserializer): cdef deserialize(self, Buffer *buf, int protocol_version): if buf.size == 0: return "" - if PY2: - return to_bytes(buf) return to_bytes(buf).decode('ascii') diff --git a/cassandra/encoder.py b/cassandra/encoder.py index f2c3f8dfed..31d90549f4 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -27,28 +27,15 @@ import sys import types from uuid import UUID -import six +import ipaddress from cassandra.util import (OrderedDict, OrderedMap, OrderedMapSerializedKey, sortedset, Time, Date, Point, LineString, Polygon) -if six.PY3: - import ipaddress - -if six.PY3: - long = int - def cql_quote(term): - # The ordering of this method is important for the result of this method to - # be a native str type (for both Python 2 and 3) - if isinstance(term, str): return "'%s'" % str(term).replace("'", "''") - # This branch of the if statement will only be used by Python 2 to catch - # unicode strings, text_type is used to prevent type errors with Python 3. - elif isinstance(term, six.text_type): - return "'%s'" % term.encode('utf8').replace("'", "''") else: return str(term) @@ -97,21 +84,13 @@ def __init__(self): Polygon: self.cql_encode_str_quoted } - if six.PY2: - self.mapping.update({ - unicode: self.cql_encode_unicode, - buffer: self.cql_encode_bytes, - long: self.cql_encode_object, - types.NoneType: self.cql_encode_none, - }) - else: - self.mapping.update({ - memoryview: self.cql_encode_bytes, - bytes: self.cql_encode_bytes, - type(None): self.cql_encode_none, - ipaddress.IPv4Address: self.cql_encode_ipaddress, - ipaddress.IPv6Address: self.cql_encode_ipaddress - }) + self.mapping.update({ + memoryview: self.cql_encode_bytes, + bytes: self.cql_encode_bytes, + type(None): self.cql_encode_none, + ipaddress.IPv4Address: self.cql_encode_ipaddress, + ipaddress.IPv6Address: self.cql_encode_ipaddress + }) def cql_encode_none(self, val): """ @@ -134,16 +113,8 @@ def cql_encode_str(self, val): def cql_encode_str_quoted(self, val): return "'%s'" % val - if six.PY3: - def cql_encode_bytes(self, val): - return (b'0x' + hexlify(val)).decode('utf-8') - elif sys.version_info >= (2, 7): - def cql_encode_bytes(self, val): # noqa - return b'0x' + hexlify(val) - else: - # python 2.6 requires string or read-only buffer for hexlify - def cql_encode_bytes(self, val): # noqa - return b'0x' + hexlify(buffer(val)) + def cql_encode_bytes(self, val): + return (b'0x' + hexlify(val)).decode('utf-8') def cql_encode_object(self, val): """ @@ -169,7 +140,7 @@ def cql_encode_datetime(self, val): with millisecond precision. """ timestamp = calendar.timegm(val.utctimetuple()) - return str(long(timestamp * 1e3 + getattr(val, 'microsecond', 0) / 1e3)) + return str(int(timestamp * 1e3 + getattr(val, 'microsecond', 0) / 1e3)) def cql_encode_date(self, val): """ @@ -214,7 +185,7 @@ def cql_encode_map_collection(self, val): return '{%s}' % ', '.join('%s: %s' % ( self.mapping.get(type(k), self.cql_encode_object)(k), self.mapping.get(type(v), self.cql_encode_object)(v) - ) for k, v in six.iteritems(val)) + ) for k, v in val.items()) def cql_encode_list_collection(self, val): """ @@ -236,14 +207,13 @@ def cql_encode_all_types(self, val, as_text_type=False): if :attr:`~Encoder.mapping` does not contain an entry for the type. """ encoded = self.mapping.get(type(val), self.cql_encode_object)(val) - if as_text_type and not isinstance(encoded, six.text_type): + if as_text_type and not isinstance(encoded, str): return encoded.decode('utf-8') return encoded - if six.PY3: - def cql_encode_ipaddress(self, val): - """ - Converts an ipaddress (IPV4Address, IPV6Address) to a CQL string. This - is suitable for ``inet`` type columns. - """ - return "'%s'" % val.compressed + def cql_encode_ipaddress(self, val): + """ + Converts an ipaddress (IPV4Address, IPV6Address) to a CQL string. This + is suitable for ``inet`` type columns. + """ + return "'%s'" % val.compressed diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 0abdbbfe0a..a45d657828 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -24,7 +24,6 @@ import sys import ssl -from six.moves import range try: from weakref import WeakSet diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index 162661f468..42874036d5 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -23,8 +23,6 @@ from threading import Event import time -from six.moves import xrange - from cassandra.connection import Connection, ConnectionShutdown, Timer, TimerManager try: from eventlet.green.OpenSSL import SSL @@ -190,5 +188,5 @@ def handle_read(self): def push(self, data): chunk_size = self.out_buffer_size - for i in xrange(0, len(data), chunk_size): + for i in range(0, len(data), chunk_size): self._write_queue.put(data[i:i + chunk_size]) diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index ebc664d485..4f1f158aa7 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -20,7 +20,6 @@ import logging import time -from six.moves import range from cassandra.connection import Connection, ConnectionShutdown, Timer, TimerManager diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 54e2d0de03..484690da89 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -21,7 +21,6 @@ from threading import Lock, Thread import time -from six.moves import range from cassandra.connection import (Connection, ConnectionShutdown, NONBLOCKING, Timer, TimerManager) diff --git a/cassandra/marshal.py b/cassandra/marshal.py index 43cb627b08..726f0819eb 100644 --- a/cassandra/marshal.py +++ b/cassandra/marshal.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six import struct @@ -45,35 +44,16 @@ def _make_packer(format_string): v3_header_unpack = v3_header_struct.unpack -if six.PY3: - def byte2int(b): - return b - - - def varint_unpack(term): - val = int(''.join("%02x" % i for i in term), 16) - if (term[0] & 128) != 0: - len_term = len(term) # pulling this out of the expression to avoid overflow in cython optimized code - val -= 1 << (len_term * 8) - return val -else: - def byte2int(b): - return ord(b) - - - def varint_unpack(term): # noqa - val = int(term.encode('hex'), 16) - if (ord(term[0]) & 128) != 0: - len_term = len(term) # pulling this out of the expression to avoid overflow in cython optimized code - val = val - (1 << (len_term * 8)) - return val +def varint_unpack(term): + val = int(''.join("%02x" % i for i in term), 16) + if (term[0] & 128) != 0: + len_term = len(term) # pulling this out of the expression to avoid overflow in cython optimized code + val -= 1 << (len_term * 8) + return val def bit_length(n): - if six.PY3 or isinstance(n, int): - return int.bit_length(n) - else: - return long.bit_length(n) + return int.bit_length(n) def varint_pack(big): @@ -91,7 +71,7 @@ def varint_pack(big): if pos and revbytes[-1] & 0x80: revbytes.append(0) revbytes.reverse() - return six.binary_type(revbytes) + return bytes(revbytes) point_be = struct.Struct('>dd') @@ -113,7 +93,7 @@ def vints_unpack(term): # noqa values = [] n = 0 while n < len(term): - first_byte = byte2int(term[n]) + first_byte = term[n] if (first_byte & 128) == 0: val = first_byte @@ -124,7 +104,7 @@ def vints_unpack(term): # noqa while n < end: n += 1 val <<= 8 - val |= byte2int(term[n]) & 0xff + val |= term[n] & 0xff n += 1 values.append(decode_zig_zag(val)) @@ -162,4 +142,4 @@ def vints_pack(values): revbytes.append(abs(v)) revbytes.reverse() - return six.binary_type(revbytes) + return bytes(revbytes) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index a82fbe48e3..f52bfd9317 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -15,13 +15,12 @@ from binascii import unhexlify from bisect import bisect_left from collections import defaultdict +from collections.abc import Mapping from functools import total_ordering from hashlib import md5 import json import logging import re -import six -from six.moves import zip import sys from threading import RLock import struct @@ -42,7 +41,6 @@ from cassandra.util import OrderedDict, Version from cassandra.pool import HostDistance from cassandra.connection import EndPoint -from cassandra.compat import Mapping log = logging.getLogger(__name__) @@ -292,7 +290,7 @@ def rebuild_token_map(self, partitioner, token_map): token_to_host_owner = {} ring = [] - for host, token_strings in six.iteritems(token_map): + for host, token_strings in token_map.items(): for token_string in token_strings: token = token_class.from_string(token_string) ring.append(token) @@ -350,7 +348,7 @@ def get_host(self, endpoint_or_address, port=None): return self._hosts.get(endpoint_or_address) def _get_host_by_address(self, address, port=None): - for host in six.itervalues(self._hosts): + for host in self._hosts.values(): if (host.broadcast_rpc_address == address and (port is None or host.broadcast_rpc_port is None or host.broadcast_rpc_port == port)): return host @@ -387,8 +385,7 @@ def __new__(metacls, name, bases, dct): -@six.add_metaclass(ReplicationStrategyTypeType) -class _ReplicationStrategy(object): +class _ReplicationStrategy(object, metaclass=ReplicationStrategyTypeType): options_map = None @classmethod @@ -627,7 +624,7 @@ def make_token_replica_map(self, token_to_host_owner, ring): racks_this_dc = dc_racks[dc] hosts_this_dc = len(hosts_per_dc[dc]) - for token_offset_index in six.moves.range(index, index+num_tokens): + for token_offset_index in range(index, index+num_tokens): if token_offset_index >= len(token_offsets): token_offset_index = token_offset_index - len(token_offsets) @@ -854,7 +851,7 @@ def _add_table_metadata(self, table_metadata): # note the intentional order of add before remove # this makes sure the maps are never absent something that existed before this update - for index_name, index_metadata in six.iteritems(table_metadata.indexes): + for index_name, index_metadata in table_metadata.indexes.items(): self.indexes[index_name] = index_metadata for index_name in (n for n in old_indexes if n not in table_metadata.indexes): @@ -1341,7 +1338,7 @@ def _all_as_cql(self): if self.extensions: registry = _RegisteredExtensionType._extension_registry - for k in six.viewkeys(registry) & self.extensions: # no viewkeys on OrderedMapSerializeKey + for k in registry.keys() & self.extensions: # no viewkeys on OrderedMapSerializeKey ext = registry[k] cql = ext.after_table_cql(self, k, self.extensions[k]) if cql: @@ -1557,8 +1554,7 @@ def __new__(mcs, name, bases, dct): return cls -@six.add_metaclass(_RegisteredExtensionType) -class RegisteredTableExtension(TableExtensionInterface): +class RegisteredTableExtension(TableExtensionInterface, metaclass=_RegisteredExtensionType): """ Extending this class registers it by name (associated by key in the `system_schema.tables.extensions` map). """ @@ -1864,7 +1860,7 @@ class MD5Token(HashToken): @classmethod def hash_fn(cls, key): - if isinstance(key, six.text_type): + if isinstance(key, str): key = key.encode('UTF-8') return abs(varint_unpack(md5(key).digest())) @@ -1878,7 +1874,7 @@ class BytesToken(Token): def from_string(cls, token_string): """ `token_string` should be the string representation from the server. """ # unhexlify works fine with unicode input in everythin but pypy3, where it Raises "TypeError: 'str' does not support the buffer interface" - if isinstance(token_string, six.text_type): + if isinstance(token_string, str): token_string = token_string.encode('ascii') # The BOP stores a hex string return cls(unhexlify(token_string)) @@ -2970,17 +2966,17 @@ def _build_table_graph_metadata(table_meta): try: # Make sure we process vertices before edges - for table_meta in [t for t in six.itervalues(keyspace_meta.tables) + for table_meta in [t for t in keyspace_meta.tables.values() if t.name in self.keyspace_table_vertex_rows[keyspace_meta.name]]: _build_table_graph_metadata(table_meta) # all other tables... - for table_meta in [t for t in six.itervalues(keyspace_meta.tables) + for table_meta in [t for t in keyspace_meta.tables.values() if t.name not in self.keyspace_table_vertex_rows[keyspace_meta.name]]: _build_table_graph_metadata(table_meta) except Exception: # schema error, remove all graph metadata for this keyspace - for t in six.itervalues(keyspace_meta.tables): + for t in keyspace_meta.tables.values(): t.edge = t.vertex = None keyspace_meta._exc_info = sys.exc_info() log.exception("Error while parsing graph metadata for keyspace %s", keyspace_meta.name) @@ -3194,7 +3190,7 @@ def as_cql_query(self, formatted=False): if self.extensions: registry = _RegisteredExtensionType._extension_registry - for k in six.viewkeys(registry) & self.extensions: # no viewkeys on OrderedMapSerializeKey + for k in registry.keys() & self.extensions: # no viewkeys on OrderedMapSerializeKey ext = registry[k] cql = ext.after_table_cql(self, k, self.extensions[k]) if cql: diff --git a/cassandra/murmur3.py b/cassandra/murmur3.py index 7c8d641b32..282c43578d 100644 --- a/cassandra/murmur3.py +++ b/cassandra/murmur3.py @@ -1,4 +1,3 @@ -from six.moves import range import struct diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 5e3610811e..3e4e984410 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -18,8 +18,6 @@ import socket from uuid import UUID -import six -from six.moves import range import io from cassandra import ProtocolVersion @@ -86,8 +84,7 @@ def __init__(cls, name, bases, dct): register_class(cls) -@six.add_metaclass(_RegisterMessageType) -class _MessageType(object): +class _MessageType(object, metaclass=_RegisterMessageType): tracing = False custom_payload = None @@ -137,8 +134,6 @@ def recv_body(cls, f, protocol_version, *args): def summary_msg(self): msg = 'Error from server: code=%04x [%s] message="%s"' \ % (self.code, self.summary, self.message) - if six.PY2 and isinstance(msg, six.text_type): - msg = msg.encode('utf-8') return msg def __str__(self): @@ -159,8 +154,7 @@ def __init__(cls, name, bases, dct): error_classes[cls.error_code] = cls -@six.add_metaclass(ErrorMessageSubclass) -class ErrorMessageSub(ErrorMessage): +class ErrorMessageSub(ErrorMessage, metaclass=ErrorMessageSubclass): error_code = None @@ -1358,7 +1352,7 @@ def read_binary_string(f): def write_string(f, s): - if isinstance(s, six.text_type): + if isinstance(s, str): s = s.encode('utf8') write_short(f, len(s)) f.write(s) @@ -1375,7 +1369,7 @@ def read_longstring(f): def write_longstring(f, s): - if isinstance(s, six.text_type): + if isinstance(s, str): s = s.encode('utf8') write_int(f, len(s)) f.write(s) diff --git a/cassandra/query.py b/cassandra/query.py index 7e4efc2511..e656124403 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -23,8 +23,6 @@ import re import struct import time -import six -from six.moves import range, zip import warnings from cassandra import ConsistencyLevel, OperationTimedOut @@ -817,7 +815,7 @@ def add(self, statement, parameters=None): Like with other statements, parameters must be a sequence, even if there is only one item. """ - if isinstance(statement, six.string_types): + if isinstance(statement, str): if parameters: encoder = Encoder() if self._session is None else self._session.encoder statement = bind_params(statement, parameters, encoder) @@ -901,10 +899,8 @@ def __str__(self): def bind_params(query, params, encoder): - if six.PY2 and isinstance(query, six.text_type): - query = query.encode('utf-8') if isinstance(params, dict): - return query % dict((k, encoder.cql_encode_all_types(v)) for k, v in six.iteritems(params)) + return query % dict((k, encoder.cql_encode_all_types(v)) for k, v in params.items()) else: return query % tuple(encoder.cql_encode_all_types(v) for v in params) diff --git a/cassandra/scylla/cloud.py b/cassandra/scylla/cloud.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cassandra/segment.py b/cassandra/segment.py index e3881c4402..78161fe520 100644 --- a/cassandra/segment.py +++ b/cassandra/segment.py @@ -13,7 +13,6 @@ # limitations under the License. import zlib -import six from cassandra import DriverException from cassandra.marshal import int32_pack @@ -54,9 +53,6 @@ def compute_crc24(data, length): def compute_crc32(data, value): crc32 = zlib.crc32(data, value) - if six.PY2: - crc32 &= 0xffffffff - return crc32 diff --git a/cassandra/util.py b/cassandra/util.py index dd5c58b01d..06d338f2e1 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -13,16 +13,22 @@ # limitations under the License. from __future__ import with_statement +from _weakref import ref import calendar +from collections import OrderedDict +from collections.abc import Mapping import datetime from functools import total_ordering -import logging from itertools import chain +import keyword +import logging +import pickle import random import re -import six -import uuid +import socket import sys +import time +import uuid _HAS_GEOMET = True try: @@ -212,147 +218,6 @@ def _resolve_contact_points_to_string_map(contact_points): ) -try: - from collections import OrderedDict -except ImportError: - # OrderedDict from Python 2.7+ - - # Copyright (c) 2009 Raymond Hettinger - # - # Permission is hereby granted, free of charge, to any person - # obtaining a copy of this software and associated documentation files - # (the "Software"), to deal in the Software without restriction, - # including without limitation the rights to use, copy, modify, merge, - # publish, distribute, sublicense, and/or sell copies of the Software, - # and to permit persons to whom the Software is furnished to do so, - # subject to the following conditions: - # - # The above copyright notice and this permission notice shall be - # included in all copies or substantial portions of the Software. - # - # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES - # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - # OTHER DEALINGS IN THE SOFTWARE. - from UserDict import DictMixin - - class OrderedDict(dict, DictMixin): # noqa - """ A dictionary which maintains the insertion order of keys. """ - - def __init__(self, *args, **kwds): - """ A dictionary which maintains the insertion order of keys. """ - - if len(args) > 1: - raise TypeError('expected at most 1 arguments, got %d' % len(args)) - try: - self.__end - except AttributeError: - self.clear() - self.update(*args, **kwds) - - def clear(self): - self.__end = end = [] - end += [None, end, end] # sentinel node for doubly linked list - self.__map = {} # key --> [key, prev, next] - dict.clear(self) - - def __setitem__(self, key, value): - if key not in self: - end = self.__end - curr = end[1] - curr[2] = end[1] = self.__map[key] = [key, curr, end] - dict.__setitem__(self, key, value) - - def __delitem__(self, key): - dict.__delitem__(self, key) - key, prev, next = self.__map.pop(key) - prev[2] = next - next[1] = prev - - def __iter__(self): - end = self.__end - curr = end[2] - while curr is not end: - yield curr[0] - curr = curr[2] - - def __reversed__(self): - end = self.__end - curr = end[1] - while curr is not end: - yield curr[0] - curr = curr[1] - - def popitem(self, last=True): - if not self: - raise KeyError('dictionary is empty') - if last: - key = next(reversed(self)) - else: - key = next(iter(self)) - value = self.pop(key) - return key, value - - def __reduce__(self): - items = [[k, self[k]] for k in self] - tmp = self.__map, self.__end - del self.__map, self.__end - inst_dict = vars(self).copy() - self.__map, self.__end = tmp - if inst_dict: - return (self.__class__, (items,), inst_dict) - return self.__class__, (items,) - - def keys(self): - return list(self) - - setdefault = DictMixin.setdefault - update = DictMixin.update - pop = DictMixin.pop - values = DictMixin.values - items = DictMixin.items - iterkeys = DictMixin.iterkeys - itervalues = DictMixin.itervalues - iteritems = DictMixin.iteritems - - def __repr__(self): - if not self: - return '%s()' % (self.__class__.__name__,) - return '%s(%r)' % (self.__class__.__name__, self.items()) - - def copy(self): - return self.__class__(self) - - @classmethod - def fromkeys(cls, iterable, value=None): - d = cls() - for key in iterable: - d[key] = value - return d - - def __eq__(self, other): - if isinstance(other, OrderedDict): - if len(self) != len(other): - return False - for p, q in zip(self.items(), other.items()): - if p != q: - return False - return True - return dict.__eq__(self, other) - - def __ne__(self, other): - return not self == other - - -# WeakSet from Python 2.7+ (https://code.google.com/p/weakrefset) - -from _weakref import ref - - class _IterationGuard(object): # This context manager registers itself in the current iterators of the # weak container, such as to delay all removals until the context manager @@ -789,10 +654,6 @@ def _find_insertion(self, x): sortedset = SortedSet # backwards-compatibility -from cassandra.compat import Mapping -from six.moves import cPickle - - class OrderedMap(Mapping): ''' An ordered map that accepts non-hashable types for keys. It also maintains the @@ -835,7 +696,7 @@ def __init__(self, *args, **kwargs): for k, v in e: self._insert(k, v) - for k, v in six.iteritems(kwargs): + for k, v in kwargs.items(): self._insert(k, v) def _insert(self, key, value): @@ -901,7 +762,7 @@ def popitem(self): raise KeyError() def _serialize_key(self, key): - return cPickle.dumps(key) + return pickle.dumps(key) class OrderedMapSerializedKey(OrderedMap): @@ -919,13 +780,6 @@ def _serialize_key(self, key): return self.cass_key_type.serialize(key, self.protocol_version) -import datetime -import time - -if six.PY3: - long = int - - @total_ordering class Time(object): ''' @@ -951,11 +805,11 @@ def __init__(self, value): - datetime.time: built-in time - string_type: a string time of the form "HH:MM:SS[.mmmuuunnn]" """ - if isinstance(value, six.integer_types): + if isinstance(value, int): self._from_timestamp(value) elif isinstance(value, datetime.time): self._from_time(value) - elif isinstance(value, six.string_types): + elif isinstance(value, str): self._from_timestring(value) else: raise TypeError('Time arguments must be a whole number, datetime.time, or string') @@ -1031,7 +885,7 @@ def __eq__(self, other): if isinstance(other, Time): return self.nanosecond_time == other.nanosecond_time - if isinstance(other, six.integer_types): + if isinstance(other, int): return self.nanosecond_time == other return self.nanosecond_time % Time.MICRO == 0 and \ @@ -1080,11 +934,11 @@ def __init__(self, value): - datetime.date: built-in date - string_type: a string time of the form "yyyy-mm-dd" """ - if isinstance(value, six.integer_types): + if isinstance(value, int): self.days_from_epoch = value elif isinstance(value, (datetime.date, datetime.datetime)): self._from_timetuple(value.timetuple()) - elif isinstance(value, six.string_types): + elif isinstance(value, str): self._from_datestring(value) else: raise TypeError('Date arguments must be a whole number, datetime.date, or string') @@ -1124,7 +978,7 @@ def __eq__(self, other): if isinstance(other, Date): return self.days_from_epoch == other.days_from_epoch - if isinstance(other, six.integer_types): + if isinstance(other, int): return self.days_from_epoch == other try: @@ -1151,97 +1005,9 @@ def __str__(self): # If we overflow datetime.[MIN|MAX] return str(self.days_from_epoch) -import socket -if hasattr(socket, 'inet_pton'): - inet_pton = socket.inet_pton - inet_ntop = socket.inet_ntop -else: - """ - Windows doesn't have socket.inet_pton and socket.inet_ntop until Python 3.4 - This is an alternative impl using ctypes, based on this win_inet_pton project: - https://github.com/hickeroar/win_inet_pton - """ - import ctypes - - class sockaddr(ctypes.Structure): - """ - Shared struct for ipv4 and ipv6. - - https://msdn.microsoft.com/en-us/library/windows/desktop/ms740496(v=vs.85).aspx - - ``__pad1`` always covers the port. - - When being used for ``sockaddr_in6``, ``ipv4_addr`` actually covers ``sin6_flowinfo``, resulting - in proper alignment for ``ipv6_addr``. - """ - _fields_ = [("sa_family", ctypes.c_short), - ("__pad1", ctypes.c_ushort), - ("ipv4_addr", ctypes.c_byte * 4), - ("ipv6_addr", ctypes.c_byte * 16), - ("__pad2", ctypes.c_ulong)] - - if hasattr(ctypes, 'windll'): - WSAStringToAddressA = ctypes.windll.ws2_32.WSAStringToAddressA - WSAAddressToStringA = ctypes.windll.ws2_32.WSAAddressToStringA - else: - def not_windows(*args): - raise OSError("IPv6 addresses cannot be handled on Windows. " - "Missing ctypes.windll") - WSAStringToAddressA = not_windows - WSAAddressToStringA = not_windows - - def inet_pton(address_family, ip_string): - if address_family == socket.AF_INET: - return socket.inet_aton(ip_string) - - addr = sockaddr() - addr.sa_family = address_family - addr_size = ctypes.c_int(ctypes.sizeof(addr)) - - if WSAStringToAddressA( - ip_string, - address_family, - None, - ctypes.byref(addr), - ctypes.byref(addr_size) - ) != 0: - raise socket.error(ctypes.FormatError()) - - if address_family == socket.AF_INET6: - return ctypes.string_at(addr.ipv6_addr, 16) - - raise socket.error('unknown address family') - - def inet_ntop(address_family, packed_ip): - if address_family == socket.AF_INET: - return socket.inet_ntoa(packed_ip) - - addr = sockaddr() - addr.sa_family = address_family - addr_size = ctypes.c_int(ctypes.sizeof(addr)) - ip_string = ctypes.create_string_buffer(128) - ip_string_size = ctypes.c_int(ctypes.sizeof(ip_string)) - - if address_family == socket.AF_INET6: - if len(packed_ip) != ctypes.sizeof(addr.ipv6_addr): - raise socket.error('packed IP wrong length for inet_ntoa') - ctypes.memmove(addr.ipv6_addr, packed_ip, 16) - else: - raise socket.error('unknown address family') - - if WSAAddressToStringA( - ctypes.byref(addr), - addr_size, - None, - ip_string, - ctypes.byref(ip_string_size) - ) != 0: - raise socket.error(ctypes.FormatError()) - - return ip_string[:ip_string_size.value - 1] - -import keyword +inet_pton = socket.inet_pton +inet_ntop = socket.inet_ntop # similar to collections.namedtuple, reproduced here because Python 2.6 did not have the rename logic @@ -1688,7 +1454,7 @@ def __init__(self, value, precision): if value is None: milliseconds = None - elif isinstance(value, six.integer_types): + elif isinstance(value, int): milliseconds = value elif isinstance(value, datetime.datetime): value = value.replace( @@ -1956,12 +1722,10 @@ def __init__(self, version): try: self.major = int(parts.pop()) - except ValueError: - six.reraise( - ValueError, - ValueError("Couldn't parse version {}. Version should start with a number".format(version)), - sys.exc_info()[2] - ) + except ValueError as e: + raise ValueError( + "Couldn't parse version {}. Version should start with a number".format(version))\ + .with_traceback(e.__traceback__) try: self.minor = int(parts.pop()) if parts else 0 self.patch = int(parts.pop()) if parts else 0 @@ -1994,8 +1758,8 @@ def __str__(self): @staticmethod def _compare_version_part(version, other_version, cmp): - if not (isinstance(version, six.integer_types) and - isinstance(other_version, six.integer_types)): + if not (isinstance(version, int) and + isinstance(other_version, int)): version = str(version) other_version = str(other_version) diff --git a/docs/installation.rst b/docs/installation.rst index bea6d6ece5..17a4e63324 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -70,10 +70,10 @@ support this:: *Optional:* Column-Level Encryption (CLE) Support -------------------------------------------------- The driver has built-in support for client-side encryption and -decryption of data. For more, see :doc:`column_encryption`. +decryption of data. For more, see :doc:`column_encryption`. -CLE depends on the Python `cryptography `_ module. -When installing Python driver 3.27.0. the `cryptography` module is +CLE depends on the Python `cryptography `_ module. +When installing Python driver 3.27.0. the `cryptography` module is also downloaded and installed. If you are using Python driver 3.28.0 or later and want to use CLE, you must install the `cryptography `_ module. @@ -211,7 +211,7 @@ If your sudo configuration does not allow SETENV, you must push the option flag applies these options to all dependencies (which break on the custom flag). Therefore, you must first install dependencies, then use install-option:: - sudo pip install six futures + sudo pip install futures sudo pip install --install-option="--no-cython" diff --git a/examples/concurrent_executions/execute_async_with_queue.py b/examples/concurrent_executions/execute_async_with_queue.py index 60d2a69c3c..72d2c101cb 100644 --- a/examples/concurrent_executions/execute_async_with_queue.py +++ b/examples/concurrent_executions/execute_async_with_queue.py @@ -19,7 +19,7 @@ import time import uuid -from six.moves import queue +import queue from cassandra.cluster import Cluster diff --git a/requirements.txt b/requirements.txt index f784fba1b9..100a12905a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1 @@ geomet>=0.1,<0.3 -six >=1.9 -futures <=2.2.0 -# Futures is not required for Python 3, but it works up through 2.2.0 (after which it introduced breaking syntax). -# This is left here to make sure install -r works with any runtime. When installing via setup.py, futures is omitted -# for Python 3, in favor of the standard library implementation. -# see PYTHON-393 diff --git a/setup.py b/setup.py index 30ce602c3e..86e50e8b22 100644 --- a/setup.py +++ b/setup.py @@ -401,8 +401,7 @@ def run_setup(extensions): else: sys.stderr.write("Bypassing Cython setup requirement\n") - dependencies = ['six >=1.9', - 'geomet>=0.1,<0.3'] + dependencies = ['geomet>=0.1,<0.3'] _EXTRAS_REQUIRE = { 'graph': ['gremlinpython==3.4.6'], diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index a344931a4e..b158ed2bc0 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -30,7 +30,6 @@ from threading import Event from subprocess import call from itertools import groupby -import six import shutil from cassandra import OperationTimedOut, ReadTimeout, ReadFailure, WriteTimeout, WriteFailure, AlreadyExists,\ @@ -343,7 +342,6 @@ def _id_and_mark(f): lessthandse60 = unittest.skipUnless(DSE_VERSION and DSE_VERSION < Version('6.0'), "DSE version less than 6.0 required") pypy = unittest.skipUnless(platform.python_implementation() == "PyPy", "Test is skipped unless it's on PyPy") -notpy3 = unittest.skipIf(sys.version_info >= (3, 0), "Test not applicable for Python 3.x runtime") requiresmallclockgranularity = unittest.skipIf("Windows" in platform.system() or "asyncore" in EVENT_LOOP_MANAGER, "This test is not suitible for environments with large clock granularity") requiressimulacron = unittest.skipIf(SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"), "Simulacron jar hasn't been specified or C* version is 2.0") @@ -610,7 +608,7 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, if os.name == "nt": if CCM_CLUSTER: - for node in six.itervalues(CCM_CLUSTER.nodes): + for node in CCM_CLUSTER.nodes.items(): os.system("taskkill /F /PID " + str(node.pid)) else: call(["pkill", "-9", "-f", ".ccm"]) diff --git a/tests/integration/advanced/__init__.py b/tests/integration/advanced/__init__.py index e2fa1a4a4a..dffaccd190 100644 --- a/tests/integration/advanced/__init__.py +++ b/tests/integration/advanced/__init__.py @@ -14,7 +14,7 @@ import unittest -from six.moves.urllib.request import build_opener, Request, HTTPHandler +from urllib.request import build_opener, Request, HTTPHandler import re import os import time diff --git a/tests/integration/advanced/graph/__init__.py b/tests/integration/advanced/graph/__init__.py index 6c9458dd02..91c9287e11 100644 --- a/tests/integration/advanced/graph/__init__.py +++ b/tests/integration/advanced/graph/__init__.py @@ -22,7 +22,6 @@ import datetime from cassandra.util import Point, LineString, Polygon, Duration -import six from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT, EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT from cassandra.cluster import GraphAnalyticsExecutionProfile, GraphExecutionProfile, EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT, \ @@ -457,15 +456,11 @@ def datatypes(): "duration1": ["Duration()", datetime.timedelta(1, 16, 0), GraphSON1Deserializer.deserialize_duration], "duration2": ["Duration()", datetime.timedelta(days=1, seconds=16, milliseconds=15), - GraphSON1Deserializer.deserialize_duration] + GraphSON1Deserializer.deserialize_duration], + "blob3": ["Blob()", bytes(b"Hello World Again"), GraphSON1Deserializer.deserialize_blob], + "blob4": ["Blob()", memoryview(b"And Again Hello World"), GraphSON1Deserializer.deserialize_blob] } - if six.PY2: - data["blob2"] = ["Blob()", buffer(b"Hello World"), GraphSON1Deserializer.deserialize_blob] - else: - data["blob3"] = ["Blob()", bytes(b"Hello World Again"), GraphSON1Deserializer.deserialize_blob] - data["blob4"] = ["Blob()", memoryview(b"And Again Hello World"), GraphSON1Deserializer.deserialize_blob] - if DSE_VERSION >= Version("5.1"): data["time1"] = ["Time()", datetime.time(12, 6, 12, 444), GraphSON1Deserializer.deserialize_time] data["time2"] = ["Time()", datetime.time(12, 6, 12), GraphSON1Deserializer.deserialize_time] @@ -965,7 +960,7 @@ def generate_tests(cls, schema=None, graphson=None, traversal=False): """Generate tests for a graph configuration""" def decorator(klass): if DSE_VERSION: - predicate = inspect.ismethod if six.PY2 else inspect.isfunction + predicate = inspect.isfunction for name, func in inspect.getmembers(klass, predicate=predicate): if not name.startswith('_test'): continue @@ -984,7 +979,7 @@ def generate_schema_tests(cls, schema=None): """Generate schema tests for a graph configuration""" def decorator(klass): if DSE_VERSION: - predicate = inspect.ismethod if six.PY2 else inspect.isfunction + predicate = inspect.isfunction for name, func in inspect.getmembers(klass, predicate=predicate): if not name.startswith('_test'): continue @@ -1026,7 +1021,7 @@ def __init__(self, properties): @property def non_pk_properties(self): - return {p: v for p, v in six.iteritems(self.properties) if p != 'pkid'} + return {p: v for p, v in self.properties.items() if p != 'pkid'} class GraphSchema(object): @@ -1134,7 +1129,7 @@ def clear(session): @classmethod def create_vertex_label(cls, session, vertex_label, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT): statements = ["schema.propertyKey('pkid').Int().ifNotExists().create();"] - for k, v in six.iteritems(vertex_label.non_pk_properties): + for k, v in vertex_label.non_pk_properties.items(): typ = cls.sanitize_type(v) statements.append("schema.propertyKey('{name}').{type}.create();".format( name=k, type=typ @@ -1142,7 +1137,7 @@ def create_vertex_label(cls, session, vertex_label, execution_profile=EXEC_PROFI statements.append("schema.vertexLabel('{label}').partitionKey('pkid').properties(".format( label=vertex_label.label)) - property_names = [name for name in six.iterkeys(vertex_label.non_pk_properties)] + property_names = [name for name in vertex_label.non_pk_properties.keys()] statements.append(", ".join(["'{}'".format(p) for p in property_names])) statements.append(").create();") @@ -1189,7 +1184,7 @@ def create_vertex_label(cls, session, vertex_label, execution_profile=EXEC_PROFI statements = ["schema.vertexLabel('{label}').partitionBy('pkid', Int)".format( label=vertex_label.label)] - for name, typ in six.iteritems(vertex_label.non_pk_properties): + for name, typ in vertex_label.non_pk_properties.items(): typ = cls.sanitize_type(typ) statements.append(".property('{name}', {type})".format(name=name, type=typ)) statements.append(".create();") diff --git a/tests/integration/advanced/graph/fluent/__init__.py b/tests/integration/advanced/graph/fluent/__init__.py index 3962029f45..155de026c5 100644 --- a/tests/integration/advanced/graph/fluent/__init__.py +++ b/tests/integration/advanced/graph/fluent/__init__.py @@ -14,7 +14,6 @@ import sys import datetime -import six import time from collections import namedtuple from packaging.version import Version @@ -457,10 +456,10 @@ def _test_udt_with_namedtuples(self, schema, graphson): def _write_and_read_data_types(self, schema, graphson, use_schema=True): g = self.fetch_traversal_source(graphson) ep = self.get_execution_profile(graphson) - for data in six.itervalues(schema.fixtures.datatypes()): + for data in schema.fixtures.datatypes().values(): typ, value, deserializer = data vertex_label = VertexLabel([typ]) - property_name = next(six.iterkeys(vertex_label.non_pk_properties)) + property_name = next(iter(vertex_label.non_pk_properties.keys())) if use_schema or schema is CoreGraphSchema: schema.create_vertex_label(self.session, vertex_label, execution_profile=ep) @@ -536,9 +535,9 @@ def __test_udt(self, schema, graphson, address_class, address_with_tags_class, } g = self.fetch_traversal_source(graphson) - for typ, value in six.itervalues(data): + for typ, value in data.values(): vertex_label = VertexLabel([typ]) - property_name = next(six.iterkeys(vertex_label.non_pk_properties)) + property_name = next(iter(vertex_label.non_pk_properties.keys())) schema.create_vertex_label(self.session, vertex_label, execution_profile=ep) write_traversal = g.addV(str(vertex_label.label)).property('pkid', vertex_label.id). \ @@ -597,7 +596,7 @@ def _validate_prop(key, value, unittest): elif any(key.startswith(t) for t in ('Linestring',)): typ = LineString elif any(key.startswith(t) for t in ('neg',)): - typ = six.string_types + typ = str elif any(key.startswith(t) for t in ('date',)): typ = datetime.date elif any(key.startswith(t) for t in ('time',)): diff --git a/tests/integration/advanced/graph/fluent/test_graph.py b/tests/integration/advanced/graph/fluent/test_graph.py index d46a74a146..911e6d5d57 100644 --- a/tests/integration/advanced/graph/fluent/test_graph.py +++ b/tests/integration/advanced/graph/fluent/test_graph.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six - from cassandra import cluster from cassandra.cluster import ContinuousPagingOptions from cassandra.datastax.graph.fluent import DseGraph @@ -120,10 +118,10 @@ def _send_batch_and_read_results(self, schema, graphson, add_all=False, use_sche ep = self.get_execution_profile(graphson) batch = DseGraph.batch(session=self.session, execution_profile=self.get_execution_profile(graphson, traversal=True)) - for data in six.itervalues(datatypes): + for data in datatypes.values(): typ, value, deserializer = data vertex_label = VertexLabel([typ]) - property_name = next(six.iterkeys(vertex_label.non_pk_properties)) + property_name = next(iter(vertex_label.non_pk_properties.keys())) values[property_name] = value if use_schema or schema is CoreGraphSchema: schema.create_vertex_label(self.session, vertex_label, execution_profile=ep) diff --git a/tests/integration/advanced/graph/test_graph.py b/tests/integration/advanced/graph/test_graph.py index 277283ea5a..7f55229911 100644 --- a/tests/integration/advanced/graph/test_graph.py +++ b/tests/integration/advanced/graph/test_graph.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six import re from cassandra import OperationTimedOut, InvalidRequest diff --git a/tests/integration/advanced/graph/test_graph_datatype.py b/tests/integration/advanced/graph/test_graph_datatype.py index 0445ce8030..8a261c94d9 100644 --- a/tests/integration/advanced/graph/test_graph_datatype.py +++ b/tests/integration/advanced/graph/test_graph_datatype.py @@ -15,7 +15,6 @@ import unittest import time -import six import logging from packaging.version import Version from collections import namedtuple @@ -67,13 +66,13 @@ def _validate_type(self, vertex): if any(type_indicator.startswith(t) for t in ('int', 'short', 'long', 'bigint', 'decimal', 'smallint', 'varint')): - typ = six.integer_types + typ = int elif any(type_indicator.startswith(t) for t in ('float', 'double')): typ = float elif any(type_indicator.startswith(t) for t in ('duration', 'date', 'negdate', 'time', 'blob', 'timestamp', 'point', 'linestring', 'polygon', 'inet', 'uuid')): - typ = six.text_type + typ = str else: pass self.fail("Received unexpected type: %s" % type_indicator) @@ -85,10 +84,10 @@ class GenericGraphDataTypeTest(GraphUnitTestCase): def _test_all_datatypes(self, schema, graphson): ep = self.get_execution_profile(graphson) - for data in six.itervalues(schema.fixtures.datatypes()): + for data in schema.fixtures.datatypes().values(): typ, value, deserializer = data vertex_label = VertexLabel([typ]) - property_name = next(six.iterkeys(vertex_label.non_pk_properties)) + property_name = next(iter(vertex_label.non_pk_properties.keys())) schema.create_vertex_label(self.session, vertex_label, execution_profile=ep) vertex = list(schema.add_vertex(self.session, vertex_label, property_name, value, execution_profile=ep))[0] @@ -167,9 +166,9 @@ def __test_udt(self, schema, graphson, address_class, address_with_tags_class, ), 'hello')] } - for typ, value in six.itervalues(data): + for typ, value in data.values(): vertex_label = VertexLabel([typ]) - property_name = next(six.iterkeys(vertex_label.non_pk_properties)) + property_name = next(iter(vertex_label.non_pk_properties.keys())) schema.create_vertex_label(self.session, vertex_label, execution_profile=ep) vertex = list(schema.add_vertex(self.session, vertex_label, property_name, value, execution_profile=ep))[0] diff --git a/tests/integration/advanced/graph/test_graph_query.py b/tests/integration/advanced/graph/test_graph_query.py index 9bc23e611a..0c889938d8 100644 --- a/tests/integration/advanced/graph/test_graph_query.py +++ b/tests/integration/advanced/graph/test_graph_query.py @@ -14,7 +14,6 @@ import sys -import six from packaging.version import Version from copy import copy @@ -83,7 +82,7 @@ def test_consistency_passing(self): res = s.execute_graph("null") for k, v in cl.items(): - self.assertEqual(res.response_future.message.custom_payload[graph_params[k]], six.b(ConsistencyLevel.value_to_name[v])) + self.assertEqual(res.response_future.message.custom_payload[graph_params[k]], ConsistencyLevel.value_to_name[v].encode()) # passed profile values override session defaults cl = {0: ConsistencyLevel.ALL, 1: ConsistencyLevel.QUORUM} @@ -97,7 +96,7 @@ def test_consistency_passing(self): res = s.execute_graph("null", execution_profile=tmp_profile) for k, v in cl.items(): - self.assertEqual(res.response_future.message.custom_payload[graph_params[k]], six.b(ConsistencyLevel.value_to_name[v])) + self.assertEqual(res.response_future.message.custom_payload[graph_params[k]], ConsistencyLevel.value_to_name[v].encode()) finally: default_profile.graph_options = default_graph_opts @@ -588,7 +587,7 @@ def _test_basic_query_with_type_wrapper(self, schema, graphson): vl = VertexLabel(['tupleOf(Int, Bigint)']) schema.create_vertex_label(self.session, vl, execution_profile=ep) - prop_name = next(six.iterkeys(vl.non_pk_properties)) + prop_name = next(iter(vl.non_pk_properties.keys())) with self.assertRaises(InvalidRequest): schema.add_vertex(self.session, vl, prop_name, (1, 42), execution_profile=ep) diff --git a/tests/integration/advanced/test_cont_paging.py b/tests/integration/advanced/test_cont_paging.py index 2e75d7061d..99de82647d 100644 --- a/tests/integration/advanced/test_cont_paging.py +++ b/tests/integration/advanced/test_cont_paging.py @@ -21,7 +21,6 @@ import unittest from itertools import cycle, count -from six.moves import range from packaging.version import Version import time diff --git a/tests/integration/cloud/test_cloud.py b/tests/integration/cloud/test_cloud.py index ef4909a257..03ff8237be 100644 --- a/tests/integration/cloud/test_cloud.py +++ b/tests/integration/cloud/test_cloud.py @@ -20,7 +20,6 @@ import unittest -import six from ssl import SSLContext, PROTOCOL_TLS from cassandra import DriverException, ConsistencyLevel, InvalidRequest @@ -114,10 +113,7 @@ def test_error_when_bundle_doesnt_exist(self): try: self.connect('/invalid/path/file.zip') except Exception as e: - if six.PY2: - self.assertIsInstance(e, IOError) - else: - self.assertIsInstance(e, FileNotFoundError) + self.assertIsInstance(e, FileNotFoundError) def test_load_balancing_policy_is_dcawaretokenlbp(self): self.connect(self.creds) @@ -163,7 +159,7 @@ def test_default_consistency(self): self.assertEqual(self.session.default_consistency_level, ConsistencyLevel.LOCAL_QUORUM) # Verify EXEC_PROFILE_DEFAULT, EXEC_PROFILE_GRAPH_DEFAULT, # EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT, EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT - for ep_key in six.iterkeys(self.cluster.profile_manager.profiles): + for ep_key in self.cluster.profile_manager.profiles.keys(): ep = self.cluster.profile_manager.profiles[ep_key] self.assertEqual( ep.consistency_level, diff --git a/tests/integration/cqlengine/columns/test_container_columns.py b/tests/integration/cqlengine/columns/test_container_columns.py index 2acf36457b..1f51770eac 100644 --- a/tests/integration/cqlengine/columns/test_container_columns.py +++ b/tests/integration/cqlengine/columns/test_container_columns.py @@ -15,7 +15,6 @@ from datetime import datetime, timedelta import json import logging -import six import sys import traceback from uuid import uuid4 @@ -48,7 +47,7 @@ class JsonTestColumn(columns.Column): def to_python(self, value): if value is None: return - if isinstance(value, six.string_types): + if isinstance(value, str): return json.loads(value) else: return value diff --git a/tests/integration/cqlengine/columns/test_value_io.py b/tests/integration/cqlengine/columns/test_value_io.py index 2c82fe16f7..758ca714a6 100644 --- a/tests/integration/cqlengine/columns/test_value_io.py +++ b/tests/integration/cqlengine/columns/test_value_io.py @@ -16,7 +16,6 @@ from datetime import datetime, timedelta, time from decimal import Decimal from uuid import uuid1, uuid4, UUID -import six from cassandra.cqlengine import columns from cassandra.cqlengine.management import sync_table @@ -101,15 +100,15 @@ def test_column_io(self): class TestBlobIO(BaseColumnIOTest): column = columns.Blob - pkey_val = six.b('blake'), uuid4().bytes - data_val = six.b('eggleston'), uuid4().bytes + pkey_val = b'blake', uuid4().bytes + data_val = b'eggleston', uuid4().bytes class TestBlobIO2(BaseColumnIOTest): column = columns.Blob - pkey_val = bytearray(six.b('blake')), uuid4().bytes - data_val = bytearray(six.b('eggleston')), uuid4().bytes + pkey_val = bytearray(b'blake'), uuid4().bytes + data_val = bytearray(b'eggleston'), uuid4().bytes class TestTextIO(BaseColumnIOTest): diff --git a/tests/integration/cqlengine/management/test_compaction_settings.py b/tests/integration/cqlengine/management/test_compaction_settings.py index 604e225586..554d941ecc 100644 --- a/tests/integration/cqlengine/management/test_compaction_settings.py +++ b/tests/integration/cqlengine/management/test_compaction_settings.py @@ -14,7 +14,6 @@ import copy from mock import patch -import six from cassandra.cqlengine import columns from cassandra.cqlengine.management import drop_table, sync_table, _get_table_metadata, _update_options @@ -110,7 +109,7 @@ def _verify_options(self, table_meta, expected_options): cql = table_meta.export_as_string() for name, value in expected_options.items(): - if isinstance(value, six.string_types): + if isinstance(value, str): self.assertIn("%s = '%s'" % (name, value), cql) else: start = cql.find("%s = {" % (name,)) diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 2fd35b865e..e4febcc14b 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -13,7 +13,6 @@ # limitations under the License. import unittest -import six import mock import logging from packaging.version import Version diff --git a/tests/integration/cqlengine/model/test_class_construction.py b/tests/integration/cqlengine/model/test_class_construction.py index f764e78e5c..dae97c4438 100644 --- a/tests/integration/cqlengine/model/test_class_construction.py +++ b/tests/integration/cqlengine/model/test_class_construction.py @@ -15,7 +15,6 @@ from uuid import uuid4 import warnings -import six from cassandra.cqlengine import columns, CQLEngineException from cassandra.cqlengine.models import Model, ModelException, ModelDefinitionException, ColumnQueryEvaluator from cassandra.cqlengine.query import ModelQuerySet, DMLQuery diff --git a/tests/integration/cqlengine/operators/test_where_operators.py b/tests/integration/cqlengine/operators/test_where_operators.py index 555af11025..1e0134dbac 100644 --- a/tests/integration/cqlengine/operators/test_where_operators.py +++ b/tests/integration/cqlengine/operators/test_where_operators.py @@ -27,8 +27,6 @@ from tests.integration.cqlengine.operators import check_lookup from tests.integration import greaterthanorequalcass30 -import six - class TestWhereOperators(unittest.TestCase): @@ -47,15 +45,15 @@ def test_symbol_lookup(self): def test_operator_rendering(self): """ tests symbols are rendered properly """ - self.assertEqual("=", six.text_type(EqualsOperator())) - self.assertEqual("!=", six.text_type(NotEqualsOperator())) - self.assertEqual("IN", six.text_type(InOperator())) - self.assertEqual(">", six.text_type(GreaterThanOperator())) - self.assertEqual(">=", six.text_type(GreaterThanOrEqualOperator())) - self.assertEqual("<", six.text_type(LessThanOperator())) - self.assertEqual("<=", six.text_type(LessThanOrEqualOperator())) - self.assertEqual("CONTAINS", six.text_type(ContainsOperator())) - self.assertEqual("LIKE", six.text_type(LikeOperator())) + self.assertEqual("=", str(EqualsOperator())) + self.assertEqual("!=", str(NotEqualsOperator())) + self.assertEqual("IN", str(InOperator())) + self.assertEqual(">", str(GreaterThanOperator())) + self.assertEqual(">=", str(GreaterThanOrEqualOperator())) + self.assertEqual("<", str(LessThanOperator())) + self.assertEqual("<=", str(LessThanOrEqualOperator())) + self.assertEqual("CONTAINS", str(ContainsOperator())) + self.assertEqual("LIKE", str(LikeOperator())) class TestIsNotNull(BaseCassEngTestCase): diff --git a/tests/integration/cqlengine/statements/test_base_statement.py b/tests/integration/cqlengine/statements/test_base_statement.py index 3b5be60520..f245744352 100644 --- a/tests/integration/cqlengine/statements/test_base_statement.py +++ b/tests/integration/cqlengine/statements/test_base_statement.py @@ -14,7 +14,6 @@ import unittest from uuid import uuid4 -import six from cassandra.query import FETCH_SIZE_UNSET from cassandra.cqlengine.statements import BaseCQLStatement @@ -127,7 +126,7 @@ def test_like_operator(self): ss = SelectStatement(self.table_name) like_clause = "text_for_%" ss.add_where(Column(db_field='text'), LikeOperator(), like_clause) - self.assertEqual(six.text_type(ss), + self.assertEqual(str(ss), 'SELECT * FROM {} WHERE "text" LIKE %(0)s'.format(self.table_name)) result = execute(ss) diff --git a/tests/integration/cqlengine/statements/test_delete_statement.py b/tests/integration/cqlengine/statements/test_delete_statement.py index 5e2894a06b..745881f42f 100644 --- a/tests/integration/cqlengine/statements/test_delete_statement.py +++ b/tests/integration/cqlengine/statements/test_delete_statement.py @@ -17,7 +17,6 @@ from cassandra.cqlengine.columns import Column from cassandra.cqlengine.statements import DeleteStatement, WhereClause, MapDeleteClause, ConditionalClause from cassandra.cqlengine.operators import * -import six class DeleteStatementTests(TestCase): @@ -31,24 +30,24 @@ def test_single_field_is_listified(self): def test_field_rendering(self): """ tests that fields are properly added to the select statement """ ds = DeleteStatement('table', ['f1', 'f2']) - self.assertTrue(six.text_type(ds).startswith('DELETE "f1", "f2"'), six.text_type(ds)) + self.assertTrue(str(ds).startswith('DELETE "f1", "f2"'), str(ds)) self.assertTrue(str(ds).startswith('DELETE "f1", "f2"'), str(ds)) def test_none_fields_rendering(self): """ tests that a '*' is added if no fields are passed in """ ds = DeleteStatement('table', None) - self.assertTrue(six.text_type(ds).startswith('DELETE FROM'), six.text_type(ds)) + self.assertTrue(str(ds).startswith('DELETE FROM'), str(ds)) self.assertTrue(str(ds).startswith('DELETE FROM'), str(ds)) def test_table_rendering(self): ds = DeleteStatement('table', None) - self.assertTrue(six.text_type(ds).startswith('DELETE FROM table'), six.text_type(ds)) + self.assertTrue(str(ds).startswith('DELETE FROM table'), str(ds)) self.assertTrue(str(ds).startswith('DELETE FROM table'), str(ds)) def test_where_clause_rendering(self): ds = DeleteStatement('table', None) ds.add_where(Column(db_field='a'), EqualsOperator(), 'b') - self.assertEqual(six.text_type(ds), 'DELETE FROM table WHERE "a" = %(0)s', six.text_type(ds)) + self.assertEqual(str(ds), 'DELETE FROM table WHERE "a" = %(0)s', str(ds)) def test_context_update(self): ds = DeleteStatement('table', None) @@ -56,7 +55,7 @@ def test_context_update(self): ds.add_where(Column(db_field='a'), EqualsOperator(), 'b') ds.update_context_id(7) - self.assertEqual(six.text_type(ds), 'DELETE "d"[%(8)s] FROM table WHERE "a" = %(7)s') + self.assertEqual(str(ds), 'DELETE "d"[%(8)s] FROM table WHERE "a" = %(7)s') self.assertEqual(ds.get_context(), {'7': 'b', '8': 3}) def test_context(self): @@ -69,23 +68,23 @@ def test_range_deletion_rendering(self): ds.add_where(Column(db_field='a'), EqualsOperator(), 'b') ds.add_where(Column(db_field='created_at'), GreaterThanOrEqualOperator(), '0') ds.add_where(Column(db_field='created_at'), LessThanOrEqualOperator(), '10') - self.assertEqual(six.text_type(ds), 'DELETE FROM table WHERE "a" = %(0)s AND "created_at" >= %(1)s AND "created_at" <= %(2)s', six.text_type(ds)) + self.assertEqual(str(ds), 'DELETE FROM table WHERE "a" = %(0)s AND "created_at" >= %(1)s AND "created_at" <= %(2)s', str(ds)) ds = DeleteStatement('table', None) ds.add_where(Column(db_field='a'), EqualsOperator(), 'b') ds.add_where(Column(db_field='created_at'), InOperator(), ['0', '10', '20']) - self.assertEqual(six.text_type(ds), 'DELETE FROM table WHERE "a" = %(0)s AND "created_at" IN %(1)s', six.text_type(ds)) + self.assertEqual(str(ds), 'DELETE FROM table WHERE "a" = %(0)s AND "created_at" IN %(1)s', str(ds)) ds = DeleteStatement('table', None) ds.add_where(Column(db_field='a'), NotEqualsOperator(), 'b') - self.assertEqual(six.text_type(ds), 'DELETE FROM table WHERE "a" != %(0)s', six.text_type(ds)) + self.assertEqual(str(ds), 'DELETE FROM table WHERE "a" != %(0)s', str(ds)) def test_delete_conditional(self): where = [WhereClause('id', EqualsOperator(), 1)] conditionals = [ConditionalClause('f0', 'value0'), ConditionalClause('f1', 'value1')] ds = DeleteStatement('table', where=where, conditionals=conditionals) self.assertEqual(len(ds.conditionals), len(conditionals)) - self.assertEqual(six.text_type(ds), 'DELETE FROM table WHERE "id" = %(0)s IF "f0" = %(1)s AND "f1" = %(2)s', six.text_type(ds)) + self.assertEqual(str(ds), 'DELETE FROM table WHERE "id" = %(0)s IF "f0" = %(1)s AND "f1" = %(2)s', str(ds)) fields = ['one', 'two'] ds = DeleteStatement('table', fields=fields, where=where, conditionals=conditionals) - self.assertEqual(six.text_type(ds), 'DELETE "one", "two" FROM table WHERE "id" = %(0)s IF "f0" = %(1)s AND "f1" = %(2)s', six.text_type(ds)) + self.assertEqual(str(ds), 'DELETE "one", "two" FROM table WHERE "id" = %(0)s IF "f0" = %(1)s AND "f1" = %(2)s', str(ds)) diff --git a/tests/integration/cqlengine/statements/test_insert_statement.py b/tests/integration/cqlengine/statements/test_insert_statement.py index a1dcd08968..45485af912 100644 --- a/tests/integration/cqlengine/statements/test_insert_statement.py +++ b/tests/integration/cqlengine/statements/test_insert_statement.py @@ -13,8 +13,6 @@ # limitations under the License. import unittest -import six - from cassandra.cqlengine.columns import Column from cassandra.cqlengine.statements import InsertStatement @@ -27,7 +25,7 @@ def test_statement(self): ist.add_assignment(Column(db_field='c'), 'd') self.assertEqual( - six.text_type(ist), + str(ist), 'INSERT INTO table ("a", "c") VALUES (%(0)s, %(1)s)' ) @@ -38,7 +36,7 @@ def test_context_update(self): ist.update_context_id(4) self.assertEqual( - six.text_type(ist), + str(ist), 'INSERT INTO table ("a", "c") VALUES (%(4)s, %(5)s)' ) ctx = ist.get_context() @@ -48,4 +46,4 @@ def test_additional_rendering(self): ist = InsertStatement('table', ttl=60) ist.add_assignment(Column(db_field='a'), 'b') ist.add_assignment(Column(db_field='c'), 'd') - self.assertIn('USING TTL 60', six.text_type(ist)) + self.assertIn('USING TTL 60', str(ist)) diff --git a/tests/integration/cqlengine/statements/test_select_statement.py b/tests/integration/cqlengine/statements/test_select_statement.py index c6d1ac69f4..26c9c804cb 100644 --- a/tests/integration/cqlengine/statements/test_select_statement.py +++ b/tests/integration/cqlengine/statements/test_select_statement.py @@ -16,7 +16,6 @@ from cassandra.cqlengine.columns import Column from cassandra.cqlengine.statements import SelectStatement, WhereClause from cassandra.cqlengine.operators import * -import six class SelectStatementTests(unittest.TestCase): @@ -28,42 +27,42 @@ def test_single_field_is_listified(self): def test_field_rendering(self): """ tests that fields are properly added to the select statement """ ss = SelectStatement('table', ['f1', 'f2']) - self.assertTrue(six.text_type(ss).startswith('SELECT "f1", "f2"'), six.text_type(ss)) + self.assertTrue(str(ss).startswith('SELECT "f1", "f2"'), str(ss)) self.assertTrue(str(ss).startswith('SELECT "f1", "f2"'), str(ss)) def test_none_fields_rendering(self): """ tests that a '*' is added if no fields are passed in """ ss = SelectStatement('table') - self.assertTrue(six.text_type(ss).startswith('SELECT *'), six.text_type(ss)) + self.assertTrue(str(ss).startswith('SELECT *'), str(ss)) self.assertTrue(str(ss).startswith('SELECT *'), str(ss)) def test_table_rendering(self): ss = SelectStatement('table') - self.assertTrue(six.text_type(ss).startswith('SELECT * FROM table'), six.text_type(ss)) + self.assertTrue(str(ss).startswith('SELECT * FROM table'), str(ss)) self.assertTrue(str(ss).startswith('SELECT * FROM table'), str(ss)) def test_where_clause_rendering(self): ss = SelectStatement('table') ss.add_where(Column(db_field='a'), EqualsOperator(), 'b') - self.assertEqual(six.text_type(ss), 'SELECT * FROM table WHERE "a" = %(0)s', six.text_type(ss)) + self.assertEqual(str(ss), 'SELECT * FROM table WHERE "a" = %(0)s', str(ss)) def test_count(self): ss = SelectStatement('table', count=True, limit=10, order_by='d') ss.add_where(Column(db_field='a'), EqualsOperator(), 'b') - self.assertEqual(six.text_type(ss), 'SELECT COUNT(*) FROM table WHERE "a" = %(0)s LIMIT 10', six.text_type(ss)) - self.assertIn('LIMIT', six.text_type(ss)) - self.assertNotIn('ORDER', six.text_type(ss)) + self.assertEqual(str(ss), 'SELECT COUNT(*) FROM table WHERE "a" = %(0)s LIMIT 10', str(ss)) + self.assertIn('LIMIT', str(ss)) + self.assertNotIn('ORDER', str(ss)) def test_distinct(self): ss = SelectStatement('table', distinct_fields=['field2']) ss.add_where(Column(db_field='field1'), EqualsOperator(), 'b') - self.assertEqual(six.text_type(ss), 'SELECT DISTINCT "field2" FROM table WHERE "field1" = %(0)s', six.text_type(ss)) + self.assertEqual(str(ss), 'SELECT DISTINCT "field2" FROM table WHERE "field1" = %(0)s', str(ss)) ss = SelectStatement('table', distinct_fields=['field1', 'field2']) - self.assertEqual(six.text_type(ss), 'SELECT DISTINCT "field1", "field2" FROM table') + self.assertEqual(str(ss), 'SELECT DISTINCT "field1", "field2" FROM table') ss = SelectStatement('table', distinct_fields=['field1'], count=True) - self.assertEqual(six.text_type(ss), 'SELECT DISTINCT COUNT("field1") FROM table') + self.assertEqual(str(ss), 'SELECT DISTINCT COUNT("field1") FROM table') def test_context(self): ss = SelectStatement('table') @@ -89,20 +88,20 @@ def test_additional_rendering(self): limit=15, allow_filtering=True ) - qstr = six.text_type(ss) + qstr = str(ss) self.assertIn('LIMIT 15', qstr) self.assertIn('ORDER BY x, y', qstr) self.assertIn('ALLOW FILTERING', qstr) def test_limit_rendering(self): ss = SelectStatement('table', None, limit=10) - qstr = six.text_type(ss) + qstr = str(ss) self.assertIn('LIMIT 10', qstr) ss = SelectStatement('table', None, limit=0) - qstr = six.text_type(ss) + qstr = str(ss) self.assertNotIn('LIMIT', qstr) ss = SelectStatement('table', None, limit=None) - qstr = six.text_type(ss) + qstr = str(ss) self.assertNotIn('LIMIT', qstr) diff --git a/tests/integration/cqlengine/statements/test_update_statement.py b/tests/integration/cqlengine/statements/test_update_statement.py index 99105069dd..4429625bf4 100644 --- a/tests/integration/cqlengine/statements/test_update_statement.py +++ b/tests/integration/cqlengine/statements/test_update_statement.py @@ -18,7 +18,6 @@ from cassandra.cqlengine.statements import (UpdateStatement, WhereClause, AssignmentClause, SetUpdateClause, ListUpdateClause) -import six class UpdateStatementTests(unittest.TestCase): @@ -26,7 +25,7 @@ class UpdateStatementTests(unittest.TestCase): def test_table_rendering(self): """ tests that fields are properly added to the select statement """ us = UpdateStatement('table') - self.assertTrue(six.text_type(us).startswith('UPDATE table SET'), six.text_type(us)) + self.assertTrue(str(us).startswith('UPDATE table SET'), str(us)) self.assertTrue(str(us).startswith('UPDATE table SET'), str(us)) def test_rendering(self): @@ -34,10 +33,10 @@ def test_rendering(self): us.add_assignment(Column(db_field='a'), 'b') us.add_assignment(Column(db_field='c'), 'd') us.add_where(Column(db_field='a'), EqualsOperator(), 'x') - self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(0)s, "c" = %(1)s WHERE "a" = %(2)s', six.text_type(us)) + self.assertEqual(str(us), 'UPDATE table SET "a" = %(0)s, "c" = %(1)s WHERE "a" = %(2)s', str(us)) us.add_where(Column(db_field='a'), NotEqualsOperator(), 'y') - self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(0)s, "c" = %(1)s WHERE "a" = %(2)s AND "a" != %(3)s', six.text_type(us)) + self.assertEqual(str(us), 'UPDATE table SET "a" = %(0)s, "c" = %(1)s WHERE "a" = %(2)s AND "a" != %(3)s', str(us)) def test_context(self): us = UpdateStatement('table') @@ -52,19 +51,19 @@ def test_context_update(self): us.add_assignment(Column(db_field='c'), 'd') us.add_where(Column(db_field='a'), EqualsOperator(), 'x') us.update_context_id(3) - self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(4)s, "c" = %(5)s WHERE "a" = %(3)s') + self.assertEqual(str(us), 'UPDATE table SET "a" = %(4)s, "c" = %(5)s WHERE "a" = %(3)s') self.assertEqual(us.get_context(), {'4': 'b', '5': 'd', '3': 'x'}) def test_additional_rendering(self): us = UpdateStatement('table', ttl=60) us.add_assignment(Column(db_field='a'), 'b') us.add_where(Column(db_field='a'), EqualsOperator(), 'x') - self.assertIn('USING TTL 60', six.text_type(us)) + self.assertIn('USING TTL 60', str(us)) def test_update_set_add(self): us = UpdateStatement('table') us.add_update(Set(Text, db_field='a'), set((1,)), 'add') - self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" + %(0)s') + self.assertEqual(str(us), 'UPDATE table SET "a" = "a" + %(0)s') def test_update_empty_set_add_does_not_assign(self): us = UpdateStatement('table') diff --git a/tests/integration/cqlengine/statements/test_where_clause.py b/tests/integration/cqlengine/statements/test_where_clause.py index 21671be086..0090fa0123 100644 --- a/tests/integration/cqlengine/statements/test_where_clause.py +++ b/tests/integration/cqlengine/statements/test_where_clause.py @@ -13,7 +13,6 @@ # limitations under the License. import unittest -import six from cassandra.cqlengine.operators import EqualsOperator from cassandra.cqlengine.statements import StatementException, WhereClause @@ -30,7 +29,7 @@ def test_where_clause_rendering(self): wc = WhereClause('a', EqualsOperator(), 'c') wc.set_context_id(5) - self.assertEqual('"a" = %(5)s', six.text_type(wc), six.text_type(wc)) + self.assertEqual('"a" = %(5)s', str(wc), str(wc)) self.assertEqual('"a" = %(5)s', str(wc), type(wc)) def test_equality_method(self): diff --git a/tests/integration/cqlengine/test_batch_query.py b/tests/integration/cqlengine/test_batch_query.py index 94496727a7..7887949bb8 100644 --- a/tests/integration/cqlengine/test_batch_query.py +++ b/tests/integration/cqlengine/test_batch_query.py @@ -13,9 +13,6 @@ # limitations under the License. import warnings -import six -import sure - from cassandra.cqlengine import columns from cassandra.cqlengine.management import drop_table, sync_table from cassandra.cqlengine.models import Model diff --git a/tests/integration/cqlengine/test_lwt_conditional.py b/tests/integration/cqlengine/test_lwt_conditional.py index f8459a95ad..45dbf86c68 100644 --- a/tests/integration/cqlengine/test_lwt_conditional.py +++ b/tests/integration/cqlengine/test_lwt_conditional.py @@ -14,7 +14,6 @@ import unittest import mock -import six from uuid import uuid4 from cassandra.cqlengine import columns @@ -113,7 +112,7 @@ def test_conditional_clause(self): tc = ConditionalClause('some_value', 23) tc.set_context_id(3) - self.assertEqual('"some_value" = %(3)s', six.text_type(tc)) + self.assertEqual('"some_value" = %(3)s', str(tc)) self.assertEqual('"some_value" = %(3)s', str(tc)) def test_batch_update_conditional(self): diff --git a/tests/integration/datatype_utils.py b/tests/integration/datatype_utils.py index 8a1c813baa..1f7fb50a05 100644 --- a/tests/integration/datatype_utils.py +++ b/tests/integration/datatype_utils.py @@ -14,8 +14,8 @@ from decimal import Decimal from datetime import datetime, date, time +import ipaddress from uuid import uuid1, uuid4 -import six from cassandra.util import OrderedMap, Date, Time, sortedset, Duration @@ -91,11 +91,10 @@ def get_sample_data(): sample_data[datatype] = 3.4028234663852886e+38 elif datatype == 'inet': - sample_data[datatype] = ('123.123.123.123', '2001:db8:85a3:8d3:1319:8a2e:370:7348') - if six.PY3: - import ipaddress - sample_data[datatype] += (ipaddress.IPv4Address("123.123.123.123"), - ipaddress.IPv6Address('2001:db8:85a3:8d3:1319:8a2e:370:7348')) + sample_data[datatype] = ('123.123.123.123', + '2001:db8:85a3:8d3:1319:8a2e:370:7348', + ipaddress.IPv4Address("123.123.123.123"), + ipaddress.IPv6Address('2001:db8:85a3:8d3:1319:8a2e:370:7348')) elif datatype == 'int': sample_data[datatype] = 2147483647 diff --git a/tests/integration/long/test_ipv6.py b/tests/integration/long/test_ipv6.py index 3e2f2ffc5e..4a741b70b3 100644 --- a/tests/integration/long/test_ipv6.py +++ b/tests/integration/long/test_ipv6.py @@ -13,7 +13,6 @@ # limitations under the License. import os, socket, errno -import six from ccmlib import common from cassandra.cluster import NoHostAvailable diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index 1def601d2e..d08676659f 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -14,7 +14,6 @@ import unittest import logging -import six import time from mock import Mock, patch diff --git a/tests/integration/simulacron/utils.py b/tests/integration/simulacron/utils.py index ba9573fd23..01d94fc539 100644 --- a/tests/integration/simulacron/utils.py +++ b/tests/integration/simulacron/utils.py @@ -15,7 +15,7 @@ import json import subprocess import time -from six.moves.urllib.request import build_opener, Request, HTTPHandler +from urllib.request import build_opener, Request, HTTPHandler from cassandra.metadata import SchemaParserV4, SchemaParserDSE68 diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index 2f8ffbb068..94f77a6916 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -16,7 +16,6 @@ import logging import time -import six from cassandra.cluster import NoHostAvailable from cassandra.auth import PlainTextAuthProvider, SASLClient, SaslAuthProvider diff --git a/tests/integration/standard/test_client_warnings.py b/tests/integration/standard/test_client_warnings.py index 37003d5213..8d6818c91a 100644 --- a/tests/integration/standard/test_client_warnings.py +++ b/tests/integration/standard/test_client_warnings.py @@ -15,7 +15,6 @@ import unittest -import six from cassandra.query import BatchStatement from tests.integration import use_singledc, PROTOCOL_VERSION, local, TestCluster diff --git a/tests/integration/standard/test_concurrent.py b/tests/integration/standard/test_concurrent.py index 15da526bde..ba891b4bd0 100644 --- a/tests/integration/standard/test_concurrent.py +++ b/tests/integration/standard/test_concurrent.py @@ -24,8 +24,6 @@ from tests.integration import use_singledc, PROTOCOL_VERSION, TestCluster -from six import next - import unittest log = logging.getLogger(__name__) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 76c8216d41..3323baf20b 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -17,7 +17,6 @@ from functools import partial from mock import patch import logging -from six.moves import range import sys import threading from threading import Thread, Event diff --git a/tests/integration/standard/test_custom_payload.py b/tests/integration/standard/test_custom_payload.py index 3290852862..f33ab4f04f 100644 --- a/tests/integration/standard/test_custom_payload.py +++ b/tests/integration/standard/test_custom_payload.py @@ -15,8 +15,6 @@ import unittest -import six - from cassandra.query import (SimpleStatement, BatchStatement, BatchType) from tests.integration import use_singledc, PROTOCOL_VERSION, local, TestCluster @@ -138,16 +136,16 @@ def validate_various_custom_payloads(self, statement): # Long key value pair key_value = "x" * 10 - custom_payload = {key_value: six.b(key_value)} + custom_payload = {key_value: key_value.encode()} self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload) # Max supported value key pairs according C* binary protocol v4 should be 65534 (unsigned short max value) for i in range(65534): - custom_payload[str(i)] = six.b('x') + custom_payload[str(i)] = b'x' self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload) # Add one custom payload to this is too many key value pairs and should fail - custom_payload[str(65535)] = six.b('x') + custom_payload[str(65535)] = b'x' with self.assertRaises(ValueError): self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload) diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index 60c5fb8969..aa74f18d1c 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -25,7 +25,6 @@ TestCluster, greaterthanorequalcass40, requirecassandra from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES from tests.integration.standard.utils import create_table_with_all_types, get_all_primitive_params -from six import binary_type import uuid import mock @@ -78,7 +77,7 @@ def test_custom_raw_uuid_row_results(self): session.client_protocol_handler = CustomTestRawRowType result_set = session.execute("SELECT schema_version FROM system.local") raw_value = result_set[0][0] - self.assertTrue(isinstance(raw_value, binary_type)) + self.assertTrue(isinstance(raw_value, bytes)) self.assertEqual(len(raw_value), 16) # Ensure that we get normal uuid back when we re-connect diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index b83df22032..3534f29f9f 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -17,7 +17,6 @@ from collections import defaultdict import difflib import logging -import six import sys import time import os @@ -990,7 +989,7 @@ class Ext1(Ext0): update_v = s.prepare('UPDATE system_schema.views SET extensions=? WHERE keyspace_name=? AND view_name=?') # extensions registered, one present # -------------------------------------- - ext_map = {Ext0.name: six.b("THA VALUE")} + ext_map = {Ext0.name: b"THA VALUE"} [(s.execute(update_t, (ext_map, ks, t)), s.execute(update_v, (ext_map, ks, v))) for _ in self.cluster.metadata.all_hosts()] # we're manipulating metadata - do it on all hosts self.cluster.refresh_table_metadata(ks, t) @@ -1012,8 +1011,8 @@ class Ext1(Ext0): # extensions registered, one present # -------------------------------------- - ext_map = {Ext0.name: six.b("THA VALUE"), - Ext1.name: six.b("OTHA VALUE")} + ext_map = {Ext0.name: b"THA VALUE", + Ext1.name: b"OTHA VALUE"} [(s.execute(update_t, (ext_map, ks, t)), s.execute(update_v, (ext_map, ks, v))) for _ in self.cluster.metadata.all_hosts()] # we're manipulating metadata - do it on all hosts self.cluster.refresh_table_metadata(ks, t) @@ -1046,7 +1045,7 @@ def test_export_schema(self): cluster = TestCluster() cluster.connect() - self.assertIsInstance(cluster.metadata.export_schema_as_string(), six.string_types) + self.assertIsInstance(cluster.metadata.export_schema_as_string(), str) cluster.shutdown() def test_export_keyspace_schema(self): @@ -1059,8 +1058,8 @@ def test_export_keyspace_schema(self): for keyspace in cluster.metadata.keyspaces: keyspace_metadata = cluster.metadata.keyspaces[keyspace] - self.assertIsInstance(keyspace_metadata.export_as_string(), six.string_types) - self.assertIsInstance(keyspace_metadata.as_cql_query(), six.string_types) + self.assertIsInstance(keyspace_metadata.export_as_string(), str) + self.assertIsInstance(keyspace_metadata.as_cql_query(), str) cluster.shutdown() def assert_equal_diff(self, received, expected): @@ -1238,8 +1237,8 @@ def test_replicas(self): cluster.connect('test3rf') - self.assertNotEqual(list(cluster.metadata.get_replicas('test3rf', six.b('key'))), []) - host = list(cluster.metadata.get_replicas('test3rf', six.b('key')))[0] + self.assertNotEqual(list(cluster.metadata.get_replicas('test3rf', b'key')), []) + host = list(cluster.metadata.get_replicas('test3rf', b'key'))[0] self.assertEqual(host.datacenter, 'dc1') self.assertEqual(host.rack, 'r1') cluster.shutdown() diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 8d2a3d74e2..8157e4c96a 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -34,7 +34,6 @@ import re import mock -import six log = logging.getLogger(__name__) @@ -460,10 +459,10 @@ def make_query_plan(self, working_keyspace=None, query=None): try: host = [live_hosts[self.host_index_to_use]] except IndexError as e: - six.raise_from(IndexError( + raise IndexError( 'You specified an index larger than the number of hosts. Total hosts: {}. Index specified: {}'.format( len(live_hosts), self.host_index_to_use - )), e) + )) from e return host diff --git a/tests/integration/standard/test_query_paging.py b/tests/integration/standard/test_query_paging.py index 8e0ca8becc..26c1ca0da6 100644 --- a/tests/integration/standard/test_query_paging.py +++ b/tests/integration/standard/test_query_paging.py @@ -19,7 +19,6 @@ import unittest from itertools import cycle, count -from six.moves import range from threading import Event from cassandra import ConsistencyLevel diff --git a/tests/integration/standard/test_single_interface.py b/tests/integration/standard/test_single_interface.py index 8d407be958..3a49541032 100644 --- a/tests/integration/standard/test_single_interface.py +++ b/tests/integration/standard/test_single_interface.py @@ -14,8 +14,6 @@ import unittest -import six - from cassandra import ConsistencyLevel from cassandra.query import SimpleStatement @@ -54,7 +52,7 @@ def test_single_interface(self): broadcast_rpc_ports = [] broadcast_ports = [] self.assertEqual(len(hosts), 3) - for endpoint, host in six.iteritems(hosts): + for endpoint, host in hosts.items(): self.assertEqual(endpoint.address, host.broadcast_rpc_address) self.assertEqual(endpoint.port, host.broadcast_rpc_port) diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index 6e2e9f7328..016c2b9785 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -15,9 +15,9 @@ import unittest from datetime import datetime +import ipaddress import math from packaging.version import Version -import six import cassandra from cassandra import InvalidRequest @@ -60,25 +60,7 @@ def test_can_insert_blob_type_as_string(self): params = ['key1', b'blobbyblob'] query = "INSERT INTO blobstring (a, b) VALUES (%s, %s)" - # In python2, with Cassandra > 2.0, we don't treat the 'byte str' type as a blob, so we'll encode it - # as a string literal and have the following failure. - if six.PY2 and self.cql_version >= (3, 1, 0): - # Blob values can't be specified using string notation in CQL 3.1.0 and - # above which is used by default in Cassandra 2.0. - if self.cass_version >= (2, 1, 0): - msg = r'.*Invalid STRING constant \(.*?\) for "b" of type blob.*' - else: - msg = r'.*Invalid STRING constant \(.*?\) for b of type blob.*' - self.assertRaisesRegex(InvalidRequest, msg, s.execute, query, params) - return - - # In python2, with Cassandra < 2.0, we can manually encode the 'byte str' type as hex for insertion in a blob. - if six.PY2: - cass_params = [params[0], params[1].encode('hex')] - s.execute(query, cass_params) - # In python 3, the 'bytes' type is treated as a blob, so we can correctly encode it with hex notation. - else: - s.execute(query, params) + s.execute(query, params) results = s.execute("SELECT * FROM blobstring")[0] for expected, actual in zip(params, results): @@ -176,10 +158,9 @@ def test_can_insert_primitive_datatypes(self): # verify data result = s.execute("SELECT {0} FROM alltypes WHERE zz=%s".format(single_columns_string), (key,))[0][1] compare_value = data_sample - if six.PY3: - import ipaddress - if isinstance(data_sample, ipaddress.IPv4Address) or isinstance(data_sample, ipaddress.IPv6Address): - compare_value = str(data_sample) + + if isinstance(data_sample, ipaddress.IPv4Address) or isinstance(data_sample, ipaddress.IPv6Address): + compare_value = str(data_sample) self.assertEqual(result, compare_value) # try the same thing with a prepared statement @@ -1061,7 +1042,7 @@ def _daterange_round_trip(self, to_insert, expected=None): dr = results[0].dr # sometimes this is truncated in the assertEqual output on failure; - if isinstance(expected, six.string_types): + if isinstance(expected, str): self.assertEqual(str(dr), expected) else: self.assertEqual(dr, expected or to_insert) @@ -1115,7 +1096,7 @@ def _daterange_round_trip(self, to_insert, expected=None): dr = results[0].dr # sometimes this is truncated in the assertEqual output on failure; - if isinstance(expected, six.string_types): + if isinstance(expected, str): self.assertEqual(str(dr), expected) else: self.assertEqual(dr, expected or to_insert) diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index 4c7826fb98..ae056d7773 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -16,7 +16,6 @@ from collections import namedtuple from functools import partial -import six from cassandra import InvalidRequest from cassandra.cluster import UserTypeDoesNotExist, ExecutionProfile, EXEC_PROFILE_DEFAULT @@ -291,9 +290,9 @@ def test_can_insert_udts_with_nulls(self): self.assertEqual((None, None, None, None), s.execute(select)[0].b) # also test empty strings - s.execute(insert, [User('', None, None, six.binary_type())]) + s.execute(insert, [User('', None, None, bytes())]) results = s.execute("SELECT b FROM mytable WHERE a=0") - self.assertEqual(('', None, None, six.binary_type()), results[0].b) + self.assertEqual(('', None, None, bytes()), results[0].b) c.shutdown() @@ -718,7 +717,7 @@ def test_type_alteration(self): s.execute("INSERT INTO %s (k, v) VALUES (0, {v0 : 3, v1 : 0xdeadbeef})" % (self.table_name,)) val = s.execute('SELECT v FROM %s' % self.table_name)[0][0] self.assertEqual(val['v0'], 3) - self.assertEqual(val['v1'], six.b('\xde\xad\xbe\xef')) + self.assertEqual(val['v1'], b'\xde\xad\xbe\xef') @lessthancass30 def test_alter_udt(self): diff --git a/tests/unit/advanced/cloud/test_cloud.py b/tests/unit/advanced/cloud/test_cloud.py index a7cd83a8ce..f253e70454 100644 --- a/tests/unit/advanced/cloud/test_cloud.py +++ b/tests/unit/advanced/cloud/test_cloud.py @@ -9,7 +9,6 @@ import tempfile import os import shutil -import six import unittest @@ -96,8 +95,7 @@ def clean_tmp_dir(): } # The directory is not writtable.. we expect a permission error - exc = PermissionError if six.PY3 else OSError - with self.assertRaises(exc): + with self.assertRaises(PermissionError): cloud.get_cloud_config(config) # With use_default_tempdir, we expect an connection refused diff --git a/tests/unit/advanced/test_graph.py b/tests/unit/advanced/test_graph.py index a98a48c82f..2870b9b1ee 100644 --- a/tests/unit/advanced/test_graph.py +++ b/tests/unit/advanced/test_graph.py @@ -17,8 +17,6 @@ import unittest -import six - from cassandra import ConsistencyLevel from cassandra.policies import RetryPolicy from cassandra.graph import (SimpleGraphStatement, GraphOptions, GraphProtocol, Result, @@ -278,7 +276,7 @@ def test_get_options(self): other = GraphOptions(**kwargs) options = base.get_options_map(other) updated = self.opt_mapping['graph_name'] - self.assertEqual(options[updated], six.b('unit_test')) + self.assertEqual(options[updated], b'unit_test') for name in (n for n in self.opt_mapping.values() if n != updated): self.assertEqual(options[name], base._graph_options[name]) @@ -288,22 +286,22 @@ def test_get_options(self): def test_set_attr(self): expected = 'test@@@@' opts = GraphOptions(graph_name=expected) - self.assertEqual(opts.graph_name, six.b(expected)) + self.assertEqual(opts.graph_name, expected.encode()) expected = 'somethingelse####' opts.graph_name = expected - self.assertEqual(opts.graph_name, six.b(expected)) + self.assertEqual(opts.graph_name, expected.encode()) # will update options with set value another = GraphOptions() self.assertIsNone(another.graph_name) another.update(opts) - self.assertEqual(another.graph_name, six.b(expected)) + self.assertEqual(another.graph_name, expected.encode()) opts.graph_name = None self.assertIsNone(opts.graph_name) # will not update another with its set-->unset value another.update(opts) - self.assertEqual(another.graph_name, six.b(expected)) # remains unset + self.assertEqual(another.graph_name, expected.encode()) # remains unset opt_map = another.get_options_map(opts) self.assertEqual(opt_map, another._graph_options) @@ -318,7 +316,7 @@ def _verify_api_params(self, opts, api_params): self.assertEqual(len(opts._graph_options), len(api_params)) for name, value in api_params.items(): try: - value = six.b(value) + value = value.encode() except: pass # already bytes self.assertEqual(getattr(opts, name), value) @@ -335,8 +333,8 @@ def test_consistency_levels(self): # mapping from base opt_map = opts.get_options_map() - self.assertEqual(opt_map['graph-read-consistency'], six.b(ConsistencyLevel.value_to_name[read_cl])) - self.assertEqual(opt_map['graph-write-consistency'], six.b(ConsistencyLevel.value_to_name[write_cl])) + self.assertEqual(opt_map['graph-read-consistency'], ConsistencyLevel.value_to_name[read_cl].encode()) + self.assertEqual(opt_map['graph-write-consistency'], ConsistencyLevel.value_to_name[write_cl].encode()) # empty by default new_opts = GraphOptions() @@ -346,8 +344,8 @@ def test_consistency_levels(self): # set from other opt_map = new_opts.get_options_map(opts) - self.assertEqual(opt_map['graph-read-consistency'], six.b(ConsistencyLevel.value_to_name[read_cl])) - self.assertEqual(opt_map['graph-write-consistency'], six.b(ConsistencyLevel.value_to_name[write_cl])) + self.assertEqual(opt_map['graph-read-consistency'], ConsistencyLevel.value_to_name[read_cl].encode()) + self.assertEqual(opt_map['graph-write-consistency'], ConsistencyLevel.value_to_name[write_cl].encode()) def test_graph_source_convenience_attributes(self): opts = GraphOptions() diff --git a/tests/unit/cqlengine/test_connection.py b/tests/unit/cqlengine/test_connection.py index 962ee06b52..56136b6e8b 100644 --- a/tests/unit/cqlengine/test_connection.py +++ b/tests/unit/cqlengine/test_connection.py @@ -14,8 +14,6 @@ import unittest -import six - from cassandra.cluster import _ConfigMode from cassandra.cqlengine import connection from cassandra.query import dict_factory diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index ddfa2c3198..1475347ca6 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -26,8 +26,7 @@ import random from functools import wraps from itertools import cycle -import six -from six import binary_type, BytesIO +from io import BytesIO from mock import Mock import errno @@ -202,7 +201,7 @@ def set_socket(self, connection, obj): return setattr(connection, self.socket_attr_name, obj) def make_header_prefix(self, message_class, version=2, stream_id=0): - return binary_type().join(map(uint8_pack, [ + return bytes().join(map(uint8_pack, [ 0xff & (HEADER_DIRECTION_TO_CLIENT | version), 0, # flags (compression) stream_id, @@ -230,7 +229,7 @@ def make_error_body(self, code, msg): write_string(buf, msg) return buf.getvalue() - def make_msg(self, header, body=binary_type()): + def make_msg(self, header, body=bytes()): return header + uint32_pack(len(body)) + body def test_successful_connection(self): @@ -289,7 +288,7 @@ def recv_side_effect(*args): c.process_io_buffer = Mock() def chunk(size): - return six.b('a') * size + return b'a' * size buf_size = c.in_buffer_size @@ -436,7 +435,7 @@ def test_partial_header_read(self): self.get_socket(c).recv.return_value = message[1:] c.handle_read(*self.null_handle_function_args) - self.assertEqual(six.binary_type(), c._io_buffer.io_buffer.getvalue()) + self.assertEqual(bytes(), c._io_buffer.io_buffer.getvalue()) # let it write out a StartupMessage c.handle_write(*self.null_handle_function_args) @@ -463,7 +462,7 @@ def test_partial_message_read(self): # ... then read in the rest self.get_socket(c).recv.return_value = message[9:] c.handle_read(*self.null_handle_function_args) - self.assertEqual(six.binary_type(), c._io_buffer.io_buffer.getvalue()) + self.assertEqual(bytes(), c._io_buffer.io_buffer.getvalue()) # let it write out a StartupMessage c.handle_write(*self.null_handle_function_args) @@ -499,7 +498,7 @@ def test_mixed_message_and_buffer_sizes(self): for i in range(1, 15): c.process_io_buffer.reset_mock() c._io_buffer._io_buffer = io.BytesIO() - message = io.BytesIO(six.b('a') * (2**i)) + message = io.BytesIO(b'a' * (2**i)) def recv_side_effect(*args): if random.randint(1,10) % 3 == 0: diff --git a/tests/unit/test_auth.py b/tests/unit/test_auth.py index 68cce526e7..0a2427c7ff 100644 --- a/tests/unit/test_auth.py +++ b/tests/unit/test_auth.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six from cassandra.auth import PlainTextAuthenticator import unittest @@ -24,6 +23,6 @@ class TestPlainTextAuthenticator(unittest.TestCase): def test_evaluate_challenge_with_unicode_data(self): authenticator = PlainTextAuthenticator("johnӁ", "doeӁ") self.assertEqual( - authenticator.evaluate_challenge(six.ensure_binary('PLAIN-START')), - six.ensure_binary("\x00johnӁ\x00doeӁ") + authenticator.evaluate_challenge(b'PLAIN-START'), + "\x00johnӁ\x00doeӁ".encode('utf-8') ) diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 6755f118fd..c5f5def082 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -14,7 +14,6 @@ import unittest import logging -import six from mock import patch, Mock @@ -286,7 +285,7 @@ def test_default_profile(self): rf = session.execute_async("query", execution_profile='non-default') self._verify_response_future_profile(rf, non_default_profile) - for name, ep in six.iteritems(cluster.profile_manager.profiles): + for name, ep in cluster.profile_manager.profiles.items(): self.assertEqual(ep, session.get_execution_profile(name)) # invalid ep diff --git a/tests/unit/test_concurrent.py b/tests/unit/test_concurrent.py index 9f67531a3c..3e84220b27 100644 --- a/tests/unit/test_concurrent.py +++ b/tests/unit/test_concurrent.py @@ -19,7 +19,7 @@ from mock import Mock import time import threading -from six.moves.queue import PriorityQueue +from queue import PriorityQueue import sys import platform diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index bc6749a477..1d81376d4a 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -14,8 +14,7 @@ import unittest from mock import Mock, ANY, call, patch -import six -from six import BytesIO +from io import BytesIO import time from threading import Lock @@ -41,14 +40,14 @@ def make_connection(self): def make_header_prefix(self, message_class, version=Connection.protocol_version, stream_id=0): if Connection.protocol_version < 3: - return six.binary_type().join(map(uint8_pack, [ + return bytes().join(map(uint8_pack, [ 0xff & (HEADER_DIRECTION_TO_CLIENT | version), 0, # flags (compression) stream_id, message_class.opcode # opcode ])) else: - return six.binary_type().join(map(uint8_pack, [ + return bytes().join(map(uint8_pack, [ 0xff & (HEADER_DIRECTION_TO_CLIENT | version), 0, # flags (compression) 0, # MSB for v3+ stream diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index 53a5d6affc..cb863da0b1 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -14,8 +14,6 @@ import unittest -import six - from concurrent.futures import ThreadPoolExecutor from mock import Mock, ANY, call @@ -48,7 +46,7 @@ def __init__(self): def get_host(self, endpoint_or_address, port=None): if not isinstance(endpoint_or_address, EndPoint): - for host in six.itervalues(self.hosts): + for host in self.hosts.values(): if (host.address == endpoint_or_address and (port is None or host.broadcast_rpc_port is None or host.broadcast_rpc_port == port)): return host diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index b0a8b63b16..94fed13455 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -17,7 +17,6 @@ import logging from mock import Mock import os -import six import timeit import cassandra @@ -485,11 +484,11 @@ def test_murmur3_c(self): raise unittest.SkipTest('The cmurmur3 extension is not available') def _verify_hash(self, fn): - self.assertEqual(fn(six.b('123')), -7468325962851647638) + self.assertEqual(fn(b'123'), -7468325962851647638) self.assertEqual(fn(b'\x00\xff\x10\xfa\x99' * 10), 5837342703291459765) self.assertEqual(fn(b'\xfe' * 8), -8927430733708461935) self.assertEqual(fn(b'\x10' * 8), 1446172840243228796) - self.assertEqual(fn(six.b(str(cassandra.metadata.MAX_LONG))), 7162290910810015547) + self.assertEqual(fn(str(cassandra.metadata.MAX_LONG).encode()), 7162290910810015547) class MD5TokensTest(unittest.TestCase): @@ -504,28 +503,28 @@ def test_md5_tokens(self): class BytesTokensTest(unittest.TestCase): def test_bytes_tokens(self): - bytes_token = BytesToken(unhexlify(six.b('01'))) - self.assertEqual(bytes_token.value, six.b('\x01')) + bytes_token = BytesToken(unhexlify(b'01')) + self.assertEqual(bytes_token.value, b'\x01') self.assertEqual(str(bytes_token), "" % bytes_token.value) self.assertEqual(bytes_token.hash_fn('123'), '123') self.assertEqual(bytes_token.hash_fn(123), 123) self.assertEqual(bytes_token.hash_fn(str(cassandra.metadata.MAX_LONG)), str(cassandra.metadata.MAX_LONG)) def test_from_string(self): - from_unicode = BytesToken.from_string(six.text_type('0123456789abcdef')) - from_bin = BytesToken.from_string(six.b('0123456789abcdef')) + from_unicode = BytesToken.from_string('0123456789abcdef') + from_bin = BytesToken.from_string(b'0123456789abcdef') self.assertEqual(from_unicode, from_bin) - self.assertIsInstance(from_unicode.value, six.binary_type) - self.assertIsInstance(from_bin.value, six.binary_type) + self.assertIsInstance(from_unicode.value, bytes) + self.assertIsInstance(from_bin.value, bytes) def test_comparison(self): - tok = BytesToken.from_string(six.text_type('0123456789abcdef')) + tok = BytesToken.from_string('0123456789abcdef') token_high_order = uint16_unpack(tok.value[0:2]) self.assertLess(BytesToken(uint16_pack(token_high_order - 1)), tok) self.assertGreater(BytesToken(uint16_pack(token_high_order + 1)), tok) def test_comparison_unicode(self): - value = six.b('\'_-()"\xc2\xac') + value = b'\'_-()"\xc2\xac' t0 = BytesToken(value) t1 = BytesToken.from_string('00') self.assertGreater(t0, t1) @@ -642,7 +641,7 @@ class UnicodeIdentifiersTests(unittest.TestCase): Looking for encoding errors like PYTHON-447 """ - name = six.text_type(b'\'_-()"\xc2\xac'.decode('utf-8')) + name = b'\'_-()"\xc2\xac'.decode('utf-8') def test_keyspace_name(self): km = KeyspaceMetadata(self.name, False, 'SimpleStrategy', {'replication_factor': 1}) diff --git a/tests/unit/test_orderedmap.py b/tests/unit/test_orderedmap.py index 9ca5699204..5d99fc74a8 100644 --- a/tests/unit/test_orderedmap.py +++ b/tests/unit/test_orderedmap.py @@ -16,7 +16,6 @@ from cassandra.util import OrderedMap, OrderedMapSerializedKey from cassandra.cqltypes import EMPTY, UTF8Type, lookup_casstype -import six class OrderedMapTest(unittest.TestCase): def test_init(self): @@ -118,11 +117,11 @@ def test_iter(self): itr = iter(om) self.assertEqual(sum([1 for _ in itr]), len(keys)) - self.assertRaises(StopIteration, six.next, itr) + self.assertRaises(StopIteration, next, itr) self.assertEqual(list(iter(om)), keys) - self.assertEqual(list(six.iteritems(om)), items) - self.assertEqual(list(six.itervalues(om)), values) + self.assertEqual(list(om.items()), items) + self.assertEqual(list(om.values()), values) def test_len(self): self.assertEqual(len(OrderedMap()), 0) diff --git a/tests/unit/test_parameter_binding.py b/tests/unit/test_parameter_binding.py index 8820114dc3..78f3898e01 100644 --- a/tests/unit/test_parameter_binding.py +++ b/tests/unit/test_parameter_binding.py @@ -21,9 +21,6 @@ from cassandra.cqltypes import Int32Type from cassandra.util import OrderedDict -from six.moves import xrange -import six - class ParamBindingTest(unittest.TestCase): @@ -40,7 +37,7 @@ def test_sequence_param(self): self.assertEqual(result, "(1, 'a', 2.0)") def test_generator_param(self): - result = bind_params("%s", ((i for i in xrange(3)),), Encoder()) + result = bind_params("%s", ((i for i in range(3)),), Encoder()) self.assertEqual(result, "[0, 1, 2]") def test_none_param(self): @@ -149,7 +146,7 @@ def test_missing_value(self): def test_extra_value(self): self.bound.bind({'rk0': 0, 'rk1': 0, 'ck0': 0, 'v0': 0, 'should_not_be_here': 123}) # okay to have extra keys in dict - self.assertEqual(self.bound.values, [six.b('\x00') * 4] * 4) # four encoded zeros + self.assertEqual(self.bound.values, [b'\x00' * 4] * 4) # four encoded zeros self.assertRaises(ValueError, self.bound.bind, (0, 0, 0, 0, 123)) def test_values_none(self): diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index ec004ca9fe..f340f58634 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -17,8 +17,7 @@ from itertools import islice, cycle from mock import Mock, patch, call from random import randint -import six -from six.moves._thread import LockType +from _thread import LockType import sys import struct from threading import Thread @@ -37,8 +36,6 @@ from cassandra.pool import Host from cassandra.query import Statement -from six.moves import xrange - class LoadBalancingPolicyTest(unittest.TestCase): def test_non_implemented(self): @@ -75,7 +72,7 @@ def test_multiple_query_plans(self): hosts = [0, 1, 2, 3] policy = RoundRobinPolicy() policy.populate(None, hosts) - for i in xrange(20): + for i in range(20): qplan = list(policy.make_query_plan()) self.assertEqual(sorted(qplan), hosts) @@ -121,17 +118,17 @@ def test_thread_safety_during_modification(self): def check_query_plan(): try: - for i in xrange(100): + for i in range(100): list(policy.make_query_plan()) except Exception as exc: errors.append(exc) def host_up(): - for i in xrange(1000): + for i in range(1000): policy.on_up(randint(0, 99)) def host_down(): - for i in xrange(1000): + for i in range(1000): policy.on_down(randint(0, 99)) threads = [] @@ -142,7 +139,7 @@ def host_down(): # make the GIL switch after every instruction, maximizing # the chance of race conditions - check = six.PY2 or '__pypy__' in sys.builtin_module_names + check = '__pypy__' in sys.builtin_module_names if check: original_interval = sys.getcheckinterval() else: diff --git a/tests/unit/test_protocol.py b/tests/unit/test_protocol.py index 0f251ffc0e..eec9d73ca4 100644 --- a/tests/unit/test_protocol.py +++ b/tests/unit/test_protocol.py @@ -14,7 +14,6 @@ import unittest -import six from mock import Mock from cassandra import ProtocolVersion, UnsupportedOperation diff --git a/tests/unit/test_query.py b/tests/unit/test_query.py index 2a2901aaff..8a3f00fa9d 100644 --- a/tests/unit/test_query.py +++ b/tests/unit/test_query.py @@ -14,8 +14,6 @@ import unittest -import six - from cassandra.query import BatchStatement, SimpleStatement @@ -25,7 +23,7 @@ class BatchStatementTest(unittest.TestCase): def test_clear(self): keyspace = 'keyspace' routing_key = 'routing_key' - custom_payload = {'key': six.b('value')} + custom_payload = {'key': b'value'} ss = SimpleStatement('whatever', keyspace=keyspace, routing_key=routing_key, custom_payload=custom_payload) diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index 273490072f..8d5850413e 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -17,7 +17,6 @@ from collections import deque from threading import RLock -import six from mock import Mock, MagicMock, ANY from cassandra import ConsistencyLevel, Unavailable, SchemaTargetType, SchemaChangeType, OperationTimedOut diff --git a/tests/unit/test_segment.py b/tests/unit/test_segment.py index f794b38b1d..0d0f146c16 100644 --- a/tests/unit/test_segment.py +++ b/tests/unit/test_segment.py @@ -14,7 +14,7 @@ import unittest -import six +from io import BytesIO from cassandra import DriverException from cassandra.segment import Segment, CrcException @@ -22,8 +22,6 @@ def to_bits(b): - if six.PY2: - b = six.byte2int(b) return '{:08b}'.format(b) class SegmentCodecTest(unittest.TestCase): @@ -50,7 +48,7 @@ def _header_to_bits(data): return bits[7:24] + bits[6:7] + bits[:6] def test_encode_uncompressed_header(self): - buffer = six.BytesIO() + buffer = BytesIO() segment_codec_no_compression.encode_header(buffer, len(self.small_msg), -1, True) self.assertEqual(buffer.tell(), 6) self.assertEqual( @@ -59,7 +57,7 @@ def test_encode_uncompressed_header(self): @unittest.skipUnless(segment_codec_lz4, ' lz4 not installed') def test_encode_compressed_header(self): - buffer = six.BytesIO() + buffer = BytesIO() compressed_length = len(segment_codec_lz4.compress(self.small_msg)) segment_codec_lz4.encode_header(buffer, compressed_length, len(self.small_msg), True) @@ -69,7 +67,7 @@ def test_encode_compressed_header(self): "{:017b}".format(compressed_length) + "00000000000110010" + "1" + "00000") def test_encode_uncompressed_header_with_max_payload(self): - buffer = six.BytesIO() + buffer = BytesIO() segment_codec_no_compression.encode_header(buffer, len(self.max_msg), -1, True) self.assertEqual(buffer.tell(), 6) self.assertEqual( @@ -77,13 +75,13 @@ def test_encode_uncompressed_header_with_max_payload(self): "11111111111111111" + "1" + "000000") def test_encode_header_fails_if_payload_too_big(self): - buffer = six.BytesIO() + buffer = BytesIO() for codec in [c for c in [segment_codec_no_compression, segment_codec_lz4] if c is not None]: with self.assertRaises(DriverException): codec.encode_header(buffer, len(self.large_msg), -1, False) def test_encode_uncompressed_header_not_self_contained_msg(self): - buffer = six.BytesIO() + buffer = BytesIO() # simulate the first chunk with the max size segment_codec_no_compression.encode_header(buffer, len(self.max_msg), -1, False) self.assertEqual(buffer.tell(), 6) @@ -95,7 +93,7 @@ def test_encode_uncompressed_header_not_self_contained_msg(self): @unittest.skipUnless(segment_codec_lz4, ' lz4 not installed') def test_encode_compressed_header_with_max_payload(self): - buffer = six.BytesIO() + buffer = BytesIO() compressed_length = len(segment_codec_lz4.compress(self.max_msg)) segment_codec_lz4.encode_header(buffer, compressed_length, len(self.max_msg), True) self.assertEqual(buffer.tell(), 8) @@ -105,7 +103,7 @@ def test_encode_compressed_header_with_max_payload(self): @unittest.skipUnless(segment_codec_lz4, ' lz4 not installed') def test_encode_compressed_header_not_self_contained_msg(self): - buffer = six.BytesIO() + buffer = BytesIO() # simulate the first chunk with the max size compressed_length = len(segment_codec_lz4.compress(self.max_msg)) segment_codec_lz4.encode_header(buffer, compressed_length, len(self.max_msg), False) @@ -118,7 +116,7 @@ def test_encode_compressed_header_not_self_contained_msg(self): "00000")) def test_decode_uncompressed_header(self): - buffer = six.BytesIO() + buffer = BytesIO() segment_codec_no_compression.encode_header(buffer, len(self.small_msg), -1, True) buffer.seek(0) header = segment_codec_no_compression.decode_header(buffer) @@ -128,7 +126,7 @@ def test_decode_uncompressed_header(self): @unittest.skipUnless(segment_codec_lz4, ' lz4 not installed') def test_decode_compressed_header(self): - buffer = six.BytesIO() + buffer = BytesIO() compressed_length = len(segment_codec_lz4.compress(self.small_msg)) segment_codec_lz4.encode_header(buffer, compressed_length, len(self.small_msg), True) buffer.seek(0) @@ -138,7 +136,7 @@ def test_decode_compressed_header(self): self.assertEqual(header.is_self_contained, True) def test_decode_header_fails_if_corrupted(self): - buffer = six.BytesIO() + buffer = BytesIO() segment_codec_no_compression.encode_header(buffer, len(self.small_msg), -1, True) # corrupt one byte buffer.seek(buffer.tell()-1) @@ -149,7 +147,7 @@ def test_decode_header_fails_if_corrupted(self): segment_codec_no_compression.decode_header(buffer) def test_decode_uncompressed_self_contained_segment(self): - buffer = six.BytesIO() + buffer = BytesIO() segment_codec_no_compression.encode(buffer, self.small_msg) buffer.seek(0) @@ -163,7 +161,7 @@ def test_decode_uncompressed_self_contained_segment(self): @unittest.skipUnless(segment_codec_lz4, ' lz4 not installed') def test_decode_compressed_self_contained_segment(self): - buffer = six.BytesIO() + buffer = BytesIO() segment_codec_lz4.encode(buffer, self.small_msg) buffer.seek(0) @@ -176,7 +174,7 @@ def test_decode_compressed_self_contained_segment(self): self.assertEqual(segment.payload, self.small_msg) def test_decode_multi_segments(self): - buffer = six.BytesIO() + buffer = BytesIO() segment_codec_no_compression.encode(buffer, self.large_msg) buffer.seek(0) @@ -194,7 +192,7 @@ def test_decode_multi_segments(self): @unittest.skipUnless(segment_codec_lz4, ' lz4 not installed') def test_decode_fails_if_corrupted(self): - buffer = six.BytesIO() + buffer = BytesIO() segment_codec_lz4.encode(buffer, self.small_msg) buffer.seek(buffer.tell()-1) buffer.write(b'0') @@ -205,7 +203,7 @@ def test_decode_fails_if_corrupted(self): @unittest.skipUnless(segment_codec_lz4, ' lz4 not installed') def test_decode_tiny_msg_not_compressed(self): - buffer = six.BytesIO() + buffer = BytesIO() segment_codec_lz4.encode(buffer, b'b') buffer.seek(0) header = segment_codec_lz4.decode_header(buffer) diff --git a/tests/unit/test_timestamps.py b/tests/unit/test_timestamps.py index fc1be071ad..ef8ac36f7b 100644 --- a/tests/unit/test_timestamps.py +++ b/tests/unit/test_timestamps.py @@ -15,7 +15,6 @@ import unittest import mock -import six from cassandra import timestamps from threading import Thread, Lock @@ -106,10 +105,7 @@ def assertLastCallArgRegex(self, call, pattern): last_warn_args, last_warn_kwargs = call self.assertEqual(len(last_warn_args), 1) self.assertEqual(len(last_warn_kwargs), 0) - six.assertRegex(self, - last_warn_args[0], - pattern, - ) + self.assertRegex(last_warn_args[0], pattern) def test_basic_log_content(self): """ diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index e85f5dbe67..a06bbd452d 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -18,8 +18,6 @@ import time from binascii import unhexlify -import six - import cassandra from cassandra import util from cassandra.cqltypes import ( @@ -167,7 +165,7 @@ def __init__(self, subtypes, names): @classmethod def apply_parameters(cls, subtypes, names): - return cls(subtypes, [unhexlify(six.b(name)) if name is not None else name for name in names]) + return cls(subtypes, [unhexlify(name.encode()) if name is not None else name for name in names]) class BarType(FooType): typename = 'org.apache.cassandra.db.marshal.BarType' @@ -556,8 +554,8 @@ class no_bounds_object(object): self.assertRaises(ValueError, DateRangeType.serialize, no_bounds_object, 5) def test_serialized_value_round_trip(self): - vals = [six.b('\x01\x00\x00\x01%\xe9a\xf9\xd1\x06\x00\x00\x01v\xbb>o\xff\x00'), - six.b('\x01\x00\x00\x00\xdcm\x03-\xd1\x06\x00\x00\x01v\xbb>o\xff\x00')] + vals = [b'\x01\x00\x00\x01%\xe9a\xf9\xd1\x06\x00\x00\x01v\xbb>o\xff\x00', + b'\x01\x00\x00\x00\xdcm\x03-\xd1\x06\x00\x00\x01v\xbb>o\xff\x00'] for serialized in vals: self.assertEqual( serialized, diff --git a/tox.ini b/tox.ini index 4aae7a0140..7d4dfe898e 100644 --- a/tox.ini +++ b/tox.ini @@ -4,7 +4,6 @@ envlist = py{37,38},pypy [base] deps = nose mock<=1.0.1 - six packaging cython eventlet From 7845379b61fd50d4c3d6f20b215454f3d74d2c28 Mon Sep 17 00:00:00 2001 From: MohammadHossein Shahmohammadi Date: Mon, 30 Oct 2023 18:04:53 +0330 Subject: [PATCH 1940/2005] docs: Fix typo in add_callbacks (#1177) --- docs/api/cassandra/cluster.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 2b3d7828a8..a9a9d378a4 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -215,7 +215,7 @@ .. automethod:: add_errback(fn, *args, **kwargs) - .. automethod:: add_callbacks(callback, errback, callback_args=(), callback_kwargs=None, errback_args=(), errback_args=None) + .. automethod:: add_callbacks(callback, errback, callback_args=(), callback_kwargs=None, errback_args=(), errback_kwargs=None) .. autoclass:: ResultSet () :members: From 0e25845502ce4fed12648c365189c7805dc28c1d Mon Sep 17 00:00:00 2001 From: Brad Schoening <5796692+bschoening@users.noreply.github.com> Date: Mon, 6 Nov 2023 13:54:06 -0500 Subject: [PATCH 1941/2005] Removed backup(.bak) files (#1185) --- Jenkinsfile.bak | 873 ------------------------------------------------ build.yaml.bak | 264 --------------- 2 files changed, 1137 deletions(-) delete mode 100644 Jenkinsfile.bak delete mode 100644 build.yaml.bak diff --git a/Jenkinsfile.bak b/Jenkinsfile.bak deleted file mode 100644 index 87b20804ca..0000000000 --- a/Jenkinsfile.bak +++ /dev/null @@ -1,873 +0,0 @@ -#!groovy - -def initializeEnvironment() { - env.DRIVER_DISPLAY_NAME = 'Cassandra Python Driver' - env.DRIVER_METRIC_TYPE = 'oss' - if (env.GIT_URL.contains('riptano/python-driver')) { - env.DRIVER_DISPLAY_NAME = 'private ' + env.DRIVER_DISPLAY_NAME - env.DRIVER_METRIC_TYPE = 'oss-private' - } else if (env.GIT_URL.contains('python-dse-driver')) { - env.DRIVER_DISPLAY_NAME = 'DSE Python Driver' - env.DRIVER_METRIC_TYPE = 'dse' - } - - env.GIT_SHA = "${env.GIT_COMMIT.take(7)}" - env.GITHUB_PROJECT_URL = "https://${GIT_URL.replaceFirst(/(git@|http:\/\/|https:\/\/)/, '').replace(':', '/').replace('.git', '')}" - env.GITHUB_BRANCH_URL = "${GITHUB_PROJECT_URL}/tree/${env.BRANCH_NAME}" - env.GITHUB_COMMIT_URL = "${GITHUB_PROJECT_URL}/commit/${env.GIT_COMMIT}" - - sh label: 'Assign Python global environment', script: '''#!/bin/bash -lex - pyenv global ${PYTHON_VERSION} - ''' - - sh label: 'Install socat; required for unix socket tests', script: '''#!/bin/bash -lex - sudo apt-get install socat - ''' - - sh label: 'Install the latest setuptools', script: '''#!/bin/bash -lex - pip install --upgrade pip - pip install -U setuptools - ''' - - sh label: 'Install CCM', script: '''#!/bin/bash -lex - pip install ${HOME}/ccm - ''' - - // Determine if server version is Apache Cassandra� or DataStax Enterprise - if (env.CASSANDRA_VERSION.split('-')[0] == 'dse') { - sh label: 'Install DataStax Enterprise requirements', script: '''#!/bin/bash -lex - pip install -r test-datastax-requirements.txt - ''' - } else { - sh label: 'Install Apache CassandraⓇ requirements', script: '''#!/bin/bash -lex - pip install -r test-requirements.txt - ''' - - sh label: 'Uninstall the geomet dependency since it is not required for Cassandra', script: '''#!/bin/bash -lex - pip uninstall -y geomet - ''' - - } - - sh label: 'Install unit test modules', script: '''#!/bin/bash -lex - pip install nose-ignore-docstring nose-exclude service_identity - ''' - - if (env.CYTHON_ENABLED == 'True') { - sh label: 'Install cython modules', script: '''#!/bin/bash -lex - pip install cython numpy - ''' - } - - sh label: 'Download Apache CassandraⓇ or DataStax Enterprise', script: '''#!/bin/bash -lex - . ${CCM_ENVIRONMENT_SHELL} ${CASSANDRA_VERSION} - ''' - - sh label: 'Display Python and environment information', script: '''#!/bin/bash -le - # Load CCM environment variables - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - python --version - pip --version - printenv | sort - ''' -} - -def installDriverAndCompileExtensions() { - if (env.CYTHON_ENABLED == 'True') { - sh label: 'Install the driver and compile with C extensions with Cython', script: '''#!/bin/bash -lex - python setup.py build_ext --inplace - ''' - } else { - sh label: 'Install the driver and compile with C extensions without Cython', script: '''#!/bin/bash -lex - python setup.py build_ext --inplace --no-cython - ''' - } -} - -def executeStandardTests() { - - sh label: 'Execute unit tests', script: '''#!/bin/bash -lex - # Load CCM environment variables - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml tests/unit/ || true - EVENT_LOOP_MANAGER=eventlet VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || true - EVENT_LOOP_MANAGER=gevent VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || true - ''' - - sh label: 'Execute Simulacron integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variables - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - SIMULACRON_JAR="${HOME}/simulacron.jar" - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true - - # Run backpressure tests separately to avoid memory issue - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_1_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_paused_connections || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_2_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_queued_requests_timeout || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_3_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_cluster_busy || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_4_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_node_busy || true - ''' - - sh label: 'Execute CQL engine integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variables - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true - ''' - - sh label: 'Execute Apache CassandraⓇ integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variables - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true - ''' - - if (env.CASSANDRA_VERSION.split('-')[0] == 'dse' && env.CASSANDRA_VERSION.split('-')[1] != '4.8') { - sh label: 'Execute DataStax Enterprise integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} DSE_VERSION=${DSE_VERSION} ADS_HOME="${HOME}/" VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=dse_results.xml tests/integration/advanced/ || true - ''' - } - - sh label: 'Execute DataStax Constellation integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CLOUD_PROXY_PATH="${HOME}/proxy/" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=advanced_results.xml tests/integration/cloud/ || true - ''' - - if (env.EXECUTE_LONG_TESTS == 'True') { - sh label: 'Execute long running integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true - ''' - } -} - -def executeDseSmokeTests() { - sh label: 'Execute profile DataStax Enterprise smoke test integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} DSE_VERSION=${DSE_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/test_dse.py || true - ''' -} - -def executeEventLoopTests() { - sh label: 'Execute profile event loop manager integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_TESTS=( - "tests/integration/standard/test_cluster.py" - "tests/integration/standard/test_concurrent.py" - "tests/integration/standard/test_connection.py" - "tests/integration/standard/test_control_connection.py" - "tests/integration/standard/test_metrics.py" - "tests/integration/standard/test_query.py" - "tests/integration/simulacron/test_endpoint.py" - "tests/integration/long/test_ssl.py" - ) - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml ${EVENT_LOOP_TESTS[@]} || true - ''' -} - -def executeUpgradeTests() { - sh label: 'Execute profile upgrade integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=upgrade_results.xml tests/integration/upgrade || true - ''' -} - -def executeTests() { - switch(params.PROFILE) { - case 'DSE-SMOKE-TEST': - executeDseSmokeTests() - break - case 'EVENT-LOOP': - executeEventLoopTests() - break - case 'UPGRADE': - executeUpgradeTests() - break - default: - executeStandardTests() - break - } -} - -def notifySlack(status = 'started') { - // Set the global pipeline scoped environment (this is above each matrix) - env.BUILD_STATED_SLACK_NOTIFIED = 'true' - - def buildType = 'Commit' - if (params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION') { - buildType = "${params.CI_SCHEDULE.toLowerCase().capitalize()}" - } - - def color = 'good' // Green - if (status.equalsIgnoreCase('aborted')) { - color = '808080' // Grey - } else if (status.equalsIgnoreCase('unstable')) { - color = 'warning' // Orange - } else if (status.equalsIgnoreCase('failed')) { - color = 'danger' // Red - } - - def message = """Build ${status} for ${env.DRIVER_DISPLAY_NAME} [${buildType}] -<${env.GITHUB_BRANCH_URL}|${env.BRANCH_NAME}> - <${env.RUN_DISPLAY_URL}|#${env.BUILD_NUMBER}> - <${env.GITHUB_COMMIT_URL}|${env.GIT_SHA}>""" - if (params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION') { - message += " - ${params.CI_SCHEDULE_PYTHON_VERSION} - ${params.EVENT_LOOP_MANAGER}" - } - if (!status.equalsIgnoreCase('Started')) { - message += """ -${status} after ${currentBuild.durationString - ' and counting'}""" - } - - slackSend color: "${color}", - channel: "#python-driver-dev-bots", - message: "${message}" -} - -def submitCIMetrics(buildType) { - long durationMs = currentBuild.duration - long durationSec = durationMs / 1000 - long nowSec = (currentBuild.startTimeInMillis + durationMs) / 1000 - def branchNameNoPeriods = env.BRANCH_NAME.replaceAll('\\.', '_') - def durationMetric = "okr.ci.python.${env.DRIVER_METRIC_TYPE}.${buildType}.${branchNameNoPeriods} ${durationSec} ${nowSec}" - - timeout(time: 1, unit: 'MINUTES') { - withCredentials([string(credentialsId: 'lab-grafana-address', variable: 'LAB_GRAFANA_ADDRESS'), - string(credentialsId: 'lab-grafana-port', variable: 'LAB_GRAFANA_PORT')]) { - withEnv(["DURATION_METRIC=${durationMetric}"]) { - sh label: 'Send runtime metrics to labgrafana', script: '''#!/bin/bash -lex - echo "${DURATION_METRIC}" | nc -q 5 ${LAB_GRAFANA_ADDRESS} ${LAB_GRAFANA_PORT} - ''' - } - } - } -} - -def describePerCommitStage() { - script { - def type = 'standard' - def serverDescription = 'current Apache CassandaraⓇ and supported DataStax Enterprise versions' - if (env.BRANCH_NAME ==~ /long-python.*/) { - type = 'long' - } else if (env.BRANCH_NAME ==~ /dev-python.*/) { - type = 'dev' - } - - currentBuild.displayName = "Per-Commit (${env.EVENT_LOOP_MANAGER} | ${type.capitalize()})" - currentBuild.description = "Per-Commit build and ${type} testing of ${serverDescription} against Python v2.7.18 and v3.5.9 using ${env.EVENT_LOOP_MANAGER} event loop manager" - } - - sh label: 'Describe the python environment', script: '''#!/bin/bash -lex - python -V - pip freeze - ''' -} - -def describeScheduledTestingStage() { - script { - def type = params.CI_SCHEDULE.toLowerCase().capitalize() - def displayName = "${type} schedule (${env.EVENT_LOOP_MANAGER}" - if (env.CYTHON_ENABLED == 'True') { - displayName += " | Cython" - } - if (params.PROFILE != 'NONE') { - displayName += " | ${params.PROFILE}" - } - displayName += ")" - currentBuild.displayName = displayName - - def serverVersionDescription = "${params.CI_SCHEDULE_SERVER_VERSION.replaceAll(' ', ', ')} server version(s) in the matrix" - def pythonVersionDescription = "${params.CI_SCHEDULE_PYTHON_VERSION.replaceAll(' ', ', ')} Python version(s) in the matrix" - def description = "${type} scheduled testing using ${env.EVENT_LOOP_MANAGER} event loop manager" - if (env.CYTHON_ENABLED == 'True') { - description += ", with Cython enabled" - } - if (params.PROFILE != 'NONE') { - description += ", ${params.PROFILE} profile" - } - description += ", ${serverVersionDescription}, and ${pythonVersionDescription}" - currentBuild.description = description - } -} - -def describeAdhocTestingStage() { - script { - def serverType = params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION.split('-')[0] - def serverDisplayName = 'Apache CassandaraⓇ' - def serverVersion = " v${serverType}" - if (serverType == 'ALL') { - serverDisplayName = "all ${serverDisplayName} and DataStax Enterprise server versions" - serverVersion = '' - } else { - try { - serverVersion = " v${env.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION.split('-')[1]}" - } catch (e) { - ;; // no-op - } - if (serverType == 'dse') { - serverDisplayName = 'DataStax Enterprise' - } - } - def displayName = "${params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION} for v${params.ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION} (${env.EVENT_LOOP_MANAGER}" - if (env.CYTHON_ENABLED == 'True') { - displayName += " | Cython" - } - if (params.PROFILE != 'NONE') { - displayName += " | ${params.PROFILE}" - } - displayName += ")" - currentBuild.displayName = displayName - - def description = "Testing ${serverDisplayName} ${serverVersion} using ${env.EVENT_LOOP_MANAGER} against Python ${params.ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION}" - if (env.CYTHON_ENABLED == 'True') { - description += ", with Cython" - } - if (params.PROFILE == 'NONE') { - if (params.EXECUTE_LONG_TESTS) { - description += ", with" - } else { - description += ", without" - } - description += " long tests executed" - } else { - description += ", ${params.PROFILE} profile" - } - currentBuild.description = description - } -} - -def branchPatternCron = ~"(master)" -def riptanoPatternCron = ~"(riptano)" - -pipeline { - agent none - - // Global pipeline timeout - options { - timeout(time: 10, unit: 'HOURS') - buildDiscarder(logRotator(artifactNumToKeepStr: '10', // Keep only the last 10 artifacts - numToKeepStr: '50')) // Keep only the last 50 build records - } - - parameters { - choice( - name: 'ADHOC_BUILD_TYPE', - choices: ['BUILD', 'BUILD-AND-EXECUTE-TESTS'], - description: '''

Perform a adhoc build operation

-
FULL All server versions, python runtimes tested with and without Cython.
DEVELOPSmaller matrix for dev purpose.
CASSANDRA All cassandra server versions.DEFAULT Default to the build context.
2.1Apache CassandraⓇ; v2.1.x
2.2Apache CassandarⓇ; v2.2.x
3.0 Apache CassandraⓇ v3.0.x4.0 Apache CassandraⓇ v4.0.x
dse-5.0.15DataStax Enterprise v5.0.x (Long Term Support)
dse-5.1.35 DataStax Enterprise v5.1.x
dse-6.0.18DataStax Enterprise v6.0.x
dse-6.7.17DataStax Enterprise v6.7.x
dse-6.8.30 DataStax Enterprise v6.8.x (CURRENTLY UNDER DEVELOPMENT)
- - - - - - - - - - - - - - -
ChoiceDescription
BUILDPerforms a Per-Commit build
BUILD-AND-EXECUTE-TESTSPerforms a build and executes the integration and unit tests
''') - choice( - name: 'ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION', - choices: ['2.7.18', '3.4.10', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], - description: 'Python version to use for adhoc BUILD-AND-EXECUTE-TESTS ONLY!') - choice( - name: 'ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION', - choices: ['2.1', // Legacy Apache CassandraⓇ - '2.2', // Legacy Apache CassandraⓇ - '3.0', // Previous Apache CassandraⓇ - '3.11', // Current Apache CassandraⓇ - '4.0', // Development Apache CassandraⓇ - 'dse-5.0', // Long Term Support DataStax Enterprise - 'dse-5.1', // Legacy DataStax Enterprise - 'dse-6.0', // Previous DataStax Enterprise - 'dse-6.7', // Previous DataStax Enterprise - 'dse-6.8', // Current DataStax Enterprise - 'ALL'], - description: '''Apache CassandraⓇ and DataStax Enterprise server version to use for adhoc BUILD-AND-EXECUTE-TESTS ONLY! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
ChoiceDescription
2.1Apache CassandaraⓇ; v2.1.x
2.2Apache CassandarⓇ; v2.2.x
3.0Apache CassandaraⓇ v3.0.x
3.11Apache CassandaraⓇ v3.11.x
4.0Apache CassandaraⓇ v4.x (CURRENTLY UNDER DEVELOPMENT)
dse-5.0DataStax Enterprise v5.0.x (Long Term Support)
dse-5.1DataStax Enterprise v5.1.x
dse-6.0DataStax Enterprise v6.0.x
dse-6.7DataStax Enterprise v6.7.x
dse-6.8DataStax Enterprise v6.8.x (CURRENTLY UNDER DEVELOPMENT)
''') - booleanParam( - name: 'CYTHON', - defaultValue: false, - description: 'Flag to determine if Cython should be enabled for scheduled or adhoc builds') - booleanParam( - name: 'EXECUTE_LONG_TESTS', - defaultValue: false, - description: 'Flag to determine if long integration tests should be executed for scheduled or adhoc builds') - choice( - name: 'EVENT_LOOP_MANAGER', - choices: ['LIBEV', 'GEVENT', 'EVENTLET', 'ASYNCIO', 'ASYNCORE', 'TWISTED'], - description: '''

Event loop manager to utilize for scheduled or adhoc builds

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
ChoiceDescription
LIBEVA full-featured and high-performance event loop that is loosely modeled after libevent, but without its limitations and bugs
GEVENTA co-routine -based Python networking library that uses greenlet to provide a high-level synchronous API on top of the libev or libuv event loop
EVENTLETA concurrent networking library for Python that allows you to change how you run your code, not how you write it
ASYNCIOA library to write concurrent code using the async/await syntax
ASYNCOREA module provides the basic infrastructure for writing asynchronous socket service clients and servers
TWISTEDAn event-driven networking engine written in Python and licensed under the open source MIT license
''') - choice( - name: 'PROFILE', - choices: ['NONE', 'DSE-SMOKE-TEST', 'EVENT-LOOP', 'UPGRADE'], - description: '''

Profile to utilize for scheduled or adhoc builds

- - - - - - - - - - - - - - - - - - - - - - - -
ChoiceDescription
NONEExecute the standard tests for the driver
DSE-SMOKE-TESTExecute only the DataStax Enterprise smoke tests
EVENT-LOOPExecute only the event loop tests for the specified event loop manager (see: EVENT_LOOP_MANAGER)
UPGRADEExecute only the upgrade tests
''') - choice( - name: 'CI_SCHEDULE', - choices: ['DO-NOT-CHANGE-THIS-SELECTION', 'WEEKNIGHTS', 'WEEKENDS'], - description: 'CI testing schedule to execute periodically scheduled builds and tests of the driver (DO NOT CHANGE THIS SELECTION)') - string( - name: 'CI_SCHEDULE_PYTHON_VERSION', - defaultValue: 'DO-NOT-CHANGE-THIS-SELECTION', - description: 'CI testing python version to utilize for scheduled test runs of the driver (DO NOT CHANGE THIS SELECTION)') - string( - name: 'CI_SCHEDULE_SERVER_VERSION', - defaultValue: 'DO-NOT-CHANGE-THIS-SELECTION', - description: 'CI testing server version to utilize for scheduled test runs of the driver (DO NOT CHANGE THIS SELECTION)') - } - - triggers { - parameterizedCron((branchPatternCron.matcher(env.BRANCH_NAME).matches() && !riptanoPatternCron.matcher(GIT_URL).find()) ? """ - # Every weeknight (Monday - Friday) around 4:00 AM - # These schedules will run with and without Cython enabled for Python v2.7.18 and v3.5.9 - H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 - H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 - - # Every Saturday around 12:00, 4:00 and 8:00 PM - # These schedules are for weekly libev event manager runs with and without Cython for most of the Python versions (excludes v3.5.9.x) - H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 - H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.4.10;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 - H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 - H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 - H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 - # These schedules are for weekly gevent event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.10.x) - H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - # These schedules are for weekly eventlet event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.10.x) - H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - - # Every Sunday around 12:00 and 4:00 AM - # These schedules are for weekly asyncore event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.10.x) - H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - # These schedules are for weekly twisted event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.10.x) - H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - """ : "") - } - - environment { - OS_VERSION = 'ubuntu/bionic64/python-driver' - CYTHON_ENABLED = "${params.CYTHON ? 'True' : 'False'}" - EVENT_LOOP_MANAGER = "${params.EVENT_LOOP_MANAGER.toLowerCase()}" - EXECUTE_LONG_TESTS = "${params.EXECUTE_LONG_TESTS ? 'True' : 'False'}" - CCM_ENVIRONMENT_SHELL = '/usr/local/bin/ccm_environment.sh' - CCM_MAX_HEAP_SIZE = '1536M' - } - - stages { - stage ('Per-Commit') { - options { - timeout(time: 2, unit: 'HOURS') - } - when { - beforeAgent true - branch pattern: '((dev|long)-)?python-.*', comparator: 'REGEXP' - allOf { - expression { params.ADHOC_BUILD_TYPE == 'BUILD' } - expression { params.CI_SCHEDULE == 'DO-NOT-CHANGE-THIS-SELECTION' } - not { buildingTag() } - } - } - - matrix { - axes { - axis { - name 'CASSANDRA_VERSION' - values '3.11', // Current Apache Cassandra - 'dse-6.8' // Current DataStax Enterprise - } - axis { - name 'PYTHON_VERSION' - values '2.7.18', '3.5.9' - } - axis { - name 'CYTHON_ENABLED' - values 'False' - } - } - - agent { - label "${OS_VERSION}" - } - - stages { - stage('Initialize-Environment') { - steps { - initializeEnvironment() - script { - if (env.BUILD_STATED_SLACK_NOTIFIED != 'true') { - notifySlack() - } - } - } - } - stage('Describe-Build') { - steps { - describePerCommitStage() - } - } - stage('Install-Driver-And-Compile-Extensions') { - steps { - installDriverAndCompileExtensions() - } - } - stage('Execute-Tests') { - steps { - - script { - if (env.BRANCH_NAME ==~ /long-python.*/) { - withEnv(["EXECUTE_LONG_TESTS=True"]) { - executeTests() - } - } - else { - executeTests() - } - } - } - post { - always { - junit testResults: '*_results.xml' - } - } - } - } - } - post { - always { - node('master') { - submitCIMetrics('commit') - } - } - aborted { - notifySlack('aborted') - } - success { - notifySlack('completed') - } - unstable { - notifySlack('unstable') - } - failure { - notifySlack('FAILED') - } - } - } - - stage ('Scheduled-Testing') { - when { - beforeAgent true - allOf { - expression { params.ADHOC_BUILD_TYPE == 'BUILD' } - expression { params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION' } - not { buildingTag() } - } - } - matrix { - axes { - axis { - name 'CASSANDRA_VERSION' - values '2.1', // Legacy Apache Cassandra - '2.2', // Legacy Apache Cassandra - '3.0', // Previous Apache Cassandra - '3.11', // Current Apache Cassandra - 'dse-5.1', // Legacy DataStax Enterprise - 'dse-6.0', // Previous DataStax Enterprise - 'dse-6.7' // Current DataStax Enterprise - } - axis { - name 'CYTHON_ENABLED' - values 'True', 'False' - } - } - when { - beforeAgent true - allOf { - expression { return params.CI_SCHEDULE_SERVER_VERSION.split(' ').any { it =~ /(ALL|${env.CASSANDRA_VERSION})/ } } - } - } - - environment { - PYTHON_VERSION = "${params.CI_SCHEDULE_PYTHON_VERSION}" - } - agent { - label "${OS_VERSION}" - } - - stages { - stage('Initialize-Environment') { - steps { - initializeEnvironment() - script { - if (env.BUILD_STATED_SLACK_NOTIFIED != 'true') { - notifySlack() - } - } - } - } - stage('Describe-Build') { - steps { - describeScheduledTestingStage() - } - } - stage('Install-Driver-And-Compile-Extensions') { - steps { - installDriverAndCompileExtensions() - } - } - stage('Execute-Tests') { - steps { - executeTests() - } - post { - always { - junit testResults: '*_results.xml' - } - } - } - } - } - post { - aborted { - notifySlack('aborted') - } - success { - notifySlack('completed') - } - unstable { - notifySlack('unstable') - } - failure { - notifySlack('FAILED') - } - } - } - - - stage('Adhoc-Testing') { - when { - beforeAgent true - allOf { - expression { params.ADHOC_BUILD_TYPE == 'BUILD-AND-EXECUTE-TESTS' } - not { buildingTag() } - } - } - - environment { - CYTHON_ENABLED = "${params.CYTHON ? 'True' : 'False'}" - PYTHON_VERSION = "${params.ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION}" - } - - matrix { - axes { - axis { - name 'CASSANDRA_VERSION' - values '2.1', // Legacy Apache Cassandra - '2.2', // Legacy Apache Cassandra - '3.0', // Previous Apache Cassandra - '3.11', // Current Apache Cassandra - '4.0', // Development Apache Cassandra - 'dse-5.0', // Long Term Support DataStax Enterprise - 'dse-5.1', // Legacy DataStax Enterprise - 'dse-6.0', // Previous DataStax Enterprise - 'dse-6.7', // Current DataStax Enterprise - 'dse-6.8' // Development DataStax Enterprise - } - } - when { - beforeAgent true - allOf { - expression { params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION ==~ /(ALL|${env.CASSANDRA_VERSION})/ } - } - } - - agent { - label "${OS_VERSION}" - } - - stages { - stage('Describe-Build') { - steps { - describeAdhocTestingStage() - } - } - stage('Initialize-Environment') { - steps { - initializeEnvironment() - } - } - stage('Install-Driver-And-Compile-Extensions') { - steps { - installDriverAndCompileExtensions() - } - } - stage('Execute-Tests') { - steps { - executeTests() - } - post { - always { - junit testResults: '*_results.xml' - } - } - } - } - } - } - } -} diff --git a/build.yaml.bak b/build.yaml.bak deleted file mode 100644 index 100c86558a..0000000000 --- a/build.yaml.bak +++ /dev/null @@ -1,264 +0,0 @@ -schedules: - nightly_master: - schedule: nightly - disable_pull_requests: true - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='libev' - matrix: - exclude: - - python: [3.6, 3.7, 3.8] - - cassandra: ['2.1', '3.0', '4.0', 'test-dse'] - - commit_long_test: - schedule: per_commit - disable_pull_requests: true - branches: - include: [/long-python.*/] - env_vars: | - EVENT_LOOP_MANAGER='libev' - matrix: - exclude: - - python: [3.6, 3.7, 3.8] - - cassandra: ['2.1', '3.0', 'test-dse'] - - commit_branches: - schedule: per_commit - disable_pull_requests: true - branches: - include: [/python.*/] - env_vars: | - EVENT_LOOP_MANAGER='libev' - EXCLUDE_LONG=1 - matrix: - exclude: - - python: [3.6, 3.7, 3.8] - - cassandra: ['2.1', '3.0', 'test-dse'] - - commit_branches_dev: - schedule: per_commit - disable_pull_requests: true - branches: - include: [/dev-python.*/] - env_vars: | - EVENT_LOOP_MANAGER='libev' - EXCLUDE_LONG=1 - matrix: - exclude: - - python: [2.7, 3.7, 3.6, 3.8] - - cassandra: ['2.0', '2.1', '2.2', '3.0', '4.0', 'test-dse', 'dse-4.8', 'dse-5.0', 'dse-6.0', 'dse-6.8'] - - release_test: - schedule: per_commit - disable_pull_requests: true - branches: - include: [/release-.+/] - env_vars: | - EVENT_LOOP_MANAGER='libev' - - weekly_master: - schedule: 0 10 * * 6 - disable_pull_requests: true - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='libev' - matrix: - exclude: - - python: [3.5] - - cassandra: ['2.2', '3.1'] - - weekly_gevent: - schedule: 0 14 * * 6 - disable_pull_requests: true - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='gevent' - JUST_EVENT_LOOP=1 - - weekly_eventlet: - schedule: 0 18 * * 6 - disable_pull_requests: true - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='eventlet' - JUST_EVENT_LOOP=1 - - weekly_asyncio: - schedule: 0 22 * * 6 - disable_pull_requests: true - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='asyncio' - JUST_EVENT_LOOP=1 - matrix: - exclude: - - python: [2.7] - - weekly_async: - schedule: 0 10 * * 7 - disable_pull_requests: true - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='asyncore' - JUST_EVENT_LOOP=1 - - weekly_twister: - schedule: 0 14 * * 7 - disable_pull_requests: true - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='twisted' - JUST_EVENT_LOOP=1 - - upgrade_tests: - schedule: adhoc - branches: - include: [master, python-546] - env_vars: | - EVENT_LOOP_MANAGER='libev' - JUST_UPGRADE=True - matrix: - exclude: - - python: [3.6, 3.7, 3.8] - - cassandra: ['2.0', '2.1', '2.2', '3.0', '4.0', 'test-dse'] - -python: - - 2.7 - - 3.5 - - 3.6 - - 3.7 - - 3.8 - -os: - - ubuntu/bionic64/python-driver - -cassandra: - - '2.1' - - '2.2' - - '3.0' - - '3.11' - - '4.0' - - 'dse-4.8' - - 'dse-5.0' - - 'dse-5.1' - - 'dse-6.0' - - 'dse-6.7' - - 'dse-6.8.0' - -env: - CYTHON: - - CYTHON - - NO_CYTHON - -build: - - script: | - export JAVA_HOME=$CCM_JAVA_HOME - export PATH=$JAVA_HOME/bin:$PATH - export PYTHONPATH="" - export CCM_MAX_HEAP_SIZE=1024M - - # Required for unix socket tests - sudo apt-get install socat - - # Install latest setuptools - pip install --upgrade pip - pip install -U setuptools - - pip install git+ssh://git@github.com/riptano/ccm-private.git@cassandra-7544-native-ports-with-dse-fix - - #pip install $HOME/ccm - - if [ -n "$CCM_IS_DSE" ]; then - pip install -r test-datastax-requirements.txt - else - pip install -r test-requirements.txt - fi - - pip install nose-ignore-docstring - pip install nose-exclude - pip install service_identity - - FORCE_CYTHON=False - if [[ $CYTHON == 'CYTHON' ]]; then - FORCE_CYTHON=True - pip install cython - pip install numpy - # Install the driver & compile C extensions - python setup.py build_ext --inplace - else - # Install the driver & compile C extensions with no cython - python setup.py build_ext --inplace --no-cython - fi - - echo "JUST_UPGRADE: $JUST_UPGRADE" - if [[ $JUST_UPGRADE == 'True' ]]; then - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=upgrade_results.xml tests/integration/upgrade || true - exit 0 - fi - - if [[ $JUST_SMOKE == 'true' ]]; then - # When we ONLY want to run the smoke tests - echo "JUST_SMOKE: $JUST_SMOKE" - echo "==========RUNNING SMOKE TESTS===========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION DSE_VERSION='6.7.0' MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/test_dse.py || true - exit 0 - fi - - # Run the unit tests, this is not done in travis because - # it takes too much time for the whole matrix to build with cython - if [[ $CYTHON == 'CYTHON' ]]; then - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml tests/unit/ || true - EVENT_LOOP_MANAGER=eventlet VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || true - EVENT_LOOP_MANAGER=gevent VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || true - fi - - if [ -n "$JUST_EVENT_LOOP" ]; then - echo "Running integration event loop subset with $EVENT_LOOP_MANAGER" - EVENT_LOOP_TESTS=( - "tests/integration/standard/test_cluster.py" - "tests/integration/standard/test_concurrent.py" - "tests/integration/standard/test_connection.py" - "tests/integration/standard/test_control_connection.py" - "tests/integration/standard/test_metrics.py" - "tests/integration/standard/test_query.py" - "tests/integration/simulacron/test_endpoint.py" - "tests/integration/long/test_ssl.py" - ) - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" DSE_VERSION=$DSE_VERSION CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml ${EVENT_LOOP_TESTS[@]} || true - exit 0 - fi - - echo "Running with event loop manager: $EVENT_LOOP_MANAGER" - echo "==========RUNNING SIMULACRON TESTS==========" - SIMULACRON_JAR="$HOME/simulacron.jar" - SIMULACRON_JAR=$SIMULACRON_JAR EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_DIR=$CCM_INSTALL_DIR CCM_ARGS="$CCM_ARGS" DSE_VERSION=$DSE_VERSION CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true - - echo "Running with event loop manager: $EVENT_LOOP_MANAGER" - echo "==========RUNNING CQLENGINE TESTS==========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" DSE_VERSION=$DSE_VERSION CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true - - echo "==========RUNNING INTEGRATION TESTS==========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" DSE_VERSION=$DSE_VERSION CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true - - if [ -n "$DSE_VERSION" ] && ! [[ $DSE_VERSION == "4.8"* ]]; then - echo "==========RUNNING DSE INTEGRATION TESTS==========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_DIR=$CCM_INSTALL_DIR DSE_VERSION=$DSE_VERSION ADS_HOME=$HOME/ VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=dse_results.xml tests/integration/advanced/ || true - fi - - echo "==========RUNNING CLOUD TESTS==========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CLOUD_PROXY_PATH="$HOME/proxy/" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=advanced_results.xml tests/integration/cloud/ || true - - if [ -z "$EXCLUDE_LONG" ]; then - echo "==========RUNNING LONG INTEGRATION TESTS==========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" DSE_VERSION=$DSE_VERSION CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true - fi - - - xunit: - - "*_results.xml" From 3ca41e2115f58277c80732eaa892a9b860126de7 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Fri, 10 Nov 2023 16:26:57 -0600 Subject: [PATCH 1942/2005] PYTHON-1366 Handle removal of asyncore in Python 3.12 (#1187) --- cassandra/__init__.py | 16 +++++ cassandra/cluster.py | 67 +++++++++++++------ cassandra/io/asyncorereactor.py | 10 ++- cassandra/io/libevreactor.py | 4 +- tests/__init__.py | 16 +++-- tests/integration/standard/test_connection.py | 15 +++-- 6 files changed, 96 insertions(+), 32 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index b048bd9358..4398c86f69 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -728,3 +728,19 @@ class UnresolvableContactPoints(DriverException): contact points, only when lookup fails for all hosts """ pass + +class DependencyException(Exception): + """ + Specific exception class for handling issues with driver dependencies + """ + + excs = [] + """ + A sequence of child exceptions + """ + + def __init__(self, msg, excs=[]): + complete_msg = msg + if excs: + complete_msg += ("The following exceptions were observed: \n" + '\n'.join(str(e) for e in excs)) + Exception.__init__(self, complete_msg) \ No newline at end of file diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 6514838050..e0c09ca64f 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -24,7 +24,7 @@ from collections.abc import Mapping from concurrent.futures import ThreadPoolExecutor, FIRST_COMPLETED, wait as wait_futures from copy import copy -from functools import partial, wraps +from functools import partial, reduce, wraps from itertools import groupby, count, chain import json import logging @@ -44,7 +44,7 @@ from cassandra import (ConsistencyLevel, AuthenticationFailed, OperationTimedOut, UnsupportedOperation, SchemaTargetType, DriverException, ProtocolVersion, - UnresolvableContactPoints) + UnresolvableContactPoints, DependencyException) from cassandra.auth import _proxy_execute_key, PlainTextAuthProvider from cassandra.connection import (ConnectionException, ConnectionShutdown, ConnectionHeartbeat, ProtocolVersionUnsupported, @@ -111,6 +111,19 @@ except ImportError: from cassandra.util import WeakSet # NOQA +def _is_gevent_monkey_patched(): + if 'gevent.monkey' not in sys.modules: + return False + import gevent.socket + return socket.socket is gevent.socket.socket + +def _try_gevent_import(): + if _is_gevent_monkey_patched(): + from cassandra.io.geventreactor import GeventConnection + return (GeventConnection,None) + else: + return (None,None) + def _is_eventlet_monkey_patched(): if 'eventlet.patcher' not in sys.modules: return False @@ -121,26 +134,42 @@ def _is_eventlet_monkey_patched(): except AttributeError: return False +def _try_eventlet_import(): + if _is_eventlet_monkey_patched(): + from cassandra.io.eventletreactor import EventletConnection + return (EventletConnection,None) + else: + return (None,None) -def _is_gevent_monkey_patched(): - if 'gevent.monkey' not in sys.modules: - return False - import gevent.socket - return socket.socket is gevent.socket.socket - +def _try_libev_import(): + try: + from cassandra.io.libevreactor import LibevConnection + return (LibevConnection,None) + except DependencyException as e: + return (None, e) -# default to gevent when we are monkey patched with gevent, eventlet when -# monkey patched with eventlet, otherwise if libev is available, use that as -# the default because it's fastest. Otherwise, use asyncore. -if _is_gevent_monkey_patched(): - from cassandra.io.geventreactor import GeventConnection as DefaultConnection -elif _is_eventlet_monkey_patched(): - from cassandra.io.eventletreactor import EventletConnection as DefaultConnection -else: +def _try_asyncore_import(): try: - from cassandra.io.libevreactor import LibevConnection as DefaultConnection # NOQA - except ImportError: - from cassandra.io.asyncorereactor import AsyncoreConnection as DefaultConnection # NOQA + from cassandra.io.asyncorereactor import AsyncoreConnection + return (AsyncoreConnection,None) + except DependencyException as e: + return (None, e) + +def _connection_reduce_fn(val,import_fn): + (rv, excs) = val + # If we've already found a workable Connection class return immediately + if rv: + return val + (import_result, exc) = import_fn() + if exc: + excs.append(exc) + return (rv or import_result, excs) + +conn_fns = (_try_gevent_import, _try_eventlet_import, _try_libev_import, _try_asyncore_import) +(conn_class, excs) = reduce(_connection_reduce_fn, conn_fns, (None,[])) +if excs: + raise DependencyException("Exception loading connection class dependencies", excs) +DefaultConnection = conn_class # Forces load of utf8 encoding module to avoid deadlock that occurs # if code that is being imported tries to import the module in a seperate diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index a45d657828..a50b719c5d 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -30,7 +30,15 @@ except ImportError: from cassandra.util import WeakSet # noqa -import asyncore +from cassandra import DependencyException +try: + import asyncore +except ModuleNotFoundError: + raise DependencyException( + "Unable to import asyncore module. Note that this module has been removed in Python 3.12 " + "so when using the driver with this version (or anything newer) you will need to use one of the " + "other event loop implementations." + ) from cassandra.connection import Connection, ConnectionShutdown, NONBLOCKING, Timer, TimerManager diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 484690da89..4d4098ca7b 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -21,13 +21,13 @@ from threading import Lock, Thread import time - +from cassandra import DependencyException from cassandra.connection import (Connection, ConnectionShutdown, NONBLOCKING, Timer, TimerManager) try: import cassandra.io.libevwrapper as libev except ImportError: - raise ImportError( + raise DependencyException( "The C extension needed to use libev was not found. This " "probably means that you didn't have the required build dependencies " "when installing the driver. See " diff --git a/tests/__init__.py b/tests/__init__.py index 48c589c424..4735bbd383 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -20,6 +20,8 @@ import os from concurrent.futures import ThreadPoolExecutor +from cassandra import DependencyException + log = logging.getLogger() log.setLevel('DEBUG') # if nose didn't already attach a log handler, add one here @@ -32,9 +34,12 @@ def is_eventlet_monkey_patched(): if 'eventlet.patcher' not in sys.modules: return False - import eventlet.patcher - return eventlet.patcher.is_monkey_patched('socket') - + try: + import eventlet.patcher + return eventlet.patcher.is_monkey_patched('socket') + # Yet another case related to PYTHON-1364 + except AttributeError: + return False def is_gevent_monkey_patched(): if 'gevent.monkey' not in sys.modules: @@ -86,17 +91,18 @@ def is_monkey_patched(): elif "asyncio" in EVENT_LOOP_MANAGER: from cassandra.io.asyncioreactor import AsyncioConnection connection_class = AsyncioConnection - else: + log.debug("Using default event loop (libev)") try: from cassandra.io.libevreactor import LibevConnection connection_class = LibevConnection - except ImportError as e: + except DependencyException as e: log.debug('Could not import LibevConnection, ' 'using connection_class=None; ' 'failed with error:\n {}'.format( repr(e) )) + log.debug("Will attempt to set connection class at cluster initialization") connection_class = None diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 3323baf20b..463080fc32 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -23,12 +23,9 @@ import time from unittest import SkipTest -from cassandra import ConsistencyLevel, OperationTimedOut +from cassandra import ConsistencyLevel, OperationTimedOut, DependencyException from cassandra.cluster import NoHostAvailable, ConnectionShutdown, ExecutionProfile, EXEC_PROFILE_DEFAULT -import cassandra.io.asyncorereactor -from cassandra.io.asyncorereactor import AsyncoreConnection from cassandra.protocol import QueryMessage -from cassandra.connection import Connection from cassandra.policies import HostFilterPolicy, RoundRobinPolicy, HostStateListener from cassandra.pool import HostConnectionPool @@ -36,10 +33,16 @@ from tests.integration import use_singledc, get_node, CASSANDRA_IP, local, \ requiresmallclockgranularity, greaterthancass20, TestCluster +try: + import cassandra.io.asyncorereactor + from cassandra.io.asyncorereactor import AsyncoreConnection +except DependencyException: + AsyncoreConnection = None + try: from cassandra.io.libevreactor import LibevConnection import cassandra.io.libevreactor -except ImportError: +except DependencyException: LibevConnection = None @@ -440,6 +443,8 @@ class AsyncoreConnectionTests(ConnectionTests, unittest.TestCase): def setUp(self): if is_monkey_patched(): raise unittest.SkipTest("Can't test asyncore with monkey patching") + if AsyncoreConnection is None: + raise unittest.SkipTest('Unable to import asyncore module') ConnectionTests.setUp(self) def clean_global_loop(self): From cea6e006e63ee20b5558d1e355286301adafa2d2 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Mon, 13 Nov 2023 09:22:55 -0600 Subject: [PATCH 1943/2005] PYTHON-1368 Avoid installing DSE deps + executing DSE tests for Python 3.12 (#1188) --- Jenkinsfile | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 7e4a3c4761..d654558b8c 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -177,9 +177,14 @@ def initializeEnvironment() { // Determine if server version is Apache CassandraⓇ or DataStax Enterprise if (env.CASSANDRA_VERSION.split('-')[0] == 'dse') { - sh label: 'Install DataStax Enterprise requirements', script: '''#!/bin/bash -lex - pip install -r test-datastax-requirements.txt - ''' + if (env.PYTHON_VERSION =~ /3\.12\.\d+/) { + echo "Cannot install DSE dependencies for Python 3.12.x. See PYTHON-1368 for more detail." + } + else { + sh label: 'Install DataStax Enterprise requirements', script: '''#!/bin/bash -lex + pip install -r test-datastax-requirements.txt + ''' + } } else { sh label: 'Install Apache CassandraⓇ requirements', script: '''#!/bin/bash -lex pip install -r test-requirements.txt @@ -292,17 +297,22 @@ def executeStandardTests() { ''' if (env.CASSANDRA_VERSION.split('-')[0] == 'dse' && env.CASSANDRA_VERSION.split('-')[1] != '4.8') { - sh label: 'Execute DataStax Enterprise integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} DSE_VERSION=${DSE_VERSION} ADS_HOME="${HOME}/" VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=dse_results.xml tests/integration/advanced/ || true - ''' + if (env.PYTHON_VERSION =~ /3\.12\.\d+/) { + echo "Cannot install DSE dependencies for Python 3.12.x. See PYTHON-1368 for more detail." + } + else { + sh label: 'Execute DataStax Enterprise integration tests', script: '''#!/bin/bash -lex + # Load CCM environment variable + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} DSE_VERSION=${DSE_VERSION} ADS_HOME="${HOME}/" VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=dse_results.xml tests/integration/advanced/ || true + ''' + } } - sh label: 'Execute DataStax Constellation integration tests', script: '''#!/bin/bash -lex + sh label: 'Execute DataStax Astra integration tests', script: '''#!/bin/bash -lex # Load CCM environment variable set -o allexport . ${HOME}/environment.txt From 120277da36f880ac6a5508480144fe436bf0d8c1 Mon Sep 17 00:00:00 2001 From: Brad Schoening <5796692+bschoening@users.noreply.github.com> Date: Mon, 13 Nov 2023 15:12:14 -0500 Subject: [PATCH 1944/2005] Remove outdated Python pre-3.7 references (#1186) --- README-dev.rst | 2 +- test-requirements.txt | 7 ++----- tests/integration/long/test_ssl.py | 2 +- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/README-dev.rst b/README-dev.rst index 5c0555f3a7..bcc1777ac8 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -176,7 +176,7 @@ Use tee to capture logs and see them on your terminal:: Testing Multiple Python Versions -------------------------------- -If you want to test all of python 2.7, 3.5, 3.6, 3.7, and pypy, use tox (this is what +If you want to test all of python 3.7, 3.8, and pypy, use tox (this is what TravisCI runs):: tox diff --git a/test-requirements.txt b/test-requirements.txt index 996cf4341f..7d3c021240 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -6,13 +6,10 @@ ccm>=2.1.2 pytz sure pure-sasl -twisted[tls]; python_version >= '3.5' -twisted[tls]==19.2.1; python_version < '3.5' +twisted[tls] gevent>=1.0 eventlet cython>=0.20,<0.30 packaging -backports.ssl_match_hostname; python_version < '2.7.9' futurist; python_version >= '3.7' -asynctest; python_version >= '3.5' -ipaddress; python_version < '3.3.0' +asynctest diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index 69285001f8..0e39cb21ad 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -28,7 +28,7 @@ if not hasattr(ssl, 'match_hostname'): try: - from backports.ssl_match_hostname import match_hostname + from ssl import match_hostname ssl.match_hostname = match_hostname except ImportError: pass # tests will fail From e90c0f5d71f4cac94ed80ed72c8789c0818e11d0 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Sun, 17 Dec 2023 23:54:16 -0600 Subject: [PATCH 1945/2005] PYTHON-1371 Add explicit exception type for serialization failures (#1193) --- cassandra/__init__.py | 8 ++++- cassandra/cqltypes.py | 17 ++++++++-- tests/unit/test_types.py | 73 ++++++++++++++++++++++++++++++++++------ 3 files changed, 84 insertions(+), 14 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 4398c86f69..8b4b6f1a1b 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -743,4 +743,10 @@ def __init__(self, msg, excs=[]): complete_msg = msg if excs: complete_msg += ("The following exceptions were observed: \n" + '\n'.join(str(e) for e in excs)) - Exception.__init__(self, complete_msg) \ No newline at end of file + Exception.__init__(self, complete_msg) + +class VectorDeserializationFailure(DriverException): + """ + The driver was unable to deserialize a given vector + """ + pass diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index d1d7e888f9..b413b1c9e5 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -49,7 +49,7 @@ float_pack, float_unpack, double_pack, double_unpack, varint_pack, varint_unpack, point_be, point_le, vints_pack, vints_unpack) -from cassandra import util +from cassandra import util, VectorDeserializationFailure _little_endian_flag = 1 # we always serialize LE import ipaddress @@ -461,6 +461,7 @@ def serialize(uuid, protocol_version): class BooleanType(_CassandraType): typename = 'boolean' + serial_size = 1 @staticmethod def deserialize(byts, protocol_version): @@ -500,6 +501,7 @@ def serialize(var, protocol_version): class FloatType(_CassandraType): typename = 'float' + serial_size = 4 @staticmethod def deserialize(byts, protocol_version): @@ -512,6 +514,7 @@ def serialize(byts, protocol_version): class DoubleType(_CassandraType): typename = 'double' + serial_size = 8 @staticmethod def deserialize(byts, protocol_version): @@ -524,6 +527,7 @@ def serialize(byts, protocol_version): class LongType(_CassandraType): typename = 'bigint' + serial_size = 8 @staticmethod def deserialize(byts, protocol_version): @@ -536,6 +540,7 @@ def serialize(byts, protocol_version): class Int32Type(_CassandraType): typename = 'int' + serial_size = 4 @staticmethod def deserialize(byts, protocol_version): @@ -648,6 +653,7 @@ class TimestampType(DateType): class TimeUUIDType(DateType): typename = 'timeuuid' + serial_size = 16 def my_timestamp(self): return util.unix_time_from_uuid1(self.val) @@ -694,6 +700,7 @@ def serialize(val, protocol_version): class ShortType(_CassandraType): typename = 'smallint' + serial_size = 2 @staticmethod def deserialize(byts, protocol_version): @@ -706,6 +713,7 @@ def serialize(byts, protocol_version): class TimeType(_CassandraType): typename = 'time' + serial_size = 8 @staticmethod def deserialize(byts, protocol_version): @@ -1411,8 +1419,11 @@ def apply_parameters(cls, params, names): @classmethod def deserialize(cls, byts, protocol_version): - indexes = (4 * x for x in range(0, cls.vector_size)) - return [cls.subtype.deserialize(byts[idx:idx + 4], protocol_version) for idx in indexes] + serialized_size = getattr(cls.subtype, "serial_size", None) + if not serialized_size: + raise VectorDeserializationFailure("Cannot determine serialized size for vector with subtype %s" % cls.subtype.__name__) + indexes = (serialized_size * x for x in range(0, cls.vector_size)) + return [cls.subtype.deserialize(byts[idx:idx + serialized_size], protocol_version) for idx in indexes] @classmethod def serialize(cls, v, protocol_version): diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index a06bbd452d..5db7f087b7 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -16,10 +16,11 @@ import datetime import tempfile import time +import uuid from binascii import unhexlify import cassandra -from cassandra import util +from cassandra import util, VectorDeserializationFailure from cassandra.cqltypes import ( CassandraType, DateRangeType, DateType, DecimalType, EmptyValue, LongType, SetType, UTF8Type, @@ -308,15 +309,67 @@ def test_cql_quote(self): self.assertEqual(cql_quote('test'), "'test'") self.assertEqual(cql_quote(0), '0') - def test_vector_round_trip(self): - base = [3.4, 2.9, 41.6, 12.0] - ctype = parse_casstype_args("org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.FloatType, 4)") - base_bytes = ctype.serialize(base, 0) - self.assertEqual(16, len(base_bytes)) - result = ctype.deserialize(base_bytes, 0) - self.assertEqual(len(base), len(result)) - for idx in range(0,len(base)): - self.assertAlmostEqual(base[idx], result[idx], places=5) + def test_vector_round_trip_types_with_serialized_size(self): + # Test all the types which specify a serialized size... see PYTHON-1371 for details + self._round_trip_test([True, False, False, True], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.BooleanType, 4)") + self._round_trip_test([3.4, 2.9, 41.6, 12.0], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.FloatType, 4)") + self._round_trip_test([3.4, 2.9, 41.6, 12.0], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.DoubleType, 4)") + self._round_trip_test([3, 2, 41, 12], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.LongType, 4)") + self._round_trip_test([3, 2, 41, 12], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.Int32Type, 4)") + self._round_trip_test([uuid.uuid1(), uuid.uuid1(), uuid.uuid1(), uuid.uuid1()], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.TimeUUIDType, 4)") + self._round_trip_test([3, 2, 41, 12], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.ShortType, 4)") + self._round_trip_test([datetime.time(1,1,1), datetime.time(2,2,2), datetime.time(3,3,3)], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.TimeType, 3)") + + def test_vector_round_trip_types_without_serialized_size(self): + # Test all the types which do not specify a serialized size... see PYTHON-1371 for details + # Varints + with self.assertRaises(VectorDeserializationFailure): + self._round_trip_test([3, 2, 41, 12], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.IntegerType, 4)") + # ASCII text + with self.assertRaises(VectorDeserializationFailure): + self._round_trip_test(["abc", "def", "ghi", "jkl"], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.AsciiType, 4)") + # UTF8 text + with self.assertRaises(VectorDeserializationFailure): + self._round_trip_test(["abc", "def", "ghi", "jkl"], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.UTF8Type, 4)") + # Duration (containts varints) + with self.assertRaises(VectorDeserializationFailure): + self._round_trip_test([util.Duration(1,1,1), util.Duration(2,2,2), util.Duration(3,3,3)], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.DurationType, 3)") + # List (of otherwise serializable type) + with self.assertRaises(VectorDeserializationFailure): + self._round_trip_test([[3.4], [2.9], [41.6], [12.0]], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.ListType(org.apache.cassandra.db.marshal.FloatType), 4)") + # Set (of otherwise serializable type) + with self.assertRaises(VectorDeserializationFailure): + self._round_trip_test([set([3.4]), set([2.9]), set([41.6]), set([12.0])], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.FloatType), 4)") + # Map (of otherwise serializable types) + with self.assertRaises(VectorDeserializationFailure): + self._round_trip_test([{1:3.4}, {2:2.9}, {3:41.6}, {4:12.0}], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.MapType \ + (org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.FloatType), 4)") + + def _round_trip_test(self, data, ctype_str): + ctype = parse_casstype_args(ctype_str) + data_bytes = ctype.serialize(data, 0) + serialized_size = getattr(ctype.subtype, "serial_size", None) + if serialized_size: + self.assertEqual(serialized_size * len(data), len(data_bytes)) + result = ctype.deserialize(data_bytes, 0) + self.assertEqual(len(data), len(result)) + for idx in range(0,len(data)): + self.assertAlmostEqual(data[idx], result[idx], places=5) def test_vector_cql_parameterized_type(self): ctype = parse_casstype_args("org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.FloatType, 4)") From 8ff0ba0db62512cdbd868b809ac15a16e01ef94b Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Mon, 18 Dec 2023 23:35:24 -0600 Subject: [PATCH 1946/2005] PYTHON-1331 ssl.match_hostname() is deprecated in 3.7 (#1191) --- Jenkinsfile | 8 +++- cassandra/__init__.py | 2 +- cassandra/cluster.py | 20 +++++--- cassandra/connection.py | 84 ++++++++++++++++++++++++--------- cassandra/io/eventletreactor.py | 15 +++--- test-requirements.txt | 2 +- 6 files changed, 91 insertions(+), 40 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index d654558b8c..fdc5e74269 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -178,7 +178,10 @@ def initializeEnvironment() { // Determine if server version is Apache CassandraⓇ or DataStax Enterprise if (env.CASSANDRA_VERSION.split('-')[0] == 'dse') { if (env.PYTHON_VERSION =~ /3\.12\.\d+/) { - echo "Cannot install DSE dependencies for Python 3.12.x. See PYTHON-1368 for more detail." + echo "Cannot install DSE dependencies for Python 3.12.x; installing Apache CassandraⓇ requirements only. See PYTHON-1368 for more detail." + sh label: 'Install Apache CassandraⓇ requirements', script: '''#!/bin/bash -lex + pip install -r test-requirements.txt + ''' } else { sh label: 'Install DataStax Enterprise requirements', script: '''#!/bin/bash -lex @@ -196,7 +199,8 @@ def initializeEnvironment() { } sh label: 'Install unit test modules', script: '''#!/bin/bash -lex - pip install pynose nose-ignore-docstring nose-exclude service_identity + pip install --no-deps nose-ignore-docstring nose-exclude + pip install service_identity ''' if (env.CYTHON_ENABLED == 'True') { diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 8b4b6f1a1b..8d453f5975 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -742,7 +742,7 @@ class DependencyException(Exception): def __init__(self, msg, excs=[]): complete_msg = msg if excs: - complete_msg += ("The following exceptions were observed: \n" + '\n'.join(str(e) for e in excs)) + complete_msg += ("\nThe following exceptions were observed: \n - " + '\n - '.join(str(e) for e in excs)) Exception.__init__(self, complete_msg) class VectorDeserializationFailure(DriverException): diff --git a/cassandra/cluster.py b/cassandra/cluster.py index e0c09ca64f..d5f80290a9 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -165,10 +165,12 @@ def _connection_reduce_fn(val,import_fn): excs.append(exc) return (rv or import_result, excs) +log = logging.getLogger(__name__) + conn_fns = (_try_gevent_import, _try_eventlet_import, _try_libev_import, _try_asyncore_import) (conn_class, excs) = reduce(_connection_reduce_fn, conn_fns, (None,[])) -if excs: - raise DependencyException("Exception loading connection class dependencies", excs) +if not conn_class: + raise DependencyException("Unable to load a default connection class", excs) DefaultConnection = conn_class # Forces load of utf8 encoding module to avoid deadlock that occurs @@ -177,8 +179,6 @@ def _connection_reduce_fn(val,import_fn): # See http://bugs.python.org/issue10923 "".encode('utf8') -log = logging.getLogger(__name__) - DEFAULT_MIN_REQUESTS = 5 DEFAULT_MAX_REQUESTS = 100 @@ -811,9 +811,9 @@ def default_retry_policy(self, policy): Using ssl_options without ssl_context is deprecated and will be removed in the next major release. - An optional dict which will be used as kwargs for ``ssl.SSLContext.wrap_socket`` (or - ``ssl.wrap_socket()`` if used without ssl_context) when new sockets are created. - This should be used when client encryption is enabled in Cassandra. + An optional dict which will be used as kwargs for ``ssl.SSLContext.wrap_socket`` + when new sockets are created. This should be used when client encryption is enabled + in Cassandra. The following documentation only applies when ssl_options is used without ssl_context. @@ -829,6 +829,12 @@ def default_retry_policy(self, policy): should almost always require the option ``'cert_reqs': ssl.CERT_REQUIRED``. Note also that this functionality was not built into Python standard library until (2.7.9, 3.2). To enable this mechanism in earlier versions, patch ``ssl.match_hostname`` with a custom or `back-ported function `_. + + .. versionchanged:: 3.29.0 + + ``ssl.match_hostname`` has been deprecated since Python 3.7 (and removed in Python 3.12). This functionality is now implemented + via ``ssl.SSLContext.check_hostname``. All options specified above (including ``check_hostname``) should continue to behave in a + way that is consistent with prior implementations. """ ssl_context = None diff --git a/cassandra/connection.py b/cassandra/connection.py index 195c93c889..bfe38fc702 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -733,7 +733,6 @@ class Connection(object): _socket = None _socket_impl = socket - _ssl_impl = ssl _check_hostname = False _product_type = None @@ -757,7 +756,7 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self.endpoint = host if isinstance(host, EndPoint) else DefaultEndPoint(host, port) self.authenticator = authenticator - self.ssl_options = ssl_options.copy() if ssl_options else None + self.ssl_options = ssl_options.copy() if ssl_options else {} self.ssl_context = ssl_context self.sockopts = sockopts self.compression = compression @@ -777,15 +776,20 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self._on_orphaned_stream_released = on_orphaned_stream_released if ssl_options: - self._check_hostname = bool(self.ssl_options.pop('check_hostname', False)) - if self._check_hostname: - if not getattr(ssl, 'match_hostname', None): - raise RuntimeError("ssl_options specify 'check_hostname', but ssl.match_hostname is not provided. " - "Patch or upgrade Python to use this option.") self.ssl_options.update(self.endpoint.ssl_options or {}) elif self.endpoint.ssl_options: self.ssl_options = self.endpoint.ssl_options + # PYTHON-1331 + # + # We always use SSLContext.wrap_socket() now but legacy configs may have other params that were passed to ssl.wrap_socket()... + # and either could have 'check_hostname'. Remove these params into a separate map and use them to build an SSLContext if + # we need to do so. + # + # Note the use of pop() here; we are very deliberately removing these params from ssl_options if they're present. After this + # operation ssl_options should contain only args needed for the ssl_context.wrap_socket() call. + if not self.ssl_context and self.ssl_options: + self.ssl_context = self._build_ssl_context_from_options() if protocol_version >= 3: self.max_request_id = min(self.max_in_flight - 1, (2 ** 15) - 1) @@ -852,21 +856,57 @@ def factory(cls, endpoint, timeout, *args, **kwargs): else: return conn + def _build_ssl_context_from_options(self): + + # Extract a subset of names from self.ssl_options which apply to SSLContext creation + ssl_context_opt_names = ['ssl_version', 'cert_reqs', 'check_hostname', 'keyfile', 'certfile', 'ca_certs', 'ciphers'] + opts = {k:self.ssl_options.get(k, None) for k in ssl_context_opt_names if k in self.ssl_options} + + # Python >= 3.10 requires either PROTOCOL_TLS_CLIENT or PROTOCOL_TLS_SERVER so we'll get ahead of things by always + # being explicit + ssl_version = opts.get('ssl_version', None) or ssl.PROTOCOL_TLS_CLIENT + cert_reqs = opts.get('cert_reqs', None) or ssl.CERT_REQUIRED + rv = ssl.SSLContext(protocol=int(ssl_version)) + rv.check_hostname = bool(opts.get('check_hostname', False)) + rv.options = int(cert_reqs) + + certfile = opts.get('certfile', None) + keyfile = opts.get('keyfile', None) + if certfile: + rv.load_cert_chain(certfile, keyfile) + ca_certs = opts.get('ca_certs', None) + if ca_certs: + rv.load_verify_locations(ca_certs) + ciphers = opts.get('ciphers', None) + if ciphers: + rv.set_ciphers(ciphers) + + return rv + def _wrap_socket_from_context(self): - ssl_options = self.ssl_options or {} + + # Extract a subset of names from self.ssl_options which apply to SSLContext.wrap_socket (or at least the parts + # of it that don't involve building an SSLContext under the covers) + wrap_socket_opt_names = ['server_side', 'do_handshake_on_connect', 'suppress_ragged_eofs', 'server_hostname'] + opts = {k:self.ssl_options.get(k, None) for k in wrap_socket_opt_names if k in self.ssl_options} + # PYTHON-1186: set the server_hostname only if the SSLContext has # check_hostname enabled and it is not already provided by the EndPoint ssl options - if (self.ssl_context.check_hostname and - 'server_hostname' not in ssl_options): - ssl_options = ssl_options.copy() - ssl_options['server_hostname'] = self.endpoint.address - self._socket = self.ssl_context.wrap_socket(self._socket, **ssl_options) + #opts['server_hostname'] = self.endpoint.address + if (self.ssl_context.check_hostname and 'server_hostname' not in opts): + server_hostname = self.endpoint.address + opts['server_hostname'] = server_hostname + + return self.ssl_context.wrap_socket(self._socket, **opts) def _initiate_connection(self, sockaddr): self._socket.connect(sockaddr) - def _match_hostname(self): - ssl.match_hostname(self._socket.getpeercert(), self.endpoint.address) + # PYTHON-1331 + # + # Allow implementations specific to an event loop to add additional behaviours + def _validate_hostname(self): + pass def _get_socket_addresses(self): address, port = self.endpoint.resolve() @@ -887,16 +927,18 @@ def _connect_socket(self): try: self._socket = self._socket_impl.socket(af, socktype, proto) if self.ssl_context: - self._wrap_socket_from_context() - elif self.ssl_options: - if not self._ssl_impl: - raise RuntimeError("This version of Python was not compiled with SSL support") - self._socket = self._ssl_impl.wrap_socket(self._socket, **self.ssl_options) + self._socket = self._wrap_socket_from_context() self._socket.settimeout(self.connect_timeout) self._initiate_connection(sockaddr) self._socket.settimeout(None) + + # PYTHON-1331 + # + # Most checking is done via the check_hostname param on the SSLContext. + # Subclasses can add additional behaviours via _validate_hostname() so + # run that here. if self._check_hostname: - self._match_hostname() + self._validate_hostname() sockerr = None break except socket.error as err: diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index 42874036d5..c51bfd7591 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -103,11 +103,12 @@ def __init__(self, *args, **kwargs): def _wrap_socket_from_context(self): _check_pyopenssl() - self._socket = SSL.Connection(self.ssl_context, self._socket) - self._socket.set_connect_state() + rv = SSL.Connection(self.ssl_context, self._socket) + rv.set_connect_state() if self.ssl_options and 'server_hostname' in self.ssl_options: # This is necessary for SNI - self._socket.set_tlsext_host_name(self.ssl_options['server_hostname'].encode('ascii')) + rv.set_tlsext_host_name(self.ssl_options['server_hostname'].encode('ascii')) + return rv def _initiate_connection(self, sockaddr): if self.uses_legacy_ssl_options: @@ -117,14 +118,12 @@ def _initiate_connection(self, sockaddr): if self.ssl_context or self.ssl_options: self._socket.do_handshake() - def _match_hostname(self): - if self.uses_legacy_ssl_options: - super(EventletConnection, self)._match_hostname() - else: + def _validate_hostname(self): + if not self.uses_legacy_ssl_options: cert_name = self._socket.get_peer_certificate().get_subject().commonName if cert_name != self.endpoint.address: raise Exception("Hostname verification failed! Certificate name '{}' " - "doesn't endpoint '{}'".format(cert_name, self.endpoint.address)) + "doesn't match endpoint '{}'".format(cert_name, self.endpoint.address)) def close(self): with self.lock: diff --git a/test-requirements.txt b/test-requirements.txt index 7d3c021240..4ebb23df53 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,6 +1,6 @@ -r requirements.txt scales -nose +pynose mock>1.1 ccm>=2.1.2 pytz From 1a947f8437d10e52a66488c1eab45456d089a92d Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Wed, 20 Dec 2023 10:47:23 -0600 Subject: [PATCH 1947/2005] Documentation (and other) updates for 3.29.0 (#1194) --- .travis.yml | 7 ++++--- CHANGELOG.rst | 16 ++++++++++++++++ README-dev.rst | 2 +- README.rst | 2 +- cassandra/__init__.py | 2 +- cassandra/scylla/cloud.py | 0 docs/index.rst | 2 +- docs/installation.rst | 15 +++++++++------ setup.py | 6 +++++- test-requirements.txt | 4 ++-- tests/unit/test_cluster.py | 4 +++- tox.ini | 19 +++++++------------ 12 files changed, 50 insertions(+), 29 deletions(-) delete mode 100644 cassandra/scylla/cloud.py diff --git a/.travis.yml b/.travis.yml index dbabf61378..5a483f9a03 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,10 +1,11 @@ -dist: xenial +dist: jammy sudo: false language: python python: - - "3.7" - "3.8" + - "3.9" + - "3.10" env: - CASS_DRIVER_NO_CYTHON=1 @@ -13,7 +14,7 @@ addons: apt: packages: - build-essential - - python-dev + - python3-dev - pypy-dev - libc-ares-dev - libev4 diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 472881dbc5..a7780b4ade 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,19 @@ +3.29.0 +====== +December 19, 2023 + +Features +-------- +* Add support for Python 3.9 through 3.12, drop support for 3.7 (PYTHON-1283) +* Removal of dependency on six module (PR 1172) +* Raise explicit exception when deserializing a vector with a subtype that isn’t a constant size (PYTHON-1371) + +Others +------ +* Remove outdated Python pre-3.7 references (PR 1186) +* Remove backup(.bak) files (PR 1185) +* Fix doc typo in add_callbacks (PR 1177) + 3.28.0 ====== June 5, 2023 diff --git a/README-dev.rst b/README-dev.rst index bcc1777ac8..adca510412 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -176,7 +176,7 @@ Use tee to capture logs and see them on your terminal:: Testing Multiple Python Versions -------------------------------- -If you want to test all of python 3.7, 3.8, and pypy, use tox (this is what +Use tox to test all of Python 3.8 through 3.12 and pypy (this is what TravisCI runs):: tox diff --git a/README.rst b/README.rst index 47483f3881..98884008b0 100644 --- a/README.rst +++ b/README.rst @@ -7,7 +7,7 @@ DataStax Driver for Apache Cassandra A modern, `feature-rich `_ and highly-tunable Python client library for Apache Cassandra (2.1+) and DataStax Enterprise (4.7+) using exclusively Cassandra's binary protocol and Cassandra Query Language v3. -The driver supports Python 3.7 and 3.8. +The driver supports Python 3.8 through 3.12. **Note:** DataStax products do not support big-endian systems. diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 8d453f5975..f4c88c1c91 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 28, 0) +__version_info__ = (3, 29, 0) __version__ = '.'.join(map(str, __version_info__)) diff --git a/cassandra/scylla/cloud.py b/cassandra/scylla/cloud.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/docs/index.rst b/docs/index.rst index 6f34f249fb..2370ccefaf 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -4,7 +4,7 @@ A Python client driver for `Apache Cassandra® `_. This driver works exclusively with the Cassandra Query Language v3 (CQL3) and Cassandra's native protocol. Cassandra 2.1+ is supported, including DSE 4.7+. -The driver supports Python 3.7 and 3.8. +The driver supports Python 3.8 through 3.12. This driver is open source under the `Apache v2 License `_. diff --git a/docs/installation.rst b/docs/installation.rst index 17a4e63324..e235f398fe 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -3,7 +3,7 @@ Installation Supported Platforms ------------------- -Python 3.7 and 3.8 are supported. Both CPython (the standard Python +Python 3.8 through 3.12 are supported. Both CPython (the standard Python implementation) and `PyPy `_ are supported and tested. Linux, OSX, and Windows are supported. @@ -26,7 +26,7 @@ To check if the installation was successful, you can run:: python -c 'import cassandra; print cassandra.__version__' -It should print something like "3.27.0". +It should print something like "3.29.0". .. _installation-datastax-graph: @@ -215,12 +215,15 @@ dependencies, then use install-option:: sudo pip install --install-option="--no-cython" +Supported Event Loops +^^^^^^^^^^^^^^^^^^^^^ +For Python versions before 3.12 the driver uses the ``asyncore`` module for its default +event loop. Other event loops such as ``libev``, ``gevent`` and ``eventlet`` are also +available via Python modules or C extensions. Python 3.12 has removed ``asyncore`` entirely +so for this platform one of these other event loops must be used. + libev support ^^^^^^^^^^^^^ -The driver currently uses Python's ``asyncore`` module for its default -event loop. For better performance, ``libev`` is also supported through -a C extension. - If you're on Linux, you should be able to install libev through a package manager. For example, on Debian/Ubuntu:: diff --git a/setup.py b/setup.py index 86e50e8b22..386bdfb9af 100644 --- a/setup.py +++ b/setup.py @@ -413,6 +413,7 @@ def run_setup(extensions): version=__version__, description=' DataStax Driver for Apache Cassandra', long_description=long_description, + long_description_content_type='text/x-rst', url='http://github.com/datastax/python-driver', project_urls={ 'Documentation': 'https://docs.datastax.com/en/developer/python-driver/latest/', @@ -438,8 +439,11 @@ def run_setup(extensions): 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python', - 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', + 'Programming Language :: Python :: 3.12', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Software Development :: Libraries :: Python Modules' diff --git a/test-requirements.txt b/test-requirements.txt index 4ebb23df53..e3f8e1cac6 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -7,9 +7,9 @@ pytz sure pure-sasl twisted[tls] -gevent>=1.0 +gevent eventlet cython>=0.20,<0.30 packaging -futurist; python_version >= '3.7' +futurist asynctest diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index c5f5def082..d6e00407f7 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -14,6 +14,7 @@ import unittest import logging +import socket from mock import patch, Mock @@ -88,8 +89,9 @@ class ClusterTest(unittest.TestCase): def test_tuple_for_contact_points(self): cluster = Cluster(contact_points=[('localhost', 9045), ('127.0.0.2', 9046), '127.0.0.3'], port=9999) + localhost_addr = set([addr[0] for addr in [t for (_,_,_,_,t) in socket.getaddrinfo("localhost",80)]]) for cp in cluster.endpoints_resolved: - if cp.address in ('::1', '127.0.0.1'): + if cp.address in localhost_addr: self.assertEqual(cp.port, 9045) elif cp.address == '127.0.0.2': self.assertEqual(cp.port, 9046) diff --git a/tox.ini b/tox.ini index 7d4dfe898e..b4a01e53df 100644 --- a/tox.ini +++ b/tox.ini @@ -1,23 +1,22 @@ [tox] -envlist = py{37,38},pypy +envlist = py{38,39,310,311,312},pypy [base] -deps = nose - mock<=1.0.1 +deps = pynose + mock>1.1 packaging - cython + cython>=0.20,<0.30 eventlet - twisted <15.5.0 + gevent + twisted[tls] pure-sasl kerberos futurist - greenlet>=0.4.14,<0.4.17 + lz4 cryptography>=35.0 -lz4_dependency = py37,py38: lz4 [testenv] deps = {[base]deps} - {[base]lz4_dependency} setenv = LIBEV_EMBED=0 CARES_EMBED=0 @@ -28,8 +27,6 @@ commands = nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/ [testenv:gevent_loop] deps = {[base]deps} - {[base]lz4_dependency} - gevent>=1.4,<1.5 setenv = LIBEV_EMBED=0 CARES_EMBED=0 @@ -41,8 +38,6 @@ commands = [testenv:eventlet_loop] deps = {[base]deps} - {[base]lz4_dependency} - gevent>=1.4,<1.5 setenv = LIBEV_EMBED=0 CARES_EMBED=0 From 9941ddb5908229b7cdb32f6347c4574c31b49489 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Thu, 21 Dec 2023 17:24:17 -0600 Subject: [PATCH 1948/2005] Added 3.29.0 to docs.yaml --- docs.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs.yaml b/docs.yaml index 7dde5a0299..07e2742637 100644 --- a/docs.yaml +++ b/docs.yaml @@ -22,6 +22,8 @@ sections: # build extensions like libev CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: + - name: '3.29' + ref: 1a947f84 - name: '3.28' ref: 4325afb6 - name: '3.27' From 2c61ab22792475445d21b77bd165cf330ee9b87b Mon Sep 17 00:00:00 2001 From: Yago Riveiro Date: Tue, 27 Feb 2024 22:01:23 +0000 Subject: [PATCH 1949/2005] fix build from source on macos using homebrew (#1196) --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 386bdfb9af..e8842146fb 100644 --- a/setup.py +++ b/setup.py @@ -144,7 +144,7 @@ def __init__(self, ext): libev_ext = Extension('cassandra.io.libevwrapper', sources=['cassandra/io/libevwrapper.c'], - include_dirs=['/usr/include/libev', '/usr/local/include', '/opt/local/include'], + include_dirs=['/usr/include/libev', '/usr/local/include', '/opt/local/include', '/opt/homebrew/include', os.path.expanduser('~/homebrew/include')], libraries=['ev'], library_dirs=['/usr/local/lib', '/opt/local/lib']) From 7e0923a86e6b8d55f5a88698f4c1e6ded65a348b Mon Sep 17 00:00:00 2001 From: Alexandre Detiste Date: Tue, 27 Feb 2024 23:54:42 +0100 Subject: [PATCH 1950/2005] clean up last "import six" (#1197) --- docs/upgrading.rst | 4 ---- tests/integration/standard/test_cluster.py | 1 - 2 files changed, 5 deletions(-) diff --git a/docs/upgrading.rst b/docs/upgrading.rst index 3a600e9ac0..3fd937d7bc 100644 --- a/docs/upgrading.rst +++ b/docs/upgrading.rst @@ -382,7 +382,3 @@ The following dependencies have officially been made optional: * ``scales`` * ``blist`` - -And one new dependency has been added (to enable Python 3 support): - -* ``six`` diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index ae6e3e5a4e..11a9fba0ab 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -23,7 +23,6 @@ import warnings from packaging.version import Version -import six import cassandra from cassandra.cluster import NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT, ControlConnection, Cluster from cassandra.concurrent import execute_concurrent From dbd4ea56cc0633e36a72d7abafd265d0da17d555 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Tue, 19 Mar 2024 12:15:40 -0500 Subject: [PATCH 1951/2005] PYTHON-1378 Expand search directories for includes (#1198) --- setup.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/setup.py b/setup.py index e8842146fb..fa93fc5d8f 100644 --- a/setup.py +++ b/setup.py @@ -138,15 +138,22 @@ class BuildFailed(Exception): def __init__(self, ext): self.ext = ext +is_windows = sys.platform.startswith('win32') +is_macos = sys.platform.startswith('darwin') murmur3_ext = Extension('cassandra.cmurmur3', sources=['cassandra/cmurmur3.c']) +libev_includes = ['/usr/include/libev', '/usr/local/include', '/opt/local/include', '/usr/include'] +libev_libdirs = ['/usr/local/lib', '/opt/local/lib', '/usr/lib64'] +if is_macos: + libev_includes.extend(['/opt/homebrew/include', os.path.expanduser('~/homebrew/include')]) + libev_libdirs.extend(['/opt/homebrew/lib']) libev_ext = Extension('cassandra.io.libevwrapper', sources=['cassandra/io/libevwrapper.c'], - include_dirs=['/usr/include/libev', '/usr/local/include', '/opt/local/include', '/opt/homebrew/include', os.path.expanduser('~/homebrew/include')], + include_dirs=libev_includes, libraries=['ev'], - library_dirs=['/usr/local/lib', '/opt/local/lib']) + library_dirs=libev_libdirs) platform_unsupported_msg = \ """ @@ -169,8 +176,6 @@ def __init__(self, ext): ================================================================================= """ -is_windows = os.name == 'nt' - is_pypy = "PyPy" in sys.version if is_pypy: sys.stderr.write(pypy_unsupported_msg) From 9629c2ad4b828d0f719c45e2a3eade11152707a6 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Tue, 19 Mar 2024 12:44:21 -0500 Subject: [PATCH 1952/2005] Release 3.29.1: changelog & version (#1199) --- CHANGELOG.rst | 9 +++++++++ cassandra/__init__.py | 2 +- docs/installation.rst | 2 +- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index a7780b4ade..9dce17dcb6 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,12 @@ +3.29.1 +====== +March 19, 2024 + +Bug Fixes +-------- +* cassandra-driver for Python 3.12 Linux is compiled without libev support (PYTHON-1378) +* Consider moving to native wheel builds for OS X and removing universal2 wheels (PYTHON-1379) + 3.29.0 ====== December 19, 2023 diff --git a/cassandra/__init__.py b/cassandra/__init__.py index f4c88c1c91..4a5b8b29a3 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 29, 0) +__version_info__ = (3, 29, 1) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs/installation.rst b/docs/installation.rst index e235f398fe..5a400387e5 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -26,7 +26,7 @@ To check if the installation was successful, you can run:: python -c 'import cassandra; print cassandra.__version__' -It should print something like "3.29.0". +It should print something like "3.29.1". .. _installation-datastax-graph: From 49e1b49e63126ecce6e4ba4cdb02f909bd20024f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Wei=C3=9Fl?= Date: Tue, 26 Mar 2024 05:56:09 +0100 Subject: [PATCH 1953/2005] PYTHON-1359 Fix for numpy>=1.24.0 (#1169) --- cassandra/numpy_parser.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/numpy_parser.pyx b/cassandra/numpy_parser.pyx index bb5b9a1c8c..030c2c65c7 100644 --- a/cassandra/numpy_parser.pyx +++ b/cassandra/numpy_parser.pyx @@ -134,7 +134,7 @@ def make_array(coltype, array_size): """ try: a = np.ma.empty((array_size,), dtype=_cqltype_to_numpy[coltype]) - a.mask = np.zeros((array_size,), dtype=np.bool) + a.mask = np.zeros((array_size,), dtype=bool) except KeyError: a = np.empty((array_size,), dtype=obj_dtype) return a From 622523b83971e8a181eb4853b7d877420c0351ef Mon Sep 17 00:00:00 2001 From: Emmanuel Arias Date: Tue, 26 Mar 2024 02:13:42 -0300 Subject: [PATCH 1954/2005] Replace deprecated method assertRaisesRegexp for assertRaisesRegex (#1195) --- tests/unit/test_response_future.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index 8d5850413e..ff60ac6d42 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -626,7 +626,7 @@ def test_timeout_does_not_release_stream_id(self): rf._on_timeout() pool.return_connection.assert_called_once_with(connection, stream_was_orphaned=True) - self.assertRaisesRegexp(OperationTimedOut, "Client request timeout", rf.result) + self.assertRaisesRegex(OperationTimedOut, "Client request timeout", rf.result) assert len(connection.request_ids) == 0, \ "Request IDs should be empty but it's not: {}".format(connection.request_ids) From 41f751b55e1b27959a6baa8345c04852a3a0e74d Mon Sep 17 00:00:00 2001 From: Brad Schoening <5796692+bschoening@users.noreply.github.com> Date: Wed, 1 May 2024 16:28:24 -0400 Subject: [PATCH 1955/2005] Update installation.rst with Python3 print() syntax (#1204) --- docs/installation.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/installation.rst b/docs/installation.rst index 5a400387e5..8081f3b22c 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -24,7 +24,7 @@ Verifying your Installation --------------------------- To check if the installation was successful, you can run:: - python -c 'import cassandra; print cassandra.__version__' + python -c 'import cassandra; print(cassandra.__version__)' It should print something like "3.29.1". From 7780e9043cdf14e6e4d311c72100b1eca0508395 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Thu, 2 May 2024 10:14:28 -0500 Subject: [PATCH 1956/2005] Using Jabba to explicitly enable Java8 (for ccm + Simulacron compat) before running integration tests (#1202) --- Jenkinsfile | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/Jenkinsfile b/Jenkinsfile index fdc5e74269..444b0f48b8 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -272,6 +272,9 @@ def executeStandardTests() { . ${HOME}/environment.txt set +o allexport + . ${JABBA_SHELL} + jabba use 1.8 + SIMULACRON_JAR="${HOME}/simulacron.jar" SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true @@ -288,6 +291,9 @@ def executeStandardTests() { . ${HOME}/environment.txt set +o allexport + . ${JABBA_SHELL} + jabba use 1.8 + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true ''' @@ -297,6 +303,9 @@ def executeStandardTests() { . ${HOME}/environment.txt set +o allexport + . ${JABBA_SHELL} + jabba use 1.8 + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true ''' @@ -311,6 +320,9 @@ def executeStandardTests() { . ${HOME}/environment.txt set +o allexport + . ${JABBA_SHELL} + jabba use 1.8 + EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} DSE_VERSION=${DSE_VERSION} ADS_HOME="${HOME}/" VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=dse_results.xml tests/integration/advanced/ || true ''' } @@ -322,6 +334,9 @@ def executeStandardTests() { . ${HOME}/environment.txt set +o allexport + . ${JABBA_SHELL} + jabba use 1.8 + EVENT_LOOP=${EVENT_LOOP} CLOUD_PROXY_PATH="${HOME}/proxy/" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=advanced_results.xml tests/integration/cloud/ || true ''' @@ -332,6 +347,9 @@ def executeStandardTests() { . ${HOME}/environment.txt set +o allexport + . ${JABBA_SHELL} + jabba use 1.8 + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true ''' } @@ -344,6 +362,9 @@ def executeDseSmokeTests() { . ${HOME}/environment.txt set +o allexport + . ${JABBA_SHELL} + jabba use 1.8 + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} DSE_VERSION=${DSE_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/test_dse.py || true ''' } @@ -355,6 +376,9 @@ def executeEventLoopTests() { . ${HOME}/environment.txt set +o allexport + . ${JABBA_SHELL} + jabba use 1.8 + EVENT_LOOP_TESTS=( "tests/integration/standard/test_cluster.py" "tests/integration/standard/test_concurrent.py" @@ -628,6 +652,7 @@ pipeline { OS_VERSION = 'ubuntu/bionic64/python-driver' CCM_ENVIRONMENT_SHELL = '/usr/local/bin/ccm_environment.sh' CCM_MAX_HEAP_SIZE = '1536M' + JABBA_SHELL = '/usr/lib/jabba/jabba.sh' } stages { From 1efde7659c240ce17858d355bf83a1f1807a0f4b Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Fri, 3 May 2024 09:32:03 -0500 Subject: [PATCH 1957/2005] PYTHON-1384 Remove usages of unittest.assertDictContainsSubset (#1206) --- tests/integration/cqlengine/management/test_management.py | 2 +- tests/integration/standard/test_metadata.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index e4febcc14b..6655a78fc4 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -254,7 +254,7 @@ def test_table_property_update(self): table_options = management._get_table_metadata(ModelWithTableProperties).options - self.assertDictContainsSubset(ModelWithTableProperties.__options__, table_options) + self.assertLessEqual(ModelWithTableProperties.__options__.items(), table_options.items()) def test_bogus_option_update(self): sync_table(ModelWithTableProperties) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 3534f29f9f..c8930a56fc 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1751,9 +1751,9 @@ def test_init_cond(self): cql_init = encoder.cql_encode_all_types(init_cond) with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('update_map', 'map', init_cond=cql_init)) as va: map_res = s.execute("SELECT %s(v) AS map_res FROM t" % va.function_kwargs['name'])[0].map_res - self.assertDictContainsSubset(expected_map_values, map_res) + self.assertLessEqual(expected_map_values.items(), map_res.items()) init_not_updated = dict((k, init_cond[k]) for k in set(init_cond) - expected_key_set) - self.assertDictContainsSubset(init_not_updated, map_res) + self.assertLessEqual(init_not_updated.items(), map_res.items()) c.shutdown() def test_aggregates_after_functions(self): From a2720ce79f94c10cd701cfc6df4bcb9e9ee66e73 Mon Sep 17 00:00:00 2001 From: absurdfarce Date: Tue, 28 May 2024 09:59:01 -0500 Subject: [PATCH 1958/2005] RST format fix for changelog --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 9dce17dcb6..38a7d03f89 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -3,7 +3,7 @@ March 19, 2024 Bug Fixes --------- +--------- * cassandra-driver for Python 3.12 Linux is compiled without libev support (PYTHON-1378) * Consider moving to native wheel builds for OS X and removing universal2 wheels (PYTHON-1379) From 8ee855f21325c1ad952ae26ac785e37421809591 Mon Sep 17 00:00:00 2001 From: Brad Schoening <5796692+bschoening@users.noreply.github.com> Date: Wed, 29 May 2024 17:38:17 -0400 Subject: [PATCH 1959/2005] Updated AppVeyor to python 3.8 (#1203) --- appveyor.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/appveyor.yml b/appveyor.yml index f8a3fd7660..12c43d57a0 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -1,6 +1,6 @@ environment: matrix: - - PYTHON: "C:\\Python37-x64" + - PYTHON: "C:\\Python38-x64" cassandra_version: 3.11.2 ci_type: standard os: Visual Studio 2015 From 6cd0cb1fee26139f8dc47630cf68d3fb4d789c53 Mon Sep 17 00:00:00 2001 From: Brad Schoening <5796692+bschoening@users.noreply.github.com> Date: Wed, 29 May 2024 18:01:46 -0400 Subject: [PATCH 1960/2005] Update README.rst with badges for version and license (#1210) --- README.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.rst b/README.rst index 98884008b0..be7c02b210 100644 --- a/README.rst +++ b/README.rst @@ -1,3 +1,6 @@ +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) +[![PyPI version](https://badge.fury.io/py/cassandra-driver.svg)](https://badge.fury.io/py/cassandra-driver) + DataStax Driver for Apache Cassandra ==================================== From 9aca00be33d96559f0eabc1c8a26bb439dcebbd7 Mon Sep 17 00:00:00 2001 From: Alexandre Detiste Date: Thu, 30 May 2024 06:06:58 +0200 Subject: [PATCH 1961/2005] remove dependency on old mock external module (#1201) --- cassandra/cqlengine/functions.py | 12 ++---------- setup.py | 2 +- test-requirements.txt | 1 - tests/integration/cloud/test_cloud.py | 2 +- .../cqlengine/management/test_compaction_settings.py | 2 +- .../cqlengine/management/test_management.py | 2 +- tests/integration/cqlengine/model/test_model.py | 2 +- .../integration/cqlengine/model/test_polymorphism.py | 2 +- tests/integration/cqlengine/model/test_udts.py | 2 +- tests/integration/cqlengine/model/test_updates.py | 2 +- .../integration/cqlengine/query/test_batch_query.py | 2 +- tests/integration/cqlengine/query/test_queryset.py | 2 +- tests/integration/cqlengine/test_batch_query.py | 2 +- tests/integration/cqlengine/test_consistency.py | 2 +- tests/integration/cqlengine/test_ifexists.py | 3 +-- tests/integration/cqlengine/test_ifnotexists.py | 3 +-- tests/integration/cqlengine/test_lwt_conditional.py | 3 +-- tests/integration/cqlengine/test_timestamp.py | 2 +- tests/integration/cqlengine/test_ttl.py | 2 +- tests/integration/long/test_failure_types.py | 3 +-- tests/integration/simulacron/test_connection.py | 3 +-- tests/integration/standard/test_cluster.py | 2 +- tests/integration/standard/test_connection.py | 2 +- .../standard/test_custom_protocol_handler.py | 2 +- tests/integration/standard/test_metadata.py | 2 +- tests/integration/standard/test_query.py | 2 +- tests/unit/advanced/cloud/test_cloud.py | 4 +--- tests/unit/advanced/test_insights.py | 2 +- tests/unit/advanced/test_policies.py | 3 +-- tests/unit/cqlengine/test_connection.py | 3 +-- tests/unit/io/test_asyncioreactor.py | 2 +- tests/unit/io/test_asyncorereactor.py | 2 +- tests/unit/io/test_eventletreactor.py | 2 +- tests/unit/io/test_geventreactor.py | 3 +-- tests/unit/io/test_libevreactor.py | 2 +- tests/unit/io/test_twistedreactor.py | 2 +- tests/unit/io/utils.py | 2 +- tests/unit/test_cluster.py | 2 +- tests/unit/test_concurrent.py | 2 +- tests/unit/test_connection.py | 3 +-- tests/unit/test_control_connection.py | 2 +- tests/unit/test_endpoints.py | 2 +- tests/unit/test_host_connection_pool.py | 3 +-- tests/unit/test_metadata.py | 2 +- tests/unit/test_policies.py | 2 +- tests/unit/test_protocol.py | 2 +- tests/unit/test_response_future.py | 3 +-- tests/unit/test_resultset.py | 5 ++--- tests/unit/test_timestamps.py | 3 +-- tests/unit/utils.py | 5 +++-- tox.ini | 1 - 51 files changed, 53 insertions(+), 77 deletions(-) diff --git a/cassandra/cqlengine/functions.py b/cassandra/cqlengine/functions.py index 5cb0f673d1..acbbaa6263 100644 --- a/cassandra/cqlengine/functions.py +++ b/cassandra/cqlengine/functions.py @@ -17,16 +17,8 @@ from cassandra.cqlengine import UnicodeMixin, ValidationError -import sys - -if sys.version_info >= (2, 7): - def get_total_seconds(td): - return td.total_seconds() -else: - def get_total_seconds(td): - # integer division used here to emulate built-in total_seconds - return ((86400 * td.days + td.seconds) * 10 ** 6 + td.microseconds) / 10 ** 6 - +def get_total_seconds(td): + return td.total_seconds() class QueryValue(UnicodeMixin): """ diff --git a/setup.py b/setup.py index fa93fc5d8f..9d7f6efd09 100644 --- a/setup.py +++ b/setup.py @@ -436,7 +436,7 @@ def run_setup(extensions): include_package_data=True, install_requires=dependencies, extras_require=_EXTRAS_REQUIRE, - tests_require=['nose', 'mock>=2.0.0', 'PyYAML', 'pytz', 'sure'], + tests_require=['nose', 'PyYAML', 'pytz', 'sure'], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', diff --git a/test-requirements.txt b/test-requirements.txt index e3f8e1cac6..c82e6a65a9 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,7 +1,6 @@ -r requirements.txt scales pynose -mock>1.1 ccm>=2.1.2 pytz sure diff --git a/tests/integration/cloud/test_cloud.py b/tests/integration/cloud/test_cloud.py index 03ff8237be..1c1e75c1ee 100644 --- a/tests/integration/cloud/test_cloud.py +++ b/tests/integration/cloud/test_cloud.py @@ -28,7 +28,7 @@ from cassandra.auth import PlainTextAuthProvider from cassandra.policies import TokenAwarePolicy, DCAwareRoundRobinPolicy, ConstantReconnectionPolicy -from mock import patch +from unittest.mock import patch from tests.integration import requirescloudproxy from tests.util import wait_until_not_raised diff --git a/tests/integration/cqlengine/management/test_compaction_settings.py b/tests/integration/cqlengine/management/test_compaction_settings.py index 554d941ecc..ac56e4b074 100644 --- a/tests/integration/cqlengine/management/test_compaction_settings.py +++ b/tests/integration/cqlengine/management/test_compaction_settings.py @@ -13,7 +13,7 @@ # limitations under the License. import copy -from mock import patch +from unittest.mock import patch from cassandra.cqlengine import columns from cassandra.cqlengine.management import drop_table, sync_table, _get_table_metadata, _update_options diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 6655a78fc4..eafcf5de50 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -13,7 +13,7 @@ # limitations under the License. import unittest -import mock +from unittest import mock import logging from packaging.version import Version from cassandra.cqlengine.connection import get_session, get_cluster diff --git a/tests/integration/cqlengine/model/test_model.py b/tests/integration/cqlengine/model/test_model.py index 859facf0e1..168de4a928 100644 --- a/tests/integration/cqlengine/model/test_model.py +++ b/tests/integration/cqlengine/model/test_model.py @@ -13,7 +13,7 @@ # limitations under the License. import unittest -from mock import patch +from unittest.mock import patch from cassandra.cqlengine import columns, CQLEngineException from cassandra.cqlengine.management import sync_table, drop_table, create_keyspace_simple, drop_keyspace diff --git a/tests/integration/cqlengine/model/test_polymorphism.py b/tests/integration/cqlengine/model/test_polymorphism.py index e78fef498e..f27703367d 100644 --- a/tests/integration/cqlengine/model/test_polymorphism.py +++ b/tests/integration/cqlengine/model/test_polymorphism.py @@ -13,7 +13,7 @@ # limitations under the License. import uuid -import mock +from unittest import mock from cassandra.cqlengine import columns from cassandra.cqlengine import models diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index 1e3adf9a71..30a284137d 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -15,7 +15,7 @@ from datetime import datetime, date, time from decimal import Decimal -from mock import Mock +from unittest.mock import Mock from uuid import UUID, uuid4 from cassandra.cqlengine.models import Model diff --git a/tests/integration/cqlengine/model/test_updates.py b/tests/integration/cqlengine/model/test_updates.py index 17eed8ddd9..b03f6a5444 100644 --- a/tests/integration/cqlengine/model/test_updates.py +++ b/tests/integration/cqlengine/model/test_updates.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from unittest.mock import patch from uuid import uuid4 -from mock import patch from cassandra.cqlengine import ValidationError from tests.integration import greaterthancass21 diff --git a/tests/integration/cqlengine/query/test_batch_query.py b/tests/integration/cqlengine/query/test_batch_query.py index f0c9c43266..db9bab8588 100644 --- a/tests/integration/cqlengine/query/test_batch_query.py +++ b/tests/integration/cqlengine/query/test_batch_query.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import mock +from unittest import mock from cassandra.cqlengine import columns from cassandra.cqlengine.connection import NOT_SET diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index ec5044b707..7b1b4283a9 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -24,7 +24,7 @@ from cassandra import InvalidRequest from tests.integration.cqlengine.base import BaseCassEngTestCase from cassandra.cqlengine.connection import NOT_SET -import mock +from unittest import mock from cassandra.cqlengine import functions from cassandra.cqlengine.management import sync_table, drop_table from cassandra.cqlengine.models import Model diff --git a/tests/integration/cqlengine/test_batch_query.py b/tests/integration/cqlengine/test_batch_query.py index 7887949bb8..4a1cd633a1 100644 --- a/tests/integration/cqlengine/test_batch_query.py +++ b/tests/integration/cqlengine/test_batch_query.py @@ -19,7 +19,7 @@ from cassandra.cqlengine.query import BatchQuery from tests.integration.cqlengine.base import BaseCassEngTestCase -from mock import patch +from unittest.mock import patch class TestMultiKeyModel(Model): partition = columns.Integer(primary_key=True) diff --git a/tests/integration/cqlengine/test_consistency.py b/tests/integration/cqlengine/test_consistency.py index dc0aa32c64..c9797184f2 100644 --- a/tests/integration/cqlengine/test_consistency.py +++ b/tests/integration/cqlengine/test_consistency.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import mock +from unittest import mock from uuid import uuid4 from cassandra import ConsistencyLevel as CL, ConsistencyLevel diff --git a/tests/integration/cqlengine/test_ifexists.py b/tests/integration/cqlengine/test_ifexists.py index 1189bc0ff5..1674bbd266 100644 --- a/tests/integration/cqlengine/test_ifexists.py +++ b/tests/integration/cqlengine/test_ifexists.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import unittest - -import mock +from unittest import mock from uuid import uuid4 from cassandra.cqlengine import columns diff --git a/tests/integration/cqlengine/test_ifnotexists.py b/tests/integration/cqlengine/test_ifnotexists.py index 260e132731..5b24070690 100644 --- a/tests/integration/cqlengine/test_ifnotexists.py +++ b/tests/integration/cqlengine/test_ifnotexists.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import unittest - -import mock +from unittest import mock from uuid import uuid4 from cassandra.cqlengine import columns diff --git a/tests/integration/cqlengine/test_lwt_conditional.py b/tests/integration/cqlengine/test_lwt_conditional.py index 45dbf86c68..6f2e13361f 100644 --- a/tests/integration/cqlengine/test_lwt_conditional.py +++ b/tests/integration/cqlengine/test_lwt_conditional.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import unittest - -import mock +from unittest import mock from uuid import uuid4 from cassandra.cqlengine import columns diff --git a/tests/integration/cqlengine/test_timestamp.py b/tests/integration/cqlengine/test_timestamp.py index abf751ec47..bfed7f9808 100644 --- a/tests/integration/cqlengine/test_timestamp.py +++ b/tests/integration/cqlengine/test_timestamp.py @@ -13,7 +13,7 @@ # limitations under the License. from datetime import timedelta, datetime -import mock +from unittest import mock import sure from uuid import uuid4 diff --git a/tests/integration/cqlengine/test_ttl.py b/tests/integration/cqlengine/test_ttl.py index 55457ff56a..bd0c7f60aa 100644 --- a/tests/integration/cqlengine/test_ttl.py +++ b/tests/integration/cqlengine/test_ttl.py @@ -23,7 +23,7 @@ from cassandra.cqlengine.models import Model from uuid import uuid4 from cassandra.cqlengine import columns -import mock +from unittest import mock from cassandra.cqlengine.connection import get_session from tests.integration import CASSANDRA_VERSION, greaterthancass20 diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py index 2ca01066b0..ea8897185a 100644 --- a/tests/integration/long/test_failure_types.py +++ b/tests/integration/long/test_failure_types.py @@ -17,8 +17,7 @@ import traceback import time from packaging.version import Version - -from mock import Mock +from unittest.mock import Mock from cassandra.policies import HostFilterPolicy, RoundRobinPolicy from cassandra import ( diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index d08676659f..6af180af27 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -15,8 +15,7 @@ import logging import time - -from mock import Mock, patch +from unittest.mock import Mock, patch from cassandra import OperationTimedOut from cassandra.cluster import (EXEC_PROFILE_DEFAULT, Cluster, ExecutionProfile, diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 11a9fba0ab..4c286426a5 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -16,7 +16,7 @@ from collections import deque from copy import copy -from mock import Mock, call, patch, ANY +from unittest.mock import Mock, call, patch, ANY import time from uuid import uuid4 import logging diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 463080fc32..b109728921 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -15,7 +15,7 @@ import unittest from functools import partial -from mock import patch +from unittest.mock import patch import logging import sys import threading diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index aa74f18d1c..16d43bbd65 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -27,7 +27,7 @@ from tests.integration.standard.utils import create_table_with_all_types, get_all_primitive_params import uuid -import mock +from unittest import mock def setup_module(): diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index c8930a56fc..30db02fbd8 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -21,7 +21,7 @@ import time import os from packaging.version import Version -from mock import Mock, patch +from unittest.mock import Mock, patch from cassandra import AlreadyExists, SignatureDescriptor, UserFunctionDescriptor, UserAggregateDescriptor diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 8157e4c96a..89486802b4 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -33,7 +33,7 @@ import random import re -import mock +from unittest import mock log = logging.getLogger(__name__) diff --git a/tests/unit/advanced/cloud/test_cloud.py b/tests/unit/advanced/cloud/test_cloud.py index f253e70454..04cbf883f3 100644 --- a/tests/unit/advanced/cloud/test_cloud.py +++ b/tests/unit/advanced/cloud/test_cloud.py @@ -9,14 +9,12 @@ import tempfile import os import shutil - import unittest +from unittest.mock import patch from cassandra import DriverException from cassandra.datastax import cloud -from mock import patch - from tests import notwindows class CloudTests(unittest.TestCase): diff --git a/tests/unit/advanced/test_insights.py b/tests/unit/advanced/test_insights.py index 4f1dd7ac12..4047fe12b8 100644 --- a/tests/unit/advanced/test_insights.py +++ b/tests/unit/advanced/test_insights.py @@ -16,8 +16,8 @@ import unittest import logging -from mock import sentinel import sys +from unittest.mock import sentinel from cassandra import ConsistencyLevel from cassandra.cluster import ( diff --git a/tests/unit/advanced/test_policies.py b/tests/unit/advanced/test_policies.py index b8e4a4e757..553e7dba87 100644 --- a/tests/unit/advanced/test_policies.py +++ b/tests/unit/advanced/test_policies.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import unittest - -from mock import Mock +from unittest.mock import Mock from cassandra.pool import Host from cassandra.policies import RoundRobinPolicy diff --git a/tests/unit/cqlengine/test_connection.py b/tests/unit/cqlengine/test_connection.py index 56136b6e8b..76266cff23 100644 --- a/tests/unit/cqlengine/test_connection.py +++ b/tests/unit/cqlengine/test_connection.py @@ -13,13 +13,12 @@ # limitations under the License. import unittest +from unittest.mock import Mock from cassandra.cluster import _ConfigMode from cassandra.cqlengine import connection from cassandra.query import dict_factory -from mock import Mock - class ConnectionTest(unittest.TestCase): diff --git a/tests/unit/io/test_asyncioreactor.py b/tests/unit/io/test_asyncioreactor.py index aa00a32943..65708d41dc 100644 --- a/tests/unit/io/test_asyncioreactor.py +++ b/tests/unit/io/test_asyncioreactor.py @@ -10,7 +10,7 @@ from tests import is_monkey_patched, connection_class from tests.unit.io.utils import TimerCallback, TimerTestMixin -from mock import patch +from unittest.mock import patch import unittest import time diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index 6f493896d0..6e3fc2dfe4 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -13,7 +13,7 @@ # limitations under the License. import unittest -from mock import patch +from unittest.mock import patch import socket import cassandra.io.asyncorereactor as asyncorereactor from cassandra.io.asyncorereactor import AsyncoreConnection diff --git a/tests/unit/io/test_eventletreactor.py b/tests/unit/io/test_eventletreactor.py index e2b6a533a8..21837bb52d 100644 --- a/tests/unit/io/test_eventletreactor.py +++ b/tests/unit/io/test_eventletreactor.py @@ -19,7 +19,7 @@ from tests import notpypy, EVENT_LOOP_MANAGER from eventlet import monkey_patch -from mock import patch +from unittest.mock import patch try: from cassandra.io.eventletreactor import EventletConnection diff --git a/tests/unit/io/test_geventreactor.py b/tests/unit/io/test_geventreactor.py index 466b9ae5d5..58aa02869d 100644 --- a/tests/unit/io/test_geventreactor.py +++ b/tests/unit/io/test_geventreactor.py @@ -13,6 +13,7 @@ # limitations under the License. import unittest +from unittest.mock import patch from tests.unit.io.utils import TimerTestMixin @@ -23,8 +24,6 @@ except ImportError: GeventConnection = None # noqa -from mock import patch - skip_condition = GeventConnection is None or EVENT_LOOP_MANAGER != "gevent" @unittest.skipIf(skip_condition, "Skipping the gevent tests because it's not installed") diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index 67ab5fc7d6..3ea6c669f3 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -13,7 +13,7 @@ # limitations under the License. import unittest -from mock import patch, Mock +from unittest.mock import patch, Mock import weakref import socket diff --git a/tests/unit/io/test_twistedreactor.py b/tests/unit/io/test_twistedreactor.py index b426a820c4..0da9915cec 100644 --- a/tests/unit/io/test_twistedreactor.py +++ b/tests/unit/io/test_twistedreactor.py @@ -13,7 +13,7 @@ # limitations under the License. import unittest -from mock import Mock, patch +from unittest.mock import Mock, patch from cassandra.connection import DefaultEndPoint diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index 1475347ca6..be721dc7d3 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -27,7 +27,7 @@ from functools import wraps from itertools import cycle from io import BytesIO -from mock import Mock +from unittest.mock import Mock import errno import logging diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index d6e00407f7..90bcfbdca8 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -16,7 +16,7 @@ import logging import socket -from mock import patch, Mock +from unittest.mock import patch, Mock from cassandra import ConsistencyLevel, DriverException, Timeout, Unavailable, RequestExecutionException, ReadTimeout, WriteTimeout, CoordinationFailure, ReadFailure, WriteFailure, FunctionFailure, AlreadyExists,\ InvalidRequest, Unauthorized, AuthenticationFailed, OperationTimedOut, UnsupportedOperation, RequestValidationException, ConfigurationException, ProtocolVersion diff --git a/tests/unit/test_concurrent.py b/tests/unit/test_concurrent.py index 3e84220b27..bdfd08126e 100644 --- a/tests/unit/test_concurrent.py +++ b/tests/unit/test_concurrent.py @@ -16,7 +16,7 @@ import unittest from itertools import cycle -from mock import Mock +from unittest.mock import Mock import time import threading from queue import PriorityQueue diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index 1d81376d4a..51e6247313 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -12,11 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. import unittest - -from mock import Mock, ANY, call, patch from io import BytesIO import time from threading import Lock +from unittest.mock import Mock, ANY, call, patch from cassandra import OperationTimedOut from cassandra.cluster import Cluster diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index cb863da0b1..a8a64219e6 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -15,7 +15,7 @@ import unittest from concurrent.futures import ThreadPoolExecutor -from mock import Mock, ANY, call +from unittest.mock import Mock, ANY, call from cassandra import OperationTimedOut, SchemaTargetType, SchemaChangeType from cassandra.protocol import ResultMessage, RESULT_KIND_ROWS diff --git a/tests/unit/test_endpoints.py b/tests/unit/test_endpoints.py index 18f245e64b..b0841962ca 100644 --- a/tests/unit/test_endpoints.py +++ b/tests/unit/test_endpoints.py @@ -12,7 +12,7 @@ from cassandra.connection import DefaultEndPoint, SniEndPoint, SniEndPointFactory -from mock import patch +from unittest.mock import patch def socket_getaddrinfo(*args): diff --git a/tests/unit/test_host_connection_pool.py b/tests/unit/test_host_connection_pool.py index 86d4bf9843..97a3334acf 100644 --- a/tests/unit/test_host_connection_pool.py +++ b/tests/unit/test_host_connection_pool.py @@ -13,9 +13,8 @@ # limitations under the License. import unittest - -from mock import Mock, NonCallableMagicMock from threading import Thread, Event, Lock +from unittest.mock import Mock, NonCallableMagicMock from cassandra.cluster import Session from cassandra.connection import Connection diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 94fed13455..578c493e21 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -15,7 +15,7 @@ from binascii import unhexlify import logging -from mock import Mock +from unittest.mock import Mock import os import timeit diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index f340f58634..e1bd17a00c 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -15,7 +15,7 @@ import unittest from itertools import islice, cycle -from mock import Mock, patch, call +from unittest.mock import Mock, patch, call from random import randint from _thread import LockType import sys diff --git a/tests/unit/test_protocol.py b/tests/unit/test_protocol.py index eec9d73ca4..907f62f2bb 100644 --- a/tests/unit/test_protocol.py +++ b/tests/unit/test_protocol.py @@ -14,7 +14,7 @@ import unittest -from mock import Mock +from unittest.mock import Mock from cassandra import ProtocolVersion, UnsupportedOperation from cassandra.protocol import ( diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index ff60ac6d42..ba3bd5b140 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -16,8 +16,7 @@ from collections import deque from threading import RLock - -from mock import Mock, MagicMock, ANY +from unittest.mock import Mock, MagicMock, ANY from cassandra import ConsistencyLevel, Unavailable, SchemaTargetType, SchemaChangeType, OperationTimedOut from cassandra.cluster import Session, ResponseFuture, NoHostAvailable, ProtocolVersion diff --git a/tests/unit/test_resultset.py b/tests/unit/test_resultset.py index 97002d90d7..7ff6352394 100644 --- a/tests/unit/test_resultset.py +++ b/tests/unit/test_resultset.py @@ -11,13 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from cassandra.query import named_tuple_factory, dict_factory, tuple_factory import unittest - -from mock import Mock, PropertyMock, patch +from unittest.mock import Mock, PropertyMock, patch from cassandra.cluster import ResultSet +from cassandra.query import named_tuple_factory, dict_factory, tuple_factory class ResultSetTests(unittest.TestCase): diff --git a/tests/unit/test_timestamps.py b/tests/unit/test_timestamps.py index ef8ac36f7b..151c004c90 100644 --- a/tests/unit/test_timestamps.py +++ b/tests/unit/test_timestamps.py @@ -13,8 +13,7 @@ # limitations under the License. import unittest - -import mock +from unittest import mock from cassandra import timestamps from threading import Thread, Lock diff --git a/tests/unit/utils.py b/tests/unit/utils.py index 3c9cc34e22..ec9a674799 100644 --- a/tests/unit/utils.py +++ b/tests/unit/utils.py @@ -12,10 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from concurrent.futures import Future from functools import wraps +from unittest.mock import patch + +from concurrent.futures import Future from cassandra.cluster import Session -from mock import patch def mock_session_pools(f): diff --git a/tox.ini b/tox.ini index b4a01e53df..a101bbc8e9 100644 --- a/tox.ini +++ b/tox.ini @@ -3,7 +3,6 @@ envlist = py{38,39,310,311,312},pypy [base] deps = pynose - mock>1.1 packaging cython>=0.20,<0.30 eventlet From 9cc42251fa08ac5f961fb7c601c9bcbc19abb447 Mon Sep 17 00:00:00 2001 From: Brad Schoening <5796692+bschoening@users.noreply.github.com> Date: Thu, 30 May 2024 21:51:36 -0400 Subject: [PATCH 1962/2005] removed future print_function, division, and with and some pre 3.7 handling (#1208) --- cassandra/cqlengine/functions.py | 1 - cassandra/util.py | 1 - examples/request_init_listener.py | 1 - ez_setup.py | 8 +------- setup.py | 1 - tests/integration/advanced/graph/__init__.py | 5 ----- tests/integration/long/utils.py | 1 - 7 files changed, 1 insertion(+), 17 deletions(-) diff --git a/cassandra/cqlengine/functions.py b/cassandra/cqlengine/functions.py index acbbaa6263..606f5bc330 100644 --- a/cassandra/cqlengine/functions.py +++ b/cassandra/cqlengine/functions.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import division from datetime import datetime from cassandra.cqlengine import UnicodeMixin, ValidationError diff --git a/cassandra/util.py b/cassandra/util.py index 06d338f2e1..e481b826b2 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import with_statement from _weakref import ref import calendar from collections import OrderedDict diff --git a/examples/request_init_listener.py b/examples/request_init_listener.py index b90bce6b64..fcbaf18ad7 100644 --- a/examples/request_init_listener.py +++ b/examples/request_init_listener.py @@ -18,7 +18,6 @@ # about the encoded request size. Note that the counts would be available using the internal 'metrics' tracking -- # this is just demonstrating a way to track a few custom attributes. -from __future__ import print_function from cassandra.cluster import Cluster from greplin import scales diff --git a/ez_setup.py b/ez_setup.py index 2535472190..1c5e5b73a8 100644 --- a/ez_setup.py +++ b/ez_setup.py @@ -197,13 +197,7 @@ def _extractall(self, path=".", members=None): self.extract(tarinfo, path) # Reverse sort directories. - if sys.version_info < (2, 4): - def sorter(dir1, dir2): - return cmp(dir1.name, dir2.name) - directories.sort(sorter) - directories.reverse() - else: - directories.sort(key=operator.attrgetter('name'), reverse=True) + directories.sort(key=operator.attrgetter('name'), reverse=True) # Set correct owner, mtime and filemode on directories. for tarinfo in directories: diff --git a/setup.py b/setup.py index 9d7f6efd09..41c3eae702 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function import os import sys import warnings diff --git a/tests/integration/advanced/graph/__init__.py b/tests/integration/advanced/graph/__init__.py index 91c9287e11..cc40c6906a 100644 --- a/tests/integration/advanced/graph/__init__.py +++ b/tests/integration/advanced/graph/__init__.py @@ -50,11 +50,6 @@ def setup_module(): MIN_LONG = -9223372036854775808 ZERO_LONG = 0 -if sys.version_info < (3, 0): - MAX_LONG = long(MAX_LONG) - MIN_LONG = long(MIN_LONG) - ZERO_LONG = long(ZERO_LONG) - MAKE_STRICT = "schema.config().option('graph.schema_mode').set('production')" MAKE_NON_STRICT = "schema.config().option('graph.schema_mode').set('development')" ALLOW_SCANS = "schema.config().option('graph.allow_scan').set('true')" diff --git a/tests/integration/long/utils.py b/tests/integration/long/utils.py index a5b5bdd226..58c3241a42 100644 --- a/tests/integration/long/utils.py +++ b/tests/integration/long/utils.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function import logging import time From 90c71ad9f124d12565940765418795b80ad17322 Mon Sep 17 00:00:00 2001 From: Brad Schoening <5796692+bschoening@users.noreply.github.com> Date: Fri, 31 May 2024 13:24:48 -0400 Subject: [PATCH 1963/2005] Update geomet to latest in requirements.txt (#1207) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 100a12905a..1d5f0bcfc4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1 @@ -geomet>=0.1,<0.3 +geomet>=1.1 From ed8ec9328d37d3aa428c41ac39fe621420f29f17 Mon Sep 17 00:00:00 2001 From: Brad Schoening <5796692+bschoening@users.noreply.github.com> Date: Fri, 31 May 2024 13:29:34 -0400 Subject: [PATCH 1964/2005] Change format of badges in README from .md to .rst (#1212) --- README.rst | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index be7c02b210..8682bff298 100644 --- a/README.rst +++ b/README.rst @@ -1,5 +1,10 @@ -[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) -[![PyPI version](https://badge.fury.io/py/cassandra-driver.svg)](https://badge.fury.io/py/cassandra-driver) + +.. |license| image:: https://img.shields.io/badge/License-Apache%202.0-blue.svg + :target: https://opensource.org/licenses/Apache-2.0 +.. |version| image:: https://badge.fury.io/py/cassandra-driver.svg + :target: https://badge.fury.io/py/cassandra-driver + +|license| |version| DataStax Driver for Apache Cassandra ==================================== From 7cdbdbb4cc0c7b4c152b1fcd909fdfc89358982b Mon Sep 17 00:00:00 2001 From: xyz <153530308+rtb-zla-karma@users.noreply.github.com> Date: Sat, 29 Jun 2024 00:28:24 +0200 Subject: [PATCH 1965/2005] Remove problematic escape sequences in some docstrings to avoid SyntaxWarning in Python 3.12 (#1205) --- cassandra/cqlengine/connection.py | 2 +- cassandra/cqlengine/query.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 588e512a2d..20ef497a73 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -323,7 +323,7 @@ def setup( :param int consistency: The global default :class:`~.ConsistencyLevel` - default is the same as :attr:`.Session.default_consistency_level` :param bool lazy_connect: True if should not connect until first use :param bool retry_connect: True if we should retry to connect even if there was a connection failure initially - :param \*\*kwargs: Pass-through keyword arguments for :class:`cassandra.cluster.Cluster` + :param kwargs: Pass-through keyword arguments for :class:`cassandra.cluster.Cluster` """ from cassandra.cqlengine import models diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 73f48a5928..afc7ceeef6 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -205,8 +205,8 @@ def add_callback(self, fn, *args, **kwargs): :param fn: Callable object :type fn: callable - :param \*args: Positional arguments to be passed to the callback at the time of execution - :param \*\*kwargs: Named arguments to be passed to the callback at the time of execution + :param args: Positional arguments to be passed to the callback at the time of execution + :param kwargs: Named arguments to be passed to the callback at the time of execution """ if not callable(fn): raise ValueError("Value for argument 'fn' is {0} and is not a callable object.".format(type(fn))) @@ -276,8 +276,8 @@ class ContextQuery(object): A Context manager to allow a Model to switch context easily. Presently, the context only specifies a keyspace for model IO. - :param \*args: One or more models. A model should be a class type, not an instance. - :param \*\*kwargs: (optional) Context parameters: can be *keyspace* or *connection* + :param args: One or more models. A model should be a class type, not an instance. + :param kwargs: (optional) Context parameters: can be *keyspace* or *connection* For example: From b347653eefa482939383424d4446aacb92596cd8 Mon Sep 17 00:00:00 2001 From: Kefu Chai Date: Tue, 9 Jul 2024 05:54:51 +0800 Subject: [PATCH 1966/2005] use timezone-aware API to avoid deprecated warning (#1213) before this change, when testing with cqlsh, we have warnings like: ``` :488: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). ```` in this change, we replace the deprecated API with timezone-aware API, to avoid this warning. to keep the backward compatibility, `DateTime.to_python()` still returns an offset-naive timestamp. Signed-off-by: Kefu Chai --- cassandra/cqlengine/columns.py | 4 ++-- cassandra/query.py | 4 ++-- cassandra/util.py | 4 ++-- .../integration/cqlengine/columns/test_validation.py | 6 +++--- tests/integration/cqlengine/model/test_model_io.py | 6 +++--- tests/integration/cqlengine/model/test_udts.py | 4 ++-- tests/unit/cython/types_testhelper.pyx | 4 ++-- tests/unit/test_types.py | 12 ++++++------ 8 files changed, 22 insertions(+), 22 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 7c20ec6642..b8248e99ac 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -13,7 +13,7 @@ # limitations under the License. from copy import deepcopy, copy -from datetime import date, datetime, timedelta +from datetime import date, datetime, timedelta, timezone import logging from uuid import UUID as _UUID @@ -551,7 +551,7 @@ def to_python(self, value): elif isinstance(value, date): return datetime(*(value.timetuple()[:6])) - return datetime.utcfromtimestamp(value) + return datetime.fromtimestamp(value, tz=timezone.utc).replace(tzinfo=None) def to_database(self, value): value = super(DateTime, self).to_database(value) diff --git a/cassandra/query.py b/cassandra/query.py index e656124403..e29c2a3113 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -19,7 +19,7 @@ """ from collections import namedtuple -from datetime import datetime, timedelta +from datetime import datetime, timedelta, timezone import re import struct import time @@ -1085,7 +1085,7 @@ class TraceEvent(object): def __init__(self, description, timeuuid, source, source_elapsed, thread_name): self.description = description - self.datetime = datetime.utcfromtimestamp(unix_time_from_uuid1(timeuuid)) + self.datetime = datetime.fromtimestamp(unix_time_from_uuid1(timeuuid), tz=timezone.utc) self.source = source if source_elapsed is not None: self.source_elapsed = timedelta(microseconds=source_elapsed) diff --git a/cassandra/util.py b/cassandra/util.py index e481b826b2..9c07339759 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -38,8 +38,8 @@ from cassandra import DriverException -DATETIME_EPOC = datetime.datetime(1970, 1, 1) -UTC_DATETIME_EPOC = datetime.datetime.utcfromtimestamp(0) +DATETIME_EPOC = datetime.datetime(1970, 1, 1).replace(tzinfo=None) +UTC_DATETIME_EPOC = datetime.datetime.fromtimestamp(0, tz=datetime.timezone.utc).replace(tzinfo=None) _nan = float('nan') diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 21fe1581ff..32f20d52ff 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -15,7 +15,7 @@ import unittest import sys -from datetime import datetime, timedelta, date, tzinfo, time +from datetime import datetime, timedelta, date, tzinfo, time, timezone from decimal import Decimal as D from uuid import uuid4, uuid1 from packaging.version import Version @@ -97,7 +97,7 @@ def test_datetime_timestamp(self): dt_value = 1454520554 self.DatetimeTest.objects.create(test_id=5, created_at=dt_value) dt2 = self.DatetimeTest.objects(test_id=5).first() - self.assertEqual(dt2.created_at, datetime.utcfromtimestamp(dt_value)) + self.assertEqual(dt2.created_at, datetime.fromtimestamp(dt_value, tz=timezone.utc).replace(tzinfo=None)) def test_datetime_large(self): dt_value = datetime(2038, 12, 31, 10, 10, 10, 123000) @@ -809,7 +809,7 @@ def test_conversion_specific_date(self): assert isinstance(uuid, UUID) ts = (uuid.time - 0x01b21dd213814000) / 1e7 # back to a timestamp - new_dt = datetime.utcfromtimestamp(ts) + new_dt = datetime.fromtimestamp(ts, tz=timezone.utc).replace(tzinfo=None) # checks that we created a UUID1 with the proper timestamp assert new_dt == dt diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index 3c4088cc83..ce43422e28 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -15,7 +15,7 @@ from uuid import uuid4, UUID import random -from datetime import datetime, date, time +from datetime import datetime, date, time, timezone from decimal import Decimal from operator import itemgetter @@ -197,13 +197,13 @@ class AllDatatypesModel(Model): sync_table(AllDatatypesModel) - input = ['ascii', 2 ** 63 - 1, bytearray(b'hello world'), True, datetime.utcfromtimestamp(872835240), + input = ['ascii', 2 ** 63 - 1, bytearray(b'hello world'), True, datetime.fromtimestamp(872835240, tz=timezone.utc).replace(tzinfo=None), Decimal('12.3E+7'), 2.39, 3.4028234663852886e+38, '123.123.123.123', 2147483647, 'text', UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), int(str(2147483647) + '000')] AllDatatypesModel.create(id=0, a='ascii', b=2 ** 63 - 1, c=bytearray(b'hello world'), d=True, - e=datetime.utcfromtimestamp(872835240), f=Decimal('12.3E+7'), g=2.39, + e=datetime.fromtimestamp(872835240, tz=timezone.utc), f=Decimal('12.3E+7'), g=2.39, h=3.4028234663852886e+38, i='123.123.123.123', j=2147483647, k='text', l=UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), m=UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), n=int(str(2147483647) + '000'), diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index 30a284137d..7063df8caa 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -13,7 +13,7 @@ # limitations under the License. import unittest -from datetime import datetime, date, time +from datetime import datetime, date, time, timezone from decimal import Decimal from unittest.mock import Mock from uuid import UUID, uuid4 @@ -272,7 +272,7 @@ def test_can_insert_udts_with_all_datatypes(self): self.addCleanup(drop_table, AllDatatypesModel) input = AllDatatypes(a='ascii', b=2 ** 63 - 1, c=bytearray(b'hello world'), d=True, - e=datetime.utcfromtimestamp(872835240), f=Decimal('12.3E+7'), g=2.39, + e=datetime.fromtimestamp(872835240, tz=timezone.utc).replace(tzinfo=None), f=Decimal('12.3E+7'), g=2.39, h=3.4028234663852886e+38, i='123.123.123.123', j=2147483647, k='text', l=UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), m=UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), n=int(str(2147483647) + '000')) diff --git a/tests/unit/cython/types_testhelper.pyx b/tests/unit/cython/types_testhelper.pyx index 55fd310837..66d2516319 100644 --- a/tests/unit/cython/types_testhelper.pyx +++ b/tests/unit/cython/types_testhelper.pyx @@ -38,7 +38,7 @@ def test_datetype(assert_equal): cdef BytesIOReader reader cdef Buffer buf - dt = datetime.datetime.utcfromtimestamp(timestamp) + dt = datetime.datetime.fromtimestamp(timestamp, tz=datetime.timezone.utc) bytes = io.BytesIO() write_value(bytes, DateType.serialize(dt, 0)) @@ -52,7 +52,7 @@ def test_datetype(assert_equal): # deserialize # epoc expected = 0 - assert_equal(deserialize(expected), datetime.datetime.utcfromtimestamp(expected)) + assert_equal(deserialize(expected), datetime.datetime.fromtimestamp(expected, tz=datetime.timezone.utc).replace(tzinfo=None)) # beyond 32b expected = 2 ** 33 diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index 5db7f087b7..25641c046d 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -201,7 +201,7 @@ def test_empty_value(self): def test_datetype(self): now_time_seconds = time.time() - now_datetime = datetime.datetime.utcfromtimestamp(now_time_seconds) + now_datetime = datetime.datetime.fromtimestamp(now_time_seconds, tz=datetime.timezone.utc) # Cassandra timestamps in millis now_timestamp = now_time_seconds * 1e3 @@ -212,23 +212,23 @@ def test_datetype(self): # deserialize # epoc expected = 0 - self.assertEqual(DateType.deserialize(int64_pack(1000 * expected), 0), datetime.datetime.utcfromtimestamp(expected)) + self.assertEqual(DateType.deserialize(int64_pack(1000 * expected), 0), datetime.datetime.fromtimestamp(expected, tz=datetime.timezone.utc).replace(tzinfo=None)) # beyond 32b expected = 2 ** 33 - self.assertEqual(DateType.deserialize(int64_pack(1000 * expected), 0), datetime.datetime(2242, 3, 16, 12, 56, 32)) + self.assertEqual(DateType.deserialize(int64_pack(1000 * expected), 0), datetime.datetime(2242, 3, 16, 12, 56, 32, tzinfo=datetime.timezone.utc).replace(tzinfo=None)) # less than epoc (PYTHON-119) expected = -770172256 - self.assertEqual(DateType.deserialize(int64_pack(1000 * expected), 0), datetime.datetime(1945, 8, 5, 23, 15, 44)) + self.assertEqual(DateType.deserialize(int64_pack(1000 * expected), 0), datetime.datetime(1945, 8, 5, 23, 15, 44, tzinfo=datetime.timezone.utc).replace(tzinfo=None)) # work around rounding difference among Python versions (PYTHON-230) expected = 1424817268.274 - self.assertEqual(DateType.deserialize(int64_pack(int(1000 * expected)), 0), datetime.datetime(2015, 2, 24, 22, 34, 28, 274000)) + self.assertEqual(DateType.deserialize(int64_pack(int(1000 * expected)), 0), datetime.datetime(2015, 2, 24, 22, 34, 28, 274000, tzinfo=datetime.timezone.utc).replace(tzinfo=None)) # Large date overflow (PYTHON-452) expected = 2177403010.123 - self.assertEqual(DateType.deserialize(int64_pack(int(1000 * expected)), 0), datetime.datetime(2038, 12, 31, 10, 10, 10, 123000)) + self.assertEqual(DateType.deserialize(int64_pack(int(1000 * expected)), 0), datetime.datetime(2038, 12, 31, 10, 10, 10, 123000, tzinfo=datetime.timezone.utc).replace(tzinfo=None)) def test_collection_null_support(self): """ From 611431c3ff720566a0255c48d586e6d9b1a0f655 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Fri, 12 Jul 2024 12:25:54 -0500 Subject: [PATCH 1967/2005] PYTHON-1389 Removing deprecated test aliases (#1216) --- .../column_encryption/test_policies.py | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/tests/integration/standard/column_encryption/test_policies.py b/tests/integration/standard/column_encryption/test_policies.py index dea6b6d39e..84c4e76871 100644 --- a/tests/integration/standard/column_encryption/test_policies.py +++ b/tests/integration/standard/column_encryption/test_policies.py @@ -58,14 +58,14 @@ def test_end_to_end_prepared(self): # A straight select from the database will now return the decrypted bits. We select both encrypted and unencrypted # values here to confirm that we don't interfere with regular processing of unencrypted vals. (encrypted,unencrypted) = session.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() - self.assertEquals(expected, encrypted) - self.assertEquals(expected, unencrypted) + self.assertEqual(expected, encrypted) + self.assertEqual(expected, unencrypted) # Confirm the same behaviour from a subsequent prepared statement as well prepared = session.prepare("select encrypted, unencrypted from foo.bar where unencrypted = ? allow filtering") (encrypted,unencrypted) = session.execute(prepared, [expected]).one() - self.assertEquals(expected, encrypted) - self.assertEquals(expected, unencrypted) + self.assertEqual(expected, encrypted) + self.assertEqual(expected, unencrypted) def test_end_to_end_simple(self): @@ -86,14 +86,14 @@ def test_end_to_end_simple(self): # A straight select from the database will now return the decrypted bits. We select both encrypted and unencrypted # values here to confirm that we don't interfere with regular processing of unencrypted vals. (encrypted,unencrypted) = session.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() - self.assertEquals(expected, encrypted) - self.assertEquals(expected, unencrypted) + self.assertEqual(expected, encrypted) + self.assertEqual(expected, unencrypted) # Confirm the same behaviour from a subsequent prepared statement as well prepared = session.prepare("select encrypted, unencrypted from foo.bar where unencrypted = ? allow filtering") (encrypted,unencrypted) = session.execute(prepared, [expected]).one() - self.assertEquals(expected, encrypted) - self.assertEquals(expected, unencrypted) + self.assertEqual(expected, encrypted) + self.assertEqual(expected, unencrypted) def test_end_to_end_different_cle_contexts_different_ivs(self): """ @@ -135,8 +135,8 @@ def test_end_to_end_different_cle_contexts_different_ivs(self): cluster2 = TestCluster(column_encryption_policy=cl_policy2) session2 = cluster2.connect() (encrypted,unencrypted) = session2.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() - self.assertEquals(expected, encrypted) - self.assertEquals(expected, unencrypted) + self.assertEqual(expected, encrypted) + self.assertEqual(expected, unencrypted) def test_end_to_end_different_cle_contexts_different_policies(self): """ @@ -161,10 +161,10 @@ def test_end_to_end_different_cle_contexts_different_policies(self): # A straight select from the database will now return the decrypted bits. We select both encrypted and unencrypted # values here to confirm that we don't interfere with regular processing of unencrypted vals. (encrypted,unencrypted) = session2.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() - self.assertEquals(cl_policy.encode_and_encrypt(col_desc, expected), encrypted) - self.assertEquals(expected, unencrypted) + self.assertEqual(cl_policy.encode_and_encrypt(col_desc, expected), encrypted) + self.assertEqual(expected, unencrypted) # Confirm the same behaviour from a subsequent prepared statement as well prepared = session2.prepare("select encrypted, unencrypted from foo.bar where unencrypted = ? allow filtering") (encrypted,unencrypted) = session2.execute(prepared, [expected]).one() - self.assertEquals(cl_policy.encode_and_encrypt(col_desc, expected), encrypted) + self.assertEqual(cl_policy.encode_and_encrypt(col_desc, expected), encrypted) From 9952e2ab22c7e034b96cc89330791d73c221546b Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Tue, 16 Jul 2024 11:39:24 -0500 Subject: [PATCH 1968/2005] PYTHON-1297 Convert to pytest for running unit and integration tests (#1215) --- .gitignore | 1 - .travis.yml | 1 + Jenkinsfile | 30 +++++++++--------- README-dev.rst | 25 +++++---------- appveyor/run_test.ps1 | 10 +++--- cassandra/io/asyncorereactor.py | 1 - cassandra/io/libevreactor.py | 5 +-- cassandra/io/twistedreactor.py | 3 ++ setup.py | 31 +------------------ test-requirements.txt | 2 +- tests/integration/__init__.py | 1 + tests/integration/cloud/conftest.py | 9 ++++++ tests/integration/conftest.py | 9 ++++++ tests/integration/cqlengine/conftest.py | 12 +++++++ .../integration/cqlengine/test_batch_query.py | 4 ++- tests/integration/simulacron/conftest.py | 9 ++++++ tests/integration/standard/conftest.py | 13 ++++++++ tests/unit/io/test_asyncorereactor.py | 18 ++++++++--- tests/unit/io/test_libevreactor.py | 10 +++--- tests/unit/io/test_twistedreactor.py | 4 ++- tests/unit/io/utils.py | 2 +- tests/unit/test_host_connection_pool.py | 3 ++ tox.ini | 8 ++--- 23 files changed, 122 insertions(+), 89 deletions(-) create mode 100644 tests/integration/cloud/conftest.py create mode 100644 tests/integration/conftest.py create mode 100644 tests/integration/cqlengine/conftest.py create mode 100644 tests/integration/simulacron/conftest.py create mode 100644 tests/integration/standard/conftest.py diff --git a/.gitignore b/.gitignore index 30ff731f85..7983f44b87 100644 --- a/.gitignore +++ b/.gitignore @@ -11,7 +11,6 @@ build MANIFEST dist .coverage -nosetests.xml cover/ docs/_build/ tests/integration/ccm diff --git a/.travis.yml b/.travis.yml index 5a483f9a03..859394584c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,6 +6,7 @@ python: - "3.8" - "3.9" - "3.10" + - "3.11" env: - CASS_DRIVER_NO_CYTHON=1 diff --git a/Jenkinsfile b/Jenkinsfile index 444b0f48b8..b43cf6c659 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -261,9 +261,9 @@ def executeStandardTests() { . ${HOME}/environment.txt set +o allexport - EVENT_LOOP=${EVENT_LOOP} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml tests/unit/ || true - EVENT_LOOP=eventlet VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || true - EVENT_LOOP=gevent VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || true + EVENT_LOOP=${EVENT_LOOP} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=unit_results.xml tests/unit/ || true + EVENT_LOOP_MANAGER=eventlet VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || true + EVENT_LOOP_MANAGER=gevent VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || true ''' sh label: 'Execute Simulacron integration tests', script: '''#!/bin/bash -lex @@ -276,13 +276,13 @@ def executeStandardTests() { jabba use 1.8 SIMULACRON_JAR="${HOME}/simulacron.jar" - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=test_backpressure.py --junit-xml=simulacron_results.xml tests/integration/simulacron/ || true # Run backpressure tests separately to avoid memory issue - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_1_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_paused_connections || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_2_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_queued_requests_timeout || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_3_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_cluster_busy || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_4_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_node_busy || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=test_backpressure.py --junit-xml=simulacron_backpressure_1_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_paused_connections || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=test_backpressure.py --junit-xml=simulacron_backpressure_2_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_queued_requests_timeout || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=test_backpressure.py --junit-xml=simulacron_backpressure_3_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_cluster_busy || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=test_backpressure.py --junit-xml=simulacron_backpressure_4_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_node_busy || true ''' sh label: 'Execute CQL engine integration tests', script: '''#!/bin/bash -lex @@ -294,7 +294,7 @@ def executeStandardTests() { . ${JABBA_SHELL} jabba use 1.8 - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=cqle_results.xml tests/integration/cqlengine/ || true ''' sh label: 'Execute Apache CassandraⓇ integration tests', script: '''#!/bin/bash -lex @@ -306,7 +306,7 @@ def executeStandardTests() { . ${JABBA_SHELL} jabba use 1.8 - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=standard_results.xml tests/integration/standard/ || true ''' if (env.CASSANDRA_VERSION.split('-')[0] == 'dse' && env.CASSANDRA_VERSION.split('-')[1] != '4.8') { @@ -323,7 +323,7 @@ def executeStandardTests() { . ${JABBA_SHELL} jabba use 1.8 - EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} DSE_VERSION=${DSE_VERSION} ADS_HOME="${HOME}/" VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=dse_results.xml tests/integration/advanced/ || true + EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} DSE_VERSION=${DSE_VERSION} ADS_HOME="${HOME}/" VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=dse_results.xml tests/integration/advanced/ || true ''' } } @@ -337,7 +337,7 @@ def executeStandardTests() { . ${JABBA_SHELL} jabba use 1.8 - EVENT_LOOP=${EVENT_LOOP} CLOUD_PROXY_PATH="${HOME}/proxy/" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=advanced_results.xml tests/integration/cloud/ || true + EVENT_LOOP=${EVENT_LOOP} CLOUD_PROXY_PATH="${HOME}/proxy/" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=advanced_results.xml tests/integration/cloud/ || true ''' if (env.PROFILE == 'FULL') { @@ -350,7 +350,7 @@ def executeStandardTests() { . ${JABBA_SHELL} jabba use 1.8 - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=tests/integration/long/upgrade --junit-xml=long_results.xml tests/integration/long/ || true ''' } } @@ -365,7 +365,7 @@ def executeDseSmokeTests() { . ${JABBA_SHELL} jabba use 1.8 - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} DSE_VERSION=${DSE_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/test_dse.py || true + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} DSE_VERSION=${DSE_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=standard_results.xml tests/integration/standard/test_dse.py || true ''' } @@ -389,7 +389,7 @@ def executeEventLoopTests() { "tests/integration/simulacron/test_endpoint.py" "tests/integration/long/test_ssl.py" ) - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} pynose -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml ${EVENT_LOOP_TESTS[@]} || true + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=standard_results.xml ${EVENT_LOOP_TESTS[@]} || true ''' } diff --git a/README-dev.rst b/README-dev.rst index adca510412..7ffa52b0b1 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -135,49 +135,38 @@ Running Unit Tests ------------------ Unit tests can be run like so:: - nosetests -w tests/unit/ + pytest tests/unit/ You can run a specific test method like so:: - nosetests -w tests/unit/test_connection.py:ConnectionTest.test_bad_protocol_version + pytest tests/unit/test_connection.py::ConnectionTest::test_bad_protocol_version Running Integration Tests ------------------------- In order to run integration tests, you must specify a version to run using the ``CASSANDRA_VERSION`` or ``DSE_VERSION`` environment variable:: - CASSANDRA_VERSION=2.0.9 nosetests -w tests/integration/standard + CASSANDRA_VERSION=2.0.9 pytest tests/integration/standard Or you can specify a cassandra directory (to test unreleased versions):: - CASSANDRA_DIR=/home/thobbs/cassandra nosetests -w tests/integration/standard/ + CASSANDRA_DIR=/path/to/cassandra pytest tests/integration/standard/ Specifying the usage of an already running Cassandra cluster ------------------------------------------------------------ The test will start the appropriate Cassandra clusters when necessary but if you don't want this to happen because a Cassandra cluster is already running the flag ``USE_CASS_EXTERNAL`` can be used, for example:: - USE_CASS_EXTERNAL=1 CASSANDRA_VERSION=2.0.9 nosetests -w tests/integration/standard + USE_CASS_EXTERNAL=1 CASSANDRA_VERSION=2.0.9 pytest tests/integration/standard Specify a Protocol Version for Tests ------------------------------------ The protocol version defaults to 1 for cassandra 1.2 and 2 otherwise. You can explicitly set it with the ``PROTOCOL_VERSION`` environment variable:: - PROTOCOL_VERSION=3 nosetests -w tests/integration/standard - -Seeing Test Logs in Real Time ------------------------------ -Sometimes it's useful to output logs for the tests as they run:: - - nosetests -w tests/unit/ --nocapture --nologcapture - -Use tee to capture logs and see them on your terminal:: - - nosetests -w tests/unit/ --nocapture --nologcapture 2>&1 | tee test.log + PROTOCOL_VERSION=3 pytest tests/integration/standard Testing Multiple Python Versions -------------------------------- -Use tox to test all of Python 3.8 through 3.12 and pypy (this is what -TravisCI runs):: +Use tox to test all of Python 3.8 through 3.12 and pypy (this is what TravisCI runs):: tox diff --git a/appveyor/run_test.ps1 b/appveyor/run_test.ps1 index fc95ec7e52..9b8c23fd8b 100644 --- a/appveyor/run_test.ps1 +++ b/appveyor/run_test.ps1 @@ -15,12 +15,12 @@ $wc = New-Object 'System.Net.WebClient' if($env:ci_type -eq 'unit'){ echo "Running Unit tests" - nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml .\tests\unit + pytest -s -v --junit-xml=unit_results.xml .\tests\unit $env:EVENT_LOOP_MANAGER="gevent" - nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml .\tests\unit\io\test_geventreactor.py + pytest -s -v --junit-xml=unit_results.xml .\tests\unit\io\test_geventreactor.py $env:EVENT_LOOP_MANAGER="eventlet" - nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml .\tests\unit\io\test_eventletreactor.py + pytest -s -v --junit-xml=unit_results.xml .\tests\unit\io\test_eventletreactor.py $env:EVENT_LOOP_MANAGER="asyncore" echo "uploading unit results" @@ -31,13 +31,13 @@ if($env:ci_type -eq 'unit'){ if($env:ci_type -eq 'standard'){ echo "Running CQLEngine integration tests" - nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=cqlengine_results.xml .\tests\integration\cqlengine + pytest -s -v --junit-xml=cqlengine_results.xml .\tests\integration\cqlengine $cqlengine_tests_result = $lastexitcode $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\cqlengine_results.xml)) echo "uploading CQLEngine test results" echo "Running standard integration tests" - nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml .\tests\integration\standard + pytest -s -v --junit-xml=standard_results.xml .\tests\integration\standard $integration_tests_result = $lastexitcode $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\standard_results.xml)) echo "uploading standard integration test results" diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index a50b719c5d..d2ea62cc3c 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -24,7 +24,6 @@ import sys import ssl - try: from weakref import WeakSet except ImportError: diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 4d4098ca7b..d28c6af0f1 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -22,8 +22,6 @@ import time from cassandra import DependencyException -from cassandra.connection import (Connection, ConnectionShutdown, - NONBLOCKING, Timer, TimerManager) try: import cassandra.io.libevwrapper as libev except ImportError: @@ -35,6 +33,9 @@ "for instructions on installing build dependencies and building " "the C extension.") +from cassandra.connection import (Connection, ConnectionShutdown, + NONBLOCKING, Timer, TimerManager) + log = logging.getLogger(__name__) diff --git a/cassandra/io/twistedreactor.py b/cassandra/io/twistedreactor.py index 9b3ff09398..e4605a7446 100644 --- a/cassandra/io/twistedreactor.py +++ b/cassandra/io/twistedreactor.py @@ -102,6 +102,9 @@ def maybe_start(self): self._thread.start() atexit.register(partial(_cleanup, weakref.ref(self))) + def _reactor_stopped(self): + return reactor._stopped + def _cleanup(self): if self._thread: reactor.callFromThread(reactor.stop) diff --git a/setup.py b/setup.py index 41c3eae702..1558512fcf 100644 --- a/setup.py +++ b/setup.py @@ -16,16 +16,6 @@ import sys import warnings -if __name__ == '__main__' and sys.argv[1] == "gevent_nosetests": - print("Running gevent tests") - from gevent.monkey import patch_all - patch_all() - -if __name__ == '__main__' and sys.argv[1] == "eventlet_nosetests": - print("Running eventlet tests") - from eventlet import monkey_patch - monkey_patch() - import ez_setup ez_setup.use_setuptools() @@ -48,19 +38,6 @@ with open("README.rst") as f: long_description = f.read() - -try: - from nose.commands import nosetests -except ImportError: - gevent_nosetests = None - eventlet_nosetests = None -else: - class gevent_nosetests(nosetests): - description = "run nosetests with gevent monkey patching" - - class eventlet_nosetests(nosetests): - description = "run nosetests with eventlet monkey patching" - has_cqlengine = False if __name__ == '__main__' and sys.argv[1] == "install": try: @@ -382,12 +359,6 @@ def pre_build_check(): def run_setup(extensions): kw = {'cmdclass': {'doc': DocCommand}} - if gevent_nosetests is not None: - kw['cmdclass']['gevent_nosetests'] = gevent_nosetests - - if eventlet_nosetests is not None: - kw['cmdclass']['eventlet_nosetests'] = eventlet_nosetests - kw['cmdclass']['build_ext'] = build_extensions kw['ext_modules'] = [Extension('DUMMY', [])] # dummy extension makes sure build_ext is called for install @@ -435,7 +406,7 @@ def run_setup(extensions): include_package_data=True, install_requires=dependencies, extras_require=_EXTRAS_REQUIRE, - tests_require=['nose', 'PyYAML', 'pytz', 'sure'], + tests_require=['pytest', 'PyYAML', 'pytz', 'sure'], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', diff --git a/test-requirements.txt b/test-requirements.txt index c82e6a65a9..94ac611707 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,6 +1,6 @@ -r requirements.txt scales -pynose +pytest ccm>=2.1.2 pytz sure diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index b158ed2bc0..b7246522ae 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -997,6 +997,7 @@ def assert_startswith(s, prefix): class TestCluster(object): + __test__ = False DEFAULT_PROTOCOL_VERSION = default_protocol_version DEFAULT_CASSANDRA_IP = CASSANDRA_IP DEFAULT_ALLOW_BETA = ALLOW_BETA_PROTOCOL diff --git a/tests/integration/cloud/conftest.py b/tests/integration/cloud/conftest.py new file mode 100644 index 0000000000..fb08b04194 --- /dev/null +++ b/tests/integration/cloud/conftest.py @@ -0,0 +1,9 @@ +import pytest + +from tests.integration.cloud import setup_package, teardown_package + +@pytest.fixture(scope='session', autouse=True) +def setup_and_teardown_packages(): + setup_package() + yield + teardown_package() \ No newline at end of file diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py new file mode 100644 index 0000000000..b2eb9a02fc --- /dev/null +++ b/tests/integration/conftest.py @@ -0,0 +1,9 @@ +import pytest + +from tests.integration import teardown_package + +@pytest.fixture(scope='session', autouse=True) +def setup_and_teardown_packages(): + print('setup') + yield + teardown_package() \ No newline at end of file diff --git a/tests/integration/cqlengine/conftest.py b/tests/integration/cqlengine/conftest.py new file mode 100644 index 0000000000..2dc695828b --- /dev/null +++ b/tests/integration/cqlengine/conftest.py @@ -0,0 +1,12 @@ +import pytest + +from tests.integration import teardown_package as parent_teardown_package +from tests.integration.cqlengine import setup_package, teardown_package + + +@pytest.fixture(scope='session', autouse=True) +def setup_and_teardown_packages(): + setup_package() + yield + teardown_package() + parent_teardown_package() \ No newline at end of file diff --git a/tests/integration/cqlengine/test_batch_query.py b/tests/integration/cqlengine/test_batch_query.py index 4a1cd633a1..2d30bd0f53 100644 --- a/tests/integration/cqlengine/test_batch_query.py +++ b/tests/integration/cqlengine/test_batch_query.py @@ -13,6 +13,8 @@ # limitations under the License. import warnings +import pytest + from cassandra.cqlengine import columns from cassandra.cqlengine.management import drop_table, sync_table from cassandra.cqlengine.models import Model @@ -215,7 +217,7 @@ def test_callbacks_work_multiple_times(self): def my_callback(*args, **kwargs): call_history.append(args) - with warnings.catch_warnings(record=True) as w: + with pytest.warns() as w: with BatchQuery() as batch: batch.add_callback(my_callback) batch.execute() diff --git a/tests/integration/simulacron/conftest.py b/tests/integration/simulacron/conftest.py new file mode 100644 index 0000000000..a4377996bb --- /dev/null +++ b/tests/integration/simulacron/conftest.py @@ -0,0 +1,9 @@ +import pytest + +from tests.integration.simulacron import teardown_package + +@pytest.fixture(scope='session', autouse=True) +def setup_and_teardown_packages(): + print('setup') + yield + teardown_package() \ No newline at end of file diff --git a/tests/integration/standard/conftest.py b/tests/integration/standard/conftest.py new file mode 100644 index 0000000000..6028c2a06d --- /dev/null +++ b/tests/integration/standard/conftest.py @@ -0,0 +1,13 @@ +import pytest +import logging + +# from https://github.com/streamlit/streamlit/pull/5047/files +def pytest_sessionfinish(): + # We're not waiting for scriptrunner threads to cleanly close before ending the PyTest, + # which results in raised exception ValueError: I/O operation on closed file. + # This is well known issue in PyTest, check out these discussions for more: + # * https://github.com/pytest-dev/pytest/issues/5502 + # * https://github.com/pytest-dev/pytest/issues/5282 + # To prevent the exception from being raised on pytest_sessionfinish + # we disable exception raising in logging module + logging.raiseExceptions = False \ No newline at end of file diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index 6e3fc2dfe4..06328a6f45 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -11,12 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import platform +import socket import unittest from unittest.mock import patch -import socket -import cassandra.io.asyncorereactor as asyncorereactor -from cassandra.io.asyncorereactor import AsyncoreConnection +from packaging.version import Version + +from cassandra import DependencyException +try: + import cassandra.io.asyncorereactor as asyncorereactor + from cassandra.io.asyncorereactor import AsyncoreConnection +except DependencyException: + AsyncoreConnection = None + from tests import is_monkey_patched from tests.unit.io.utils import ReactorTestMixin, TimerTestMixin, noop_if_monkey_patched @@ -54,7 +62,8 @@ def tearDownClass(cls): except: pass - +has_asyncore = Version(platform.python_version()) < Version("3.12.0") +@unittest.skipUnless(has_asyncore, "asyncore has been removed in Python 3.12") class AsyncoreConnectionTest(ReactorTestMixin, AsyncorePatcher): connection_class = AsyncoreConnection @@ -65,6 +74,7 @@ def setUp(self): raise unittest.SkipTest("Can't test asyncore with monkey patching") +@unittest.skipUnless(has_asyncore, "asyncore has been removed in Python 3.12") class TestAsyncoreTimer(TimerTestMixin, AsyncorePatcher): connection_class = AsyncoreConnection diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index 3ea6c669f3..6e6f3d8faa 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -17,16 +17,16 @@ import weakref import socket -from tests import is_monkey_patched -from tests.unit.io.utils import ReactorTestMixin, TimerTestMixin, noop_if_monkey_patched - - +from cassandra import DependencyException try: from cassandra.io.libevreactor import _cleanup as libev__cleanup from cassandra.io.libevreactor import LibevConnection -except ImportError: +except DependencyException: LibevConnection = None # noqa +from tests import is_monkey_patched +from tests.unit.io.utils import ReactorTestMixin, TimerTestMixin, noop_if_monkey_patched + class LibevConnectionTest(ReactorTestMixin, unittest.TestCase): diff --git a/tests/unit/io/test_twistedreactor.py b/tests/unit/io/test_twistedreactor.py index 0da9915cec..e545620033 100644 --- a/tests/unit/io/test_twistedreactor.py +++ b/tests/unit/io/test_twistedreactor.py @@ -66,7 +66,9 @@ def setUp(self): self.tr.protocol = self.obj_ut def tearDown(self): - pass + loop = twistedreactor.TwistedConnection._loop + if not loop._reactor_stopped(): + loop._cleanup() def test_makeConnection(self): """ diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index be721dc7d3..765a561207 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -181,7 +181,7 @@ def test_timer_cancellation(self): timer = self.create_timer(timeout, callback.invoke) timer.cancel() # Release context allow for timer thread to run. - time.sleep(.2) + time.sleep(timeout * 2) timer_manager = self._timers # Assert that the cancellation was honored self.assertFalse(timer_manager._queue) diff --git a/tests/unit/test_host_connection_pool.py b/tests/unit/test_host_connection_pool.py index 97a3334acf..ded5bd1a86 100644 --- a/tests/unit/test_host_connection_pool.py +++ b/tests/unit/test_host_connection_pool.py @@ -23,6 +23,7 @@ from cassandra.policies import HostDistance, SimpleConvictionPolicy class _PoolTests(unittest.TestCase): + __test__ = False PoolImpl = None uses_single_connection = None @@ -204,6 +205,7 @@ def test_host_equality(self): class HostConnectionPoolTests(_PoolTests): + __test__ = True PoolImpl = HostConnectionPool uses_single_connection = False @@ -252,6 +254,7 @@ def get_conn(): class HostConnectionTests(_PoolTests): + __test__ = True PoolImpl = HostConnection uses_single_connection = True diff --git a/tox.ini b/tox.ini index a101bbc8e9..a8dda2d9c2 100644 --- a/tox.ini +++ b/tox.ini @@ -2,7 +2,7 @@ envlist = py{38,39,310,311,312},pypy [base] -deps = pynose +deps = pytest packaging cython>=0.20,<0.30 eventlet @@ -21,7 +21,7 @@ setenv = LIBEV_EMBED=0 CARES_EMBED=0 LC_ALL=en_US.UTF-8 changedir = {envtmpdir} -commands = nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/ +commands = pytest -v {toxinidir}/tests/unit/ [testenv:gevent_loop] @@ -32,7 +32,7 @@ setenv = LIBEV_EMBED=0 EVENT_LOOP_MANAGER=gevent changedir = {envtmpdir} commands = - nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/io/test_geventreactor.py + pytest -v {toxinidir}/tests/unit/io/test_geventreactor.py [testenv:eventlet_loop] @@ -43,4 +43,4 @@ setenv = LIBEV_EMBED=0 EVENT_LOOP_MANAGER=eventlet changedir = {envtmpdir} commands = - nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/io/test_eventletreactor.py + pytest -v {toxinidir}/tests/unit/io/test_eventletreactor.py From 421b01e5de6f75126d4b3b5dad4d2fbbcdef876c Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Tue, 30 Jul 2024 10:51:18 -0500 Subject: [PATCH 1969/2005] PYTHON-1393 Add support for Cassandra 4.1.x and 5.0 releases to CI (#1220) --- Jenkinsfile | 8 +++- tests/integration/__init__.py | 47 +++++++++++++++++-- .../cqlengine/query/test_queryset.py | 14 +++++- 3 files changed, 62 insertions(+), 7 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index b43cf6c659..0aec3b7ba6 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -30,7 +30,7 @@ import com.datastax.jenkins.drivers.python.Slack slack = new Slack() -DEFAULT_CASSANDRA = ['3.0', '3.11', '4.0'] +DEFAULT_CASSANDRA = ['3.0', '3.11', '4.0', '4.1', '5.0-beta1'] DEFAULT_DSE = ['dse-5.1.35', 'dse-6.8.30'] DEFAULT_RUNTIME = ['3.8.16', '3.9.16', '3.10.11', '3.11.3', '3.12.0'] DEFAULT_CYTHON = ["True", "False"] @@ -557,6 +557,10 @@ pipeline { 4.0 Apache CassandraⓇ v4.0.x + + 5.0-beta1 + Apache CassandraⓇ v5.0-beta1 + dse-5.1.35 DataStax Enterprise v5.1.x @@ -644,7 +648,7 @@ pipeline { parameterizedCron(branchPatternCron().matcher(env.BRANCH_NAME).matches() ? """ # Every weeknight (Monday - Friday) around 4:00 AM # These schedules will run with and without Cython enabled for Python 3.8.16 and 3.12.0 - H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.8.16 3.12.0;CI_SCHEDULE_SERVER_VERSION=3.11 4.0 dse-5.1.35 dse-6.8.30 + H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.8.16 3.12.0;CI_SCHEDULE_SERVER_VERSION=3.11 4.0 5.0-beta1 dse-5.1.35 dse-6.8.30 """ : "") } diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index b7246522ae..5aa702c727 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import re import os from cassandra.cluster import Cluster @@ -561,18 +562,23 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, CCM_CLUSTER.set_dse_configuration_options(dse_options) else: - CCM_CLUSTER = CCMCluster(path, cluster_name, **ccm_options) + ccm_cluster_clz = CCMCluster if Version(cassandra_version) < Version('4.1') else Cassandra41CCMCluster + CCM_CLUSTER = ccm_cluster_clz(path, cluster_name, **ccm_options) CCM_CLUSTER.set_configuration_options({'start_native_transport': True}) if Version(cassandra_version) >= Version('2.2'): CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True}) if Version(cassandra_version) >= Version('3.0'): - CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True}) - if Version(cassandra_version) >= Version('4.0-a'): + # The config.yml option below is deprecated in C* 4.0 per CASSANDRA-17280 + if Version(cassandra_version) < Version('4.0'): + CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True}) + else: + # Cassandra version >= 4.0 CCM_CLUSTER.set_configuration_options({ 'enable_materialized_views': True, 'enable_sasi_indexes': True, 'enable_transient_replication': True, }) + common.switch_cluster(path, cluster_name) CCM_CLUSTER.set_configuration_options(configuration_options) CCM_CLUSTER.populate(nodes, ipformat=ipformat, use_single_interface=use_single_interface) @@ -1011,3 +1017,38 @@ def __new__(cls, **kwargs): kwargs['allow_beta_protocol_version'] = cls.DEFAULT_ALLOW_BETA return Cluster(**kwargs) +# Subclass of CCMCluster (i.e. ccmlib.cluster.Cluster) which transparently performs +# conversion of cassandra.yml directives into something matching the new syntax +# introduced by CASSANDRA-15234 +class Cassandra41CCMCluster(CCMCluster): + __test__ = False + IN_MS_REGEX = re.compile('^(\w+)_in_ms$') + IN_KB_REGEX = re.compile('^(\w+)_in_kb$') + ENABLE_REGEX = re.compile('^enable_(\w+)$') + + def _get_config_key(self, k, v): + if "." in k: + return k + m = self.IN_MS_REGEX.match(k) + if m: + return m.group(1) + m = self.ENABLE_REGEX.search(k) + if m: + return "%s_enabled" % (m.group(1)) + m = self.IN_KB_REGEX.match(k) + if m: + return m.group(1) + return k + + def _get_config_val(self, k, v): + m = self.IN_MS_REGEX.match(k) + if m: + return "%sms" % (v) + m = self.IN_KB_REGEX.match(k) + if m: + return "%sKiB" % (v) + return v + + def set_configuration_options(self, values=None, *args, **kwargs): + new_values = {self._get_config_key(k, str(v)):self._get_config_val(k, str(v)) for (k,v) in values.items()} + super(Cassandra41CCMCluster, self).set_configuration_options(values=new_values, *args, **kwargs) \ No newline at end of file diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 7b1b4283a9..9e16a63fa8 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -1362,11 +1362,21 @@ def tearDownClass(cls): super(TestModelQueryWithFetchSize, cls).tearDownClass() drop_table(TestModelSmall) - @execute_count(9) + @execute_count(19) def test_defaultFetchSize(self): + # Use smaller batch sizes to avoid hitting the max. We trigger an InvalidRequest + # response for Cassandra 4.1.x and 5.0.x if we just do the whole thing as one + # large batch. We're just using this to populate values for a test, however, + # so shifting to smaller batches should be fine. + for i in range(0, 5000, 500): + with BatchQuery() as b: + range_max = i + 500 + for j in range(i, range_max): + TestModelSmall.batch(b).create(test_id=j) with BatchQuery() as b: - for i in range(5100): + for i in range(5000, 5100): TestModelSmall.batch(b).create(test_id=i) + self.assertEqual(len(TestModelSmall.objects.fetch_size(1)), 5100) self.assertEqual(len(TestModelSmall.objects.fetch_size(500)), 5100) self.assertEqual(len(TestModelSmall.objects.fetch_size(4999)), 5100) From 6f621ada3fe4bd93cc859587b19c2340f2a24705 Mon Sep 17 00:00:00 2001 From: Dmitry Kropachev Date: Tue, 20 Aug 2024 09:12:52 -0400 Subject: [PATCH 1970/2005] Remove leftover debugging code (#1222) --- cassandra/cqltypes.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index b413b1c9e5..12d90e2746 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -252,7 +252,6 @@ def lookup_casstype(casstype): try: return parse_casstype_args(casstype) except (ValueError, AssertionError, IndexError) as e: - log.debug("Exception in parse_casstype_args: %s" % e) raise ValueError("Don't know how to parse type string %r: %s" % (casstype, e)) From d05e9d33090db7dcb6d46234dde0556a2bfba75c Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Thu, 22 Aug 2024 16:58:46 -0500 Subject: [PATCH 1971/2005] PYTHON-1386 Make libev extension consistent with other extensions and allow specifying of libev install via env vars (#1223) --- setup.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/setup.py b/setup.py index 1558512fcf..0e48c3c949 100644 --- a/setup.py +++ b/setup.py @@ -120,8 +120,14 @@ def __init__(self, ext): murmur3_ext = Extension('cassandra.cmurmur3', sources=['cassandra/cmurmur3.c']) -libev_includes = ['/usr/include/libev', '/usr/local/include', '/opt/local/include', '/usr/include'] -libev_libdirs = ['/usr/local/lib', '/opt/local/lib', '/usr/lib64'] +def eval_env_var_as_array(varname): + val = os.environ.get(varname) + return None if not val else [v.strip() for v in val.split(',')] + +DEFAULT_LIBEV_INCLUDES = ['/usr/include/libev', '/usr/local/include', '/opt/local/include', '/usr/include'] +DEFAULT_LIBEV_LIBDIRS = ['/usr/local/lib', '/opt/local/lib', '/usr/lib64'] +libev_includes = eval_env_var_as_array('CASS_DRIVER_LIBEV_INCLUDES') or DEFAULT_LIBEV_INCLUDES +libev_libdirs = eval_env_var_as_array('CASS_DRIVER_LIBEV_LIBS') or DEFAULT_LIBEV_LIBDIRS if is_macos: libev_includes.extend(['/opt/homebrew/include', os.path.expanduser('~/homebrew/include')]) libev_libdirs.extend(['/opt/homebrew/lib']) @@ -165,7 +171,7 @@ def __init__(self, ext): try_extensions = "--no-extensions" not in sys.argv and is_supported_platform and is_supported_arch and not os.environ.get('CASS_DRIVER_NO_EXTENSIONS') try_murmur3 = try_extensions and "--no-murmur3" not in sys.argv -try_libev = try_extensions and "--no-libev" not in sys.argv and not is_pypy and not is_windows +try_libev = try_extensions and "--no-libev" not in sys.argv and not is_pypy and not os.environ.get('CASS_DRIVER_NO_LIBEV') try_cython = try_extensions and "--no-cython" not in sys.argv and not is_pypy and not os.environ.get('CASS_DRIVER_NO_CYTHON') try_cython &= 'egg_info' not in sys.argv # bypass setup_requires for pip egg_info calls, which will never have --install-option"--no-cython" coming fomr pip @@ -280,6 +286,7 @@ def _setup_extensions(self): self.extensions.append(murmur3_ext) if try_libev: + sys.stderr.write("Appending libev extension %s" % libev_ext) self.extensions.append(libev_ext) if try_cython: From c4a808d58aa3d6b64bd491f24548b47c716b0185 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Wed, 4 Sep 2024 11:26:40 -0500 Subject: [PATCH 1972/2005] PYTHON-1369 Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0) (#1217) --- cassandra/__init__.py | 6 - cassandra/cqltypes.py | 97 ++++++++-- cassandra/encoder.py | 5 + cassandra/marshal.py | 46 ++++- tests/integration/__init__.py | 7 +- tests/integration/standard/test_types.py | 189 ++++++++++++++++++- tests/unit/test_types.py | 226 ++++++++++++++++++----- 7 files changed, 504 insertions(+), 72 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 4a5b8b29a3..045fc98cdc 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -744,9 +744,3 @@ def __init__(self, msg, excs=[]): if excs: complete_msg += ("\nThe following exceptions were observed: \n - " + '\n - '.join(str(e) for e in excs)) Exception.__init__(self, complete_msg) - -class VectorDeserializationFailure(DriverException): - """ - The driver was unable to deserialize a given vector - """ - pass diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 12d90e2746..4c3af57887 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -48,8 +48,8 @@ int32_pack, int32_unpack, int64_pack, int64_unpack, float_pack, float_unpack, double_pack, double_unpack, varint_pack, varint_unpack, point_be, point_le, - vints_pack, vints_unpack) -from cassandra import util, VectorDeserializationFailure + vints_pack, vints_unpack, uvint_unpack, uvint_pack) +from cassandra import util _little_endian_flag = 1 # we always serialize LE import ipaddress @@ -392,6 +392,9 @@ def cass_parameterized_type(cls, full=False): """ return cls.cass_parameterized_type_with(cls.subtypes, full=full) + @classmethod + def serial_size(cls): + return None # it's initially named with a _ to avoid registering it as a real type, but # client programs may want to use the name still for isinstance(), etc @@ -457,10 +460,12 @@ def serialize(uuid, protocol_version): except AttributeError: raise TypeError("Got a non-UUID object for a UUID value") + @classmethod + def serial_size(cls): + return 16 class BooleanType(_CassandraType): typename = 'boolean' - serial_size = 1 @staticmethod def deserialize(byts, protocol_version): @@ -470,6 +475,10 @@ def deserialize(byts, protocol_version): def serialize(truth, protocol_version): return int8_pack(truth) + @classmethod + def serial_size(cls): + return 1 + class ByteType(_CassandraType): typename = 'tinyint' @@ -500,7 +509,6 @@ def serialize(var, protocol_version): class FloatType(_CassandraType): typename = 'float' - serial_size = 4 @staticmethod def deserialize(byts, protocol_version): @@ -510,10 +518,12 @@ def deserialize(byts, protocol_version): def serialize(byts, protocol_version): return float_pack(byts) + @classmethod + def serial_size(cls): + return 4 class DoubleType(_CassandraType): typename = 'double' - serial_size = 8 @staticmethod def deserialize(byts, protocol_version): @@ -523,10 +533,12 @@ def deserialize(byts, protocol_version): def serialize(byts, protocol_version): return double_pack(byts) + @classmethod + def serial_size(cls): + return 8 class LongType(_CassandraType): typename = 'bigint' - serial_size = 8 @staticmethod def deserialize(byts, protocol_version): @@ -536,10 +548,12 @@ def deserialize(byts, protocol_version): def serialize(byts, protocol_version): return int64_pack(byts) + @classmethod + def serial_size(cls): + return 8 class Int32Type(_CassandraType): typename = 'int' - serial_size = 4 @staticmethod def deserialize(byts, protocol_version): @@ -549,6 +563,9 @@ def deserialize(byts, protocol_version): def serialize(byts, protocol_version): return int32_pack(byts) + @classmethod + def serial_size(cls): + return 4 class IntegerType(_CassandraType): typename = 'varint' @@ -645,6 +662,9 @@ def serialize(v, protocol_version): return int64_pack(int(timestamp)) + @classmethod + def serial_size(cls): + return 8 class TimestampType(DateType): pass @@ -652,7 +672,6 @@ class TimestampType(DateType): class TimeUUIDType(DateType): typename = 'timeuuid' - serial_size = 16 def my_timestamp(self): return util.unix_time_from_uuid1(self.val) @@ -668,6 +687,9 @@ def serialize(timeuuid, protocol_version): except AttributeError: raise TypeError("Got a non-UUID object for a UUID value") + @classmethod + def serial_size(cls): + return 16 class SimpleDateType(_CassandraType): typename = 'date' @@ -699,7 +721,6 @@ def serialize(val, protocol_version): class ShortType(_CassandraType): typename = 'smallint' - serial_size = 2 @staticmethod def deserialize(byts, protocol_version): @@ -709,10 +730,14 @@ def deserialize(byts, protocol_version): def serialize(byts, protocol_version): return int16_pack(byts) - class TimeType(_CassandraType): typename = 'time' - serial_size = 8 + # Time should be a fixed size 8 byte type but Cassandra 5.0 code marks it as + # variable size... and we have to match what the server expects since the server + # uses that specification to encode data of that type. + #@classmethod + #def serial_size(cls): + # return 8 @staticmethod def deserialize(byts, protocol_version): @@ -1409,6 +1434,11 @@ class VectorType(_CassandraType): vector_size = 0 subtype = None + @classmethod + def serial_size(cls): + serialized_size = cls.subtype.serial_size() + return cls.vector_size * serialized_size if serialized_size is not None else None + @classmethod def apply_parameters(cls, params, names): assert len(params) == 2 @@ -1418,19 +1448,50 @@ def apply_parameters(cls, params, names): @classmethod def deserialize(cls, byts, protocol_version): - serialized_size = getattr(cls.subtype, "serial_size", None) - if not serialized_size: - raise VectorDeserializationFailure("Cannot determine serialized size for vector with subtype %s" % cls.subtype.__name__) - indexes = (serialized_size * x for x in range(0, cls.vector_size)) - return [cls.subtype.deserialize(byts[idx:idx + serialized_size], protocol_version) for idx in indexes] + serialized_size = cls.subtype.serial_size() + if serialized_size is not None: + expected_byte_size = serialized_size * cls.vector_size + if len(byts) != expected_byte_size: + raise ValueError( + "Expected vector of type {0} and dimension {1} to have serialized size {2}; observed serialized size of {3} instead"\ + .format(cls.subtype.typename, cls.vector_size, expected_byte_size, len(byts))) + indexes = (serialized_size * x for x in range(0, cls.vector_size)) + return [cls.subtype.deserialize(byts[idx:idx + serialized_size], protocol_version) for idx in indexes] + + idx = 0 + rv = [] + while (len(rv) < cls.vector_size): + try: + size, bytes_read = uvint_unpack(byts[idx:]) + idx += bytes_read + rv.append(cls.subtype.deserialize(byts[idx:idx + size], protocol_version)) + idx += size + except: + raise ValueError("Error reading additional data during vector deserialization after successfully adding {} elements"\ + .format(len(rv))) + + # If we have any additional data in the serialized vector treat that as an error as well + if idx < len(byts): + raise ValueError("Additional bytes remaining after vector deserialization completed") + return rv @classmethod def serialize(cls, v, protocol_version): + v_length = len(v) + if cls.vector_size != v_length: + raise ValueError( + "Expected sequence of size {0} for vector of type {1} and dimension {0}, observed sequence of length {2}"\ + .format(cls.vector_size, cls.subtype.typename, v_length)) + + serialized_size = cls.subtype.serial_size() buf = io.BytesIO() for item in v: - buf.write(cls.subtype.serialize(item, protocol_version)) + item_bytes = cls.subtype.serialize(item, protocol_version) + if serialized_size is None: + buf.write(uvint_pack(len(item_bytes))) + buf.write(item_bytes) return buf.getvalue() @classmethod def cql_parameterized_type(cls): - return "%s<%s, %s>" % (cls.typename, cls.subtype.typename, cls.vector_size) + return "%s<%s, %s>" % (cls.typename, cls.subtype.cql_parameterized_type(), cls.vector_size) diff --git a/cassandra/encoder.py b/cassandra/encoder.py index 31d90549f4..e834550fd3 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -21,6 +21,7 @@ log = logging.getLogger(__name__) from binascii import hexlify +from decimal import Decimal import calendar import datetime import math @@ -59,6 +60,7 @@ class Encoder(object): def __init__(self): self.mapping = { float: self.cql_encode_float, + Decimal: self.cql_encode_decimal, bytearray: self.cql_encode_bytes, str: self.cql_encode_str, int: self.cql_encode_object, @@ -217,3 +219,6 @@ def cql_encode_ipaddress(self, val): is suitable for ``inet`` type columns. """ return "'%s'" % val.compressed + + def cql_encode_decimal(self, val): + return self.cql_encode_float(float(val)) \ No newline at end of file diff --git a/cassandra/marshal.py b/cassandra/marshal.py index 726f0819eb..a527a9e1d7 100644 --- a/cassandra/marshal.py +++ b/cassandra/marshal.py @@ -111,7 +111,6 @@ def vints_unpack(term): # noqa return tuple(values) - def vints_pack(values): revbytes = bytearray() values = [int(v) for v in values[::-1]] @@ -143,3 +142,48 @@ def vints_pack(values): revbytes.reverse() return bytes(revbytes) + +def uvint_unpack(bytes): + first_byte = bytes[0] + + if (first_byte & 128) == 0: + return (first_byte,1) + + num_extra_bytes = 8 - (~first_byte & 0xff).bit_length() + rv = first_byte & (0xff >> num_extra_bytes) + for idx in range(1,num_extra_bytes + 1): + new_byte = bytes[idx] + rv <<= 8 + rv |= new_byte & 0xff + + return (rv, num_extra_bytes + 1) + +def uvint_pack(val): + rv = bytearray() + if val < 128: + rv.append(val) + else: + v = val + num_extra_bytes = 0 + num_bits = v.bit_length() + # We need to reserve (num_extra_bytes+1) bits in the first byte + # ie. with 1 extra byte, the first byte needs to be something like '10XXXXXX' # 2 bits reserved + # ie. with 8 extra bytes, the first byte needs to be '11111111' # 8 bits reserved + reserved_bits = num_extra_bytes + 1 + while num_bits > (8-(reserved_bits)): + num_extra_bytes += 1 + num_bits -= 8 + reserved_bits = min(num_extra_bytes + 1, 8) + rv.append(v & 0xff) + v >>= 8 + + if num_extra_bytes > 8: + raise ValueError('Value %d is too big and cannot be encoded as vint' % val) + + # We can now store the last bits in the first byte + n = 8 - num_extra_bytes + v |= (0xff >> n << n) + rv.append(abs(v)) + + rv.reverse() + return bytes(rv) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 5aa702c727..e389742b74 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -330,9 +330,10 @@ def _id_and_mark(f): greaterthanorequalcass36 = unittest.skipUnless(CASSANDRA_VERSION >= Version('3.6'), 'Cassandra version 3.6 or greater required') greaterthanorequalcass3_10 = unittest.skipUnless(CASSANDRA_VERSION >= Version('3.10'), 'Cassandra version 3.10 or greater required') greaterthanorequalcass3_11 = unittest.skipUnless(CASSANDRA_VERSION >= Version('3.11'), 'Cassandra version 3.11 or greater required') -greaterthanorequalcass40 = unittest.skipUnless(CASSANDRA_VERSION >= Version('4.0-a'), 'Cassandra version 4.0 or greater required') -lessthanorequalcass40 = unittest.skipUnless(CASSANDRA_VERSION <= Version('4.0-a'), 'Cassandra version less or equal to 4.0 required') -lessthancass40 = unittest.skipUnless(CASSANDRA_VERSION < Version('4.0-a'), 'Cassandra version less than 4.0 required') +greaterthanorequalcass40 = unittest.skipUnless(CASSANDRA_VERSION >= Version('4.0'), 'Cassandra version 4.0 or greater required') +greaterthanorequalcass50 = unittest.skipUnless(CASSANDRA_VERSION >= Version('5.0-beta'), 'Cassandra version 5.0 or greater required') +lessthanorequalcass40 = unittest.skipUnless(CASSANDRA_VERSION <= Version('4.0'), 'Cassandra version less or equal to 4.0 required') +lessthancass40 = unittest.skipUnless(CASSANDRA_VERSION < Version('4.0'), 'Cassandra version less than 4.0 required') lessthancass30 = unittest.skipUnless(CASSANDRA_VERSION < Version('3.0'), 'Cassandra version less then 3.0 required') greaterthanorequaldse68 = unittest.skipUnless(DSE_VERSION and DSE_VERSION >= Version('6.8'), "DSE 6.8 or greater required for this test") greaterthanorequaldse67 = unittest.skipUnless(DSE_VERSION and DSE_VERSION >= Version('6.7'), "DSE 6.7 or greater required for this test") diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index 016c2b9785..55bf117ace 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -14,9 +14,17 @@ import unittest -from datetime import datetime import ipaddress import math +import random +import string +import socket +import uuid + +from datetime import datetime, date, time, timedelta +from decimal import Decimal +from functools import partial + from packaging.version import Version import cassandra @@ -31,7 +39,7 @@ from tests.integration import use_singledc, execute_until_pass, notprotocolv1, \ BasicSharedKeyspaceUnitTestCase, greaterthancass21, lessthancass30, greaterthanorequaldse51, \ - DSE_VERSION, greaterthanorequalcass3_10, requiredse, TestCluster + DSE_VERSION, greaterthanorequalcass3_10, requiredse, TestCluster, greaterthanorequalcass50 from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, COLLECTION_TYPES, PRIMITIVE_DATATYPES_KEYS, \ get_sample, get_all_samples, get_collection_sample @@ -1291,3 +1299,180 @@ def run_inserts_at_version(self, proto_ver): finally: session.cluster.shutdown() + +@greaterthanorequalcass50 +class TypeTestsVector(BasicSharedKeyspaceUnitTestCase): + + def _get_first_j(self, rs): + rows = rs.all() + self.assertEqual(len(rows), 1) + return rows[0].j + + def _get_row_simple(self, idx, table_name): + rs = self.session.execute("select j from {0}.{1} where i = {2}".format(self.keyspace_name, table_name, idx)) + return self._get_first_j(rs) + + def _get_row_prepared(self, idx, table_name): + cql = "select j from {0}.{1} where i = ?".format(self.keyspace_name, table_name) + ps = self.session.prepare(cql) + rs = self.session.execute(ps, [idx]) + return self._get_first_j(rs) + + def _round_trip_test(self, subtype, subtype_fn, test_fn, use_positional_parameters=True): + + table_name = subtype.replace("<","A").replace(">", "B").replace(",", "C") + "isH" + + def random_subtype_vector(): + return [subtype_fn() for _ in range(3)] + + ddl = """CREATE TABLE {0}.{1} ( + i int PRIMARY KEY, + j vector<{2}, 3>)""".format(self.keyspace_name, table_name, subtype) + self.session.execute(ddl) + + if use_positional_parameters: + cql = "insert into {0}.{1} (i,j) values (%s,%s)".format(self.keyspace_name, table_name) + expected1 = random_subtype_vector() + data1 = {1:random_subtype_vector(), 2:expected1, 3:random_subtype_vector()} + for k,v in data1.items(): + # Attempt a set of inserts using the driver's support for positional params + self.session.execute(cql, (k,v)) + + cql = "insert into {0}.{1} (i,j) values (?,?)".format(self.keyspace_name, table_name) + expected2 = random_subtype_vector() + ps = self.session.prepare(cql) + data2 = {4:random_subtype_vector(), 5:expected2, 6:random_subtype_vector()} + for k,v in data2.items(): + # Add some additional rows via prepared statements + self.session.execute(ps, [k,v]) + + # Use prepared queries to gather data from the rows we added via simple queries and vice versa + if use_positional_parameters: + observed1 = self._get_row_prepared(2, table_name) + for idx in range(0, 3): + test_fn(observed1[idx], expected1[idx]) + + observed2 = self._get_row_simple(5, table_name) + for idx in range(0, 3): + test_fn(observed2[idx], expected2[idx]) + + def test_round_trip_integers(self): + self._round_trip_test("int", partial(random.randint, 0, 2 ** 31), self.assertEqual) + self._round_trip_test("bigint", partial(random.randint, 0, 2 ** 63), self.assertEqual) + self._round_trip_test("smallint", partial(random.randint, 0, 2 ** 15), self.assertEqual) + self._round_trip_test("tinyint", partial(random.randint, 0, (2 ** 7) - 1), self.assertEqual) + self._round_trip_test("varint", partial(random.randint, 0, 2 ** 63), self.assertEqual) + + def test_round_trip_floating_point(self): + _almost_equal_test_fn = partial(self.assertAlmostEqual, places=5) + def _random_decimal(): + return Decimal(random.uniform(0.0, 100.0)) + + # Max value here isn't really connected to max value for floating point nums in IEEE 754... it's used here + # mainly as a convenient benchmark + self._round_trip_test("float", partial(random.uniform, 0.0, 100.0), _almost_equal_test_fn) + self._round_trip_test("double", partial(random.uniform, 0.0, 100.0), _almost_equal_test_fn) + self._round_trip_test("decimal", _random_decimal, _almost_equal_test_fn) + + def test_round_trip_text(self): + def _random_string(): + return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(24)) + + self._round_trip_test("ascii", _random_string, self.assertEqual) + self._round_trip_test("text", _random_string, self.assertEqual) + + def test_round_trip_date_and_time(self): + _almost_equal_test_fn = partial(self.assertAlmostEqual, delta=timedelta(seconds=1)) + def _random_datetime(): + return datetime.today() - timedelta(hours=random.randint(0,18), days=random.randint(1,1000)) + def _random_date(): + return _random_datetime().date() + def _random_time(): + return _random_datetime().time() + + self._round_trip_test("date", _random_date, self.assertEqual) + self._round_trip_test("time", _random_time, self.assertEqual) + self._round_trip_test("timestamp", _random_datetime, _almost_equal_test_fn) + + def test_round_trip_uuid(self): + self._round_trip_test("uuid", uuid.uuid1, self.assertEqual) + self._round_trip_test("timeuuid", uuid.uuid1, self.assertEqual) + + def test_round_trip_miscellany(self): + def _random_bytes(): + return random.getrandbits(32).to_bytes(4,'big') + def _random_boolean(): + return random.choice([True, False]) + def _random_duration(): + return Duration(random.randint(0,11), random.randint(0,11), random.randint(0,10000)) + def _random_inet(): + return socket.inet_ntoa(_random_bytes()) + + self._round_trip_test("boolean", _random_boolean, self.assertEqual) + self._round_trip_test("duration", _random_duration, self.assertEqual) + self._round_trip_test("inet", _random_inet, self.assertEqual) + self._round_trip_test("blob", _random_bytes, self.assertEqual) + + def test_round_trip_collections(self): + def _random_seq(): + return [random.randint(0,100000) for _ in range(8)] + def _random_set(): + return set(_random_seq()) + def _random_map(): + return {k:v for (k,v) in zip(_random_seq(), _random_seq())} + + # Goal here is to test collections of both fixed and variable size subtypes + self._round_trip_test("list", _random_seq, self.assertEqual) + self._round_trip_test("list", _random_seq, self.assertEqual) + self._round_trip_test("set", _random_set, self.assertEqual) + self._round_trip_test("set", _random_set, self.assertEqual) + self._round_trip_test("map", _random_map, self.assertEqual) + self._round_trip_test("map", _random_map, self.assertEqual) + self._round_trip_test("map", _random_map, self.assertEqual) + self._round_trip_test("map", _random_map, self.assertEqual) + + def test_round_trip_vector_of_vectors(self): + def _random_vector(): + return [random.randint(0,100000) for _ in range(2)] + + self._round_trip_test("vector", _random_vector, self.assertEqual) + self._round_trip_test("vector", _random_vector, self.assertEqual) + + def test_round_trip_tuples(self): + def _random_tuple(): + return (random.randint(0,100000),random.randint(0,100000)) + + # Unfortunately we can't use positional parameters when inserting tuples because the driver will try to encode + # them as lists before sending them to the server... and that confuses the parsing logic. + self._round_trip_test("tuple", _random_tuple, self.assertEqual, use_positional_parameters=False) + self._round_trip_test("tuple", _random_tuple, self.assertEqual, use_positional_parameters=False) + self._round_trip_test("tuple", _random_tuple, self.assertEqual, use_positional_parameters=False) + self._round_trip_test("tuple", _random_tuple, self.assertEqual, use_positional_parameters=False) + + def test_round_trip_udts(self): + def _udt_equal_test_fn(udt1, udt2): + self.assertEqual(udt1.a, udt2.a) + self.assertEqual(udt1.b, udt2.b) + + self.session.execute("create type {}.fixed_type (a int, b int)".format(self.keyspace_name)) + self.session.execute("create type {}.mixed_type_one (a int, b varint)".format(self.keyspace_name)) + self.session.execute("create type {}.mixed_type_two (a varint, b int)".format(self.keyspace_name)) + self.session.execute("create type {}.var_type (a varint, b varint)".format(self.keyspace_name)) + + class GeneralUDT: + def __init__(self, a, b): + self.a = a + self.b = b + + self.cluster.register_user_type(self.keyspace_name,'fixed_type', GeneralUDT) + self.cluster.register_user_type(self.keyspace_name,'mixed_type_one', GeneralUDT) + self.cluster.register_user_type(self.keyspace_name,'mixed_type_two', GeneralUDT) + self.cluster.register_user_type(self.keyspace_name,'var_type', GeneralUDT) + + def _random_udt(): + return GeneralUDT(random.randint(0,100000),random.randint(0,100000)) + + self._round_trip_test("fixed_type", _random_udt, _udt_equal_test_fn) + self._round_trip_test("mixed_type_one", _random_udt, _udt_equal_test_fn) + self._round_trip_test("mixed_type_two", _random_udt, _udt_equal_test_fn) + self._round_trip_test("var_type", _random_udt, _udt_equal_test_fn) diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index 25641c046d..aba11d4ced 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -20,7 +20,7 @@ from binascii import unhexlify import cassandra -from cassandra import util, VectorDeserializationFailure +from cassandra import util from cassandra.cqltypes import ( CassandraType, DateRangeType, DateType, DecimalType, EmptyValue, LongType, SetType, UTF8Type, @@ -196,6 +196,16 @@ def test_parse_casstype_vector(self): self.assertEqual(3, ctype.vector_size) self.assertEqual(FloatType, ctype.subtype) + def test_parse_casstype_vector_of_vectors(self): + inner_type = "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.FloatType, 4)" + ctype = parse_casstype_args("org.apache.cassandra.db.marshal.VectorType(%s, 3)" % (inner_type)) + self.assertTrue(issubclass(ctype, VectorType)) + self.assertEqual(3, ctype.vector_size) + sub_ctype = ctype.subtype + self.assertTrue(issubclass(sub_ctype, VectorType)) + self.assertEqual(4, sub_ctype.vector_size) + self.assertEqual(FloatType, sub_ctype.subtype) + def test_empty_value(self): self.assertEqual(str(EmptyValue()), 'EMPTY') @@ -309,8 +319,44 @@ def test_cql_quote(self): self.assertEqual(cql_quote('test'), "'test'") self.assertEqual(cql_quote(0), '0') - def test_vector_round_trip_types_with_serialized_size(self): - # Test all the types which specify a serialized size... see PYTHON-1371 for details + +class VectorTests(unittest.TestCase): + def _normalize_set(self, val): + if isinstance(val, set) or isinstance(val, util.SortedSet): + return frozenset([self._normalize_set(v) for v in val]) + return val + + def _round_trip_compare_fn(self, first, second): + if isinstance(first, float): + self.assertAlmostEqual(first, second, places=5) + elif isinstance(first, list): + self.assertEqual(len(first), len(second)) + for (felem, selem) in zip(first, second): + self._round_trip_compare_fn(felem, selem) + elif isinstance(first, set) or isinstance(first, frozenset): + self.assertEqual(len(first), len(second)) + first_norm = self._normalize_set(first) + second_norm = self._normalize_set(second) + self.assertEqual(first_norm, second_norm) + elif isinstance(first, dict): + for ((fk,fv), (sk,sv)) in zip(first.items(), second.items()): + self._round_trip_compare_fn(fk, sk) + self._round_trip_compare_fn(fv, sv) + else: + self.assertEqual(first,second) + + def _round_trip_test(self, data, ctype_str): + ctype = parse_casstype_args(ctype_str) + data_bytes = ctype.serialize(data, 0) + serialized_size = ctype.subtype.serial_size() + if serialized_size: + self.assertEqual(serialized_size * len(data), len(data_bytes)) + result = ctype.deserialize(data_bytes, 0) + self.assertEqual(len(data), len(result)) + for idx in range(0,len(data)): + self._round_trip_compare_fn(data[idx], result[idx]) + + def test_round_trip_basic_types_with_fixed_serialized_size(self): self._round_trip_test([True, False, False, True], \ "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.BooleanType, 4)") self._round_trip_test([3.4, 2.9, 41.6, 12.0], \ @@ -325,55 +371,151 @@ def test_vector_round_trip_types_with_serialized_size(self): "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.TimeUUIDType, 4)") self._round_trip_test([3, 2, 41, 12], \ "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.ShortType, 4)") - self._round_trip_test([datetime.time(1,1,1), datetime.time(2,2,2), datetime.time(3,3,3)], \ - "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.TimeType, 3)") - def test_vector_round_trip_types_without_serialized_size(self): - # Test all the types which do not specify a serialized size... see PYTHON-1371 for details + def test_round_trip_basic_types_without_fixed_serialized_size(self): # Varints - with self.assertRaises(VectorDeserializationFailure): - self._round_trip_test([3, 2, 41, 12], \ - "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.IntegerType, 4)") + self._round_trip_test([3, 2, 41, 12], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.IntegerType, 4)") # ASCII text - with self.assertRaises(VectorDeserializationFailure): - self._round_trip_test(["abc", "def", "ghi", "jkl"], \ - "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.AsciiType, 4)") + self._round_trip_test(["abc", "def", "ghi", "jkl"], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.AsciiType, 4)") # UTF8 text - with self.assertRaises(VectorDeserializationFailure): - self._round_trip_test(["abc", "def", "ghi", "jkl"], \ - "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.UTF8Type, 4)") + self._round_trip_test(["abc", "def", "ghi", "jkl"], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.UTF8Type, 4)") + # Time is something of a weird one. By rights it should be a fixed size type but C* code marks it as variable + # size. We're forced to follow the C* code base (since that's who'll be providing the data we're parsing) so + # we match what they're doing. + self._round_trip_test([datetime.time(1,1,1), datetime.time(2,2,2), datetime.time(3,3,3)], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.TimeType, 3)") # Duration (containts varints) - with self.assertRaises(VectorDeserializationFailure): - self._round_trip_test([util.Duration(1,1,1), util.Duration(2,2,2), util.Duration(3,3,3)], \ - "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.DurationType, 3)") - # List (of otherwise serializable type) - with self.assertRaises(VectorDeserializationFailure): - self._round_trip_test([[3.4], [2.9], [41.6], [12.0]], \ - "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.ListType(org.apache.cassandra.db.marshal.FloatType), 4)") - # Set (of otherwise serializable type) - with self.assertRaises(VectorDeserializationFailure): - self._round_trip_test([set([3.4]), set([2.9]), set([41.6]), set([12.0])], \ - "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.FloatType), 4)") - # Map (of otherwise serializable types) - with self.assertRaises(VectorDeserializationFailure): - self._round_trip_test([{1:3.4}, {2:2.9}, {3:41.6}, {4:12.0}], \ + self._round_trip_test([util.Duration(1,1,1), util.Duration(2,2,2), util.Duration(3,3,3)], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.DurationType, 3)") + + def test_round_trip_collection_types(self): + # List (subtype of fixed size) + self._round_trip_test([[1, 2, 3, 4], [5, 6], [7, 8, 9, 10], [11, 12]], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.ListType \ + (org.apache.cassandra.db.marshal.Int32Type), 4)") + # Set (subtype of fixed size) + self._round_trip_test([set([1, 2, 3, 4]), set([5, 6]), set([7, 8, 9, 10]), set([11, 12])], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.SetType \ + (org.apache.cassandra.db.marshal.Int32Type), 4)") + # Map (subtype of fixed size) + self._round_trip_test([{1:1.2}, {2:3.4}, {3:5.6}, {4:7.8}], \ "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.MapType \ (org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.FloatType), 4)") + # List (subtype without fixed size) + self._round_trip_test([["one","two"], ["three","four"], ["five","six"], ["seven","eight"]], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.ListType \ + (org.apache.cassandra.db.marshal.AsciiType), 4)") + # Set (subtype without fixed size) + self._round_trip_test([set(["one","two"]), set(["three","four"]), set(["five","six"]), set(["seven","eight"])], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.SetType \ + (org.apache.cassandra.db.marshal.AsciiType), 4)") + # Map (subtype without fixed size) + self._round_trip_test([{1:"one"}, {2:"two"}, {3:"three"}, {4:"four"}], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.MapType \ + (org.apache.cassandra.db.marshal.IntegerType,org.apache.cassandra.db.marshal.AsciiType), 4)") + # List of lists (subtype without fixed size) + data = [[["one","two"],["three"]], [["four"],["five"]], [["six","seven","eight"]], [["nine"]]] + ctype = "org.apache.cassandra.db.marshal.VectorType\ + (org.apache.cassandra.db.marshal.ListType\ + (org.apache.cassandra.db.marshal.ListType\ + (org.apache.cassandra.db.marshal.AsciiType)), 4)" + self._round_trip_test(data, ctype) + # Set of sets (subtype without fixed size) + data = [set([frozenset(["one","two"]),frozenset(["three"])]),\ + set([frozenset(["four"]),frozenset(["five"])]),\ + set([frozenset(["six","seven","eight"])]), + set([frozenset(["nine"])])] + ctype = "org.apache.cassandra.db.marshal.VectorType\ + (org.apache.cassandra.db.marshal.SetType\ + (org.apache.cassandra.db.marshal.SetType\ + (org.apache.cassandra.db.marshal.AsciiType)), 4)" + self._round_trip_test(data, ctype) + # Map of maps (subtype without fixed size) + data = [{100:{1:"one",2:"two",3:"three"}},\ + {200:{4:"four",5:"five"}},\ + {300:{}},\ + {400:{6:"six"}}] + ctype = "org.apache.cassandra.db.marshal.VectorType\ + (org.apache.cassandra.db.marshal.MapType\ + (org.apache.cassandra.db.marshal.Int32Type,\ + org.apache.cassandra.db.marshal.MapType \ + (org.apache.cassandra.db.marshal.IntegerType,org.apache.cassandra.db.marshal.AsciiType)), 4)" + self._round_trip_test(data, ctype) + + def test_round_trip_vector_of_vectors(self): + # Subytpes of subtypes with a fixed size + self._round_trip_test([[1.2, 3.4], [5.6, 7.8], [9.10, 11.12], [13.14, 15.16]], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.VectorType \ + (org.apache.cassandra.db.marshal.FloatType,2), 4)") + + # Subytpes of subtypes without a fixed size + self._round_trip_test([["one", "two"], ["three", "four"], ["five", "six"], ["seven", "eight"]], \ + "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.VectorType \ + (org.apache.cassandra.db.marshal.AsciiType,2), 4)") + + # parse_casstype_args() is tested above... we're explicitly concerned about cql_parapmeterized_type() output here + def test_cql_parameterized_type(self): + # Base vector functionality + ctype = parse_casstype_args("org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.FloatType, 4)") + self.assertEqual(ctype.cql_parameterized_type(), "org.apache.cassandra.db.marshal.VectorType") - def _round_trip_test(self, data, ctype_str): - ctype = parse_casstype_args(ctype_str) - data_bytes = ctype.serialize(data, 0) - serialized_size = getattr(ctype.subtype, "serial_size", None) - if serialized_size: - self.assertEqual(serialized_size * len(data), len(data_bytes)) - result = ctype.deserialize(data_bytes, 0) - self.assertEqual(len(data), len(result)) - for idx in range(0,len(data)): - self.assertAlmostEqual(data[idx], result[idx], places=5) + # Test vector-of-vectors + inner_type = "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.FloatType, 4)" + ctype = parse_casstype_args("org.apache.cassandra.db.marshal.VectorType(%s, 3)" % (inner_type)) + inner_parsed_type = "org.apache.cassandra.db.marshal.VectorType" + self.assertEqual(ctype.cql_parameterized_type(), "org.apache.cassandra.db.marshal.VectorType<%s, 3>" % (inner_parsed_type)) + + def test_serialization_fixed_size_too_small(self): + ctype = parse_casstype_args("org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.FloatType, 5)") + with self.assertRaisesRegex(ValueError, "Expected sequence of size 5 for vector of type float and dimension 5, observed sequence of length 4"): + ctype.serialize([1.2, 3.4, 5.6, 7.8], 0) - def test_vector_cql_parameterized_type(self): + def test_serialization_fixed_size_too_big(self): ctype = parse_casstype_args("org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.FloatType, 4)") - self.assertEqual(ctype.cql_parameterized_type(), "org.apache.cassandra.db.marshal.VectorType") + with self.assertRaisesRegex(ValueError, "Expected sequence of size 4 for vector of type float and dimension 4, observed sequence of length 5"): + ctype.serialize([1.2, 3.4, 5.6, 7.8, 9.10], 0) + + def test_serialization_variable_size_too_small(self): + ctype = parse_casstype_args("org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.IntegerType, 5)") + with self.assertRaisesRegex(ValueError, "Expected sequence of size 5 for vector of type varint and dimension 5, observed sequence of length 4"): + ctype.serialize([1, 2, 3, 4], 0) + + def test_serialization_variable_size_too_big(self): + ctype = parse_casstype_args("org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.IntegerType, 4)") + with self.assertRaisesRegex(ValueError, "Expected sequence of size 4 for vector of type varint and dimension 4, observed sequence of length 5"): + ctype.serialize([1, 2, 3, 4, 5], 0) + + def test_deserialization_fixed_size_too_small(self): + ctype_four = parse_casstype_args("org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.FloatType, 4)") + ctype_four_bytes = ctype_four.serialize([1.2, 3.4, 5.6, 7.8], 0) + ctype_five = parse_casstype_args("org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.FloatType, 5)") + with self.assertRaisesRegex(ValueError, "Expected vector of type float and dimension 5 to have serialized size 20; observed serialized size of 16 instead"): + ctype_five.deserialize(ctype_four_bytes, 0) + + def test_deserialization_fixed_size_too_big(self): + ctype_five = parse_casstype_args("org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.FloatType, 5)") + ctype_five_bytes = ctype_five.serialize([1.2, 3.4, 5.6, 7.8, 9.10], 0) + ctype_four = parse_casstype_args("org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.FloatType, 4)") + with self.assertRaisesRegex(ValueError, "Expected vector of type float and dimension 4 to have serialized size 16; observed serialized size of 20 instead"): + ctype_four.deserialize(ctype_five_bytes, 0) + + def test_deserialization_variable_size_too_small(self): + ctype_four = parse_casstype_args("org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.IntegerType, 4)") + ctype_four_bytes = ctype_four.serialize([1, 2, 3, 4], 0) + ctype_five = parse_casstype_args("org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.IntegerType, 5)") + with self.assertRaisesRegex(ValueError, "Error reading additional data during vector deserialization after successfully adding 4 elements"): + ctype_five.deserialize(ctype_four_bytes, 0) + + def test_deserialization_variable_size_too_big(self): + ctype_five = parse_casstype_args("org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.IntegerType, 5)") + ctype_five_bytes = ctype_five.serialize([1, 2, 3, 4, 5], 0) + ctype_four = parse_casstype_args("org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.IntegerType, 4)") + with self.assertRaisesRegex(ValueError, "Additional bytes remaining after vector deserialization completed"): + ctype_four.deserialize(ctype_five_bytes, 0) + ZERO = datetime.timedelta(0) From 7fdc76d5fcad6bbaf398d82f5efe57c3932d8703 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sebasti=C3=A1n=20Est=C3=A9vez?= Date: Wed, 4 Sep 2024 12:53:13 -0400 Subject: [PATCH 1973/2005] cloud config connect_timeout (#1224) Co-authored-by: Madhavan --- docs/cloud.rst | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/docs/cloud.rst b/docs/cloud.rst index 3607ef8b4e..0b4260393a 100644 --- a/docs/cloud.rst +++ b/docs/cloud.rst @@ -38,6 +38,20 @@ the default temporary directory of the system will be used as base dir. } ... +connect_timeout ++++++++++++++++++++ +`cloud_config.connect_timeout` controls the timeout for the request connection against the Astra DB metadata service in seconds. Increase this value if the error message suggests a timeout in `get_cloud_config`. + + +.. code:: python + + cloud_config = { + 'secure_connect_bundle': '/path/to/secure-connect-dbname.zip', + 'connect_timeout': 120 + } + ... + + Astra Differences ================== In most circumstances, the client code for interacting with an Astra cluster will be the same as interacting with any other Cassandra cluster. The exceptions being: From b5fe2908a1235dc21945a292883ad646678d598c Mon Sep 17 00:00:00 2001 From: absurdfarce Date: Wed, 4 Sep 2024 12:05:13 -0500 Subject: [PATCH 1974/2005] Minor refactoring of prior commit --- docs/cloud.rst | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/cloud.rst b/docs/cloud.rst index 0b4260393a..3230720ec9 100644 --- a/docs/cloud.rst +++ b/docs/cloud.rst @@ -40,8 +40,11 @@ the default temporary directory of the system will be used as base dir. connect_timeout +++++++++++++++++++ -`cloud_config.connect_timeout` controls the timeout for the request connection against the Astra DB metadata service in seconds. Increase this value if the error message suggests a timeout in `get_cloud_config`. +As part of the process of connecting to Astra the Python driver will query a service to retrieve +current information about your cluster. You can control the connection timeout for this operation +using *connect_timeout*. If you observe errors in `read_metadata_info` you might consider increasing +this parameter. This timeout is specified in seconds. .. code:: python @@ -51,7 +54,6 @@ connect_timeout } ... - Astra Differences ================== In most circumstances, the client code for interacting with an Astra cluster will be the same as interacting with any other Cassandra cluster. The exceptions being: From c38f3615886f1340d9433068a924bd9b662b7ec0 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Thu, 5 Sep 2024 17:09:53 -0500 Subject: [PATCH 1975/2005] Release 3.29.2: changelog, version and documentation (#1226) --- CHANGELOG.rst | 25 +++++++++++++++++++++++++ cassandra/__init__.py | 2 +- docs/installation.rst | 8 +++++--- 3 files changed, 31 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 38a7d03f89..bcb0fdb7a0 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,28 @@ +3.29.2 +====== +September 9, 2024 + +Features +-------- +* Convert to pytest for running unit and integration tests (PYTHON-1297) +* Add support for Cassandra 4.1.x and 5.0 releases to CI (PYTHON-1393) +* Extend driver vector support to arbitrary subtypes and fix handling of variable length types (PYTHON-1369) + +Bug Fixes +--------- +* Python NumpyProtocolHandler does not work with NumPy 1.24.0 or greater (PYTHON-1359) +* cibuildwheel appears to not be stripping Cython-generated shared objects (PYTHON-1387) +* Windows build for Python 3.12 compiled without libev support (PYTHON-1386) + +Others +------ +* Update README.rst with badges for version and license (PR 1210) +* Remove dependency on old mock external module (PR 1201) +* Removed future print_function, division, and with and some pre 3.7 handling (PR 1208) +* Update geomet dependency (PR 1207) +* Remove problematic escape sequences in some docstrings to avoid SyntaxWarning in Python 3.12 (PR 1205) +* Use timezone-aware API to avoid deprecated warning (PR 1213) + 3.29.1 ====== March 19, 2024 diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 045fc98cdc..a9602a9f88 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 29, 1) +__version_info__ = (3, 29, 2) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs/installation.rst b/docs/installation.rst index 8081f3b22c..84ea16b32b 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -26,7 +26,7 @@ To check if the installation was successful, you can run:: python -c 'import cassandra; print(cassandra.__version__)' -It should print something like "3.29.1". +It should print something like "3.29.2". .. _installation-datastax-graph: @@ -238,8 +238,10 @@ through `Homebrew `_. For example, on Mac OS X:: $ brew install libev -The libev extension is not built for Windows (the build process is complex, and the Windows implementation uses -select anyway). +The libev extension can now be built for Windows as of Python driver version 3.29.2. You can +install libev using any Windows package manager. For example, to install using `vcpkg `_: + + $ vcpkg install libev If successful, you should be able to build and install the extension (just using ``setup.py build`` or ``setup.py install``) and then use From a2d38f41dedd21f4c93784e558bec3b03fff4f19 Mon Sep 17 00:00:00 2001 From: absurdfarce Date: Wed, 11 Sep 2024 15:36:09 -0500 Subject: [PATCH 1976/2005] Update 3.29 hash in docs.yaml to 3.29.2 tag --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index 07e2742637..96ff2f8e9f 100644 --- a/docs.yaml +++ b/docs.yaml @@ -23,7 +23,7 @@ sections: CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - name: '3.29' - ref: 1a947f84 + ref: c38f3615 - name: '3.28' ref: 4325afb6 - name: '3.27' From 434b1f52736c57ffc0ac296286d7ece3a16e06e4 Mon Sep 17 00:00:00 2001 From: absurdfarce Date: Wed, 11 Sep 2024 16:50:15 -0500 Subject: [PATCH 1977/2005] Installation doc formatting fix --- docs/installation.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/installation.rst b/docs/installation.rst index 84ea16b32b..c231f8e5d9 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -239,7 +239,7 @@ through `Homebrew `_. For example, on Mac OS X:: $ brew install libev The libev extension can now be built for Windows as of Python driver version 3.29.2. You can -install libev using any Windows package manager. For example, to install using `vcpkg `_: +install libev using any Windows package manager. For example, to install using `vcpkg `_:: $ vcpkg install libev From 6e2ffd4e1ddc3c7180c585c9a11de3d1ce350e62 Mon Sep 17 00:00:00 2001 From: absurdfarce Date: Wed, 11 Sep 2024 16:58:29 -0500 Subject: [PATCH 1978/2005] Update 3.29 hash in docs.yaml to include post-release doc fix --- docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.yaml b/docs.yaml index 96ff2f8e9f..c0f97a8c5a 100644 --- a/docs.yaml +++ b/docs.yaml @@ -23,7 +23,7 @@ sections: CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force versions: - name: '3.29' - ref: c38f3615 + ref: 434b1f52 - name: '3.28' ref: 4325afb6 - name: '3.27' From 1798cf7464e53e01abf7f4c820b60d75beac37da Mon Sep 17 00:00:00 2001 From: Brad Schoening <5796692+bschoening@users.noreply.github.com> Date: Mon, 23 Sep 2024 16:50:12 -0400 Subject: [PATCH 1979/2005] Upgraded cython to 3.0.x (#1221) --- setup.py | 2 +- test-requirements.txt | 2 +- tox.ini | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/setup.py b/setup.py index 0e48c3c949..79fda90eac 100644 --- a/setup.py +++ b/setup.py @@ -375,7 +375,7 @@ def run_setup(extensions): # 1.) build_ext eats errors at compile time, letting the install complete while producing useful feedback # 2.) there could be a case where the python environment has cython installed but the system doesn't have build tools if pre_build_check(): - cython_dep = 'Cython>=0.20,!=0.25,<0.30' + cython_dep = 'Cython>=3.0' user_specified_cython_version = os.environ.get('CASS_DRIVER_ALLOWED_CYTHON_VERSION') if user_specified_cython_version is not None: cython_dep = 'Cython==%s' % (user_specified_cython_version,) diff --git a/test-requirements.txt b/test-requirements.txt index 94ac611707..6499e20841 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -8,7 +8,7 @@ pure-sasl twisted[tls] gevent eventlet -cython>=0.20,<0.30 +cython>=3.0 packaging futurist asynctest diff --git a/tox.ini b/tox.ini index a8dda2d9c2..6748a6e45f 100644 --- a/tox.ini +++ b/tox.ini @@ -4,7 +4,7 @@ envlist = py{38,39,310,311,312},pypy [base] deps = pytest packaging - cython>=0.20,<0.30 + cython>=3.0 eventlet gevent twisted[tls] From c4aff64f61f02df9aeebe6396fc7776bcb29e8c7 Mon Sep 17 00:00:00 2001 From: Brad Schoening <5796692+bschoening@users.noreply.github.com> Date: Wed, 25 Sep 2024 16:12:31 -0400 Subject: [PATCH 1980/2005] Fixing spelling and minor whitespace issues in tests (#1225) --- tests/integration/__init__.py | 2 +- tests/integration/advanced/__init__.py | 4 ++-- .../advanced/graph/test_graph_query.py | 4 ++-- tests/integration/advanced/test_auth.py | 6 ++--- .../integration/advanced/test_cont_paging.py | 10 ++++----- tests/integration/advanced/test_geometry.py | 2 +- tests/integration/advanced/test_spark.py | 4 ++-- tests/integration/cloud/conftest.py | 3 ++- tests/integration/cloud/test_cloud_schema.py | 4 ++-- tests/integration/conftest.py | 3 ++- .../integration/cqlengine/test_connections.py | 4 ++-- tests/integration/cqlengine/test_ifexists.py | 13 +++++------ .../integration/cqlengine/test_ifnotexists.py | 1 - .../cqlengine/test_lwt_conditional.py | 1 - tests/integration/cqlengine/test_timestamp.py | 4 +--- tests/integration/cqlengine/test_ttl.py | 6 ++--- .../column_encryption/test_policies.py | 22 ++++++++++--------- tests/integration/standard/test_concurrent.py | 16 +++++++------- .../standard/test_custom_payload.py | 2 ++ .../standard/test_custom_protocol_handler.py | 8 +++---- .../standard/test_cython_protocol_handlers.py | 2 +- tests/integration/util.py | 4 ++-- tests/unit/test_concurrent.py | 8 +++---- tests/unit/test_policies.py | 2 +- tests/unit/test_protocol.py | 2 +- tests/unit/test_segment.py | 9 ++++---- tests/unit/test_types.py | 18 +++++++-------- tests/unit/test_util_types.py | 4 ++-- tests/util.py | 2 +- 29 files changed, 86 insertions(+), 84 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index e389742b74..c8c111f211 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -699,7 +699,7 @@ def drop_keyspace_shutdown_cluster(keyspace_name, session, cluster): try: execute_with_long_wait_retry(session, "DROP KEYSPACE {0}".format(keyspace_name)) except: - log.warning("Error encountered when droping keyspace {0}".format(keyspace_name)) + log.warning("Error encountered when dropping keyspace {0}".format(keyspace_name)) ex_type, ex, tb = sys.exc_info() log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb diff --git a/tests/integration/advanced/__init__.py b/tests/integration/advanced/__init__.py index dffaccd190..1238d2ed72 100644 --- a/tests/integration/advanced/__init__.py +++ b/tests/integration/advanced/__init__.py @@ -88,7 +88,7 @@ def use_singledc_wth_graph_and_spark(start=True): def use_cluster_with_graph(num_nodes): """ - This is a work around to account for the fact that spark nodes will conflict over master assignment + This is a workaround to account for the fact that spark nodes will conflict over master assignment when started all at once. """ if USE_CASS_EXTERNAL: @@ -125,7 +125,7 @@ def use_cluster_with_graph(num_nodes): class BasicGeometricUnitTestCase(BasicKeyspaceUnitTestCase): """ - This base test class is used by all the geomteric tests. It contains class level teardown and setup + This base test class is used by all the geometric tests. It contains class level teardown and setup methods. It also contains the test fixtures used by those tests """ diff --git a/tests/integration/advanced/graph/test_graph_query.py b/tests/integration/advanced/graph/test_graph_query.py index 0c889938d8..d1b3ef62b1 100644 --- a/tests/integration/advanced/graph/test_graph_query.py +++ b/tests/integration/advanced/graph/test_graph_query.py @@ -244,7 +244,7 @@ def _test_range_query(self, schema, graphson): """ Test to validate range queries are handled correctly. - Creates a very large line graph script and executes it. Then proceeds to to a range + Creates a very large line graph script and executes it. Then proceeds to a range limited query against it, and ensure that the results are formatted correctly and that the result set is properly sized. @@ -331,7 +331,7 @@ def _test_large_create_script(self, schema, graphson): @test_category dse graph """ self.execute_graph(schema.fixtures.line(150), graphson) - self.execute_graph(schema.fixtures.line(300), graphson) # This should passed since the queries are splitted + self.execute_graph(schema.fixtures.line(300), graphson) # This should pass since the queries are split self.assertRaises(SyntaxException, self.execute_graph, schema.fixtures.line(300, single_script=True), graphson) # this is not and too big def _test_large_result_set(self, schema, graphson): diff --git a/tests/integration/advanced/test_auth.py b/tests/integration/advanced/test_auth.py index 3443419ab4..cf8b66df55 100644 --- a/tests/integration/advanced/test_auth.py +++ b/tests/integration/advanced/test_auth.py @@ -67,7 +67,7 @@ class BasicDseAuthTest(unittest.TestCase): @classmethod def setUpClass(self): """ - This will setup the necessary infrastructure to run our authentication tests. It requres the ADS_HOME environment variable + This will setup the necessary infrastructure to run our authentication tests. It requires the ADS_HOME environment variable and our custom embedded apache directory server jar in order to run. """ if not DSE_VERSION: @@ -86,7 +86,7 @@ def setUpClass(self): self.charlie_keytab = os.path.join(self.conf_file_dir, "charlie.keytab") actual_jar = os.path.join(ADS_HOME, "embedded-ads.jar") - # Create configuration directories if they don't already exists + # Create configuration directories if they don't already exist if not os.path.exists(self.conf_file_dir): os.makedirs(self.conf_file_dir) if not os.path.exists(actual_jar): @@ -175,7 +175,7 @@ def test_should_not_authenticate_with_bad_user_ticket(self): auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"]) self.assertRaises(NoHostAvailable, self.connect_and_query, auth_provider) - def test_should_not_athenticate_without_ticket(self): + def test_should_not_authenticate_without_ticket(self): """ This tests will attempt to authenticate with a user that is valid but has no ticket @since 3.20 diff --git a/tests/integration/advanced/test_cont_paging.py b/tests/integration/advanced/test_cont_paging.py index 99de82647d..191d6f1faf 100644 --- a/tests/integration/advanced/test_cont_paging.py +++ b/tests/integration/advanced/test_cont_paging.py @@ -70,12 +70,12 @@ def create_cluster(cls): cls.select_all_statement = "SELECT * FROM {0}.{0}".format(cls.ks_name) - def test_continous_paging(self): + def test_continuous_paging(self): """ Test to ensure that various continuous paging schemes return the full set of results. @since 3.20 @jira_ticket PYTHON-615 - @expected_result various continous paging options should fetch all the results + @expected_result various continuous paging options should fetch all the results @test_category queries """ @@ -131,9 +131,9 @@ def test_paging_cancel(self): self.session_with_profiles.default_fetch_size = 1 # This combination should fetch one result a second. We should see a very few results results = self.session_with_profiles.execute_async(self.select_all_statement, execution_profile= "SLOW") - result_set =results.result() + result_set = results.result() result_set.cancel_continuous_paging() - result_lst =list(result_set) + result_lst = list(result_set) self.assertLess(len(result_lst), 2, "Cancel should have aborted fetch immediately") def test_con_paging_verify_writes(self): @@ -183,7 +183,7 @@ def test_con_paging_verify_writes(self): def test_can_get_results_when_no_more_pages(self): """ - Test to validate that the resutls can be fetched when + Test to validate that the results can be fetched when has_more_pages is False @since 3.20 @jira_ticket PYTHON-946 diff --git a/tests/integration/advanced/test_geometry.py b/tests/integration/advanced/test_geometry.py index 6a6737bd50..f40e27bf48 100644 --- a/tests/integration/advanced/test_geometry.py +++ b/tests/integration/advanced/test_geometry.py @@ -35,7 +35,7 @@ class AbstractGeometricTypeTest(): def test_should_insert_simple(self): """ - This tests will attempt to insert a point, polygon, or line, using simple inline formating. + This tests will attempt to insert a point, polygon, or line, using simple inline formatting. @since 3.20 @jira_ticket PYTHON-456 @test_category dse geometric diff --git a/tests/integration/advanced/test_spark.py b/tests/integration/advanced/test_spark.py index a307913abb..ca37dc6b53 100644 --- a/tests/integration/advanced/test_spark.py +++ b/tests/integration/advanced/test_spark.py @@ -30,7 +30,7 @@ def setup_module(): @requiredse class SparkLBTests(BasicGraphUnitTestCase): """ - Test to validate that analtics query can run in a multi-node enviroment. Also check to to ensure + Test to validate that analytics query can run in a multi-node environment. Also check to ensure that the master spark node is correctly targeted when OLAP queries are run @since 3.20 @@ -42,7 +42,7 @@ def test_spark_analytic_query(self): self.session.execute_graph(ClassicGraphFixtures.classic()) spark_master = find_spark_master(self.session) - # Run multipltle times to ensure we don't round robin + # Run multiple times to ensure we don't round-robin for i in range(3): to_run = SimpleGraphStatement("g.V().count()") rs = self.session.execute_graph(to_run, execution_profile=EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT) diff --git a/tests/integration/cloud/conftest.py b/tests/integration/cloud/conftest.py index fb08b04194..6bfda32534 100644 --- a/tests/integration/cloud/conftest.py +++ b/tests/integration/cloud/conftest.py @@ -2,8 +2,9 @@ from tests.integration.cloud import setup_package, teardown_package + @pytest.fixture(scope='session', autouse=True) def setup_and_teardown_packages(): setup_package() yield - teardown_package() \ No newline at end of file + teardown_package() diff --git a/tests/integration/cloud/test_cloud_schema.py b/tests/integration/cloud/test_cloud_schema.py index 1d52e8e428..8dff49508a 100644 --- a/tests/integration/cloud/test_cloud_schema.py +++ b/tests/integration/cloud/test_cloud_schema.py @@ -110,9 +110,9 @@ def test_for_schema_disagreement_attribute(self): self.check_and_wait_for_agreement(session, rs, True) cluster.shutdown() - def check_and_wait_for_agreement(self, session, rs, exepected): + def check_and_wait_for_agreement(self, session, rs, expected): # Wait for RESULT_KIND_SCHEMA_CHANGE message to arrive time.sleep(1) - self.assertEqual(rs.response_future.is_schema_agreed, exepected) + self.assertEqual(rs.response_future.is_schema_agreed, expected) if not rs.response_future.is_schema_agreed: session.cluster.control_connection.wait_for_schema_agreement(wait_time=1000) \ No newline at end of file diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index b2eb9a02fc..e17ac302c8 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -2,8 +2,9 @@ from tests.integration import teardown_package + @pytest.fixture(scope='session', autouse=True) def setup_and_teardown_packages(): print('setup') yield - teardown_package() \ No newline at end of file + teardown_package() diff --git a/tests/integration/cqlengine/test_connections.py b/tests/integration/cqlengine/test_connections.py index 15adff3380..28a0323f24 100644 --- a/tests/integration/cqlengine/test_connections.py +++ b/tests/integration/cqlengine/test_connections.py @@ -79,7 +79,7 @@ def test_context_connection_priority(self): """ Tests to ensure the proper connection priority is honored. - Explicit connection should have higest priority, + Explicit connection should have the highest priority, Followed by context query connection Default connection should be honored last. @@ -458,7 +458,7 @@ def test_keyspace(self): @since 3.7 @jira_ticket PYTHON-613 - @expected_result Keyspace segration is honored + @expected_result Keyspace segregation is honored @test_category object_mapper """ diff --git a/tests/integration/cqlengine/test_ifexists.py b/tests/integration/cqlengine/test_ifexists.py index 1674bbd266..68efe077ba 100644 --- a/tests/integration/cqlengine/test_ifexists.py +++ b/tests/integration/cqlengine/test_ifexists.py @@ -100,7 +100,7 @@ def test_update_if_exists(self): m = TestIfExistsModel.get(id=id) self.assertEqual(m.text, 'changed_again') - m = TestIfExistsModel(id=uuid4(), count=44) # do not exists + m = TestIfExistsModel(id=uuid4(), count=44) # do not exist with self.assertRaises(LWTException) as assertion: m.if_exists().update() @@ -155,7 +155,7 @@ def test_batch_update_if_exists_success(self): @unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_batch_mixed_update_if_exists_success(self): """ - Tests that batch update with with one bad query will still fail with LWTException + Tests that batch update with one bad query will still fail with LWTException @since 3.1 @jira_ticket PYTHON-432 @@ -177,7 +177,7 @@ def test_batch_mixed_update_if_exists_success(self): @unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_delete_if_exists(self): """ - Tests that delete with if_exists work, and throw proper LWT exception when they are are not applied + Tests that delete with if_exists work, and throws proper LWT exception when they are not applied @since 3.1 @jira_ticket PYTHON-432 @@ -193,7 +193,7 @@ def test_delete_if_exists(self): q = TestIfExistsModel.objects(id=id) self.assertEqual(len(q), 0) - m = TestIfExistsModel(id=uuid4(), count=44) # do not exists + m = TestIfExistsModel(id=uuid4(), count=44) # do not exist with self.assertRaises(LWTException) as assertion: m.if_exists().delete() @@ -212,7 +212,7 @@ def test_delete_if_exists(self): @unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_batch_delete_if_exists_success(self): """ - Tests that batch deletes with if_exists work, and throw proper LWTException when they are are not applied + Tests that batch deletes with if_exists work, and throws proper LWTException when they are not applied @since 3.1 @jira_ticket PYTHON-432 @@ -243,7 +243,7 @@ def test_batch_delete_if_exists_success(self): @unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_batch_delete_mixed(self): """ - Tests that batch deletes with multiple queries and throw proper LWTException when they are are not all applicable + Tests that batch deletes with multiple queries and throws proper LWTException when they are not all applicable @since 3.1 @jira_ticket PYTHON-432 @@ -309,4 +309,3 @@ def test_instance_raise_exception(self): id = uuid4() with self.assertRaises(IfExistsWithCounterColumn): TestIfExistsWithCounterModel.if_exists() - diff --git a/tests/integration/cqlengine/test_ifnotexists.py b/tests/integration/cqlengine/test_ifnotexists.py index 5b24070690..b2f2fd98c0 100644 --- a/tests/integration/cqlengine/test_ifnotexists.py +++ b/tests/integration/cqlengine/test_ifnotexists.py @@ -198,4 +198,3 @@ def test_instance_raise_exception(self): id = uuid4() with self.assertRaises(IfNotExistsWithCounterColumn): TestIfNotExistsWithCounterModel.if_not_exists() - diff --git a/tests/integration/cqlengine/test_lwt_conditional.py b/tests/integration/cqlengine/test_lwt_conditional.py index 6f2e13361f..06fbf465da 100644 --- a/tests/integration/cqlengine/test_lwt_conditional.py +++ b/tests/integration/cqlengine/test_lwt_conditional.py @@ -158,7 +158,6 @@ def test_batch_update_conditional_several_rows(self): second_row.delete() b.execute() - def test_delete_conditional(self): # DML path t = TestConditionalModel.if_not_exists().create(text='something', count=5) diff --git a/tests/integration/cqlengine/test_timestamp.py b/tests/integration/cqlengine/test_timestamp.py index bfed7f9808..6ddce91099 100644 --- a/tests/integration/cqlengine/test_timestamp.py +++ b/tests/integration/cqlengine/test_timestamp.py @@ -199,9 +199,7 @@ def test_delete_in_the_past(self): TestTimestampModel.get(id=uid).should.be.ok - # delete the in past, should not affect the object created above + # delete in the past, should not affect the object created above TestTimestampModel.objects(id=uid).timestamp(timedelta(seconds=-60)).delete() TestTimestampModel.get(id=uid) - - diff --git a/tests/integration/cqlengine/test_ttl.py b/tests/integration/cqlengine/test_ttl.py index bd0c7f60aa..47359a03e3 100644 --- a/tests/integration/cqlengine/test_ttl.py +++ b/tests/integration/cqlengine/test_ttl.py @@ -179,7 +179,7 @@ def test_default_ttl_not_set(self): self.assertEqual(default_ttl, 0) with mock.patch.object(session, 'execute') as m: - TestTTLModel.objects(id=tid).update(text="aligators") + TestTTLModel.objects(id=tid).update(text="alligators") query = m.call_args[0][0].query_string self.assertNotIn("USING TTL", query) @@ -197,7 +197,7 @@ def test_default_ttl_set(self): self.assertEqual(default_ttl, 20) with mock.patch.object(session, 'execute') as m: - TestTTLModel.objects(id=tid).update(text="aligators expired") + TestTTLModel.objects(id=tid).update(text="alligators expired") # Should not be set either query = m.call_args[0][0].query_string @@ -228,7 +228,7 @@ def test_override_default_ttl(self): self.assertEqual(o._ttl, 3600) with mock.patch.object(session, 'execute') as m: - TestDefaultTTLModel.objects(id=tid).ttl(None).update(text="aligators expired") + TestDefaultTTLModel.objects(id=tid).ttl(None).update(text="alligators expired") query = m.call_args[0][0].query_string self.assertNotIn("USING TTL", query) diff --git a/tests/integration/standard/column_encryption/test_policies.py b/tests/integration/standard/column_encryption/test_policies.py index 84c4e76871..325c19cf3a 100644 --- a/tests/integration/standard/column_encryption/test_policies.py +++ b/tests/integration/standard/column_encryption/test_policies.py @@ -22,9 +22,11 @@ from cassandra.column_encryption.policies import AES256ColumnEncryptionPolicy, \ AES256_KEY_SIZE_BYTES, AES256_BLOCK_SIZE_BYTES + def setup_module(): use_singledc() + class ColumnEncryptionPolicyTest(unittest.TestCase): def _recreate_keyspace(self, session): @@ -57,13 +59,13 @@ def test_end_to_end_prepared(self): # A straight select from the database will now return the decrypted bits. We select both encrypted and unencrypted # values here to confirm that we don't interfere with regular processing of unencrypted vals. - (encrypted,unencrypted) = session.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() + (encrypted, unencrypted) = session.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() self.assertEqual(expected, encrypted) self.assertEqual(expected, unencrypted) # Confirm the same behaviour from a subsequent prepared statement as well prepared = session.prepare("select encrypted, unencrypted from foo.bar where unencrypted = ? allow filtering") - (encrypted,unencrypted) = session.execute(prepared, [expected]).one() + (encrypted, unencrypted) = session.execute(prepared, [expected]).one() self.assertEqual(expected, encrypted) self.assertEqual(expected, unencrypted) @@ -78,20 +80,20 @@ def test_end_to_end_simple(self): self._recreate_keyspace(session) # Use encode_and_encrypt helper function to populate date - for i in range(1,100): + for i in range(1, 100): self.assertIsNotNone(i) encrypted = cl_policy.encode_and_encrypt(col_desc, i) session.execute("insert into foo.bar (encrypted, unencrypted) values (%s,%s)", (encrypted, i)) # A straight select from the database will now return the decrypted bits. We select both encrypted and unencrypted # values here to confirm that we don't interfere with regular processing of unencrypted vals. - (encrypted,unencrypted) = session.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() + (encrypted, unencrypted) = session.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() self.assertEqual(expected, encrypted) self.assertEqual(expected, unencrypted) # Confirm the same behaviour from a subsequent prepared statement as well prepared = session.prepare("select encrypted, unencrypted from foo.bar where unencrypted = ? allow filtering") - (encrypted,unencrypted) = session.execute(prepared, [expected]).one() + (encrypted, unencrypted) = session.execute(prepared, [expected]).one() self.assertEqual(expected, encrypted) self.assertEqual(expected, unencrypted) @@ -117,7 +119,7 @@ def test_end_to_end_different_cle_contexts_different_ivs(self): self._recreate_keyspace(session1) # Use encode_and_encrypt helper function to populate date - for i in range(1,100): + for i in range(1, 100): self.assertIsNotNone(i) encrypted = cl_policy1.encode_and_encrypt(col_desc1, i) session1.execute("insert into foo.bar (encrypted, unencrypted) values (%s,%s)", (encrypted, i)) @@ -134,7 +136,7 @@ def test_end_to_end_different_cle_contexts_different_ivs(self): (_, cl_policy2) = self._create_policy(key, iv=iv2) cluster2 = TestCluster(column_encryption_policy=cl_policy2) session2 = cluster2.connect() - (encrypted,unencrypted) = session2.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() + (encrypted, unencrypted) = session2.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() self.assertEqual(expected, encrypted) self.assertEqual(expected, unencrypted) @@ -152,7 +154,7 @@ def test_end_to_end_different_cle_contexts_different_policies(self): self._recreate_keyspace(session) # Use encode_and_encrypt helper function to populate date - session.execute("insert into foo.bar (encrypted, unencrypted) values (%s,%s)",(cl_policy.encode_and_encrypt(col_desc, expected), expected)) + session.execute("insert into foo.bar (encrypted, unencrypted) values (%s,%s)", (cl_policy.encode_and_encrypt(col_desc, expected), expected)) # We now open a new session _without_ the CLE policy specified. We should _not_ be able to read decrypted bits from this session. cluster2 = TestCluster() @@ -160,11 +162,11 @@ def test_end_to_end_different_cle_contexts_different_policies(self): # A straight select from the database will now return the decrypted bits. We select both encrypted and unencrypted # values here to confirm that we don't interfere with regular processing of unencrypted vals. - (encrypted,unencrypted) = session2.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() + (encrypted, unencrypted) = session2.execute("select encrypted, unencrypted from foo.bar where unencrypted = %s allow filtering", (expected,)).one() self.assertEqual(cl_policy.encode_and_encrypt(col_desc, expected), encrypted) self.assertEqual(expected, unencrypted) # Confirm the same behaviour from a subsequent prepared statement as well prepared = session2.prepare("select encrypted, unencrypted from foo.bar where unencrypted = ? allow filtering") - (encrypted,unencrypted) = session2.execute(prepared, [expected]).one() + (encrypted, unencrypted) = session2.execute(prepared, [expected]).one() self.assertEqual(cl_policy.encode_and_encrypt(col_desc, expected), encrypted) diff --git a/tests/integration/standard/test_concurrent.py b/tests/integration/standard/test_concurrent.py index ba891b4bd0..c076d9f553 100644 --- a/tests/integration/standard/test_concurrent.py +++ b/tests/integration/standard/test_concurrent.py @@ -107,38 +107,38 @@ def execute_concurrent_base(self, test_fn, validate_fn, zip_args=True): test_fn(self.session, statement, parameters) validate_fn(num_statements, results) - def execute_concurrent_valiate_tuple(self, num_statements, results): + def execute_concurrent_validate_tuple(self, num_statements, results): self.assertEqual(num_statements, len(results)) self.assertEqual([(True, [(i,)]) for i in range(num_statements)], results) - def execute_concurrent_valiate_dict(self, num_statements, results): + def execute_concurrent_validate_dict(self, num_statements, results): self.assertEqual(num_statements, len(results)) self.assertEqual([(True, [{"v":i}]) for i in range(num_statements)], results) def test_execute_concurrent(self): self.execute_concurrent_base(self.execute_concurrent_helper, \ - self.execute_concurrent_valiate_tuple) + self.execute_concurrent_validate_tuple) def test_execute_concurrent_with_args(self): self.execute_concurrent_base(self.execute_concurrent_args_helper, \ - self.execute_concurrent_valiate_tuple, \ + self.execute_concurrent_validate_tuple, \ zip_args=False) def test_execute_concurrent_with_execution_profile(self): def run_fn(*args, **kwargs): return self.execute_concurrent_helper(*args, execution_profile=EXEC_PROFILE_DICT, **kwargs) - self.execute_concurrent_base(run_fn, self.execute_concurrent_valiate_dict) + self.execute_concurrent_base(run_fn, self.execute_concurrent_validate_dict) def test_execute_concurrent_with_args_and_execution_profile(self): def run_fn(*args, **kwargs): return self.execute_concurrent_args_helper(*args, execution_profile=EXEC_PROFILE_DICT, **kwargs) - self.execute_concurrent_base(run_fn, self.execute_concurrent_valiate_dict, zip_args=False) + self.execute_concurrent_base(run_fn, self.execute_concurrent_validate_dict, zip_args=False) def test_execute_concurrent_with_args_generator(self): """ Test to validate that generator based results are surfaced correctly - Repeatedly inserts data into a a table and attempts to query it. It then validates that the + Repeatedly inserts data into a table and attempts to query it. It then validates that the results are returned in the order expected @since 2.7.0 @@ -212,7 +212,7 @@ def test_execute_concurrent_paged_result_generator(self): """ Test to validate that generator based results are surfaced correctly when paging is used - Inserts data into a a table and attempts to query it. It then validates that the + Inserts data into a table and attempts to query it. It then validates that the results are returned as expected (no order specified) @since 2.7.0 diff --git a/tests/integration/standard/test_custom_payload.py b/tests/integration/standard/test_custom_payload.py index f33ab4f04f..a97efeaa68 100644 --- a/tests/integration/standard/test_custom_payload.py +++ b/tests/integration/standard/test_custom_payload.py @@ -25,6 +25,8 @@ def setup_module(): #These test rely on the custom payload being returned but by default C* #ignores all the payloads. + + @local class CustomPayloadTests(unittest.TestCase): diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index 16d43bbd65..68ef240795 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -73,7 +73,7 @@ def test_custom_raw_uuid_row_results(self): uuid_type = result[0][0] self.assertEqual(type(uuid_type), uuid.UUID) - # use our custom protocol handlder + # use our custom protocol handler session.client_protocol_handler = CustomTestRawRowType result_set = session.execute("SELECT schema_version FROM system.local") raw_value = result_set[0][0] @@ -271,7 +271,7 @@ def recv_results_rows(self, f, protocol_version, user_type_map, result_metadata, class CustomTestRawRowType(ProtocolHandler): """ - This is the a custom protocol handler that will substitute the the + This is a custom protocol handler that will substitute the customResultMesageRowRaw Result message for our own implementation """ my_opcodes = ProtocolHandler.message_types_by_opcode.copy() @@ -281,7 +281,7 @@ class CustomTestRawRowType(ProtocolHandler): class CustomResultMessageTracked(ResultMessage): """ - This is a custom Result Message that is use to track what primitive types + This is a custom Result Message that is used to track what primitive types have been processed when it receives results """ my_type_codes = ResultMessage.type_codes.copy() @@ -305,7 +305,7 @@ def recv_results_rows(self, f, protocol_version, user_type_map, result_metadata, class CustomProtocolHandlerResultMessageTracked(ProtocolHandler): """ - This is the a custom protocol handler that will substitute the the + This is a custom protocol handler that will substitute the CustomTestRawRowTypeTracked Result message for our own implementation """ my_opcodes = ProtocolHandler.message_types_by_opcode.copy() diff --git a/tests/integration/standard/test_cython_protocol_handlers.py b/tests/integration/standard/test_cython_protocol_handlers.py index 9cb5914f16..83d39407c4 100644 --- a/tests/integration/standard/test_cython_protocol_handlers.py +++ b/tests/integration/standard/test_cython_protocol_handlers.py @@ -234,7 +234,7 @@ def test_null_types(self): begin_unset = max(s.execute('select primkey from %s' % (table,))[0]['primkey']) + 1 keys_null = range(begin_unset, begin_unset + 10) - # scatter some emptry rows in here + # scatter some empty rows in here insert = "insert into %s (primkey) values (%%s)" % (table,) execute_concurrent_with_args(s, insert, ((k,) for k in keys_null)) diff --git a/tests/integration/util.py b/tests/integration/util.py index bcc4cb829b..64c101d9da 100644 --- a/tests/integration/util.py +++ b/tests/integration/util.py @@ -41,8 +41,8 @@ def assert_quiescent_pool_state(test_case, cluster, wait=None): test_case.assertFalse(state['shutdown']) test_case.assertGreater(state['open_count'], 0) no_in_flight = all((i == 0 for i in state['in_flights'])) - orphans_and_inflights = zip(state['orphan_requests'],state['in_flights']) - all_orphaned = all((len(orphans) == inflight for (orphans,inflight) in orphans_and_inflights)) + orphans_and_inflights = zip(state['orphan_requests'], state['in_flights']) + all_orphaned = all((len(orphans) == inflight for (orphans, inflight) in orphans_and_inflights)) test_case.assertTrue(no_in_flight or all_orphaned) for holder in cluster.get_connection_holders(): diff --git a/tests/unit/test_concurrent.py b/tests/unit/test_concurrent.py index bdfd08126e..db641b66a7 100644 --- a/tests/unit/test_concurrent.py +++ b/tests/unit/test_concurrent.py @@ -40,7 +40,7 @@ class MockResponseResponseFuture(): _col_names = None _col_types = None - # a list pending callbacks, these will be prioritized in reverse or normal orderd + # a list pending callbacks, these will be prioritized in reverse or normal order pending_callbacks = PriorityQueue() def __init__(self, reverse): @@ -179,7 +179,7 @@ def insert_and_validate_list_results(self, reverse, slowdown): This utility method will execute submit various statements for execution using the ConcurrentExecutorListResults, then invoke a separate thread to execute the callback associated with the futures registered for those statements. The parameters will toggle various timing, and ordering changes. - Finally it will validate that the results were returned in the order they were submitted + Finally, it will validate that the results were returned in the order they were submitted :param reverse: Execute the callbacks in the opposite order that they were submitted :param slowdown: Cause intermittent queries to perform slowly """ @@ -203,7 +203,7 @@ def insert_and_validate_list_generator(self, reverse, slowdown): This utility method will execute submit various statements for execution using the ConcurrentExecutorGenResults, then invoke a separate thread to execute the callback associated with the futures registered for those statements. The parameters will toggle various timing, and ordering changes. - Finally it will validate that the results were returned in the order they were submitted + Finally, it will validate that the results were returned in the order they were submitted :param reverse: Execute the callbacks in the opposite order that they were submitted :param slowdown: Cause intermittent queries to perform slowly """ @@ -232,7 +232,7 @@ def validate_result_ordering(self, results): self.assertTrue(success) current_time_added = list(result)[0] - #Windows clock granularity makes this equal most of the times + # Windows clock granularity makes this equal most of the time if "Windows" in platform.system(): self.assertLessEqual(last_time_added, current_time_added) else: diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index e1bd17a00c..824b34c956 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -202,7 +202,7 @@ def test_with_remotes(self): local_hosts = set(h for h in hosts if h.datacenter == "dc1") remote_hosts = set(h for h in hosts if h.datacenter != "dc1") - # allow all of the remote hosts to be used + # allow all the remote hosts to be used policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=2) policy.populate(Mock(), hosts) qplan = list(policy.make_query_plan()) diff --git a/tests/unit/test_protocol.py b/tests/unit/test_protocol.py index 907f62f2bb..03910bb08a 100644 --- a/tests/unit/test_protocol.py +++ b/tests/unit/test_protocol.py @@ -95,7 +95,7 @@ def _check_calls(self, io, expected): def test_continuous_paging(self): """ - Test to check continuous paging throws an Exception if it's not supported and the correct valuesa + Test to check continuous paging throws an Exception if it's not supported and the correct values are written to the buffer if the option is enabled. @since DSE 2.0b3 GRAPH 1.0b1 diff --git a/tests/unit/test_segment.py b/tests/unit/test_segment.py index 0d0f146c16..a494e64414 100644 --- a/tests/unit/test_segment.py +++ b/tests/unit/test_segment.py @@ -24,6 +24,7 @@ def to_bits(b): return '{:08b}'.format(b) + class SegmentCodecTest(unittest.TestCase): small_msg = b'b' * 50 @@ -38,12 +39,12 @@ def _header_to_bits(data): data = data[:5] bits = ''.join([to_bits(b) for b in reversed(data)]) # return the compressed payload length, the uncompressed payload length, - # the self contained flag and the padding as bits + # the self-contained flag and the padding as bits return bits[23:40] + bits[6:23] + bits[5:6] + bits[:5] else: # uncompressed data = data[:3] bits = ''.join([to_bits(b) for b in reversed(data)]) - # return the payload length, the self contained flag and + # return the payload length, the self-contained flag and # the padding as bits return bits[7:24] + bits[6:7] + bits[:6] @@ -88,7 +89,7 @@ def test_encode_uncompressed_header_not_self_contained_msg(self): self.assertEqual( self._header_to_bits(buffer.getvalue()), ("11111111111111111" - "0" # not self contained + "0" # not self-contained "000000")) @unittest.skipUnless(segment_codec_lz4, ' lz4 not installed') @@ -112,7 +113,7 @@ def test_encode_compressed_header_not_self_contained_msg(self): self._header_to_bits(buffer.getvalue()), ("{:017b}".format(compressed_length) + "11111111111111111" - "0" # not self contained + "0" # not self-contained "00000")) def test_decode_uncompressed_header(self): diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index aba11d4ced..8554d5e356 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -382,12 +382,12 @@ def test_round_trip_basic_types_without_fixed_serialized_size(self): # UTF8 text self._round_trip_test(["abc", "def", "ghi", "jkl"], \ "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.UTF8Type, 4)") - # Time is something of a weird one. By rights it should be a fixed size type but C* code marks it as variable + # Time is something of a weird one. By rights, it should be a fixed size type but C* code marks it as variable # size. We're forced to follow the C* code base (since that's who'll be providing the data we're parsing) so # we match what they're doing. self._round_trip_test([datetime.time(1,1,1), datetime.time(2,2,2), datetime.time(3,3,3)], \ "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.TimeType, 3)") - # Duration (containts varints) + # Duration (contains varints) self._round_trip_test([util.Duration(1,1,1), util.Duration(2,2,2), util.Duration(3,3,3)], \ "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.DurationType, 3)") @@ -456,7 +456,7 @@ def test_round_trip_vector_of_vectors(self): "org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.VectorType \ (org.apache.cassandra.db.marshal.AsciiType,2), 4)") - # parse_casstype_args() is tested above... we're explicitly concerned about cql_parapmeterized_type() output here + # parse_casstype_args() is tested above... we're explicitly concerned about cql_parameterized_type() output here def test_cql_parameterized_type(self): # Base vector functionality ctype = parse_casstype_args("org.apache.cassandra.db.marshal.VectorType(org.apache.cassandra.db.marshal.FloatType, 4)") @@ -914,7 +914,7 @@ def test_deserialize_date_range_day(self): 999, lambda original_value, i: original_value + i * 900 * 50 * 60 * 24) - @unittest.skip("This is currently failig, see PYTHON-912") + @unittest.skip("This is currently failing, see PYTHON-912") def test_deserialize_date_range_month(self): """ Test rounding from DateRange for months @@ -931,7 +931,7 @@ def get_upper_bound(seconds): but with the microseconds set to 999999, seconds to 59, minutes to 59, hours to 23 and days 28, 29, 30 or 31 depending on the month. The way to do this is to add one month and leave the date at YEAR-MONTH-01 00:00:00 000000. - Then substract one millisecond. + Then subtract one millisecond. """ dt = datetime.datetime.fromtimestamp(seconds / 1000.0, tz=utc_timezone) dt = dt + datetime.timedelta(days=32) @@ -958,7 +958,7 @@ def get_upper_bound(seconds): but with the microseconds set to 999999, seconds to 59, minutes to 59, hours to 23 days 28, 29, 30 or 31 depending on the month and months to 12. The way to do this is to add one year and leave the date at YEAR-01-01 00:00:00 000000. - Then substract one millisecond. + Then subtract one millisecond. """ dt = datetime.datetime.fromtimestamp(seconds / 1000.0, tz=utc_timezone) dt = dt + datetime.timedelta(days=370) @@ -980,14 +980,14 @@ def _deserialize_date_range(self, truncate_kwargs, precision, lower_value upper_value which are given as a value that represents seconds since the epoch. We want to make sure the lower_value is correctly rounded down and the upper value is correctly rounded up. In the case of rounding down we verify that the rounded down value - has the appropriate fields set to the minimum they could possible have. That is + has the appropriate fields set to the minimum they could possibly have. That is 1 for months, 1 for days, 0 for hours, 0 for minutes, 0 for seconds, 0 for microseconds. We use the generic function truncate_date which depends on truncate_kwargs for this In the case of rounding up we verify that the rounded up value has the appropriate fields set - to the maximum they could possible have. This is calculated by round_up_truncated_upper_value + to the maximum they could possibly have. This is calculated by round_up_truncated_upper_value which input is the truncated value from before. It is passed as an argument as the way - of calculating this is is different for every precision. + of calculating this is different for every precision. :param truncate_kwargs: determine what values to truncate in truncate_date :param precision: :class:`~util.DateRangePrecision` diff --git a/tests/unit/test_util_types.py b/tests/unit/test_util_types.py index 5d6058b394..7afec29372 100644 --- a/tests/unit/test_util_types.py +++ b/tests/unit/test_util_types.py @@ -51,7 +51,7 @@ def test_limits(self): max_builtin = Date(datetime.date(9999, 12, 31)) self.assertEqual(Date(min_builtin.days_from_epoch), min_builtin) self.assertEqual(Date(max_builtin.days_from_epoch), max_builtin) - # just proving we can construct with on offset outside buildin range + # just proving we can construct with on offset outside builtin range self.assertEqual(Date(min_builtin.days_from_epoch - 1).days_from_epoch, min_builtin.days_from_epoch - 1) self.assertEqual(Date(max_builtin.days_from_epoch + 1).days_from_epoch, @@ -191,7 +191,7 @@ def test_equality(self): second = Duration(1000, 10000, 2345345) self.assertEqual(first, second) - first = Duration(12, 0 , 100) + first = Duration(12, 0, 100) second = Duration(nanoseconds=100, months=12) self.assertEqual(first, second) diff --git a/tests/util.py b/tests/util.py index 5c7ac2416f..c28a94b1c7 100644 --- a/tests/util.py +++ b/tests/util.py @@ -40,7 +40,7 @@ def wait_until_not_raised(condition, delay, max_attempts): doesn't raise an exception and the amount of attempts < maxAttempts. :param condition: a function :param delay: the delay in second - :param max_attempts: the maximum number of attemps. So the timeout + :param max_attempts: the maximum number of attempts. So the timeout of this function will be delay*max_attempts """ def wrapped_condition(): From d15412f654c00de8cb0fde0968b204fa5765f812 Mon Sep 17 00:00:00 2001 From: Lukasz Antoniak Date: Tue, 15 Oct 2024 17:57:12 +0200 Subject: [PATCH 1981/2005] PYTHON-1402 Support running test suite with DSE 6.9.0 (#1233) --- tests/integration/__init__.py | 1 + tests/integration/cqlengine/statements/test_base_statement.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index c8c111f211..6c8af4376b 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -342,6 +342,7 @@ def _id_and_mark(f): greaterthanorequaldse50 = unittest.skipUnless(DSE_VERSION and DSE_VERSION >= Version('5.0'), "DSE 5.0 or greater required for this test") lessthandse51 = unittest.skipUnless(DSE_VERSION and DSE_VERSION < Version('5.1'), "DSE version less than 5.1 required") lessthandse60 = unittest.skipUnless(DSE_VERSION and DSE_VERSION < Version('6.0'), "DSE version less than 6.0 required") +lessthandse69 = unittest.skipUnless(DSE_VERSION and DSE_VERSION < Version('6.9'), "DSE version less than 6.9 required") pypy = unittest.skipUnless(platform.python_implementation() == "PyPy", "Test is skipped unless it's on PyPy") requiresmallclockgranularity = unittest.skipIf("Windows" in platform.system() or "asyncore" in EVENT_LOOP_MANAGER, diff --git a/tests/integration/cqlengine/statements/test_base_statement.py b/tests/integration/cqlengine/statements/test_base_statement.py index f245744352..ef5f3b2585 100644 --- a/tests/integration/cqlengine/statements/test_base_statement.py +++ b/tests/integration/cqlengine/statements/test_base_statement.py @@ -25,7 +25,7 @@ from tests.integration.cqlengine.base import BaseCassEngTestCase, TestQueryUpdateModel from tests.integration.cqlengine import DEFAULT_KEYSPACE -from tests.integration import greaterthanorequalcass3_10, TestCluster +from tests.integration import greaterthanorequalcass3_10, lessthandse69, TestCluster from cassandra.cqlengine.connection import execute @@ -101,6 +101,7 @@ def test_insert_statement_execute(self): self.assertEqual(TestQueryUpdateModel.objects.count(), 0) @greaterthanorequalcass3_10 + @lessthandse69 def test_like_operator(self): """ Test to verify the like operator works appropriately From 1b335d41886afdd0b665ac523e8dc2bec55b8ff2 Mon Sep 17 00:00:00 2001 From: Lukasz Antoniak Date: Wed, 16 Oct 2024 06:24:01 +0200 Subject: [PATCH 1982/2005] PYTHON-1402 Support running test suite with HCD 1.0.0 (#1234) * Run integration tests with HCD 1.0.0 * Apply review comments --- Jenkinsfile | 59 +++++++++++++++------ test-requirements.txt | 2 +- tests/integration/__init__.py | 30 ++++++++++- tests/integration/standard/test_metadata.py | 11 ++-- tests/integration/standard/test_query.py | 2 +- 5 files changed, 81 insertions(+), 23 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 0aec3b7ba6..3dcba78780 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -15,6 +15,7 @@ Matrix Types: Full: All server versions, python runtimes tested with and without Cython. Cassandra: All cassandra server versions. Dse: All dse server versions. + Hcd: All hcd server versions. Smoke: CI-friendly configurations. Currently-supported Python version + modern Cassandra/DSE instances. We also avoid cython since it's tested as part of the nightlies @@ -31,7 +32,8 @@ import com.datastax.jenkins.drivers.python.Slack slack = new Slack() DEFAULT_CASSANDRA = ['3.0', '3.11', '4.0', '4.1', '5.0-beta1'] -DEFAULT_DSE = ['dse-5.1.35', 'dse-6.8.30'] +DEFAULT_DSE = ['dse-5.1.35', 'dse-6.8.30', 'dse-6.9.0'] +DEFAULT_HCD = ['hcd-1.0.0'] DEFAULT_RUNTIME = ['3.8.16', '3.9.16', '3.10.11', '3.11.3', '3.12.0'] DEFAULT_CYTHON = ["True", "False"] matrices = [ @@ -51,7 +53,7 @@ matrices = [ "CYTHON": DEFAULT_CYTHON ], "SMOKE": [ - "SERVER": DEFAULT_CASSANDRA.takeRight(2) + DEFAULT_DSE.takeRight(1), + "SERVER": DEFAULT_CASSANDRA.takeRight(2) + DEFAULT_DSE.takeRight(2) + DEFAULT_HCD.takeRight(1), "RUNTIME": DEFAULT_RUNTIME.take(1) + DEFAULT_RUNTIME.takeRight(1), "CYTHON": ["True"] ] @@ -224,6 +226,19 @@ DSE_VERSION=${DSE_FIXED_VERSION} CCM_IS_DSE=true CCM_BRANCH=${DSE_FIXED_VERSION} DSE_BRANCH=${DSE_FIXED_VERSION} +ENVIRONMENT_EOF + ''' + } else if (env.CASSANDRA_VERSION.split('-')[0] == 'hcd') { + env.HCD_FIXED_VERSION = env.CASSANDRA_VERSION.split('-')[1] + sh label: 'Update environment for DataStax Enterprise', script: '''#!/bin/bash -le + cat >> ${HOME}/environment.txt << ENVIRONMENT_EOF +CCM_CASSANDRA_VERSION=${HCD_FIXED_VERSION} # maintain for backwards compatibility +CCM_VERSION=${HCD_FIXED_VERSION} +CCM_SERVER_TYPE=hcd +HCD_VERSION=${HCD_FIXED_VERSION} +CCM_IS_HCD=true +CCM_BRANCH=${HCD_FIXED_VERSION} +HCD_BRANCH=${HCD_FIXED_VERSION} ENVIRONMENT_EOF ''' } @@ -276,13 +291,13 @@ def executeStandardTests() { jabba use 1.8 SIMULACRON_JAR="${HOME}/simulacron.jar" - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=test_backpressure.py --junit-xml=simulacron_results.xml tests/integration/simulacron/ || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=test_backpressure.py --junit-xml=simulacron_results.xml tests/integration/simulacron/ || true # Run backpressure tests separately to avoid memory issue - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=test_backpressure.py --junit-xml=simulacron_backpressure_1_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_paused_connections || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=test_backpressure.py --junit-xml=simulacron_backpressure_2_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_queued_requests_timeout || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=test_backpressure.py --junit-xml=simulacron_backpressure_3_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_cluster_busy || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=test_backpressure.py --junit-xml=simulacron_backpressure_4_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_node_busy || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=test_backpressure.py --junit-xml=simulacron_backpressure_1_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_paused_connections || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=test_backpressure.py --junit-xml=simulacron_backpressure_2_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_queued_requests_timeout || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=test_backpressure.py --junit-xml=simulacron_backpressure_3_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_cluster_busy || true + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=test_backpressure.py --junit-xml=simulacron_backpressure_4_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_node_busy || true ''' sh label: 'Execute CQL engine integration tests', script: '''#!/bin/bash -lex @@ -294,7 +309,7 @@ def executeStandardTests() { . ${JABBA_SHELL} jabba use 1.8 - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=cqle_results.xml tests/integration/cqlengine/ || true + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=cqle_results.xml tests/integration/cqlengine/ || true ''' sh label: 'Execute Apache CassandraⓇ integration tests', script: '''#!/bin/bash -lex @@ -306,7 +321,7 @@ def executeStandardTests() { . ${JABBA_SHELL} jabba use 1.8 - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=standard_results.xml tests/integration/standard/ || true + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=standard_results.xml tests/integration/standard/ || true ''' if (env.CASSANDRA_VERSION.split('-')[0] == 'dse' && env.CASSANDRA_VERSION.split('-')[1] != '4.8') { @@ -323,7 +338,7 @@ def executeStandardTests() { . ${JABBA_SHELL} jabba use 1.8 - EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} DSE_VERSION=${DSE_VERSION} ADS_HOME="${HOME}/" VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=dse_results.xml tests/integration/advanced/ || true + EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} ADS_HOME="${HOME}/" VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=dse_results.xml tests/integration/advanced/ || true ''' } } @@ -350,7 +365,7 @@ def executeStandardTests() { . ${JABBA_SHELL} jabba use 1.8 - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=tests/integration/long/upgrade --junit-xml=long_results.xml tests/integration/long/ || true + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=tests/integration/long/upgrade --junit-xml=long_results.xml tests/integration/long/ || true ''' } } @@ -389,7 +404,7 @@ def executeEventLoopTests() { "tests/integration/simulacron/test_endpoint.py" "tests/integration/long/test_ssl.py" ) - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=standard_results.xml ${EVENT_LOOP_TESTS[@]} || true + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=standard_results.xml ${EVENT_LOOP_TESTS[@]} || true ''' } @@ -496,7 +511,7 @@ pipeline { ''') choice( name: 'MATRIX', - choices: ['DEFAULT', 'SMOKE', 'FULL', 'CASSANDRA', 'DSE'], + choices: ['DEFAULT', 'SMOKE', 'FULL', 'CASSANDRA', 'DSE', 'HCD'], description: '''

The matrix for the build.

@@ -525,6 +540,10 @@ pipeline { + + + +
DSE All dse server versions.
HCDAll hcd server versions.
''') choice( name: 'PYTHON_VERSION', @@ -532,7 +551,7 @@ pipeline { description: 'Python runtime version. Default to the build context.') choice( name: 'SERVER_VERSION', - choices: ['DEFAULT'] + DEFAULT_CASSANDRA + DEFAULT_DSE, + choices: ['DEFAULT'] + DEFAULT_CASSANDRA + DEFAULT_DSE + DEFAULT_HCD, description: '''Apache CassandraⓇ and DataStax Enterprise server version to use for adhoc BUILD-AND-EXECUTE-TESTS ONLY! @@ -567,7 +586,15 @@ pipeline { - + + + + + + + + +
dse-6.8.30DataStax Enterprise v6.8.x (CURRENTLY UNDER DEVELOPMENT)DataStax Enterprise v6.8.x
dse-6.9.0DataStax Enterprise v6.9.x (CURRENTLY UNDER DEVELOPMENT)
hcd-1.0.0DataStax HCD v1.0.x (CURRENTLY UNDER DEVELOPMENT)
''') choice( @@ -648,7 +675,7 @@ pipeline { parameterizedCron(branchPatternCron().matcher(env.BRANCH_NAME).matches() ? """ # Every weeknight (Monday - Friday) around 4:00 AM # These schedules will run with and without Cython enabled for Python 3.8.16 and 3.12.0 - H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.8.16 3.12.0;CI_SCHEDULE_SERVER_VERSION=3.11 4.0 5.0-beta1 dse-5.1.35 dse-6.8.30 + H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.8.16 3.12.0;CI_SCHEDULE_SERVER_VERSION=3.11 4.0 5.0-beta1 dse-5.1.35 dse-6.8.30 dse-6.9.0 hcd-1.0.0 """ : "") } diff --git a/test-requirements.txt b/test-requirements.txt index 6499e20841..1074f4f60d 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,7 +1,7 @@ -r requirements.txt scales pytest -ccm>=2.1.2 +ccm>=3.1.5 pytz sure pure-sasl diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 6c8af4376b..4185f52756 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -40,6 +40,7 @@ try: from ccmlib.dse_cluster import DseCluster + from ccmlib.hcd_cluster import HcdCluster from ccmlib.cluster import Cluster as CCMCluster from ccmlib.cluster_factory import ClusterFactory as CCMClusterFactory from ccmlib import common @@ -147,6 +148,10 @@ def _get_cass_version_from_dse(dse_version): return Version(cass_ver) +def _get_cass_version_from_hcd(hcd_version): + return Version("4.0.11") + + def _get_dse_version_from_cass(cass_version): if cass_version.startswith('2.1'): dse_ver = "4.8.15" @@ -166,13 +171,18 @@ def _get_dse_version_from_cass(cass_version): SIMULACRON_JAR = os.getenv('SIMULACRON_JAR', None) CLOUD_PROXY_PATH = os.getenv('CLOUD_PROXY_PATH', None) -# Supported Clusters: Cassandra, DDAC, DSE +# Supported Clusters: Cassandra, DDAC, DSE, HCD DSE_VERSION = None +HCD_VERSION = None if os.getenv('DSE_VERSION', None): # we are testing against DSE DSE_VERSION = Version(os.getenv('DSE_VERSION', None)) DSE_CRED = os.getenv('DSE_CREDS', None) CASSANDRA_VERSION = _get_cass_version_from_dse(DSE_VERSION.base_version) CCM_VERSION = DSE_VERSION.base_version +elif os.getenv('HCD_VERSION', None): # we are testing against HCD + HCD_VERSION = Version(os.getenv('HCD_VERSION', None)) + CASSANDRA_VERSION = _get_cass_version_from_hcd(HCD_VERSION.base_version) + CCM_VERSION = HCD_VERSION.base_version else: # we are testing against Cassandra or DDAC cv_string = os.getenv('CASSANDRA_VERSION', None) mcv_string = os.getenv('MAPPED_CASSANDRA_VERSION', None) @@ -196,6 +206,9 @@ def _get_dse_version_from_cass(cass_version): if DSE_CRED: log.info("Using DSE credentials file located at {0}".format(DSE_CRED)) CCM_KWARGS['dse_credentials_file'] = DSE_CRED +elif HCD_VERSION: + log.info('Using HCD version: %s', HCD_VERSION) + CCM_KWARGS['version'] = HCD_VERSION elif CASSANDRA_DIR: log.info("Using Cassandra dir: %s", CASSANDRA_DIR) CCM_KWARGS['install_dir'] = CASSANDRA_DIR @@ -464,8 +477,9 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, dse_options = dse_options or {} workloads = workloads or [] dse_cluster = True if DSE_VERSION else False + hcd_cluster = True if HCD_VERSION else False - if ccm_options is None and DSE_VERSION: + if ccm_options is None and (DSE_VERSION or HCD_VERSION): ccm_options = {"version": CCM_VERSION} elif ccm_options is None: ccm_options = CCM_KWARGS.copy() @@ -563,6 +577,18 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, CCM_CLUSTER.populate(nodes, ipformat=ipformat) CCM_CLUSTER.set_dse_configuration_options(dse_options) + elif hcd_cluster: + CCM_CLUSTER = HcdCluster(path, cluster_name, **ccm_options) + CCM_CLUSTER.set_configuration_options({'start_native_transport': True}) + CCM_CLUSTER.set_configuration_options({'batch_size_warn_threshold_in_kb': 5}) + CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True}) + CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True}) + CCM_CLUSTER.set_configuration_options({'enable_materialized_views': True}) + CCM_CLUSTER.set_configuration_options({'enable_sasi_indexes': True}) + CCM_CLUSTER.set_configuration_options({'enable_transient_replication': True}) + common.switch_cluster(path, cluster_name) + CCM_CLUSTER.set_configuration_options(configuration_options) + CCM_CLUSTER.populate(nodes, ipformat=ipformat, use_single_interface=use_single_interface) else: ccm_cluster_clz = CCMCluster if Version(cassandra_version) < Version('4.1') else Cassandra41CCMCluster CCM_CLUSTER = ccm_cluster_clz(path, cluster_name, **ccm_options) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 30db02fbd8..221c4be702 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -38,7 +38,7 @@ get_supported_protocol_versions, greaterthancass20, greaterthancass21, assert_startswith, greaterthanorequalcass40, greaterthanorequaldse67, lessthancass40, - TestCluster, DSE_VERSION) + TestCluster, DSE_VERSION, HCD_VERSION) log = logging.getLogger(__name__) @@ -2078,10 +2078,15 @@ def test_materialized_view_metadata_alter(self): @test_category metadata """ - self.assertIn("SizeTieredCompactionStrategy", self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views["mv1"].options["compaction"]["class"]) + compaction = self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views["mv1"].options["compaction"]["class"] + if HCD_VERSION: + self.assertIn("UnifiedCompactionStrategy", compaction) + else: + self.assertIn("SizeTieredCompactionStrategy", compaction) self.session.execute("ALTER MATERIALIZED VIEW {0}.mv1 WITH compaction = {{ 'class' : 'LeveledCompactionStrategy' }}".format(self.keyspace_name)) - self.assertIn("LeveledCompactionStrategy", self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views["mv1"].options["compaction"]["class"]) + compaction = self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views["mv1"].options["compaction"]["class"] + self.assertIn("LeveledCompactionStrategy", compaction) def test_materialized_view_metadata_drop(self): """ diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 89486802b4..1759875857 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -942,7 +942,7 @@ def test_no_connection_refused_on_timeout(self): exception_type = type(result).__name__ if exception_type == "NoHostAvailable": self.fail("PYTHON-91: Disconnected from Cassandra: %s" % result.message) - if exception_type in ["WriteTimeout", "WriteFailure", "ReadTimeout", "ReadFailure", "ErrorMessageSub"]: + if exception_type in ["WriteTimeout", "WriteFailure", "ReadTimeout", "ReadFailure", "ErrorMessageSub", "ErrorMessage"]: if type(result).__name__ in ["WriteTimeout", "WriteFailure"]: received_timeout = True continue From c5bed09bf03cd1ceb7780572043713f946da8cd8 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Tue, 12 Nov 2024 15:46:32 -0600 Subject: [PATCH 1983/2005] Update Apache Cassandra 5.0-beta1 references to 5.0 (#1238) --- Jenkinsfile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 3dcba78780..3457a779da 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -31,7 +31,7 @@ import com.datastax.jenkins.drivers.python.Slack slack = new Slack() -DEFAULT_CASSANDRA = ['3.0', '3.11', '4.0', '4.1', '5.0-beta1'] +DEFAULT_CASSANDRA = ['3.0', '3.11', '4.0', '4.1', '5.0'] DEFAULT_DSE = ['dse-5.1.35', 'dse-6.8.30', 'dse-6.9.0'] DEFAULT_HCD = ['hcd-1.0.0'] DEFAULT_RUNTIME = ['3.8.16', '3.9.16', '3.10.11', '3.11.3', '3.12.0'] @@ -577,8 +577,8 @@ pipeline { Apache CassandraⓇ v4.0.x - 5.0-beta1 - Apache CassandraⓇ v5.0-beta1 + 5.0 + Apache CassandraⓇ v5.0.x dse-5.1.35 @@ -675,7 +675,7 @@ pipeline { parameterizedCron(branchPatternCron().matcher(env.BRANCH_NAME).matches() ? """ # Every weeknight (Monday - Friday) around 4:00 AM # These schedules will run with and without Cython enabled for Python 3.8.16 and 3.12.0 - H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.8.16 3.12.0;CI_SCHEDULE_SERVER_VERSION=3.11 4.0 5.0-beta1 dse-5.1.35 dse-6.8.30 dse-6.9.0 hcd-1.0.0 + H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.8.16 3.12.0;CI_SCHEDULE_SERVER_VERSION=3.11 4.0 5.0 dse-5.1.35 dse-6.8.30 dse-6.9.0 hcd-1.0.0 """ : "") } From d7f4541e471d76aa6706cf4f28e7411244450100 Mon Sep 17 00:00:00 2001 From: Brad Schoening <5796692+bschoening@users.noreply.github.com> Date: Wed, 28 May 2025 23:47:20 -0400 Subject: [PATCH 1984/2005] Update README.rst to add python version badge (#1237) --- README.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 8682bff298..f8078d0a31 100644 --- a/README.rst +++ b/README.rst @@ -3,8 +3,9 @@ :target: https://opensource.org/licenses/Apache-2.0 .. |version| image:: https://badge.fury.io/py/cassandra-driver.svg :target: https://badge.fury.io/py/cassandra-driver +.. |pyversion| image:: https://img.shields.io/pypi/pyversions/cassandra-driver.svg -|license| |version| +|license| |version| |pyversion| DataStax Driver for Apache Cassandra ==================================== From ef176a57977e0b265202dd020bd918fe3675da1a Mon Sep 17 00:00:00 2001 From: Brad Schoening <5796692+bschoening@users.noreply.github.com> Date: Wed, 28 May 2025 23:57:09 -0400 Subject: [PATCH 1985/2005] Fix spelling issues in python code comments and messages (#1231) --- cassandra/__init__.py | 8 +++---- cassandra/auth.py | 4 ++-- cassandra/cluster.py | 24 +++++++++---------- cassandra/connection.py | 12 +++++----- cassandra/cqlengine/connection.py | 8 +++---- cassandra/cqlengine/management.py | 2 +- cassandra/cqlengine/models.py | 6 ++--- cassandra/cqlengine/query.py | 10 ++++---- cassandra/cqlengine/statements.py | 4 ++-- cassandra/marshal.py | 8 +++---- cassandra/metrics.py | 2 +- cassandra/policies.py | 14 +++++------ cassandra/pool.py | 4 ++-- cassandra/query.py | 4 ++-- cassandra/util.py | 6 ++--- .../advanced/graph/test_graph_query.py | 2 +- .../cqlengine/connections/test_connection.py | 2 +- tests/integration/standard/test_metadata.py | 8 +++---- tests/integration/standard/test_metrics.py | 8 +++---- .../standard/test_prepared_statements.py | 12 +++++----- tests/unit/test_policies.py | 4 ++-- tests/unit/test_resultset.py | 4 ++-- 22 files changed, 78 insertions(+), 78 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index a9602a9f88..badefb29de 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -28,7 +28,7 @@ def emit(self, record): class ConsistencyLevel(object): """ - Spcifies how many replicas must respond for an operation to be considered + Specifies how many replicas must respond for an operation to be considered a success. By default, ``ONE`` is used for all operations. """ @@ -247,7 +247,7 @@ def has_checksumming_support(cls, version): class WriteType(object): """ - For usage with :class:`.RetryPolicy`, this describe a type + For usage with :class:`.RetryPolicy`, this describes a type of write operation. """ @@ -272,7 +272,7 @@ class WriteType(object): COUNTER = 3 """ A counter write (for one or multiple partition keys). Such writes should - not be replayed in order to avoid overcount. + not be replayed in order to avoid over counting. """ BATCH_LOG = 4 @@ -283,7 +283,7 @@ class WriteType(object): CAS = 5 """ - A lighweight-transaction write, such as "DELETE ... IF EXISTS". + A lightweight-transaction write, such as "DELETE ... IF EXISTS". """ VIEW = 6 diff --git a/cassandra/auth.py b/cassandra/auth.py index 10200aa387..01c1ba444a 100644 --- a/cassandra/auth.py +++ b/cassandra/auth.py @@ -77,7 +77,7 @@ class Authenticator(object): 3) When the server indicates that authentication is successful, :meth:`~.on_authentication_success` will be called a token string that - that the server may optionally have sent. + the server may optionally have sent. The exact nature of the negotiation between the client and server is specific to the authentication mechanism configured server-side. @@ -90,7 +90,7 @@ class Authenticator(object): def initial_response(self): """ - Returns an message to send to the server to initiate the SASL handshake. + Returns a message to send to the server to initiate the SASL handshake. :const:`None` may be returned to send an empty message. """ return None diff --git a/cassandra/cluster.py b/cassandra/cluster.py index d5f80290a9..02df46526d 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -101,7 +101,7 @@ from cassandra.io.eventletreactor import EventletConnection # PYTHON-1364 # -# At the moment eventlet initialization is chucking AttributeErrors due to it's dependence on pyOpenSSL +# At the moment eventlet initialization is chucking AttributeErrors due to its dependence on pyOpenSSL # and some changes in Python 3.12 which have some knock-on effects there. except (ImportError, AttributeError): EventletConnection = None @@ -174,7 +174,7 @@ def _connection_reduce_fn(val,import_fn): DefaultConnection = conn_class # Forces load of utf8 encoding module to avoid deadlock that occurs -# if code that is being imported tries to import the module in a seperate +# if code that is being imported tries to import the module in a separate # thread. # See http://bugs.python.org/issue10923 "".encode('utf8') @@ -1024,7 +1024,7 @@ def default_retry_policy(self, policy): application_version = '' """ - A string identifiying this application's version to Insights + A string identifying this application's version to Insights """ cloud = None @@ -1154,7 +1154,7 @@ def __init__(self, column_encryption_policy=None): """ ``executor_threads`` defines the number of threads in a pool for handling asynchronous tasks such as - extablishing connection pools or refreshing metadata. + establishing connection pools or refreshing metadata. Any of the mutable Cluster attributes may be set as keyword arguments to the constructor. """ @@ -1461,7 +1461,7 @@ def register_user_type(self, keyspace, user_type, klass): for. `klass` should be a class with attributes whose names match the - fields of the user-defined type. The constructor must accepts kwargs + fields of the user-defined type. The constructor must accept kwargs for each of the fields in the UDT. This method should only be called after the type has been created @@ -3168,7 +3168,7 @@ def prepare_on_all_hosts(self, query, excluded_host, keyspace=None): continue if request_id is None: - # the error has already been logged by ResponsFuture + # the error has already been logged by ResponseFuture log.debug("Failed to prepare query for host %s: %r", host, future._errors.get(host)) continue @@ -3965,7 +3965,7 @@ def _handle_status_change(self, event): elif change_type == "DOWN": # Note that there is a slight risk we can receive the event late and thus # mark the host down even though we already had reconnected successfully. - # But it is unlikely, and don't have too much consequence since we'll try reconnecting + # This is unlikely, and will not have much consequence because we'll try reconnecting # right away, so we favor the detection to make the Host.is_up more accurate. if host is not None: # this will be run by the scheduler @@ -4447,7 +4447,7 @@ def _on_speculative_execute(self): # PYTHON-836, the speculative queries must be after # the query is sent from the main thread, otherwise the # query from the main thread may raise NoHostAvailable - # if the _query_plan has been exhausted by the specualtive queries. + # if the _query_plan has been exhausted by the speculative queries. # This also prevents a race condition accessing the iterator. # We reschedule this call until the main thread has succeeded # making a query @@ -4559,7 +4559,7 @@ def warnings(self): Ensure the future is complete before trying to access this property (call :meth:`.result()`, or after callback is invoked). - Otherwise it may throw if the response has not been received. + Otherwise, it may throw if the response has not been received. """ # TODO: When timers are introduced, just make this wait if not self._event.is_set(): @@ -4575,7 +4575,7 @@ def custom_payload(self): Ensure the future is complete before trying to access this property (call :meth:`.result()`, or after callback is invoked). - Otherwise it may throw if the response has not been received. + Otherwise, it may throw if the response has not been received. :return: :ref:`custom_payload`. """ @@ -5285,7 +5285,7 @@ def cancel_continuous_paging(self): try: self.response_future._continuous_paging_session.cancel() except AttributeError: - raise DriverException("Attempted to cancel paging with no active session. This is only for requests with ContinuousdPagingOptions.") + raise DriverException("Attempted to cancel paging with no active session. This is only for requests with ContinuousPagingOptions.") @property def was_applied(self): @@ -5296,7 +5296,7 @@ def was_applied(self): a :class:`.query.BatchStatement` containing LWT. In the latter case either all the batch succeeds or fails. - Only valid when one of the of the internal row factories is in use. + Only valid when one of the internal row factories is in use. """ if self.response_future.row_factory not in (named_tuple_factory, dict_factory, tuple_factory): raise RuntimeError("Cannot determine LWT result with row factory %s" % (self.response_future.row_factory,)) diff --git a/cassandra/connection.py b/cassandra/connection.py index bfe38fc702..4af6b0624a 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -825,7 +825,7 @@ def initialize_reactor(cls): @classmethod def handle_fork(cls): """ - Called after a forking. This should cleanup any remaining reactor state + Called after a forking. This should clean up any remaining reactor state from the parent process. """ pass @@ -862,7 +862,7 @@ def _build_ssl_context_from_options(self): ssl_context_opt_names = ['ssl_version', 'cert_reqs', 'check_hostname', 'keyfile', 'certfile', 'ca_certs', 'ciphers'] opts = {k:self.ssl_options.get(k, None) for k in ssl_context_opt_names if k in self.ssl_options} - # Python >= 3.10 requires either PROTOCOL_TLS_CLIENT or PROTOCOL_TLS_SERVER so we'll get ahead of things by always + # Python >= 3.10 requires either PROTOCOL_TLS_CLIENT or PROTOCOL_TLS_SERVER, so we'll get ahead of things by always # being explicit ssl_version = opts.get('ssl_version', None) or ssl.PROTOCOL_TLS_CLIENT cert_reqs = opts.get('cert_reqs', None) or ssl.CERT_REQUIRED @@ -891,7 +891,7 @@ def _wrap_socket_from_context(self): opts = {k:self.ssl_options.get(k, None) for k in wrap_socket_opt_names if k in self.ssl_options} # PYTHON-1186: set the server_hostname only if the SSLContext has - # check_hostname enabled and it is not already provided by the EndPoint ssl options + # check_hostname enabled, and it is not already provided by the EndPoint ssl options #opts['server_hostname'] = self.endpoint.address if (self.ssl_context.check_hostname and 'server_hostname' not in opts): server_hostname = self.endpoint.address @@ -1210,11 +1210,11 @@ def process_io_buffer(self): if not self._current_frame or pos < self._current_frame.end_pos: if self._is_checksumming_enabled and self._io_buffer.readable_io_bytes(): - # We have a multi-segments message and we need to read more + # We have a multi-segments message, and we need to read more # data to complete the current cql frame continue - # we don't have a complete header yet or we + # we don't have a complete header yet, or we # already saw a header, but we don't have a # complete message yet return @@ -1713,7 +1713,7 @@ def run(self): else: log.debug("Cannot send heartbeat message on connection (%s) to %s", id(connection), connection.endpoint) - # make sure the owner sees this defunt/closed connection + # make sure the owner sees this defunct/closed connection owner.return_connection(connection) self._raise_if_stopped() diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 20ef497a73..60aee8d9e7 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -88,7 +88,7 @@ def from_session(cls, name, session): return instance def setup(self): - """Setup the connection""" + """Set up the connection""" global cluster, session if 'username' in self.cluster_options or 'password' in self.cluster_options: @@ -134,7 +134,7 @@ def setup_session(self): def handle_lazy_connect(self): - # if lazy_connect is False, it means the cluster is setup and ready + # if lazy_connect is False, it means the cluster is set up and ready # No need to acquire the lock if not self.lazy_connect: return @@ -280,7 +280,7 @@ def set_session(s): try: conn = get_connection() except CQLEngineException: - # no default connection set; initalize one + # no default connection set; initialize one register_connection('default', session=s, default=True) conn = get_connection() else: @@ -316,7 +316,7 @@ def setup( retry_connect=False, **kwargs): """ - Setup a the driver connection used by the mapper + Set up the driver connection used by the mapper :param list hosts: list of hosts, (``contact_points`` for :class:`cassandra.cluster.Cluster`) :param str default_keyspace: The default keyspace to use diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 6790a117c7..7ca30f9e15 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -525,7 +525,7 @@ def _drop_table(model, connection=None): connection = connection or model._get_connection() - # don't try to delete non existant tables + # don't try to delete non existent tables meta = get_cluster(connection).metadata ks_name = model._get_keyspace() diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index bc00001666..ff57d7c53e 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -183,7 +183,7 @@ def __call__(self, *args, **kwargs): class IfNotExistsDescriptor(object): """ - return a query set descriptor with a if_not_exists flag specified + return a query set descriptor with an if_not_exists flag specified """ def __get__(self, instance, model): if instance: @@ -201,7 +201,7 @@ def __call__(self, *args, **kwargs): class IfExistsDescriptor(object): """ - return a query set descriptor with a if_exists flag specified + return a query set descriptor with an if_exists flag specified """ def __get__(self, instance, model): if instance: @@ -398,7 +398,7 @@ def __init__(self, **values): self._values = {} for name, column in self._columns.items(): # Set default values on instantiation. Thanks to this, we don't have - # to wait anylonger for a call to validate() to have CQLengine set + # to wait any longer for a call to validate() to have CQLengine set # default columns values. column_default = column.get_default() if column.has_default else None value = values.get(name, column_default) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index afc7ceeef6..ee09e70d2d 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -938,7 +938,7 @@ def fetch_size(self, v): def allow_filtering(self): """ - Enables the (usually) unwise practive of querying on a clustering key without also defining a partition key + Enables the (usually) unwise practice of querying on a clustering key without also defining a partition key """ clone = copy.deepcopy(self) clone._allow_filtering = True @@ -1417,7 +1417,7 @@ def update(self): prior to calling this. """ if self.instance is None: - raise CQLEngineException("DML Query intance attribute is None") + raise CQLEngineException("DML Query instance attribute is None") assert type(self.instance) == self.model null_clustering_key = False if len(self.instance._clustering_keys) == 0 else True static_changed_only = True @@ -1429,7 +1429,7 @@ def update(self): updated_columns = set() # get defined fields and their column names for name, col in self.model._columns.items(): - # if clustering key is null, don't include non static columns + # if clustering key is null, don't include non-static columns if null_clustering_key and not col.static and not col.partition_key: continue if not col.is_primary_key: @@ -1448,7 +1448,7 @@ def update(self): if statement.assignments: for name, col in self.model._primary_keys.items(): - # only include clustering key if clustering key is not null, and non static columns are changed to avoid cql error + # only include clustering key if clustering key is not null, and non-static columns are changed to avoid cql error if (null_clustering_key or static_changed_only) and (not col.partition_key): continue statement.add_where(col, EqualsOperator(), getattr(self.instance, name)) @@ -1468,7 +1468,7 @@ def save(self): prior to calling this. """ if self.instance is None: - raise CQLEngineException("DML Query intance attribute is None") + raise CQLEngineException("DML Query instance attribute is None") assert type(self.instance) == self.model nulled_fields = set() diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index d92d0b2452..75e4426a5f 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -550,7 +550,7 @@ def get_context(self): def add_conditional_clause(self, clause): """ - Adds a iff clause to this statement + Adds an iff clause to this statement :param clause: The clause that will be added to the iff statement :type clause: ConditionalClause @@ -575,7 +575,7 @@ def update_context_id(self, i): @property def timestamp_normalized(self): """ - we're expecting self.timestamp to be either a long, int, a datetime, or a timedelta + We're expecting self.timestamp to be either a long, int, a datetime, or a timedelta :return: """ if not self.timestamp: diff --git a/cassandra/marshal.py b/cassandra/marshal.py index a527a9e1d7..c60944b6c9 100644 --- a/cassandra/marshal.py +++ b/cassandra/marshal.py @@ -122,8 +122,8 @@ def vints_pack(values): num_extra_bytes = 0 num_bits = v.bit_length() # We need to reserve (num_extra_bytes+1) bits in the first byte - # ie. with 1 extra byte, the first byte needs to be something like '10XXXXXX' # 2 bits reserved - # ie. with 8 extra bytes, the first byte needs to be '11111111' # 8 bits reserved + # i.e. with 1 extra byte, the first byte needs to be something like '10XXXXXX' # 2 bits reserved + # i.e. with 8 extra bytes, the first byte needs to be '11111111' # 8 bits reserved reserved_bits = num_extra_bytes + 1 while num_bits > (8-(reserved_bits)): num_extra_bytes += 1 @@ -167,8 +167,8 @@ def uvint_pack(val): num_extra_bytes = 0 num_bits = v.bit_length() # We need to reserve (num_extra_bytes+1) bits in the first byte - # ie. with 1 extra byte, the first byte needs to be something like '10XXXXXX' # 2 bits reserved - # ie. with 8 extra bytes, the first byte needs to be '11111111' # 8 bits reserved + # i.e. with 1 extra byte, the first byte needs to be something like '10XXXXXX' # 2 bits reserved + # i.e. with 8 extra bytes, the first byte needs to be '11111111' # 8 bits reserved reserved_bits = num_extra_bytes + 1 while num_bits > (8-(reserved_bits)): num_extra_bytes += 1 diff --git a/cassandra/metrics.py b/cassandra/metrics.py index 223b0c7c6e..d3fe4a6fc3 100644 --- a/cassandra/metrics.py +++ b/cassandra/metrics.py @@ -185,7 +185,7 @@ def get_stats(self): def set_stats_name(self, stats_name): """ Set the metrics stats name. - The stats_name is a string used to access the metris through scales: scales.getStats()[] + The stats_name is a string used to access the metrics through scales: scales.getStats()[] Default is 'cassandra-'. """ diff --git a/cassandra/policies.py b/cassandra/policies.py index c60e558465..2357639c48 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -27,7 +27,7 @@ # This is done this way because WriteType was originally # defined here and in order not to break the API. -# It may removed in the next mayor. +# It may be removed in the next major. WriteType = WT from cassandra import ConsistencyLevel, OperationTimedOut @@ -126,7 +126,7 @@ def populate(self, cluster, hosts): def make_query_plan(self, working_keyspace=None, query=None): """ - Given a :class:`~.query.Statement` instance, return a iterable + Given a :class:`~.query.Statement` instance, return an iterable of :class:`.Host` instances which should be queried in that order. A generator may work well for custom implementations of this method. @@ -809,8 +809,8 @@ def on_write_timeout(self, query, consistency, write_type, `retry_num` counts how many times the operation has been retried, so the first time this method is called, `retry_num` will be 0. - By default, failed write operations will retried at most once, and - they will only be retried if the `write_type` was + By default, a failed write operations will be retried at most once, and + will only be retried if the `write_type` was :attr:`~.WriteType.BATCH_LOG`. """ if retry_num != 0: @@ -907,7 +907,7 @@ class DowngradingConsistencyRetryPolicy(RetryPolicy): policy unless you have understood the cases where this can happen and are ok with that. It is also recommended to subclass this class so that queries that required a consistency level downgrade can be - recorded (so that repairs can be made later, etc). + recorded (so that repairs can be made later, etc.). This policy implements the same retries as :class:`.RetryPolicy`, but on top of that, it also retries in the following cases: @@ -1006,7 +1006,7 @@ class AddressTranslator(object): The driver discovers nodes using server metadata and topology change events. Normally, the endpoint defined by the server is the right way to connect to a node. In some environments, these addresses may not be reachable, or not preferred (public vs. private IPs in cloud environments, - suboptimal routing, etc). This interface allows for translating from server defined endpoints to + suboptimal routing, etc.). This interface allows for translating from server defined endpoints to preferred addresses for driver connections. *Note:* :attr:`~Cluster.contact_points` provided while creating the :class:`~.Cluster` instance are not @@ -1036,7 +1036,7 @@ def translate(self, addr): Reverse DNS the public broadcast_address, then lookup that hostname to get the AWS-resolved IP, which will point to the private IP address within the same datacenter. """ - # get family of this address so we translate to the same + # get family of this address, so we translate to the same family = socket.getaddrinfo(addr, 0, socket.AF_UNSPEC, socket.SOCK_STREAM)[0][0] host = socket.getfqdn(addr) for a in socket.getaddrinfo(host, 0, family, socket.SOCK_STREAM): diff --git a/cassandra/pool.py b/cassandra/pool.py index d61e81cd0d..2ffe66e7af 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -666,7 +666,7 @@ def borrow_connection(self, timeout): # wait_for_conn will increment in_flight on the conn least_busy, request_id = self._wait_for_conn(timeout) - # if we have too many requests on this connection but we still + # if we have too many requests on this connection, but we still # have space to open a new connection against this host, go ahead # and schedule the creation of a new connection if least_busy.in_flight >= max_reqs and len(self._connections) < max_conns: @@ -716,7 +716,7 @@ def _add_conn_if_under_max(self): with self._lock: new_connections = self._connections[:] + [conn] self._connections = new_connections - log.debug("Added new connection (%s) to pool for host %s, signaling availablility", + log.debug("Added new connection (%s) to pool for host %s, signaling availability", id(conn), self.host) self._signal_available_conn() return True diff --git a/cassandra/query.py b/cassandra/query.py index e29c2a3113..4a732df275 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -990,7 +990,7 @@ def populate(self, max_wait=2.0, wait_for_complete=True, query_cl=None): This can be used to query events from partial sessions. `query_cl` specifies a consistency level to use for polling the trace tables, - if it should be different than the session default. + if different from the session default. """ attempt = 0 start = time.time() @@ -1014,7 +1014,7 @@ def populate(self, max_wait=2.0, wait_for_complete=True, query_cl=None): if is_complete: log.debug("Fetched trace info for trace ID: %s", self.trace_id) else: - log.debug("Fetching parital trace info for trace ID: %s", self.trace_id) + log.debug("Fetching partial trace info for trace ID: %s", self.trace_id) self.request_type = session_row.request self.duration = timedelta(microseconds=session_row.duration) if is_complete else None diff --git a/cassandra/util.py b/cassandra/util.py index 9c07339759..f069c439f7 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -640,7 +640,7 @@ def _find_insertion(self, x): # could not compare a[mid] with x # start scanning to find insertion point while swallowing type errors lo = 0 - compared_one = False # flag is used to determine whether uncomparables are grouped at the front or back + compared_one = False # flag is used to determine whether un-comparables are grouped at the front or back while lo < hi: try: if a[lo] == x or a[lo] >= x: break @@ -1393,7 +1393,7 @@ def _round_to_precision(cls, ms, precision, default_dt): if precision_idx <= cls._to_int(DateRangePrecision.MINUTE): replace_kwargs['second'] = default_dt.second if precision_idx <= cls._to_int(DateRangePrecision.SECOND): - # truncate to nearest 1000 so we deal in ms, not us + # truncate to nearest 1000, so we deal in ms, not us replace_kwargs['microsecond'] = (default_dt.microsecond // 1000) * 1000 if precision_idx == cls._to_int(DateRangePrecision.MILLISECOND): replace_kwargs['microsecond'] = int(round(dt.microsecond, -3)) @@ -1402,7 +1402,7 @@ def _round_to_precision(cls, ms, precision, default_dt): @classmethod def round_up_to_precision(cls, ms, precision): # PYTHON-912: this is the only case in which we can't take as upper bound - # datetime.datetime.max because the month from ms may be February and we'd + # datetime.datetime.max because the month from ms may be February, and we'd # be setting 31 as the month day if precision == cls.MONTH: date_ms = utc_datetime_from_ms_timestamp(ms) diff --git a/tests/integration/advanced/graph/test_graph_query.py b/tests/integration/advanced/graph/test_graph_query.py index d1b3ef62b1..3496c9c828 100644 --- a/tests/integration/advanced/graph/test_graph_query.py +++ b/tests/integration/advanced/graph/test_graph_query.py @@ -55,7 +55,7 @@ def test_consistency_passing(self): cl_attrs = ('graph_read_consistency_level', 'graph_write_consistency_level') # Iterates over the graph options and constructs an array containing - # The graph_options that correlate to graoh read and write consistency levels + # The graph_options that correlate to graph read and write consistency levels graph_params = [a[2] for a in _graph_options if a[0] in cl_attrs] s = self.session diff --git a/tests/integration/cqlengine/connections/test_connection.py b/tests/integration/cqlengine/connections/test_connection.py index 92b6992573..5d6fb248d6 100644 --- a/tests/integration/cqlengine/connections/test_connection.py +++ b/tests/integration/cqlengine/connections/test_connection.py @@ -102,7 +102,7 @@ def tearDown(self): def test_connection_session_switch(self): """ Test to ensure that when the default keyspace is changed in a session and that session, - is set in the connection class, that the new defaul keyspace is honored. + is set in the connection class, that the new default keyspace is honored. @since 3.1 @jira_ticket PYTHON-486 diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 221c4be702..573fc99d56 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -669,11 +669,11 @@ def test_refresh_table_metadata(self): """ test for synchronously refreshing table metadata - test_refresh_table_metatadata tests that table metadata is refreshed when calling test_refresh_table_metatadata(). + test_refresh_table_metadata tests that table metadata is refreshed when calling test_refresh_table_metadata(). It creates a second cluster object with schema_event_refresh_window=-1 such that schema refreshes are disabled for schema change push events. It then alters the table, adding a new column, using the first cluster object, and verifies that the table metadata has not changed in the second cluster object. Finally, it calls - test_refresh_table_metatadata() and verifies that the table metadata is updated in the second cluster object. + test_refresh_table_metadata() and verifies that the table metadata is updated in the second cluster object. @since 2.6.0 @jira_ticket PYTHON-291 @@ -703,10 +703,10 @@ def test_refresh_metadata_for_mv(self): test for synchronously refreshing materialized view metadata test_refresh_table_metadata_for_materialized_views tests that materialized view metadata is refreshed when calling - test_refresh_table_metatadata() with the materialized view name as the table. It creates a second cluster object + test_refresh_table_metadata() with the materialized view name as the table. It creates a second cluster object with schema_event_refresh_window=-1 such that schema refreshes are disabled for schema change push events. It then creates a new materialized view , using the first cluster object, and verifies that the materialized view - metadata has not changed in the second cluster object. Finally, it calls test_refresh_table_metatadata() with the + metadata has not changed in the second cluster object. Finally, it calls test_refresh_table_metadata() with the materialized view name as the table name, and verifies that the materialized view metadata is updated in the second cluster object. diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index ddc1091dc6..12c13a76fa 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -276,7 +276,7 @@ def test_duplicate_metrics_per_cluster(self): session2 = cluster2.connect(self.ks_name, wait_for_all_pools=True) session3 = cluster3.connect(self.ks_name, wait_for_all_pools=True) - # Basic validation that naming metrics doesn't impact their segration or accuracy + # Basic validation that naming metrics doesn't impact their segregation or accuracy for i in range(10): query = SimpleStatement("SELECT * FROM {0}.{0}".format(self.ks_name), consistency_level=ConsistencyLevel.ALL) session2.execute(query) @@ -370,7 +370,7 @@ def test_metrics_per_cluster(self): check to ensure that on_success and on_error methods are invoked appropriately. @since 3.7.0 @jira_ticket PYTHON-284 - @expected_result in_error, and on_success should be invoked apropriately + @expected_result in_error, and on_success should be invoked appropriately @test_category metrics """ @@ -381,7 +381,7 @@ def test_metrics_per_cluster(self): for _ in range(3): try: - self.session.execute("nonesense") + self.session.execute("nonsense") except SyntaxException: continue @@ -398,6 +398,6 @@ def test_metrics_per_cluster(self): RequestAnalyzer(self.session, throw_on_success=True) try: - self.session.execute("nonesense") + self.session.execute("nonsense") except SyntaxException: pass diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index a643b19c07..c25f8b90d2 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -241,7 +241,7 @@ def test_unset_values(self): @since 2.6.0 @jira_ticket PYTHON-317 - @expected_result UNSET_VALUE is implicitly added to bind parameters, and properly encoded, leving unset values unaffected. + @expected_result UNSET_VALUE is implicitly added to bind parameters, and properly encoded, leaving unset values unaffected. @test_category prepared_statements:binding """ @@ -464,7 +464,7 @@ def test_prepared_id_is_update(self): @since 3.12 @jira_ticket PYTHON-808 - The query id from the prepared statment must have changed + The query id from the prepared statement must have changed """ prepared_statement = self.session.prepare("SELECT * from {} WHERE a = ?".format(self.table_name)) id_before = prepared_statement.result_metadata_id @@ -552,7 +552,7 @@ def test_not_reprepare_invalid_statements(self): def test_id_is_not_updated_conditional_v4(self): """ Test that verifies that the result_metadata and the - result_metadata_id are udpated correctly in conditional statements + result_metadata_id are updated correctly in conditional statements in protocol V4 @since 3.13 @@ -567,7 +567,7 @@ def test_id_is_not_updated_conditional_v4(self): def test_id_is_not_updated_conditional_v5(self): """ Test that verifies that the result_metadata and the - result_metadata_id are udpated correctly in conditional statements + result_metadata_id are updated correctly in conditional statements in protocol V5 @since 3.13 @jira_ticket PYTHON-847 @@ -581,7 +581,7 @@ def test_id_is_not_updated_conditional_v5(self): def test_id_is_not_updated_conditional_dsev1(self): """ Test that verifies that the result_metadata and the - result_metadata_id are udpated correctly in conditional statements + result_metadata_id are updated correctly in conditional statements in protocol DSE V1 @since 3.13 @@ -596,7 +596,7 @@ def test_id_is_not_updated_conditional_dsev1(self): def test_id_is_not_updated_conditional_dsev2(self): """ Test that verifies that the result_metadata and the - result_metadata_id are udpated correctly in conditional statements + result_metadata_id are updated correctly in conditional statements in protocol DSE V2 @since 3.13 diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 824b34c956..71710c050e 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -303,7 +303,7 @@ def test_modification_during_generation(self): policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=3) policy.populate(Mock(), hosts) - # The general concept here is to change thee internal state of the + # The general concept here is to change the internal state of the # policy during plan generation. In this case we use a grey-box # approach that changes specific things during known phases of the # generator. @@ -1196,7 +1196,7 @@ def test_write_timeout(self): query=None, consistency=ONE, write_type=write_type, required_responses=1, received_responses=2, retry_num=0) self.assertEqual(retry, RetryPolicy.IGNORE) - # retrhow if we can't be sure we have a replica + # rethrow if we can't be sure we have a replica retry, consistency = policy.on_write_timeout( query=None, consistency=ONE, write_type=write_type, required_responses=1, received_responses=0, retry_num=0) diff --git a/tests/unit/test_resultset.py b/tests/unit/test_resultset.py index 7ff6352394..e3a79c3c0a 100644 --- a/tests/unit/test_resultset.py +++ b/tests/unit/test_resultset.py @@ -120,7 +120,7 @@ def test_index_list_mode(self): # index access before iteration causes list to be materialized self.assertEqual(rs[0], expected[0]) - # resusable iteration + # reusable iteration self.assertListEqual(list(rs), expected) self.assertListEqual(list(rs), expected) @@ -135,7 +135,7 @@ def test_index_list_mode(self): # index access before iteration causes list to be materialized self.assertEqual(rs[0], expected[0]) self.assertEqual(rs[9], expected[9]) - # resusable iteration + # reusable iteration self.assertListEqual(list(rs), expected) self.assertListEqual(list(rs), expected) From 84ece0bba0eec14db286af683e5b5a845f132d56 Mon Sep 17 00:00:00 2001 From: Jonathan Ellis Date: Wed, 28 May 2025 23:06:26 -0500 Subject: [PATCH 1986/2005] add execute_concurrent_async and expose execute_concurrent_* in Session (#1229) --- cassandra/cluster.py | 92 ++++++++++++++++++++++++++++ cassandra/concurrent.py | 112 ++++++++++++++-------------------- tests/unit/test_concurrent.py | 54 +++++++++++++++- 3 files changed, 192 insertions(+), 66 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 02df46526d..37dc6598bd 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2725,6 +2725,98 @@ def execute_async(self, query, parameters=None, trace=False, custom_payload=None future.send_request() return future + def execute_concurrent(self, statements_and_parameters, concurrency=100, raise_on_first_error=True, results_generator=False, execution_profile=EXEC_PROFILE_DEFAULT): + """ + Executes a sequence of (statement, parameters) tuples concurrently. Each + ``parameters`` item must be a sequence or :const:`None`. + + The `concurrency` parameter controls how many statements will be executed + concurrently. When :attr:`.Cluster.protocol_version` is set to 1 or 2, + it is recommended that this be kept below 100 times the number of + core connections per host times the number of connected hosts (see + :meth:`.Cluster.set_core_connections_per_host`). If that amount is exceeded, + the event loop thread may attempt to block on new connection creation, + substantially impacting throughput. If :attr:`~.Cluster.protocol_version` + is 3 or higher, you can safely experiment with higher levels of concurrency. + + If `raise_on_first_error` is left as :const:`True`, execution will stop + after the first failed statement and the corresponding exception will be + raised. + + `results_generator` controls how the results are returned. + + * If :const:`False`, the results are returned only after all requests have completed. + * If :const:`True`, a generator expression is returned. Using a generator results in a constrained + memory footprint when the results set will be large -- results are yielded + as they return instead of materializing the entire list at once. The trade for lower memory + footprint is marginal CPU overhead (more thread coordination and sorting out-of-order results + on-the-fly). + + `execution_profile` argument is the execution profile to use for this + request, it is passed directly to :meth:`Session.execute_async`. + + A sequence of ``ExecutionResult(success, result_or_exc)`` namedtuples is returned + in the same order that the statements were passed in. If ``success`` is :const:`False`, + there was an error executing the statement, and ``result_or_exc`` + will be an :class:`Exception`. If ``success`` is :const:`True`, ``result_or_exc`` + will be the query result. + + Example usage:: + + select_statement = session.prepare("SELECT * FROM users WHERE id=?") + + statements_and_params = [] + for user_id in user_ids: + params = (user_id, ) + statements_and_params.append((select_statement, params)) + + results = session.execute_concurrent(statements_and_params, raise_on_first_error=False) + + for (success, result) in results: + if not success: + handle_error(result) # result will be an Exception + else: + process_user(result[0]) # result will be a list of rows + + Note: in the case that `generators` are used, it is important to ensure the consumers do not + block or attempt further synchronous requests, because no further IO will be processed until + the consumer returns. This may also produce a deadlock in the IO event thread. + """ + from cassandra.concurrent import execute_concurrent + return execute_concurrent(self, statements_and_parameters, concurrency, raise_on_first_error, results_generator, execution_profile) + + def execute_concurrent_with_args(self, statement, parameters, *args, **kwargs): + """ + Like :meth:`~cassandra.concurrent.execute_concurrent()`, but takes a single + statement and a sequence of parameters. Each item in ``parameters`` + should be a sequence or :const:`None`. + + Example usage:: + + statement = session.prepare("INSERT INTO mytable (a, b) VALUES (1, ?)") + parameters = [(x,) for x in range(1000)] + session.execute_concurrent_with_args(statement, parameters, concurrency=50) + """ + from cassandra.concurrent import execute_concurrent_with_args + return execute_concurrent_with_args(self, statement, parameters, *args, **kwargs) + + def execute_concurrent_async(self, statements_and_parameters, concurrency=100, raise_on_first_error=False, execution_profile=EXEC_PROFILE_DEFAULT): + """ + Asynchronously executes a sequence of (statement, parameters) tuples concurrently. + + Args: + session: Cassandra session object. + statement_and_parameters: Iterable of (prepared CQL statement, bind parameters) tuples. + concurrency (int, optional): Number of concurrent operations. Default is 100. + raise_on_first_error (bool, optional): If True, execution stops on the first error. Default is True. + execution_profile (ExecutionProfile, optional): Execution profile to use. Default is EXEC_PROFILE_DEFAULT. + + Returns: + A `Future` object that will be completed when all operations are done. + """ + from cassandra.concurrent import execute_concurrent_async + return execute_concurrent_async(self, statements_and_parameters, concurrency, raise_on_first_error, execution_profile) + def execute_graph(self, query, parameters=None, trace=False, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT, execute_as=None): """ Executes a Gremlin query string or GraphStatement synchronously, diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index fb8f26e1cc..607c29c332 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -13,15 +13,15 @@ # limitations under the License. +import logging from collections import namedtuple +from concurrent.futures import Future from heapq import heappush, heappop from itertools import cycle from threading import Condition -import sys from cassandra.cluster import ResultSet, EXEC_PROFILE_DEFAULT -import logging log = logging.getLogger(__name__) @@ -29,61 +29,7 @@ def execute_concurrent(session, statements_and_parameters, concurrency=100, raise_on_first_error=True, results_generator=False, execution_profile=EXEC_PROFILE_DEFAULT): """ - Executes a sequence of (statement, parameters) tuples concurrently. Each - ``parameters`` item must be a sequence or :const:`None`. - - The `concurrency` parameter controls how many statements will be executed - concurrently. When :attr:`.Cluster.protocol_version` is set to 1 or 2, - it is recommended that this be kept below 100 times the number of - core connections per host times the number of connected hosts (see - :meth:`.Cluster.set_core_connections_per_host`). If that amount is exceeded, - the event loop thread may attempt to block on new connection creation, - substantially impacting throughput. If :attr:`~.Cluster.protocol_version` - is 3 or higher, you can safely experiment with higher levels of concurrency. - - If `raise_on_first_error` is left as :const:`True`, execution will stop - after the first failed statement and the corresponding exception will be - raised. - - `results_generator` controls how the results are returned. - - * If :const:`False`, the results are returned only after all requests have completed. - * If :const:`True`, a generator expression is returned. Using a generator results in a constrained - memory footprint when the results set will be large -- results are yielded - as they return instead of materializing the entire list at once. The trade for lower memory - footprint is marginal CPU overhead (more thread coordination and sorting out-of-order results - on-the-fly). - - `execution_profile` argument is the execution profile to use for this - request, it is passed directly to :meth:`Session.execute_async`. - - A sequence of ``ExecutionResult(success, result_or_exc)`` namedtuples is returned - in the same order that the statements were passed in. If ``success`` is :const:`False`, - there was an error executing the statement, and ``result_or_exc`` will be - an :class:`Exception`. If ``success`` is :const:`True`, ``result_or_exc`` - will be the query result. - - Example usage:: - - select_statement = session.prepare("SELECT * FROM users WHERE id=?") - - statements_and_params = [] - for user_id in user_ids: - params = (user_id, ) - statements_and_params.append((select_statement, params)) - - results = execute_concurrent( - session, statements_and_params, raise_on_first_error=False) - - for (success, result) in results: - if not success: - handle_error(result) # result will be an Exception - else: - process_user(result[0]) # result will be a list of rows - - Note: in the case that `generators` are used, it is important to ensure the consumers do not - block or attempt further synchronous requests, because no further IO will be processed until - the consumer returns. This may also produce a deadlock in the IO event thread. + See :meth:`.Session.execute_concurrent`. """ if concurrency <= 0: raise ValueError("concurrency must be greater than 0") @@ -216,14 +162,50 @@ def _results(self): def execute_concurrent_with_args(session, statement, parameters, *args, **kwargs): """ - Like :meth:`~cassandra.concurrent.execute_concurrent()`, but takes a single - statement and a sequence of parameters. Each item in ``parameters`` - should be a sequence or :const:`None`. + See :meth:`.Session.execute_concurrent_with_args`. + """ + return execute_concurrent(session, zip(cycle((statement,)), parameters), *args, **kwargs) - Example usage:: - statement = session.prepare("INSERT INTO mytable (a, b) VALUES (1, ?)") - parameters = [(x,) for x in range(1000)] - execute_concurrent_with_args(session, statement, parameters, concurrency=50) +class ConcurrentExecutorFutureResults(ConcurrentExecutorListResults): + def __init__(self, session, statements_and_params, execution_profile, future): + super().__init__(session, statements_and_params, execution_profile) + self.future = future + + def _put_result(self, result, idx, success): + super()._put_result(result, idx, success) + with self._condition: + if self._current == self._exec_count: + if self._exception and self._fail_fast: + self.future.set_exception(self._exception) + else: + sorted_results = [r[1] for r in sorted(self._results_queue)] + self.future.set_result(sorted_results) + + +def execute_concurrent_async( + session, + statements_and_parameters, + concurrency=100, + raise_on_first_error=False, + execution_profile=EXEC_PROFILE_DEFAULT +): """ - return execute_concurrent(session, zip(cycle((statement,)), parameters), *args, **kwargs) + See :meth:`.Session.execute_concurrent_async`. + """ + # Create a Future object and initialize the custom ConcurrentExecutor with the Future + future = Future() + executor = ConcurrentExecutorFutureResults( + session=session, + statements_and_params=statements_and_parameters, + execution_profile=execution_profile, + future=future + ) + + # Execute concurrently + try: + executor.execute(concurrency=concurrency, fail_fast=raise_on_first_error) + except Exception as e: + future.set_exception(e) + + return future diff --git a/tests/unit/test_concurrent.py b/tests/unit/test_concurrent.py index db641b66a7..9840a22b92 100644 --- a/tests/unit/test_concurrent.py +++ b/tests/unit/test_concurrent.py @@ -24,7 +24,7 @@ import platform from cassandra.cluster import Cluster, Session -from cassandra.concurrent import execute_concurrent, execute_concurrent_with_args +from cassandra.concurrent import execute_concurrent, execute_concurrent_with_args, execute_concurrent_async from cassandra.pool import Host from cassandra.policies import SimpleConvictionPolicy from tests.unit.utils import mock_session_pools @@ -239,6 +239,58 @@ def validate_result_ordering(self, results): self.assertLess(last_time_added, current_time_added) last_time_added = current_time_added + def insert_and_validate_list_async(self, reverse, slowdown): + """ + This utility method will execute submit various statements for execution using execute_concurrent_async, + then invoke a separate thread to execute the callback associated with the futures registered + for those statements. The parameters will toggle various timing, and ordering changes. + Finally it will validate that the results were returned in the order they were submitted + :param reverse: Execute the callbacks in the opposite order that they were submitted + :param slowdown: Cause intermittent queries to perform slowly + """ + our_handler = MockResponseResponseFuture(reverse=reverse) + mock_session = Mock() + statements_and_params = zip(cycle(["INSERT INTO test3rf.test (k, v) VALUES (%s, 0)"]), + [(i, ) for i in range(100)]) + mock_session.execute_async.return_value = our_handler + + t = TimedCallableInvoker(our_handler, slowdown=slowdown) + t.start() + try: + future = execute_concurrent_async(mock_session, statements_and_params) + results = future.result() + self.validate_result_ordering(results) + finally: + t.stop() + + def test_results_ordering_async_forward(self): + """ + This tests the ordering of our execute_concurrent_async function + when queries complete in the order they were executed. + """ + self.insert_and_validate_list_async(False, False) + + def test_results_ordering_async_reverse(self): + """ + This tests the ordering of our execute_concurrent_async function + when queries complete in the reverse order they were executed. + """ + self.insert_and_validate_list_async(True, False) + + def test_results_ordering_async_forward_slowdown(self): + """ + This tests the ordering of our execute_concurrent_async function + when queries complete in the order they were executed, with slow queries mixed in. + """ + self.insert_and_validate_list_async(False, True) + + def test_results_ordering_async_reverse_slowdown(self): + """ + This tests the ordering of our execute_concurrent_async function + when queries complete in the reverse order they were executed, with slow queries mixed in. + """ + self.insert_and_validate_list_async(True, True) + @mock_session_pools def test_recursion_limited(self): """ From fe8e926f03496bf40b0af4629a60ca64c5a3d77f Mon Sep 17 00:00:00 2001 From: Brad Schoening <5796692+bschoening@users.noreply.github.com> Date: Thu, 29 May 2025 14:21:46 -0400 Subject: [PATCH 1987/2005] Update geomet to align with requirements.txt (#1236) --- setup.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/setup.py b/setup.py index 79fda90eac..9058aafc52 100644 --- a/setup.py +++ b/setup.py @@ -74,7 +74,7 @@ def run(self): try: os.makedirs(path) - except: + except OSError: pass if has_subprocess: @@ -82,7 +82,7 @@ def run(self): # http://docs.cython.org/src/userguide/special_methods.html#docstrings import glob for f in glob.glob("cassandra/*.so"): - print("Removing '%s' to allow docs to run on pure python modules." %(f,)) + print("Removing '%s' to allow docs to run on pure python modules." % (f,)) os.unlink(f) # Build io extension to make import and docstrings work @@ -114,16 +114,19 @@ class BuildFailed(Exception): def __init__(self, ext): self.ext = ext + is_windows = sys.platform.startswith('win32') is_macos = sys.platform.startswith('darwin') murmur3_ext = Extension('cassandra.cmurmur3', sources=['cassandra/cmurmur3.c']) + def eval_env_var_as_array(varname): val = os.environ.get(varname) return None if not val else [v.strip() for v in val.split(',')] + DEFAULT_LIBEV_INCLUDES = ['/usr/include/libev', '/usr/local/include', '/opt/local/include', '/usr/include'] DEFAULT_LIBEV_LIBDIRS = ['/usr/local/lib', '/opt/local/lib', '/usr/lib64'] libev_includes = eval_env_var_as_array('CASS_DRIVER_LIBEV_INCLUDES') or DEFAULT_LIBEV_INCLUDES @@ -338,7 +341,7 @@ def pre_build_check(): # We must be able to initialize the compiler if it has that method if hasattr(compiler, "initialize"): compiler.initialize() - except: + except OSError: return False executables = [] @@ -383,7 +386,7 @@ def run_setup(extensions): else: sys.stderr.write("Bypassing Cython setup requirement\n") - dependencies = ['geomet>=0.1,<0.3'] + dependencies = ['geomet>=1.1'] _EXTRAS_REQUIRE = { 'graph': ['gremlinpython==3.4.6'], From eebca73df365c5a5d73f0fe6e40194159edbf51a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Danilo=20de=20Ara=C3=BAjo=20Rodrigues?= <102780828+daniloarodrigues@users.noreply.github.com> Date: Wed, 18 Jun 2025 00:20:30 -0300 Subject: [PATCH 1988/2005] Remove duplicated condition in primary key check (#1240) --- cassandra/cqlengine/management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 7ca30f9e15..b34b003f54 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -256,7 +256,7 @@ def _sync_table(model, connection=None): continue - if col.primary_key or col.primary_key: + if col.primary_key: msg = format_log_context("Cannot add primary key '{0}' (with db_field '{1}') to existing table {2}", keyspace=ks_name, connection=connection) raise CQLEngineException(msg.format(model_name, db_name, cf_name)) From 1fd9c828802140bf833c41a3c3b52d962095ed87 Mon Sep 17 00:00:00 2001 From: Lukasz Antoniak Date: Thu, 3 Jul 2025 00:37:50 +0200 Subject: [PATCH 1989/2005] PYTHON-1419 Connection failure to SNI endpoint when first host is unavailable (#1243) --- cassandra/connection.py | 23 +++++++++++++++++------ tests/unit/test_cluster.py | 29 +++++++++++++++++++++++++++++ tests/unit/test_endpoints.py | 12 ++++++++++++ 3 files changed, 58 insertions(+), 6 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index 4af6b0624a..2078252aca 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -245,9 +245,9 @@ def create(self, row): class SniEndPoint(EndPoint): """SNI Proxy EndPoint implementation.""" - def __init__(self, proxy_address, server_name, port=9042): + def __init__(self, proxy_address, server_name, port=9042, init_index=0): self._proxy_address = proxy_address - self._index = 0 + self._index = init_index self._resolved_address = None # resolved address self._port = port self._server_name = server_name @@ -267,8 +267,7 @@ def ssl_options(self): def resolve(self): try: - resolved_addresses = socket.getaddrinfo(self._proxy_address, self._port, - socket.AF_UNSPEC, socket.SOCK_STREAM) + resolved_addresses = self._resolve_proxy_addresses() except socket.gaierror: log.debug('Could not resolve sni proxy hostname "%s" ' 'with port %d' % (self._proxy_address, self._port)) @@ -280,6 +279,10 @@ def resolve(self): return self._resolved_address, self._port + def _resolve_proxy_addresses(self): + return socket.getaddrinfo(self._proxy_address, self._port, + socket.AF_UNSPEC, socket.SOCK_STREAM) + def __eq__(self, other): return (isinstance(other, SniEndPoint) and self.address == other.address and self.port == other.port and @@ -305,16 +308,24 @@ class SniEndPointFactory(EndPointFactory): def __init__(self, proxy_address, port): self._proxy_address = proxy_address self._port = port + # Initial lookup index to prevent all SNI endpoints to be resolved + # into the same starting IP address (which might not be available currently). + # If SNI resolves to 3 IPs, first endpoint will connect to first + # IP address, and subsequent resolutions to next IPs in round-robin + # fusion. + self._init_index = -1 def create(self, row): host_id = row.get("host_id") if host_id is None: raise ValueError("No host_id to create the SniEndPoint") - return SniEndPoint(self._proxy_address, str(host_id), self._port) + self._init_index += 1 + return SniEndPoint(self._proxy_address, str(host_id), self._port, self._init_index) def create_from_sni(self, sni): - return SniEndPoint(self._proxy_address, sni, self._port) + self._init_index += 1 + return SniEndPoint(self._proxy_address, sni, self._port, self._init_index) @total_ordering diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 90bcfbdca8..bc6ae90142 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -15,6 +15,7 @@ import logging import socket +import uuid from unittest.mock import patch, Mock @@ -22,6 +23,7 @@ InvalidRequest, Unauthorized, AuthenticationFailed, OperationTimedOut, UnsupportedOperation, RequestValidationException, ConfigurationException, ProtocolVersion from cassandra.cluster import _Scheduler, Session, Cluster, default_lbp_factory, \ ExecutionProfile, _ConfigMode, EXEC_PROFILE_DEFAULT +from cassandra.connection import SniEndPoint, SniEndPointFactory from cassandra.pool import Host from cassandra.policies import HostDistance, RetryPolicy, RoundRobinPolicy, DowngradingConsistencyRetryPolicy, SimpleConvictionPolicy from cassandra.query import SimpleStatement, named_tuple_factory, tuple_factory @@ -31,6 +33,7 @@ log = logging.getLogger(__name__) + class ExceptionTypeTest(unittest.TestCase): def test_exception_types(self): @@ -85,6 +88,12 @@ def test_exception_types(self): self.assertTrue(issubclass(UnsupportedOperation, DriverException)) +class MockOrderedPolicy(RoundRobinPolicy): + all_hosts = set() + + def make_query_plan(self, working_keyspace=None, query=None): + return sorted(self.all_hosts, key=lambda x: x.endpoint.ssl_options['server_hostname']) + class ClusterTest(unittest.TestCase): def test_tuple_for_contact_points(self): @@ -119,6 +128,26 @@ def test_requests_in_flight_threshold(self): for n in (0, mn, 128): self.assertRaises(ValueError, c.set_max_requests_per_connection, d, n) + # Validate that at least the default LBP can create a query plan with end points that resolve + # to different addresses initially. This may not be exactly how things play out in practice + # (the control connection will muck with this even if nothing else does) but it should be + # a pretty good approximation. + def test_query_plan_for_sni_contains_unique_addresses(self): + node_cnt = 5 + def _mocked_proxy_dns_resolution(self): + return [(socket.AF_UNIX, socket.SOCK_STREAM, 0, None, ('127.0.0.%s' % (i,), 9042)) for i in range(node_cnt)] + + c = Cluster() + lbp = c.load_balancing_policy + lbp.local_dc = "dc1" + factory = SniEndPointFactory("proxy.foo.bar", 9042) + for host in (Host(factory.create({"host_id": uuid.uuid4().hex, "dc": "dc1"}), SimpleConvictionPolicy) for _ in range(node_cnt)): + lbp.on_up(host) + with patch.object(SniEndPoint, '_resolve_proxy_addresses', _mocked_proxy_dns_resolution): + addrs = [host.endpoint.resolve() for host in lbp.make_query_plan()] + # single SNI endpoint should be resolved to multiple unique IP addresses + self.assertEqual(len(addrs), len(set(addrs))) + class SchedulerTest(unittest.TestCase): # TODO: this suite could be expanded; for now just adding a test covering a ticket diff --git a/tests/unit/test_endpoints.py b/tests/unit/test_endpoints.py index b0841962ca..4352afb9a5 100644 --- a/tests/unit/test_endpoints.py +++ b/tests/unit/test_endpoints.py @@ -65,3 +65,15 @@ def test_endpoint_resolve(self): for i in range(10): (address, _) = endpoint.resolve() self.assertEqual(address, next(it)) + + def test_sni_resolution_start_index(self): + factory = SniEndPointFactory("proxy.datastax.com", 9999) + initial_index = factory._init_index + + endpoint1 = factory.create_from_sni('sni1') + self.assertEqual(factory._init_index, initial_index + 1) + self.assertEqual(endpoint1._index, factory._init_index) + + endpoint2 = factory.create_from_sni('sni2') + self.assertEqual(factory._init_index, initial_index + 2) + self.assertEqual(endpoint2._index, factory._init_index) From b1dc94f65382784fce9aed3d68af7784f6cbcbc9 Mon Sep 17 00:00:00 2001 From: Brad Schoening <5796692+bschoening@users.noreply.github.com> Date: Thu, 3 Jul 2025 12:31:38 -0400 Subject: [PATCH 1990/2005] Remove Python 3.8 which reached EOL on October 2024, update Python runtimes, update cryptography lib to 42 (Q4 2024 release) (#1247) --- Jenkinsfile | 8 ++++---- README-dev.rst | 2 +- README.rst | 2 +- docs.yaml | 4 ---- docs/index.rst | 2 +- docs/installation.rst | 2 +- setup.py | 4 ++-- test-datastax-requirements.txt | 2 +- tox.ini | 4 ++-- 9 files changed, 13 insertions(+), 17 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 3457a779da..4c328d10d5 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -31,10 +31,10 @@ import com.datastax.jenkins.drivers.python.Slack slack = new Slack() -DEFAULT_CASSANDRA = ['3.0', '3.11', '4.0', '4.1', '5.0'] +DEFAULT_CASSANDRA = ['3.11', '4.0', '4.1', '5.0'] DEFAULT_DSE = ['dse-5.1.35', 'dse-6.8.30', 'dse-6.9.0'] DEFAULT_HCD = ['hcd-1.0.0'] -DEFAULT_RUNTIME = ['3.8.16', '3.9.16', '3.10.11', '3.11.3', '3.12.0'] +DEFAULT_RUNTIME = ['3.9.23', '3.10.18', '3.11.13', '3.12.11', '3.13.5'] DEFAULT_CYTHON = ["True", "False"] matrices = [ "FULL": [ @@ -674,8 +674,8 @@ pipeline { triggers { parameterizedCron(branchPatternCron().matcher(env.BRANCH_NAME).matches() ? """ # Every weeknight (Monday - Friday) around 4:00 AM - # These schedules will run with and without Cython enabled for Python 3.8.16 and 3.12.0 - H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.8.16 3.12.0;CI_SCHEDULE_SERVER_VERSION=3.11 4.0 5.0 dse-5.1.35 dse-6.8.30 dse-6.9.0 hcd-1.0.0 + # These schedules will run with and without Cython enabled for Python 3.9.23 and 3.13.5 + H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.9.23 3.13.5;CI_SCHEDULE_SERVER_VERSION=3.11 4.0 5.0 dse-5.1.35 dse-6.8.30 dse-6.9.0 hcd-1.0.0 """ : "") } diff --git a/README-dev.rst b/README-dev.rst index 7ffa52b0b1..c619b0beaf 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -166,7 +166,7 @@ it with the ``PROTOCOL_VERSION`` environment variable:: Testing Multiple Python Versions -------------------------------- -Use tox to test all of Python 3.8 through 3.12 and pypy (this is what TravisCI runs):: +Use tox to test all of Python 3.9 through 3.13 and pypy (this is what TravisCI runs):: tox diff --git a/README.rst b/README.rst index f8078d0a31..8426fa34a7 100644 --- a/README.rst +++ b/README.rst @@ -16,7 +16,7 @@ DataStax Driver for Apache Cassandra A modern, `feature-rich `_ and highly-tunable Python client library for Apache Cassandra (2.1+) and DataStax Enterprise (4.7+) using exclusively Cassandra's binary protocol and Cassandra Query Language v3. -The driver supports Python 3.8 through 3.12. +The driver supports Python 3.9 through 3.13. **Note:** DataStax products do not support big-endian systems. diff --git a/docs.yaml b/docs.yaml index c0f97a8c5a..63269a3001 100644 --- a/docs.yaml +++ b/docs.yaml @@ -2,10 +2,6 @@ title: DataStax Python Driver summary: DataStax Python Driver for Apache Cassandra® output: docs/_build/ swiftype_drivers: pythondrivers -checks: - external_links: - exclude: - - 'http://aka.ms/vcpython27' sections: - title: N/A prefix: / diff --git a/docs/index.rst b/docs/index.rst index 2370ccefaf..2fcaf43884 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -4,7 +4,7 @@ A Python client driver for `Apache Cassandra® `_. This driver works exclusively with the Cassandra Query Language v3 (CQL3) and Cassandra's native protocol. Cassandra 2.1+ is supported, including DSE 4.7+. -The driver supports Python 3.8 through 3.12. +The driver supports Python 3.9 through 3.13. This driver is open source under the `Apache v2 License `_. diff --git a/docs/installation.rst b/docs/installation.rst index c231f8e5d9..569717df53 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -3,7 +3,7 @@ Installation Supported Platforms ------------------- -Python 3.8 through 3.12 are supported. Both CPython (the standard Python +Python 3.9 through 3.13 are supported. Both CPython (the standard Python implementation) and `PyPy `_ are supported and tested. Linux, OSX, and Windows are supported. diff --git a/setup.py b/setup.py index 9058aafc52..2534754494 100644 --- a/setup.py +++ b/setup.py @@ -390,7 +390,7 @@ def run_setup(extensions): _EXTRAS_REQUIRE = { 'graph': ['gremlinpython==3.4.6'], - 'cle': ['cryptography>=35.0'] + 'cle': ['cryptography>=42.0'] } setup( @@ -424,11 +424,11 @@ def run_setup(extensions): 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python', - 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', + 'Programming Language :: Python :: 3.13', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Software Development :: Libraries :: Python Modules' diff --git a/test-datastax-requirements.txt b/test-datastax-requirements.txt index 038a8b571d..d605f6dc51 100644 --- a/test-datastax-requirements.txt +++ b/test-datastax-requirements.txt @@ -1,4 +1,4 @@ -r test-requirements.txt kerberos gremlinpython==3.4.6 -cryptography >= 35.0 +cryptography >= 42.0 diff --git a/tox.ini b/tox.ini index 6748a6e45f..e77835f0da 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py{38,39,310,311,312},pypy +envlist = py{39,310,311,312,313},pypy [base] deps = pytest @@ -12,7 +12,7 @@ deps = pytest kerberos futurist lz4 - cryptography>=35.0 + cryptography>=42.0 [testenv] deps = {[base]deps} From e9345607ef2f19c252e7f6dc5221bd3a2e642071 Mon Sep 17 00:00:00 2001 From: Brad Schoening <5796692+bschoening@users.noreply.github.com> Date: Thu, 3 Jul 2025 12:46:37 -0400 Subject: [PATCH 1991/2005] Remove obsolete urllib2 (#1248) In Python 2, you would import using import urllib2. In Python 3, you use import urllib.request. --- ez_setup.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/ez_setup.py b/ez_setup.py index 1c5e5b73a8..76e71057f0 100644 --- a/ez_setup.py +++ b/ez_setup.py @@ -20,6 +20,7 @@ import tarfile import optparse import subprocess +from urllib.request import urlopen from distutils import log @@ -148,10 +149,6 @@ def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, """ # making sure we use the absolute path to_dir = os.path.abspath(to_dir) - try: - from urllib.request import urlopen - except ImportError: - from urllib2 import urlopen tgz_name = "setuptools-%s.tar.gz" % version url = download_base + tgz_name saveto = os.path.join(to_dir, tgz_name) From 0979b897549de4578eda31dfd9e1e1a2f080c926 Mon Sep 17 00:00:00 2001 From: absurdfarce Date: Mon, 7 Jul 2025 10:02:03 -0500 Subject: [PATCH 1992/2005] Ninja fix: updating OS label in Jenkinsfile after upgrade to Focal for runner --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 4c328d10d5..9f4b20c681 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -680,7 +680,7 @@ pipeline { } environment { - OS_VERSION = 'ubuntu/bionic64/python-driver' + OS_VERSION = 'ubuntu/focal64/python-driver' CCM_ENVIRONMENT_SHELL = '/usr/local/bin/ccm_environment.sh' CCM_MAX_HEAP_SIZE = '1536M' JABBA_SHELL = '/usr/lib/jabba/jabba.sh' From 5d4fd2349119a3237ad351a96e7f2b3317159305 Mon Sep 17 00:00:00 2001 From: Alexandre Detiste Date: Thu, 14 Aug 2025 07:16:56 +0200 Subject: [PATCH 1993/2005] remove stale dependency on "sure" (#1227) --- setup.py | 2 +- test-requirements.txt | 1 - tests/integration/cqlengine/test_timestamp.py | 37 +++++++++---------- 3 files changed, 19 insertions(+), 21 deletions(-) diff --git a/setup.py b/setup.py index 2534754494..ef735b7566 100644 --- a/setup.py +++ b/setup.py @@ -416,7 +416,7 @@ def run_setup(extensions): include_package_data=True, install_requires=dependencies, extras_require=_EXTRAS_REQUIRE, - tests_require=['pytest', 'PyYAML', 'pytz', 'sure'], + tests_require=['pytest', 'PyYAML', 'pytz'], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', diff --git a/test-requirements.txt b/test-requirements.txt index 1074f4f60d..513451b496 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -3,7 +3,6 @@ scales pytest ccm>=3.1.5 pytz -sure pure-sasl twisted[tls] gevent diff --git a/tests/integration/cqlengine/test_timestamp.py b/tests/integration/cqlengine/test_timestamp.py index 6ddce91099..a1a711fc67 100644 --- a/tests/integration/cqlengine/test_timestamp.py +++ b/tests/integration/cqlengine/test_timestamp.py @@ -14,7 +14,6 @@ from datetime import timedelta, datetime from unittest import mock -import sure from uuid import uuid4 from cassandra.cqlengine import columns @@ -44,7 +43,7 @@ def test_batch_is_included(self): with BatchQuery(timestamp=timedelta(seconds=30)) as b: TestTimestampModel.batch(b).create(count=1) - "USING TIMESTAMP".should.be.within(m.call_args[0][0].query_string) + self.assertIn("USING TIMESTAMP", m.call_args[0][0].query_string) class CreateWithTimestampTest(BaseTimestampTest): @@ -56,27 +55,27 @@ def test_batch(self): query = m.call_args[0][0].query_string - query.should.match(r"INSERT.*USING TIMESTAMP") - query.should_not.match(r"TIMESTAMP.*INSERT") + self.assertRegex(query, r"INSERT.*USING TIMESTAMP") + self.assertNotRegex(query, r"TIMESTAMP.*INSERT") def test_timestamp_not_included_on_normal_create(self): with mock.patch.object(self.session, "execute") as m: TestTimestampModel.create(count=2) - "USING TIMESTAMP".shouldnt.be.within(m.call_args[0][0].query_string) + self.assertNotIn("USING TIMESTAMP", m.call_args[0][0].query_string) def test_timestamp_is_set_on_model_queryset(self): delta = timedelta(seconds=30) tmp = TestTimestampModel.timestamp(delta) - tmp._timestamp.should.equal(delta) + self.assertEqual(tmp._timestamp, delta) def test_non_batch_syntax_integration(self): tmp = TestTimestampModel.timestamp(timedelta(seconds=30)).create(count=1) - tmp.should.be.ok + self.assertIsNotNone(tmp) def test_non_batch_syntax_with_tll_integration(self): tmp = TestTimestampModel.timestamp(timedelta(seconds=30)).ttl(30).create(count=1) - tmp.should.be.ok + self.assertIsNotNone(tmp) def test_non_batch_syntax_unit(self): @@ -85,7 +84,7 @@ def test_non_batch_syntax_unit(self): query = m.call_args[0][0].query_string - "USING TIMESTAMP".should.be.within(query) + self.assertIn("USING TIMESTAMP", query) def test_non_batch_syntax_with_ttl_unit(self): @@ -95,7 +94,7 @@ def test_non_batch_syntax_with_ttl_unit(self): query = m.call_args[0][0].query_string - query.should.match(r"USING TTL \d* AND TIMESTAMP") + self.assertRegex(query, r"USING TTL \d* AND TIMESTAMP") class UpdateWithTimestampTest(BaseTimestampTest): @@ -109,7 +108,7 @@ def test_instance_update_includes_timestamp_in_query(self): with mock.patch.object(self.session, "execute") as m: self.instance.timestamp(timedelta(seconds=30)).update(count=2) - "USING TIMESTAMP".should.be.within(m.call_args[0][0].query_string) + self.assertIn("USING TIMESTAMP", m.call_args[0][0].query_string) def test_instance_update_in_batch(self): with mock.patch.object(self.session, "execute") as m: @@ -117,7 +116,7 @@ def test_instance_update_in_batch(self): self.instance.batch(b).timestamp(timedelta(seconds=30)).update(count=2) query = m.call_args[0][0].query_string - "USING TIMESTAMP".should.be.within(query) + self.assertIn("USING TIMESTAMP", query) class DeleteWithTimestampTest(BaseTimestampTest): @@ -129,7 +128,7 @@ def test_non_batch(self): uid = uuid4() tmp = TestTimestampModel.create(id=uid, count=1) - TestTimestampModel.get(id=uid).should.be.ok + self.assertIsNotNone(TestTimestampModel.get(id=uid)) tmp.timestamp(timedelta(seconds=5)).delete() @@ -143,15 +142,15 @@ def test_non_batch(self): # calling .timestamp sets the TS on the model tmp.timestamp(timedelta(seconds=5)) - tmp._timestamp.should.be.ok + self.assertIsNotNone(tmp._timestamp) # calling save clears the set timestamp tmp.save() - tmp._timestamp.shouldnt.be.ok + self.assertIsNone(tmp._timestamp) tmp.timestamp(timedelta(seconds=5)) tmp.update() - tmp._timestamp.shouldnt.be.ok + self.assertIsNone(tmp._timestamp) def test_blind_delete(self): """ @@ -160,7 +159,7 @@ def test_blind_delete(self): uid = uuid4() tmp = TestTimestampModel.create(id=uid, count=1) - TestTimestampModel.get(id=uid).should.be.ok + self.assertIsNotNone(TestTimestampModel.get(id=uid)) TestTimestampModel.objects(id=uid).timestamp(timedelta(seconds=5)).delete() @@ -179,7 +178,7 @@ def test_blind_delete_with_datetime(self): uid = uuid4() tmp = TestTimestampModel.create(id=uid, count=1) - TestTimestampModel.get(id=uid).should.be.ok + self.assertIsNotNone(TestTimestampModel.get(id=uid)) plus_five_seconds = datetime.now() + timedelta(seconds=5) @@ -197,7 +196,7 @@ def test_delete_in_the_past(self): uid = uuid4() tmp = TestTimestampModel.create(id=uid, count=1) - TestTimestampModel.get(id=uid).should.be.ok + self.assertIsNotNone(TestTimestampModel.get(id=uid)) # delete in the past, should not affect the object created above TestTimestampModel.objects(id=uid).timestamp(timedelta(seconds=-60)).delete() From 95cdc9ad74c167d548fd6d20d76f4198ec308bd3 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Thu, 18 Sep 2025 14:31:02 -0500 Subject: [PATCH 1994/2005] Make pipeline step fail when tests fail (#1257) Entire implementation by Joao Reis --- Jenkinsfile | 172 +++++++++++++++++++++++++++++++--------------------- 1 file changed, 102 insertions(+), 70 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 9f4b20c681..dc70acc6c5 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -270,93 +270,105 @@ def installDriverAndCompileExtensions() { def executeStandardTests() { - sh label: 'Execute unit tests', script: '''#!/bin/bash -lex - # Load CCM environment variables - set -o allexport - . ${HOME}/environment.txt - set +o allexport + try { + sh label: 'Execute unit tests', script: '''#!/bin/bash -lex + # Load CCM environment variables + set -o allexport + . ${HOME}/environment.txt + set +o allexport - EVENT_LOOP=${EVENT_LOOP} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=unit_results.xml tests/unit/ || true - EVENT_LOOP_MANAGER=eventlet VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || true - EVENT_LOOP_MANAGER=gevent VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || true - ''' + failure=0 + EVENT_LOOP=${EVENT_LOOP} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=unit_results.xml tests/unit/ || failure=1 + EVENT_LOOP_MANAGER=eventlet VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || failure=1 + EVENT_LOOP_MANAGER=gevent VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || failure=1 + exit $failure + ''' + } catch (err) { + currentBuild.result = 'UNSTABLE' + } - sh label: 'Execute Simulacron integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variables - set -o allexport - . ${HOME}/environment.txt - set +o allexport + try { + sh label: 'Execute Simulacron integration tests', script: '''#!/bin/bash -lex + # Load CCM environment variables + set -o allexport + . ${HOME}/environment.txt + set +o allexport - . ${JABBA_SHELL} - jabba use 1.8 + . ${JABBA_SHELL} + jabba use 1.8 - SIMULACRON_JAR="${HOME}/simulacron.jar" - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=test_backpressure.py --junit-xml=simulacron_results.xml tests/integration/simulacron/ || true + failure=0 + SIMULACRON_JAR="${HOME}/simulacron.jar" + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=test_backpressure.py --junit-xml=simulacron_results.xml tests/integration/simulacron/ || true - # Run backpressure tests separately to avoid memory issue - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=test_backpressure.py --junit-xml=simulacron_backpressure_1_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_paused_connections || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=test_backpressure.py --junit-xml=simulacron_backpressure_2_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_queued_requests_timeout || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=test_backpressure.py --junit-xml=simulacron_backpressure_3_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_cluster_busy || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=test_backpressure.py --junit-xml=simulacron_backpressure_4_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_node_busy || true - ''' + # Run backpressure tests separately to avoid memory issue + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=test_backpressure.py --junit-xml=simulacron_backpressure_1_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_paused_connections || failure=1 + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=test_backpressure.py --junit-xml=simulacron_backpressure_2_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_queued_requests_timeout || failure=1 + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=test_backpressure.py --junit-xml=simulacron_backpressure_3_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_cluster_busy || failure=1 + SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=test_backpressure.py --junit-xml=simulacron_backpressure_4_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_node_busy || failure=1 + exit $failure + ''' + } catch (err) { + currentBuild.result = 'UNSTABLE' + } - sh label: 'Execute CQL engine integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variables - set -o allexport - . ${HOME}/environment.txt - set +o allexport + try { + sh label: 'Execute CQL engine integration tests', script: '''#!/bin/bash -lex + # Load CCM environment variables + set -o allexport + . ${HOME}/environment.txt + set +o allexport - . ${JABBA_SHELL} - jabba use 1.8 + . ${JABBA_SHELL} + jabba use 1.8 - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=cqle_results.xml tests/integration/cqlengine/ || true - ''' + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=cqle_results.xml tests/integration/cqlengine/ + ''' + } catch (err) { + currentBuild.result = 'UNSTABLE' + } - sh label: 'Execute Apache CassandraⓇ integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variables - set -o allexport - . ${HOME}/environment.txt - set +o allexport + try { + sh label: 'Execute Apache CassandraⓇ integration tests', script: '''#!/bin/bash -lex + # Load CCM environment variables + set -o allexport + . ${HOME}/environment.txt + set +o allexport - . ${JABBA_SHELL} - jabba use 1.8 + . ${JABBA_SHELL} + jabba use 1.8 - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=standard_results.xml tests/integration/standard/ || true - ''' + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=standard_results.xml tests/integration/standard/ + ''' + } catch (err) { + currentBuild.result = 'UNSTABLE' + } if (env.CASSANDRA_VERSION.split('-')[0] == 'dse' && env.CASSANDRA_VERSION.split('-')[1] != '4.8') { if (env.PYTHON_VERSION =~ /3\.12\.\d+/) { echo "Cannot install DSE dependencies for Python 3.12.x. See PYTHON-1368 for more detail." } else { - sh label: 'Execute DataStax Enterprise integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - . ${JABBA_SHELL} - jabba use 1.8 - - EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} ADS_HOME="${HOME}/" VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=dse_results.xml tests/integration/advanced/ || true - ''' + try { + sh label: 'Execute DataStax Enterprise integration tests', script: '''#!/bin/bash -lex + # Load CCM environment variable + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + . ${JABBA_SHELL} + jabba use 1.8 + + EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} ADS_HOME="${HOME}/" VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=dse_results.xml tests/integration/advanced/ + ''' + } catch (err) { + currentBuild.result = 'UNSTABLE' + } } } - sh label: 'Execute DataStax Astra integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - . ${JABBA_SHELL} - jabba use 1.8 - - EVENT_LOOP=${EVENT_LOOP} CLOUD_PROXY_PATH="${HOME}/proxy/" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=advanced_results.xml tests/integration/cloud/ || true - ''' - - if (env.PROFILE == 'FULL') { - sh label: 'Execute long running integration tests', script: '''#!/bin/bash -lex + try { + sh label: 'Execute DataStax Astra integration tests', script: '''#!/bin/bash -lex # Load CCM environment variable set -o allexport . ${HOME}/environment.txt @@ -365,8 +377,28 @@ def executeStandardTests() { . ${JABBA_SHELL} jabba use 1.8 - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=tests/integration/long/upgrade --junit-xml=long_results.xml tests/integration/long/ || true + EVENT_LOOP=${EVENT_LOOP} CLOUD_PROXY_PATH="${HOME}/proxy/" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=advanced_results.xml tests/integration/cloud/ ''' + } catch (err) { + currentBuild.result = 'UNSTABLE' + } + + if (env.PROFILE == 'FULL') { + try { + sh label: 'Execute long running integration tests', script: '''#!/bin/bash -lex + # Load CCM environment variable + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + . ${JABBA_SHELL} + jabba use 1.8 + + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --ignore=tests/integration/long/upgrade --junit-xml=long_results.xml tests/integration/long/ + ''' + } catch (err) { + currentBuild.result = 'UNSTABLE' + } } } @@ -380,7 +412,7 @@ def executeDseSmokeTests() { . ${JABBA_SHELL} jabba use 1.8 - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} DSE_VERSION=${DSE_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=standard_results.xml tests/integration/standard/test_dse.py || true + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} DSE_VERSION=${DSE_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=standard_results.xml tests/integration/standard/test_dse.py ''' } @@ -404,7 +436,7 @@ def executeEventLoopTests() { "tests/integration/simulacron/test_endpoint.py" "tests/integration/long/test_ssl.py" ) - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=standard_results.xml ${EVENT_LOOP_TESTS[@]} || true + EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} HCD_VERSION=${HCD_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Xss384k" pytest -s -v --log-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --junit-xml=standard_results.xml ${EVENT_LOOP_TESTS[@]} ''' } From a9d21fc0d7005ad0466edeb31dc5fd2cf2dea12e Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Fri, 19 Sep 2025 11:33:18 -0500 Subject: [PATCH 1995/2005] Documentation cleanup (#1258) --- README.rst | 40 +++++++++++++++++------------------- cassandra/cluster.py | 2 +- cassandra/io/libevreactor.py | 2 +- docs/object_mapper.rst | 2 +- 4 files changed, 22 insertions(+), 24 deletions(-) diff --git a/README.rst b/README.rst index 8426fa34a7..f5085df79b 100644 --- a/README.rst +++ b/README.rst @@ -4,15 +4,14 @@ .. |version| image:: https://badge.fury.io/py/cassandra-driver.svg :target: https://badge.fury.io/py/cassandra-driver .. |pyversion| image:: https://img.shields.io/pypi/pyversions/cassandra-driver.svg +.. |travis| image:: https://api.travis-ci.com/datastax/python-driver.svg?branch=master + :target: https://travis-ci.com/github/datastax/python-driver -|license| |version| |pyversion| +|license| |version| |pyversion| |travis| DataStax Driver for Apache Cassandra ==================================== -.. image:: https://travis-ci.com/datastax/python-driver.png?branch=master - :target: https://travis-ci.com/github/datastax/python-driver - A modern, `feature-rich `_ and highly-tunable Python client library for Apache Cassandra (2.1+) and DataStax Enterprise (4.7+) using exclusively Cassandra's binary protocol and Cassandra Query Language v3. @@ -22,15 +21,15 @@ The driver supports Python 3.9 through 3.13. Features -------- -* `Synchronous `_ and `Asynchronous `_ APIs -* `Simple, Prepared, and Batch statements `_ +* `Synchronous `_ and `Asynchronous `_ APIs +* `Simple, Prepared, and Batch statements `_ * Asynchronous IO, parallel execution, request pipelining -* `Connection pooling `_ +* `Connection pooling `_ * Automatic node discovery -* `Automatic reconnection `_ -* Configurable `load balancing `_ and `retry policies `_ -* `Concurrent execution utilities `_ -* `Object mapper `_ +* `Automatic reconnection `_ +* Configurable `load balancing `_ and `retry policies `_ +* `Concurrent execution utilities `_ +* `Object mapper `_ * `Connecting to DataStax Astra database (cloud) `_ * DSE Graph execution API * DSE Geometric type serialization @@ -43,24 +42,24 @@ Installation through pip is recommended:: $ pip install cassandra-driver For more complete installation instructions, see the -`installation guide `_. +`installation guide `_. Documentation ------------- -The documentation can be found online `here `_. +The documentation can be found online `here `_. A couple of links for getting up to speed: -* `Installation `_ -* `Getting started guide `_ -* `API docs `_ -* `Performance tips `_ +* `Installation `_ +* `Getting started guide `_ +* `API docs `_ +* `Performance tips `_ Object Mapper ------------- cqlengine (originally developed by Blake Eggleston and Jon Haddad, with contributions from the community) is now maintained as an integral part of this package. Refer to -`documentation here `_. +`documentation here `_. Contributing ------------ @@ -79,9 +78,8 @@ If you would like to contribute, please feel free to open a pull request. Getting Help ------------ -Your best options for getting help with the driver are the -`mailing list `_ -and the `DataStax Community `_. +Your best options for getting help with the driver is the +`mailing list `_. License ------- diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 37dc6598bd..bb1e5da877 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1700,7 +1700,7 @@ def protocol_downgrade(self, host_endpoint, previous_version): log.warning("Downgrading core protocol version from %d to %d for %s. " "To avoid this, it is best practice to explicitly set Cluster(protocol_version) to the version supported by your cluster. " - "http://datastax.github.io/python-driver/api/cassandra/cluster.html#cassandra.cluster.Cluster.protocol_version", self.protocol_version, new_version, host_endpoint) + "https://docs.datastax.com/en/developer/python-driver/latest/api/cassandra/cluster.html#cassandra.cluster.Cluster.protocol_version", self.protocol_version, new_version, host_endpoint) self.protocol_version = new_version def connect(self, keyspace=None, wait_for_all_pools=False): diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index d28c6af0f1..76a830270d 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -29,7 +29,7 @@ "The C extension needed to use libev was not found. This " "probably means that you didn't have the required build dependencies " "when installing the driver. See " - "http://datastax.github.io/python-driver/installation.html#c-extensions " + "https://docs.datastax.com/en/developer/python-driver/latest/installation/index.html#c-extensions " "for instructions on installing build dependencies and building " "the C extension.") diff --git a/docs/object_mapper.rst b/docs/object_mapper.rst index 4366c0ad52..21d2954f4b 100644 --- a/docs/object_mapper.rst +++ b/docs/object_mapper.rst @@ -63,7 +63,7 @@ Getting Started description = columns.Text(required=False) #next, setup the connection to your cassandra server(s)... - # see http://datastax.github.io/python-driver/api/cassandra/cluster.html for options + # see https://docs.datastax.com/en/developer/python-driver/latest/api/cassandra/cluster.html for options # the list of hosts will be passed to create a Cluster() instance connection.setup(['127.0.0.1'], "cqlengine", protocol_version=3) From b144a84a1f97002c4545b335efaac719519cd9fa Mon Sep 17 00:00:00 2001 From: Daniel Bershatsky Date: Thu, 2 Oct 2025 01:49:25 +0300 Subject: [PATCH 1996/2005] Maintain compatibility with CPython 3.13 (#1242) --- cassandra/io/libevwrapper.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cassandra/io/libevwrapper.c b/cassandra/io/libevwrapper.c index 99e1df30f7..44612adb8a 100644 --- a/cassandra/io/libevwrapper.c +++ b/cassandra/io/libevwrapper.c @@ -665,9 +665,14 @@ initlibevwrapper(void) if (PyModule_AddObject(module, "Timer", (PyObject *)&libevwrapper_TimerType) == -1) INITERROR; +#if PY_MAJOR_VERSION < 3 && PY_MINOR_VERSION < 7 + // Since CPython 3.7, `Py_Initialize()` routing always initializes GIL. + // Routine `PyEval_ThreadsInitialized()` has been deprecated in CPython 3.7 + // and completely removed in CPython 3.13. if (!PyEval_ThreadsInitialized()) { PyEval_InitThreads(); } +#endif #if PY_MAJOR_VERSION >= 3 return module; From a5c6c5f890fca4e05940bc4b11ab5455f94b4277 Mon Sep 17 00:00:00 2001 From: absurdfarce Date: Thu, 2 Oct 2025 15:44:04 -0500 Subject: [PATCH 1997/2005] ninja-fix Slight tweak to conditional logic in last commit --- cassandra/io/libevwrapper.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/io/libevwrapper.c b/cassandra/io/libevwrapper.c index 44612adb8a..1555f7d330 100644 --- a/cassandra/io/libevwrapper.c +++ b/cassandra/io/libevwrapper.c @@ -665,7 +665,7 @@ initlibevwrapper(void) if (PyModule_AddObject(module, "Timer", (PyObject *)&libevwrapper_TimerType) == -1) INITERROR; -#if PY_MAJOR_VERSION < 3 && PY_MINOR_VERSION < 7 +#if PY_MAJOR_VERSION < 3 || (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION < 7) // Since CPython 3.7, `Py_Initialize()` routing always initializes GIL. // Routine `PyEval_ThreadsInitialized()` has been deprecated in CPython 3.7 // and completely removed in CPython 3.13. From dd2343d8fecead045245a6679d8de8194cea215f Mon Sep 17 00:00:00 2001 From: Brad Schoening <5796692+bschoening@users.noreply.github.com> Date: Mon, 6 Oct 2025 18:43:59 -0400 Subject: [PATCH 1998/2005] Removed 2.7 Cpython defines (#1252) --- cassandra/cmurmur3.c | 20 -------------------- cassandra/io/libevwrapper.c | 14 -------------- 2 files changed, 34 deletions(-) diff --git a/cassandra/cmurmur3.c b/cassandra/cmurmur3.c index bce513217d..4affdad46c 100644 --- a/cassandra/cmurmur3.c +++ b/cassandra/cmurmur3.c @@ -14,12 +14,6 @@ #include #include -#if PY_VERSION_HEX < 0x02050000 -typedef int Py_ssize_t; -#define PY_SSIZE_T_MAX INT_MAX -#define PY_SSIZE_T_MIN INT_MIN -#endif - #ifdef PYPY_VERSION #define COMPILING_IN_PYPY 1 #define COMPILING_IN_CPYTHON 0 @@ -216,8 +210,6 @@ static PyMethodDef cmurmur3_methods[] = { {NULL, NULL, 0, NULL} }; -#if PY_MAJOR_VERSION >= 3 - static int cmurmur3_traverse(PyObject *m, visitproc visit, void *arg) { Py_VISIT(GETSTATE(m)->error); return 0; @@ -245,18 +237,8 @@ static struct PyModuleDef moduledef = { PyObject * PyInit_cmurmur3(void) -#else -#define INITERROR return - -void -initcmurmur3(void) -#endif { -#if PY_MAJOR_VERSION >= 3 PyObject *module = PyModule_Create(&moduledef); -#else - PyObject *module = Py_InitModule("cmurmur3", cmurmur3_methods); -#endif struct module_state *st = NULL; if (module == NULL) @@ -269,7 +251,5 @@ initcmurmur3(void) INITERROR; } -#if PY_MAJOR_VERSION >= 3 return module; -#endif } diff --git a/cassandra/io/libevwrapper.c b/cassandra/io/libevwrapper.c index 1555f7d330..85ed551951 100644 --- a/cassandra/io/libevwrapper.c +++ b/cassandra/io/libevwrapper.c @@ -583,7 +583,6 @@ static PyMethodDef module_methods[] = { PyDoc_STRVAR(module_doc, "libev wrapper methods"); -#if PY_MAJOR_VERSION >= 3 static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "libevwrapper", @@ -600,13 +599,6 @@ static struct PyModuleDef moduledef = { PyObject * PyInit_libevwrapper(void) - -# else -# define INITERROR return - -void -initlibevwrapper(void) -#endif { PyObject *module = NULL; @@ -629,11 +621,7 @@ initlibevwrapper(void) if (PyType_Ready(&libevwrapper_TimerType) < 0) INITERROR; -# if PY_MAJOR_VERSION >= 3 module = PyModule_Create(&moduledef); -# else - module = Py_InitModule3("libevwrapper", module_methods, module_doc); -# endif if (module == NULL) INITERROR; @@ -674,7 +662,5 @@ initlibevwrapper(void) } #endif -#if PY_MAJOR_VERSION >= 3 return module; -#endif } From 27a1770933e490ff4282a0f6b098de472576ff14 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Tue, 21 Oct 2025 01:13:56 -0500 Subject: [PATCH 1999/2005] Release 3.29.3: changelog, version and documentation (#1259) --- CHANGELOG.rst | 24 ++++++++++++++++++++++++ cassandra/__init__.py | 2 +- docs/installation.rst | 2 +- 3 files changed, 26 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index bcb0fdb7a0..6da84ae7a4 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,27 @@ +3.29.3 +====== +October 20, 2025 + +Features +-------- +* Upgraded cython to 3.0.x (PR 1221 & PYTHON-1390) +* Add support for DSE 6.9.x and HCD releases to CI (PYTHON-1402) +* Add execute_concurrent_async and expose execute_concurrent_* in Session (PR 1229) + +Bug Fixes +--------- +* Update geomet to align with requirements.txt (PR 1236) +* Connection failure to SNI endpoint when first host is unavailable (PYTHON-1419) +* Maintain compatibility with CPython 3.13 (PR 1242) + +Others +------ +* Remove duplicated condition in primary key check (PR 1240) +* Remove Python 3.8 which reached EOL on Oct 2024, update cryptography lib to 42 (PR 1247) +* Remove obsolete urllib2 from ez_setup.py (PR 1248) +* Remove stale dependency on sure (PR 1227) +* Removed 2.7 Cpython defines (PR 1252) + 3.29.2 ====== September 9, 2024 diff --git a/cassandra/__init__.py b/cassandra/__init__.py index badefb29de..94772c6450 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 29, 2) +__version_info__ = (3, 29, 3) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs/installation.rst b/docs/installation.rst index 569717df53..be31551e79 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -26,7 +26,7 @@ To check if the installation was successful, you can run:: python -c 'import cassandra; print(cassandra.__version__)' -It should print something like "3.29.2". +It should print something like "3.29.3". .. _installation-datastax-graph: From 02e8270628398121bd53496ae1e029ad46b7f8a1 Mon Sep 17 00:00:00 2001 From: mck Date: Sat, 23 Aug 2025 11:46:21 +0200 Subject: [PATCH 2000/2005] Donation to Apache Cassandra and ASF Reassignment of copyright, where approved, and relicensing. New repository will be github.com/apache/cassandra-python-driver patch by Mick; reviewed by Bret McGuire for CASSPYTHON-2 --- NOTICE | 115 ++++++++++++++++++ README.rst | 6 +- benchmarks/base.py | 14 ++- benchmarks/callback_full_pipeline.py | 14 ++- benchmarks/future_batches.py | 14 ++- benchmarks/future_full_pipeline.py | 14 ++- benchmarks/future_full_throttle.py | 14 ++- benchmarks/sync.py | 14 ++- cassandra/__init__.py | 14 ++- cassandra/auth.py | 14 ++- cassandra/buffer.pxd | 14 ++- cassandra/bytesio.pxd | 14 ++- cassandra/bytesio.pyx | 14 ++- cassandra/cluster.py | 14 ++- cassandra/column_encryption/_policies.py | 14 ++- cassandra/column_encryption/policies.py | 14 ++- cassandra/concurrent.py | 14 ++- cassandra/connection.py | 14 ++- cassandra/cqlengine/__init__.py | 14 ++- cassandra/cqlengine/columns.py | 14 ++- cassandra/cqlengine/connection.py | 14 ++- cassandra/cqlengine/functions.py | 14 ++- cassandra/cqlengine/management.py | 14 ++- cassandra/cqlengine/models.py | 14 ++- cassandra/cqlengine/named.py | 14 ++- cassandra/cqlengine/operators.py | 14 ++- cassandra/cqlengine/query.py | 14 ++- cassandra/cqlengine/statements.py | 14 ++- cassandra/cqlengine/usertype.py | 14 ++- cassandra/cqltypes.py | 14 ++- cassandra/cython_marshal.pyx | 14 ++- cassandra/cython_utils.pyx | 14 ++- cassandra/datastax/__init__.py | 14 ++- cassandra/datastax/cloud/__init__.py | 14 ++- cassandra/datastax/graph/__init__.py | 14 ++- cassandra/datastax/graph/fluent/__init__.py | 14 ++- .../datastax/graph/fluent/_predicates.py | 14 ++- cassandra/datastax/graph/fluent/_query.py | 14 ++- .../datastax/graph/fluent/_serializers.py | 14 ++- cassandra/datastax/graph/fluent/predicates.py | 14 ++- cassandra/datastax/graph/fluent/query.py | 14 ++- .../datastax/graph/fluent/serializers.py | 14 ++- cassandra/datastax/graph/graphson.py | 14 ++- cassandra/datastax/graph/query.py | 14 ++- cassandra/datastax/graph/types.py | 14 ++- cassandra/datastax/insights/__init__.py | 14 ++- cassandra/datastax/insights/registry.py | 14 ++- cassandra/datastax/insights/reporter.py | 14 ++- cassandra/datastax/insights/serializers.py | 14 ++- cassandra/datastax/insights/util.py | 14 ++- cassandra/deserializers.pxd | 14 ++- cassandra/deserializers.pyx | 14 ++- cassandra/encoder.py | 14 ++- cassandra/graph/__init__.py | 14 ++- cassandra/graph/graphson.py | 14 ++- cassandra/graph/query.py | 14 ++- cassandra/graph/types.py | 14 ++- cassandra/io/__init__.py | 14 ++- cassandra/io/asyncorereactor.py | 14 ++- cassandra/io/eventletreactor.py | 14 ++- cassandra/io/geventreactor.py | 14 ++- cassandra/io/libevreactor.py | 14 ++- cassandra/io/twistedreactor.py | 14 ++- cassandra/ioutils.pyx | 14 ++- cassandra/marshal.py | 14 ++- cassandra/metadata.py | 14 ++- cassandra/metrics.py | 14 ++- cassandra/numpy_parser.pyx | 14 ++- cassandra/obj_parser.pyx | 14 ++- cassandra/parsing.pxd | 14 ++- cassandra/parsing.pyx | 14 ++- cassandra/policies.py | 14 ++- cassandra/pool.py | 14 ++- cassandra/protocol.py | 14 ++- cassandra/query.py | 14 ++- cassandra/row_parser.pyx | 14 ++- cassandra/segment.py | 14 ++- cassandra/timestamps.py | 14 ++- cassandra/tuple.pxd | 14 ++- cassandra/type_codes.pxd | 14 ++- cassandra/util.py | 14 ++- example_core.py | 14 ++- example_mapper.py | 14 ++- .../execute_async_with_queue.py | 14 ++- .../execute_with_threads.py | 14 ++- examples/request_init_listener.py | 14 ++- setup.py | 16 +-- tests/__init__.py | 14 ++- tests/integration/__init__.py | 14 ++- tests/integration/advanced/__init__.py | 14 ++- tests/integration/advanced/graph/__init__.py | 14 ++- .../advanced/graph/fluent/__init__.py | 14 ++- .../advanced/graph/fluent/test_graph.py | 14 ++- .../fluent/test_graph_explicit_execution.py | 14 ++- .../fluent/test_graph_implicit_execution.py | 14 ++- .../advanced/graph/fluent/test_search.py | 14 ++- .../integration/advanced/graph/test_graph.py | 14 ++- .../advanced/graph/test_graph_cont_paging.py | 14 ++- .../advanced/graph/test_graph_datatype.py | 14 ++- .../advanced/graph/test_graph_query.py | 14 ++- .../integration/advanced/test_adv_metadata.py | 14 ++- tests/integration/advanced/test_auth.py | 14 ++- .../integration/advanced/test_cont_paging.py | 14 ++- .../test_cqlengine_where_operators.py | 14 ++- tests/integration/advanced/test_geometry.py | 14 ++- tests/integration/advanced/test_spark.py | 14 ++- tests/integration/cqlengine/__init__.py | 14 ++- .../cqlengine/advanced/__init__.py | 14 ++- .../cqlengine/advanced/test_cont_paging.py | 14 ++- tests/integration/cqlengine/base.py | 14 ++- .../integration/cqlengine/columns/__init__.py | 14 ++- .../columns/test_container_columns.py | 14 ++- .../cqlengine/columns/test_counter_column.py | 14 ++- .../cqlengine/columns/test_static_column.py | 14 ++- .../cqlengine/columns/test_validation.py | 14 ++- .../cqlengine/columns/test_value_io.py | 14 ++- .../cqlengine/connections/__init__.py | 14 ++- .../cqlengine/connections/test_connection.py | 14 ++- .../cqlengine/management/__init__.py | 14 ++- .../management/test_compaction_settings.py | 14 ++- .../cqlengine/management/test_management.py | 14 ++- tests/integration/cqlengine/model/__init__.py | 14 ++- .../model/test_class_construction.py | 14 ++- .../model/test_equality_operations.py | 14 ++- .../integration/cqlengine/model/test_model.py | 14 ++- .../cqlengine/model/test_model_io.py | 14 ++- .../cqlengine/model/test_polymorphism.py | 14 ++- .../integration/cqlengine/model/test_udts.py | 14 ++- .../cqlengine/model/test_updates.py | 14 ++- .../cqlengine/model/test_value_lists.py | 14 ++- .../cqlengine/operators/__init__.py | 14 ++- .../operators/test_where_operators.py | 14 ++- tests/integration/cqlengine/query/__init__.py | 14 ++- .../cqlengine/query/test_batch_query.py | 14 ++- .../cqlengine/query/test_datetime_queries.py | 14 ++- .../integration/cqlengine/query/test_named.py | 14 ++- .../cqlengine/query/test_queryoperators.py | 14 ++- .../cqlengine/query/test_queryset.py | 14 ++- .../cqlengine/query/test_updates.py | 14 ++- .../cqlengine/statements/__init__.py | 14 ++- .../statements/test_assignment_clauses.py | 14 ++- .../cqlengine/statements/test_base_clause.py | 14 ++- .../statements/test_base_statement.py | 14 ++- .../statements/test_delete_statement.py | 14 ++- .../statements/test_insert_statement.py | 14 ++- .../statements/test_select_statement.py | 14 ++- .../statements/test_update_statement.py | 14 ++- .../cqlengine/statements/test_where_clause.py | 14 ++- .../integration/cqlengine/test_batch_query.py | 14 ++- .../integration/cqlengine/test_connections.py | 14 ++- .../integration/cqlengine/test_consistency.py | 14 ++- .../cqlengine/test_context_query.py | 14 ++- tests/integration/cqlengine/test_ifexists.py | 14 ++- .../integration/cqlengine/test_ifnotexists.py | 14 ++- .../cqlengine/test_lwt_conditional.py | 14 ++- tests/integration/cqlengine/test_timestamp.py | 14 ++- tests/integration/cqlengine/test_ttl.py | 14 ++- tests/integration/datatype_utils.py | 14 ++- tests/integration/long/__init__.py | 14 ++- tests/integration/long/test_consistency.py | 14 ++- tests/integration/long/test_failure_types.py | 14 ++- tests/integration/long/test_ipv6.py | 14 ++- tests/integration/long/test_large_data.py | 14 ++- .../long/test_loadbalancingpolicies.py | 14 ++- tests/integration/long/test_policies.py | 14 ++- tests/integration/long/test_schema.py | 14 ++- tests/integration/long/test_ssl.py | 14 ++- tests/integration/long/utils.py | 14 ++- .../simulacron/advanced/__init__.py | 14 ++- .../simulacron/advanced/test_insights.py | 14 ++- .../simulacron/test_backpressure.py | 14 ++- tests/integration/simulacron/test_cluster.py | 14 ++- .../integration/simulacron/test_connection.py | 14 ++- .../simulacron/test_empty_column.py | 14 ++- tests/integration/simulacron/test_endpoint.py | 14 ++- tests/integration/simulacron/test_policies.py | 14 ++- tests/integration/standard/__init__.py | 14 ++- .../column_encryption/test_policies.py | 14 ++- .../standard/test_authentication.py | 14 ++- .../test_authentication_misconfiguration.py | 14 ++- .../standard/test_client_warnings.py | 14 ++- tests/integration/standard/test_cluster.py | 14 ++- tests/integration/standard/test_concurrent.py | 14 ++- tests/integration/standard/test_connection.py | 14 ++- .../standard/test_custom_cluster.py | 14 ++- .../standard/test_custom_payload.py | 14 ++- .../standard/test_custom_protocol_handler.py | 14 ++- tests/integration/standard/test_dse.py | 14 ++- tests/integration/standard/test_metadata.py | 14 ++- tests/integration/standard/test_metrics.py | 14 ++- tests/integration/standard/test_policies.py | 14 ++- .../standard/test_prepared_statements.py | 14 ++- tests/integration/standard/test_query.py | 14 ++- .../integration/standard/test_query_paging.py | 14 ++- tests/integration/standard/test_routing.py | 14 ++- .../standard/test_row_factories.py | 14 ++- .../standard/test_single_interface.py | 14 ++- tests/integration/standard/test_types.py | 14 ++- tests/integration/standard/test_udts.py | 14 ++- tests/integration/upgrade/__init__.py | 14 ++- tests/integration/upgrade/test_upgrade.py | 14 ++- tests/integration/util.py | 14 ++- tests/stress_tests/test_load.py | 14 ++- tests/stress_tests/test_multi_inserts.py | 14 ++- tests/unit/__init__.py | 14 ++- tests/unit/advanced/__init__.py | 14 ++- tests/unit/advanced/test_auth.py | 14 ++- tests/unit/advanced/test_execution_profile.py | 14 ++- tests/unit/advanced/test_geometry.py | 14 ++- tests/unit/advanced/test_graph.py | 14 ++- tests/unit/advanced/test_insights.py | 14 ++- tests/unit/advanced/test_metadata.py | 14 ++- tests/unit/advanced/test_policies.py | 14 ++- tests/unit/column_encryption/test_policies.py | 14 ++- tests/unit/cqlengine/__init__.py | 14 ++- tests/unit/cqlengine/test_columns.py | 14 ++- tests/unit/cqlengine/test_connection.py | 14 ++- tests/unit/cqlengine/test_udt.py | 14 ++- tests/unit/cython/__init__.py | 14 ++- tests/unit/cython/bytesio_testhelper.pyx | 14 ++- tests/unit/cython/test_bytesio.py | 14 ++- tests/unit/cython/test_types.py | 14 ++- tests/unit/cython/test_utils.py | 14 ++- tests/unit/cython/types_testhelper.pyx | 14 ++- tests/unit/cython/utils.py | 14 ++- tests/unit/cython/utils_testhelper.pyx | 14 ++- tests/unit/io/__init__.py | 14 ++- tests/unit/io/eventlet_utils.py | 14 ++- tests/unit/io/gevent_utils.py | 14 ++- tests/unit/io/test_asyncorereactor.py | 14 ++- tests/unit/io/test_eventletreactor.py | 14 ++- tests/unit/io/test_geventreactor.py | 14 ++- tests/unit/io/test_libevreactor.py | 14 ++- tests/unit/io/test_twistedreactor.py | 14 ++- tests/unit/io/utils.py | 14 ++- tests/unit/test_auth.py | 14 ++- tests/unit/test_cluster.py | 14 ++- tests/unit/test_concurrent.py | 14 ++- tests/unit/test_connection.py | 14 ++- tests/unit/test_control_connection.py | 14 ++- tests/unit/test_exception.py | 14 ++- tests/unit/test_host_connection_pool.py | 14 ++- tests/unit/test_marshalling.py | 14 ++- tests/unit/test_metadata.py | 14 ++- tests/unit/test_orderedmap.py | 14 ++- tests/unit/test_parameter_binding.py | 14 ++- tests/unit/test_policies.py | 14 ++- tests/unit/test_protocol.py | 14 ++- tests/unit/test_query.py | 14 ++- tests/unit/test_response_future.py | 14 ++- tests/unit/test_resultset.py | 14 ++- tests/unit/test_row_factories.py | 14 ++- tests/unit/test_segment.py | 14 ++- tests/unit/test_sortedset.py | 14 ++- tests/unit/test_time_util.py | 14 ++- tests/unit/test_timestamps.py | 14 ++- tests/unit/test_types.py | 14 ++- tests/unit/test_util_types.py | 14 ++- tests/unit/utils.py | 14 ++- tests/util.py | 14 ++- 260 files changed, 2183 insertions(+), 1552 deletions(-) create mode 100644 NOTICE diff --git a/NOTICE b/NOTICE new file mode 100644 index 0000000000..58250f616b --- /dev/null +++ b/NOTICE @@ -0,0 +1,115 @@ +Apache Cassandra Python Driver +Copyright 2013 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + +This product originates, before git sha +5d4fd2349119a3237ad351a96e7f2b3317159305, from software from DataStax and other +individual contributors. All work was previously copyrighted to DataStax. + +Non-DataStax contributors are listed below. Those marked with asterisk have +explicitly consented to their contributions being donated to the ASF. + +a-detiste Alexandre Detiste * +a-lst Andrey Istochkin * +aboudreault Alan Boudreault alan@alanb.ca +advance512 Alon Diamant diamant.alon@gmail.com * +alanjds Alan Justino da Silva alan.justino@yahoo.com.br * +alistair-broomhead Alistair Broomhead * +amygdalama Amy Hanlon * +andy-slac Andy Salnikov * +andy8zhao Andy Zhao +anthony-cervantes Anthony Cervantes anthony@cervantes.io * +BackEndTea Gert de Pagter * +barvinograd Bar Vinograd +bbirand Berk Birand +bergundy Roey Berman roey.berman@gmail.com * +bohdantan * +codesnik Alexey Trofimenko aronaxis@gmail.com * +coldeasy coldeasy +DanieleSalatti Daniele Salatti me@danielesalatti.com * +daniloarodrigues Danilo de Araújo Rodrigues * +daubman Aaron Daubman github@ajd.us * +dcosson Danny Cosson dcosson@gmail.com * +detzgk Eli Green eli@zigr.org * +devdazed Russ Bradberry * +dizpers Dmitry Belaventsev dizpers@gmail.com +dkropachev Dmitry Kropachev dmitry.kropachev@gmail.com * +dmglab Daniel dmg.lab@outlook.com +dokai Kai Lautaportti * +eamanu Emmanuel Arias eamanu@yaerobi.com +figpope Andrew FigPope andrew.figpope@gmail.com * +flupke Luper Rouch * +frensjan Frens Jan Rumph * +frew Fred Wulff frew@cs.stanford.edu * +gdoermann Greg Doermann +haaawk Piotr Jastrzębski +ikapl Irina Kaplounova +ittus Thang Minh Vu * +JeremyOT Jeremy Olmsted-Thompson * +jeremyschlatter Jeremy Schlatter * +jpuerta Ernesto Puerta * +julien-duponchelle Julien Duponchelle julien@duponchelle.info * +justinsb Justin Santa Barbara justinsb@google.com * +Kami Tomaz Muraus tomaz@tomaz.me +kandul Michał Kandulski michal.kandulski@gmail.com +kdeldycke Kevin Deldycke * +kishkaru Kishan Karunaratne kishan@karu.io * +kracekumar Kracekumar kracethekingmaker@gmail.com +lenards Andrew Lenards andrew.lenards@gmail.com * +lenolib +Lifto Ellis Low +Lorak-mmk Karol Baryła git@baryla.org * +lukaselmer Lukas Elmer lukas.elmer@gmail.com * +mahall Michael Hall +markflorisson Mark Florisson * +mattrobenolt Matt Robenolt m@robenolt.com * +mattstibbs Matt Stibbs * +Mhs-220 Mo Shahmohammadi hos1377@gmail.com * +mikeokner Mike Okner * +Mishail Mikhail Stepura mstepura@apple.com * +mission-liao mission.liao missionaryliao@gmail.com * +mkocikowski Mik Kocikowski +Mokto Théo Mathieu * +mrk-its Mariusz Kryński * +multani Jonathan Ballet jon@multani.info * +niklaskorz Niklas Korz * +nisanharamati nisanharamati +nschrader Nick Schrader nick.schrader@mailbox.org * +Orenef11 Oren Efraimov * +oz123 Oz Tiram * +pistolero Sergii Kyryllov * +pmcnett Paul McNett p@ulmcnett.com * +psarna Piotr Sarna * +r4fek Rafał Furmański * +raopm +rbranson Rick Branson * +rqx Roman Khanenko * +rtb-zla-karma xyz * +sigmunau +silviot Silvio Tomatis +sontek John Anderson sontek@gmail.com * +stanhu Stan Hu +stefanor Stefano Rivera stefanor@debian.org * +strixcuriosus Ash Hoover strixcuriosus@gmail.com +tarzanjw Học Đỗ hoc3010@gmail.com +tbarbugli Tommaso Barbugli +tchaikov Kefu Chai tchaikov@gmail.com * +tglines Travis Glines +thoslin Tom Lin +tigrus Nikolay Fominykh nikolayfn@gmail.com +timgates42 Tim Gates +timsavage Tim Savage * +tirkarthi Karthikeyan Singaravelan tir.karthi@gmail.com * +Trundle Andreas Stührk andy@hammerhartes.de +ubombi Vitalii Kozlovskyi vitalii@kozlovskyi.dev * +ultrabug Ultrabug * +vetal4444 Shevchenko Vitaliy * +victorpoluceno Victor Godoy Poluceno victorpoluceno@gmail.com +weisslj Johannes Weißl * +wenheping wenheping wenheping2000@hotmail.com +yi719 +yinyin Yinyin * +yriveiro Yago Riveiro * \ No newline at end of file diff --git a/README.rst b/README.rst index f5085df79b..955d07260d 100644 --- a/README.rst +++ b/README.rst @@ -9,8 +9,8 @@ |license| |version| |pyversion| |travis| -DataStax Driver for Apache Cassandra -==================================== +Apache Cassandra Python Driver +============================== A modern, `feature-rich `_ and highly-tunable Python client library for Apache Cassandra (2.1+) and DataStax Enterprise (4.7+) using exclusively Cassandra's binary protocol and Cassandra Query Language v3. @@ -83,7 +83,7 @@ Your best options for getting help with the driver is the License ------- -Copyright DataStax, Inc. +Copyright 2013 The Apache Software Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/benchmarks/base.py b/benchmarks/base.py index 47a03bbd68..290ba28788 100644 --- a/benchmarks/base.py +++ b/benchmarks/base.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/benchmarks/callback_full_pipeline.py b/benchmarks/callback_full_pipeline.py index a4a4c33315..5eafa5df8b 100644 --- a/benchmarks/callback_full_pipeline.py +++ b/benchmarks/callback_full_pipeline.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/benchmarks/future_batches.py b/benchmarks/future_batches.py index de4484e617..112cc24981 100644 --- a/benchmarks/future_batches.py +++ b/benchmarks/future_batches.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/benchmarks/future_full_pipeline.py b/benchmarks/future_full_pipeline.py index 901573c18e..ca95b742d2 100644 --- a/benchmarks/future_full_pipeline.py +++ b/benchmarks/future_full_pipeline.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/benchmarks/future_full_throttle.py b/benchmarks/future_full_throttle.py index b4ba951c28..f85eb99b0d 100644 --- a/benchmarks/future_full_throttle.py +++ b/benchmarks/future_full_throttle.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/benchmarks/sync.py b/benchmarks/sync.py index 96e744f700..090a265579 100644 --- a/benchmarks/sync.py +++ b/benchmarks/sync.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 94772c6450..6d0744aa6e 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/auth.py b/cassandra/auth.py index 01c1ba444a..86759afe4d 100644 --- a/cassandra/auth.py +++ b/cassandra/auth.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/buffer.pxd b/cassandra/buffer.pxd index 0bbb1d5f57..3383fcd272 100644 --- a/cassandra/buffer.pxd +++ b/cassandra/buffer.pxd @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/bytesio.pxd b/cassandra/bytesio.pxd index d52d3fa8fe..24320f0ae1 100644 --- a/cassandra/bytesio.pxd +++ b/cassandra/bytesio.pxd @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/bytesio.pyx b/cassandra/bytesio.pyx index 1a57911fcf..d9781035ef 100644 --- a/cassandra/bytesio.pyx +++ b/cassandra/bytesio.pyx @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/cluster.py b/cassandra/cluster.py index bb1e5da877..43066f73f0 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/column_encryption/_policies.py b/cassandra/column_encryption/_policies.py index ef8097bfbd..e1519f6b79 100644 --- a/cassandra/column_encryption/_policies.py +++ b/cassandra/column_encryption/_policies.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/column_encryption/policies.py b/cassandra/column_encryption/policies.py index 770084bd48..a1bd25d3e6 100644 --- a/cassandra/column_encryption/policies.py +++ b/cassandra/column_encryption/policies.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index 607c29c332..394f462fa4 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/connection.py b/cassandra/connection.py index 2078252aca..4a16c46ab4 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/cqlengine/__init__.py b/cassandra/cqlengine/__init__.py index b9466e961b..200d04b831 100644 --- a/cassandra/cqlengine/__init__.py +++ b/cassandra/cqlengine/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index b8248e99ac..7d50687d95 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 60aee8d9e7..55437d7b7f 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/cqlengine/functions.py b/cassandra/cqlengine/functions.py index 606f5bc330..69bdc3feb4 100644 --- a/cassandra/cqlengine/functions.py +++ b/cassandra/cqlengine/functions.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index b34b003f54..66b391b714 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index ff57d7c53e..f0f5a207ec 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/cqlengine/named.py b/cassandra/cqlengine/named.py index 265d5c91e4..219155818c 100644 --- a/cassandra/cqlengine/named.py +++ b/cassandra/cqlengine/named.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/cqlengine/operators.py b/cassandra/cqlengine/operators.py index 2adf51758d..a9e7db2545 100644 --- a/cassandra/cqlengine/operators.py +++ b/cassandra/cqlengine/operators.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index ee09e70d2d..329bc7fade 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 75e4426a5f..b20b07ef56 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/cqlengine/usertype.py b/cassandra/cqlengine/usertype.py index 7fa85f1919..e96534f9c6 100644 --- a/cassandra/cqlengine/usertype.py +++ b/cassandra/cqlengine/usertype.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 4c3af57887..5e063a0141 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/cython_marshal.pyx b/cassandra/cython_marshal.pyx index 0a926b6eef..4733a47935 100644 --- a/cassandra/cython_marshal.pyx +++ b/cassandra/cython_marshal.pyx @@ -1,12 +1,14 @@ # -- cython: profile=True # -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/cython_utils.pyx b/cassandra/cython_utils.pyx index 7539f33f31..1b6a136c69 100644 --- a/cassandra/cython_utils.pyx +++ b/cassandra/cython_utils.pyx @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/datastax/__init__.py b/cassandra/datastax/__init__.py index 2c9ca172f8..635f0d9e60 100644 --- a/cassandra/datastax/__init__.py +++ b/cassandra/datastax/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/datastax/cloud/__init__.py b/cassandra/datastax/cloud/__init__.py index 0f042ff1c8..e175b2928b 100644 --- a/cassandra/datastax/cloud/__init__.py +++ b/cassandra/datastax/cloud/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/datastax/graph/__init__.py b/cassandra/datastax/graph/__init__.py index 11785c84f6..8315843a36 100644 --- a/cassandra/datastax/graph/__init__.py +++ b/cassandra/datastax/graph/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/datastax/graph/fluent/__init__.py b/cassandra/datastax/graph/fluent/__init__.py index 92f148721e..0dfd5230e5 100644 --- a/cassandra/datastax/graph/fluent/__init__.py +++ b/cassandra/datastax/graph/fluent/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/datastax/graph/fluent/_predicates.py b/cassandra/datastax/graph/fluent/_predicates.py index 95bd533d5e..1c7825455a 100644 --- a/cassandra/datastax/graph/fluent/_predicates.py +++ b/cassandra/datastax/graph/fluent/_predicates.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/datastax/graph/fluent/_query.py b/cassandra/datastax/graph/fluent/_query.py index d5eb7f6373..c476653541 100644 --- a/cassandra/datastax/graph/fluent/_query.py +++ b/cassandra/datastax/graph/fluent/_query.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/datastax/graph/fluent/_serializers.py b/cassandra/datastax/graph/fluent/_serializers.py index 83b3afb22d..b6c705771f 100644 --- a/cassandra/datastax/graph/fluent/_serializers.py +++ b/cassandra/datastax/graph/fluent/_serializers.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/datastax/graph/fluent/predicates.py b/cassandra/datastax/graph/fluent/predicates.py index 6bfd6b3113..8dca8b84ce 100644 --- a/cassandra/datastax/graph/fluent/predicates.py +++ b/cassandra/datastax/graph/fluent/predicates.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/datastax/graph/fluent/query.py b/cassandra/datastax/graph/fluent/query.py index c5026cc046..f599f2c979 100644 --- a/cassandra/datastax/graph/fluent/query.py +++ b/cassandra/datastax/graph/fluent/query.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/datastax/graph/fluent/serializers.py b/cassandra/datastax/graph/fluent/serializers.py index 680e613edf..3c175f92d4 100644 --- a/cassandra/datastax/graph/fluent/serializers.py +++ b/cassandra/datastax/graph/fluent/serializers.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/datastax/graph/graphson.py b/cassandra/datastax/graph/graphson.py index 335c7f7825..7b284c4c26 100644 --- a/cassandra/datastax/graph/graphson.py +++ b/cassandra/datastax/graph/graphson.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/datastax/graph/query.py b/cassandra/datastax/graph/query.py index 866df7a94c..d5f2a594b3 100644 --- a/cassandra/datastax/graph/query.py +++ b/cassandra/datastax/graph/query.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/datastax/graph/types.py b/cassandra/datastax/graph/types.py index 9817c99d7d..75902c6622 100644 --- a/cassandra/datastax/graph/types.py +++ b/cassandra/datastax/graph/types.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/datastax/insights/__init__.py b/cassandra/datastax/insights/__init__.py index 2c9ca172f8..635f0d9e60 100644 --- a/cassandra/datastax/insights/__init__.py +++ b/cassandra/datastax/insights/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/datastax/insights/registry.py b/cassandra/datastax/insights/registry.py index 03daebd86e..523af4dc84 100644 --- a/cassandra/datastax/insights/registry.py +++ b/cassandra/datastax/insights/registry.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/datastax/insights/reporter.py b/cassandra/datastax/insights/reporter.py index 83205fc458..607c723a1a 100644 --- a/cassandra/datastax/insights/reporter.py +++ b/cassandra/datastax/insights/reporter.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/datastax/insights/serializers.py b/cassandra/datastax/insights/serializers.py index 289c165e8a..b1fe0ac5e9 100644 --- a/cassandra/datastax/insights/serializers.py +++ b/cassandra/datastax/insights/serializers.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/datastax/insights/util.py b/cassandra/datastax/insights/util.py index a483b3f64d..0ce96c7edf 100644 --- a/cassandra/datastax/insights/util.py +++ b/cassandra/datastax/insights/util.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/deserializers.pxd b/cassandra/deserializers.pxd index 7b307226ad..c8408a57b6 100644 --- a/cassandra/deserializers.pxd +++ b/cassandra/deserializers.pxd @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/deserializers.pyx b/cassandra/deserializers.pyx index 7c256674b0..c07d67be91 100644 --- a/cassandra/deserializers.pyx +++ b/cassandra/deserializers.pyx @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/encoder.py b/cassandra/encoder.py index e834550fd3..94093e85b6 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/graph/__init__.py b/cassandra/graph/__init__.py index 51bd1de16a..1d33345aad 100644 --- a/cassandra/graph/__init__.py +++ b/cassandra/graph/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/graph/graphson.py b/cassandra/graph/graphson.py index d37c172a6b..576d5063fe 100644 --- a/cassandra/graph/graphson.py +++ b/cassandra/graph/graphson.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/graph/query.py b/cassandra/graph/query.py index 50eef72ad0..9003fe280f 100644 --- a/cassandra/graph/query.py +++ b/cassandra/graph/query.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/graph/types.py b/cassandra/graph/types.py index c8b613f8e4..53febe7e9c 100644 --- a/cassandra/graph/types.py +++ b/cassandra/graph/types.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/io/__init__.py b/cassandra/io/__init__.py index 386372eb4a..588a655d98 100644 --- a/cassandra/io/__init__.py +++ b/cassandra/io/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index d2ea62cc3c..e1bcafb39e 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index c51bfd7591..94e1e49544 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -1,11 +1,13 @@ # Copyright 2014 Symantec Corporation -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index 4f1f158aa7..8ad4ee99e7 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 76a830270d..275f79c374 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/io/twistedreactor.py b/cassandra/io/twistedreactor.py index e4605a7446..b55ac4d1a3 100644 --- a/cassandra/io/twistedreactor.py +++ b/cassandra/io/twistedreactor.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/ioutils.pyx b/cassandra/ioutils.pyx index b0ab4f16cb..91c2bf9542 100644 --- a/cassandra/ioutils.pyx +++ b/cassandra/ioutils.pyx @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/marshal.py b/cassandra/marshal.py index c60944b6c9..e8733f0544 100644 --- a/cassandra/marshal.py +++ b/cassandra/marshal.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/metadata.py b/cassandra/metadata.py index f52bfd9317..2c13f92e42 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/metrics.py b/cassandra/metrics.py index d3fe4a6fc3..a1eadc1fc4 100644 --- a/cassandra/metrics.py +++ b/cassandra/metrics.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/numpy_parser.pyx b/cassandra/numpy_parser.pyx index 030c2c65c7..2377258b36 100644 --- a/cassandra/numpy_parser.pyx +++ b/cassandra/numpy_parser.pyx @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/obj_parser.pyx b/cassandra/obj_parser.pyx index cf43771dd7..f1bfb551ef 100644 --- a/cassandra/obj_parser.pyx +++ b/cassandra/obj_parser.pyx @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/parsing.pxd b/cassandra/parsing.pxd index 27dc368b07..1b3ed3dcbf 100644 --- a/cassandra/parsing.pxd +++ b/cassandra/parsing.pxd @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/parsing.pyx b/cassandra/parsing.pyx index 954767d227..085544a362 100644 --- a/cassandra/parsing.pyx +++ b/cassandra/parsing.pyx @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/policies.py b/cassandra/policies.py index 2357639c48..d6f7063e7a 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/pool.py b/cassandra/pool.py index 2ffe66e7af..37fdaee96b 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 3e4e984410..510aea44a8 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/query.py b/cassandra/query.py index 4a732df275..40e4d63c9e 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/row_parser.pyx b/cassandra/row_parser.pyx index 88277a4593..d172f1bcaf 100644 --- a/cassandra/row_parser.pyx +++ b/cassandra/row_parser.pyx @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/segment.py b/cassandra/segment.py index 78161fe520..2d7a107566 100644 --- a/cassandra/segment.py +++ b/cassandra/segment.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/timestamps.py b/cassandra/timestamps.py index d11359cf13..e2a2c1ea4c 100644 --- a/cassandra/timestamps.py +++ b/cassandra/timestamps.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/tuple.pxd b/cassandra/tuple.pxd index 08d95b6c1f..b519e177bb 100644 --- a/cassandra/tuple.pxd +++ b/cassandra/tuple.pxd @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/type_codes.pxd b/cassandra/type_codes.pxd index 076cacd3de..336263b83c 100644 --- a/cassandra/type_codes.pxd +++ b/cassandra/type_codes.pxd @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/cassandra/util.py b/cassandra/util.py index f069c439f7..f973912574 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/example_core.py b/example_core.py index 01c766e109..56e8924d1d 100644 --- a/example_core.py +++ b/example_core.py @@ -1,12 +1,14 @@ #!/usr/bin/env python -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/example_mapper.py b/example_mapper.py index 35100471c7..8105dbe2b1 100755 --- a/example_mapper.py +++ b/example_mapper.py @@ -1,12 +1,14 @@ #!/usr/bin/env python -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/examples/concurrent_executions/execute_async_with_queue.py b/examples/concurrent_executions/execute_async_with_queue.py index 72d2c101cb..44a91a530c 100644 --- a/examples/concurrent_executions/execute_async_with_queue.py +++ b/examples/concurrent_executions/execute_async_with_queue.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/examples/concurrent_executions/execute_with_threads.py b/examples/concurrent_executions/execute_with_threads.py index e3c80f5d6b..69126de6ec 100644 --- a/examples/concurrent_executions/execute_with_threads.py +++ b/examples/concurrent_executions/execute_with_threads.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/examples/request_init_listener.py b/examples/request_init_listener.py index fcbaf18ad7..6cce4953e2 100644 --- a/examples/request_init_listener.py +++ b/examples/request_init_listener.py @@ -1,11 +1,13 @@ #!/usr/bin/env python -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/setup.py b/setup.py index ef735b7566..5144e90501 100644 --- a/setup.py +++ b/setup.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -396,7 +398,7 @@ def run_setup(extensions): setup( name='cassandra-driver', version=__version__, - description=' DataStax Driver for Apache Cassandra', + description='Apache Cassandra Python Driver', long_description=long_description, long_description_content_type='text/x-rst', url='http://github.com/datastax/python-driver', diff --git a/tests/__init__.py b/tests/__init__.py index 4735bbd383..7799b51399 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 4185f52756..3b0103db31 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/advanced/__init__.py b/tests/integration/advanced/__init__.py index 1238d2ed72..b1ed70f157 100644 --- a/tests/integration/advanced/__init__.py +++ b/tests/integration/advanced/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/advanced/graph/__init__.py b/tests/integration/advanced/graph/__init__.py index cc40c6906a..71554c9bad 100644 --- a/tests/integration/advanced/graph/__init__.py +++ b/tests/integration/advanced/graph/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/advanced/graph/fluent/__init__.py b/tests/integration/advanced/graph/fluent/__init__.py index 155de026c5..1c07cd46c0 100644 --- a/tests/integration/advanced/graph/fluent/__init__.py +++ b/tests/integration/advanced/graph/fluent/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/advanced/graph/fluent/test_graph.py b/tests/integration/advanced/graph/fluent/test_graph.py index 911e6d5d57..a2c01affb3 100644 --- a/tests/integration/advanced/graph/fluent/test_graph.py +++ b/tests/integration/advanced/graph/fluent/test_graph.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/advanced/graph/fluent/test_graph_explicit_execution.py b/tests/integration/advanced/graph/fluent/test_graph_explicit_execution.py index 1a5846203d..a5dd4306c5 100644 --- a/tests/integration/advanced/graph/fluent/test_graph_explicit_execution.py +++ b/tests/integration/advanced/graph/fluent/test_graph_explicit_execution.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/advanced/graph/fluent/test_graph_implicit_execution.py b/tests/integration/advanced/graph/fluent/test_graph_implicit_execution.py index 50e6795867..1407dd1ea3 100644 --- a/tests/integration/advanced/graph/fluent/test_graph_implicit_execution.py +++ b/tests/integration/advanced/graph/fluent/test_graph_implicit_execution.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/advanced/graph/fluent/test_search.py b/tests/integration/advanced/graph/fluent/test_search.py index d50016d576..b6857f3560 100644 --- a/tests/integration/advanced/graph/fluent/test_search.py +++ b/tests/integration/advanced/graph/fluent/test_search.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/advanced/graph/test_graph.py b/tests/integration/advanced/graph/test_graph.py index 7f55229911..3624b5e1ef 100644 --- a/tests/integration/advanced/graph/test_graph.py +++ b/tests/integration/advanced/graph/test_graph.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/advanced/graph/test_graph_cont_paging.py b/tests/integration/advanced/graph/test_graph_cont_paging.py index 065d01d939..17c43c4e3d 100644 --- a/tests/integration/advanced/graph/test_graph_cont_paging.py +++ b/tests/integration/advanced/graph/test_graph_cont_paging.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/advanced/graph/test_graph_datatype.py b/tests/integration/advanced/graph/test_graph_datatype.py index 8a261c94d9..0fda2f0d44 100644 --- a/tests/integration/advanced/graph/test_graph_datatype.py +++ b/tests/integration/advanced/graph/test_graph_datatype.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/advanced/graph/test_graph_query.py b/tests/integration/advanced/graph/test_graph_query.py index 3496c9c828..5bad1b71c5 100644 --- a/tests/integration/advanced/graph/test_graph_query.py +++ b/tests/integration/advanced/graph/test_graph_query.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/advanced/test_adv_metadata.py b/tests/integration/advanced/test_adv_metadata.py index 8228bfe220..80309302f0 100644 --- a/tests/integration/advanced/test_adv_metadata.py +++ b/tests/integration/advanced/test_adv_metadata.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/advanced/test_auth.py b/tests/integration/advanced/test_auth.py index cf8b66df55..df1f385f74 100644 --- a/tests/integration/advanced/test_auth.py +++ b/tests/integration/advanced/test_auth.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/advanced/test_cont_paging.py b/tests/integration/advanced/test_cont_paging.py index 191d6f1faf..0f64835674 100644 --- a/tests/integration/advanced/test_cont_paging.py +++ b/tests/integration/advanced/test_cont_paging.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/advanced/test_cqlengine_where_operators.py b/tests/integration/advanced/test_cqlengine_where_operators.py index b2e4d4ba9e..b39cde0f02 100644 --- a/tests/integration/advanced/test_cqlengine_where_operators.py +++ b/tests/integration/advanced/test_cqlengine_where_operators.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/advanced/test_geometry.py b/tests/integration/advanced/test_geometry.py index f40e27bf48..3bbf04bb7a 100644 --- a/tests/integration/advanced/test_geometry.py +++ b/tests/integration/advanced/test_geometry.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/advanced/test_spark.py b/tests/integration/advanced/test_spark.py index ca37dc6b53..197f99c934 100644 --- a/tests/integration/advanced/test_spark.py +++ b/tests/integration/advanced/test_spark.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/__init__.py b/tests/integration/cqlengine/__init__.py index cd8f031ed1..5148d6417f 100644 --- a/tests/integration/cqlengine/__init__.py +++ b/tests/integration/cqlengine/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/advanced/__init__.py b/tests/integration/cqlengine/advanced/__init__.py index 386372eb4a..588a655d98 100644 --- a/tests/integration/cqlengine/advanced/__init__.py +++ b/tests/integration/cqlengine/advanced/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/advanced/test_cont_paging.py b/tests/integration/cqlengine/advanced/test_cont_paging.py index 89e05950e3..82b0818fae 100644 --- a/tests/integration/cqlengine/advanced/test_cont_paging.py +++ b/tests/integration/cqlengine/advanced/test_cont_paging.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/base.py b/tests/integration/cqlengine/base.py index bdb62aa2a3..1b99005fc4 100644 --- a/tests/integration/cqlengine/base.py +++ b/tests/integration/cqlengine/base.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/columns/__init__.py b/tests/integration/cqlengine/columns/__init__.py index 386372eb4a..588a655d98 100644 --- a/tests/integration/cqlengine/columns/__init__.py +++ b/tests/integration/cqlengine/columns/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/columns/test_container_columns.py b/tests/integration/cqlengine/columns/test_container_columns.py index 1f51770eac..abdbb6185b 100644 --- a/tests/integration/cqlengine/columns/test_container_columns.py +++ b/tests/integration/cqlengine/columns/test_container_columns.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/columns/test_counter_column.py b/tests/integration/cqlengine/columns/test_counter_column.py index 95792dd452..e68af62050 100644 --- a/tests/integration/cqlengine/columns/test_counter_column.py +++ b/tests/integration/cqlengine/columns/test_counter_column.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/columns/test_static_column.py b/tests/integration/cqlengine/columns/test_static_column.py index 0e8ace8c8f..8d16ec6227 100644 --- a/tests/integration/cqlengine/columns/test_static_column.py +++ b/tests/integration/cqlengine/columns/test_static_column.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 32f20d52ff..48ae74b5ab 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/columns/test_value_io.py b/tests/integration/cqlengine/columns/test_value_io.py index 758ca714a6..faca854fdb 100644 --- a/tests/integration/cqlengine/columns/test_value_io.py +++ b/tests/integration/cqlengine/columns/test_value_io.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/connections/__init__.py b/tests/integration/cqlengine/connections/__init__.py index 2c9ca172f8..635f0d9e60 100644 --- a/tests/integration/cqlengine/connections/__init__.py +++ b/tests/integration/cqlengine/connections/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/connections/test_connection.py b/tests/integration/cqlengine/connections/test_connection.py index 5d6fb248d6..2235fc0c56 100644 --- a/tests/integration/cqlengine/connections/test_connection.py +++ b/tests/integration/cqlengine/connections/test_connection.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/management/__init__.py b/tests/integration/cqlengine/management/__init__.py index 386372eb4a..588a655d98 100644 --- a/tests/integration/cqlengine/management/__init__.py +++ b/tests/integration/cqlengine/management/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/management/test_compaction_settings.py b/tests/integration/cqlengine/management/test_compaction_settings.py index ac56e4b074..fbb5870ebb 100644 --- a/tests/integration/cqlengine/management/test_compaction_settings.py +++ b/tests/integration/cqlengine/management/test_compaction_settings.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index eafcf5de50..c424c187ce 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/model/__init__.py b/tests/integration/cqlengine/model/__init__.py index 386372eb4a..588a655d98 100644 --- a/tests/integration/cqlengine/model/__init__.py +++ b/tests/integration/cqlengine/model/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/model/test_class_construction.py b/tests/integration/cqlengine/model/test_class_construction.py index dae97c4438..00051d9248 100644 --- a/tests/integration/cqlengine/model/test_class_construction.py +++ b/tests/integration/cqlengine/model/test_class_construction.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/model/test_equality_operations.py b/tests/integration/cqlengine/model/test_equality_operations.py index 3b40ed4bf3..89045d7714 100644 --- a/tests/integration/cqlengine/model/test_equality_operations.py +++ b/tests/integration/cqlengine/model/test_equality_operations.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/model/test_model.py b/tests/integration/cqlengine/model/test_model.py index 168de4a928..c2c4906441 100644 --- a/tests/integration/cqlengine/model/test_model.py +++ b/tests/integration/cqlengine/model/test_model.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index ce43422e28..9cff0af6a6 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/model/test_polymorphism.py b/tests/integration/cqlengine/model/test_polymorphism.py index f27703367d..fc5e9c57ff 100644 --- a/tests/integration/cqlengine/model/test_polymorphism.py +++ b/tests/integration/cqlengine/model/test_polymorphism.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index 7063df8caa..bab9c51c1f 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/model/test_updates.py b/tests/integration/cqlengine/model/test_updates.py index b03f6a5444..096417baac 100644 --- a/tests/integration/cqlengine/model/test_updates.py +++ b/tests/integration/cqlengine/model/test_updates.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/model/test_value_lists.py b/tests/integration/cqlengine/model/test_value_lists.py index 0c913158cf..a6fc0b25f3 100644 --- a/tests/integration/cqlengine/model/test_value_lists.py +++ b/tests/integration/cqlengine/model/test_value_lists.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/operators/__init__.py b/tests/integration/cqlengine/operators/__init__.py index 05a41c46fd..9d1d6564dc 100644 --- a/tests/integration/cqlengine/operators/__init__.py +++ b/tests/integration/cqlengine/operators/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/operators/test_where_operators.py b/tests/integration/cqlengine/operators/test_where_operators.py index 1e0134dbac..808e14df04 100644 --- a/tests/integration/cqlengine/operators/test_where_operators.py +++ b/tests/integration/cqlengine/operators/test_where_operators.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/query/__init__.py b/tests/integration/cqlengine/query/__init__.py index 386372eb4a..588a655d98 100644 --- a/tests/integration/cqlengine/query/__init__.py +++ b/tests/integration/cqlengine/query/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/query/test_batch_query.py b/tests/integration/cqlengine/query/test_batch_query.py index db9bab8588..d3cddc0c7e 100644 --- a/tests/integration/cqlengine/query/test_batch_query.py +++ b/tests/integration/cqlengine/query/test_batch_query.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/query/test_datetime_queries.py b/tests/integration/cqlengine/query/test_datetime_queries.py index ba1c90bb9e..8225b2d9f3 100644 --- a/tests/integration/cqlengine/query/test_datetime_queries.py +++ b/tests/integration/cqlengine/query/test_datetime_queries.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/query/test_named.py b/tests/integration/cqlengine/query/test_named.py index eb85bbbb85..b6ba23a2e1 100644 --- a/tests/integration/cqlengine/query/test_named.py +++ b/tests/integration/cqlengine/query/test_named.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/query/test_queryoperators.py b/tests/integration/cqlengine/query/test_queryoperators.py index fd148bafcf..8f0dae06e7 100644 --- a/tests/integration/cqlengine/query/test_queryoperators.py +++ b/tests/integration/cqlengine/query/test_queryoperators.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 9e16a63fa8..d09d7eeb04 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/query/test_updates.py b/tests/integration/cqlengine/query/test_updates.py index fb6082bfe2..b0b9155ea2 100644 --- a/tests/integration/cqlengine/query/test_updates.py +++ b/tests/integration/cqlengine/query/test_updates.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/statements/__init__.py b/tests/integration/cqlengine/statements/__init__.py index 2c9ca172f8..635f0d9e60 100644 --- a/tests/integration/cqlengine/statements/__init__.py +++ b/tests/integration/cqlengine/statements/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/statements/test_assignment_clauses.py b/tests/integration/cqlengine/statements/test_assignment_clauses.py index 82bf067cb4..c6d75a447e 100644 --- a/tests/integration/cqlengine/statements/test_assignment_clauses.py +++ b/tests/integration/cqlengine/statements/test_assignment_clauses.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/statements/test_base_clause.py b/tests/integration/cqlengine/statements/test_base_clause.py index 351983806b..cbba1ae36e 100644 --- a/tests/integration/cqlengine/statements/test_base_clause.py +++ b/tests/integration/cqlengine/statements/test_base_clause.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/statements/test_base_statement.py b/tests/integration/cqlengine/statements/test_base_statement.py index ef5f3b2585..211d76cf5c 100644 --- a/tests/integration/cqlengine/statements/test_base_statement.py +++ b/tests/integration/cqlengine/statements/test_base_statement.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/statements/test_delete_statement.py b/tests/integration/cqlengine/statements/test_delete_statement.py index 745881f42f..433fa759ac 100644 --- a/tests/integration/cqlengine/statements/test_delete_statement.py +++ b/tests/integration/cqlengine/statements/test_delete_statement.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/statements/test_insert_statement.py b/tests/integration/cqlengine/statements/test_insert_statement.py index 45485af912..f3f6b4fd92 100644 --- a/tests/integration/cqlengine/statements/test_insert_statement.py +++ b/tests/integration/cqlengine/statements/test_insert_statement.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/statements/test_select_statement.py b/tests/integration/cqlengine/statements/test_select_statement.py index 26c9c804cb..9478202786 100644 --- a/tests/integration/cqlengine/statements/test_select_statement.py +++ b/tests/integration/cqlengine/statements/test_select_statement.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/statements/test_update_statement.py b/tests/integration/cqlengine/statements/test_update_statement.py index 4429625bf4..4c6966b10f 100644 --- a/tests/integration/cqlengine/statements/test_update_statement.py +++ b/tests/integration/cqlengine/statements/test_update_statement.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/statements/test_where_clause.py b/tests/integration/cqlengine/statements/test_where_clause.py index 0090fa0123..76eab13c3e 100644 --- a/tests/integration/cqlengine/statements/test_where_clause.py +++ b/tests/integration/cqlengine/statements/test_where_clause.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/test_batch_query.py b/tests/integration/cqlengine/test_batch_query.py index 2d30bd0f53..26f312c50a 100644 --- a/tests/integration/cqlengine/test_batch_query.py +++ b/tests/integration/cqlengine/test_batch_query.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/test_connections.py b/tests/integration/cqlengine/test_connections.py index 28a0323f24..e767ece617 100644 --- a/tests/integration/cqlengine/test_connections.py +++ b/tests/integration/cqlengine/test_connections.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/test_consistency.py b/tests/integration/cqlengine/test_consistency.py index c9797184f2..3a6485eaed 100644 --- a/tests/integration/cqlengine/test_consistency.py +++ b/tests/integration/cqlengine/test_consistency.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/test_context_query.py b/tests/integration/cqlengine/test_context_query.py index 6f2a161352..bb226f58ce 100644 --- a/tests/integration/cqlengine/test_context_query.py +++ b/tests/integration/cqlengine/test_context_query.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/test_ifexists.py b/tests/integration/cqlengine/test_ifexists.py index 68efe077ba..32f48b58ff 100644 --- a/tests/integration/cqlengine/test_ifexists.py +++ b/tests/integration/cqlengine/test_ifexists.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/test_ifnotexists.py b/tests/integration/cqlengine/test_ifnotexists.py index b2f2fd98c0..793ca80355 100644 --- a/tests/integration/cqlengine/test_ifnotexists.py +++ b/tests/integration/cqlengine/test_ifnotexists.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/test_lwt_conditional.py b/tests/integration/cqlengine/test_lwt_conditional.py index 06fbf465da..91edce44c1 100644 --- a/tests/integration/cqlengine/test_lwt_conditional.py +++ b/tests/integration/cqlengine/test_lwt_conditional.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/test_timestamp.py b/tests/integration/cqlengine/test_timestamp.py index a1a711fc67..c68fc8fa5b 100644 --- a/tests/integration/cqlengine/test_timestamp.py +++ b/tests/integration/cqlengine/test_timestamp.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/cqlengine/test_ttl.py b/tests/integration/cqlengine/test_ttl.py index 47359a03e3..0e0f8d2c28 100644 --- a/tests/integration/cqlengine/test_ttl.py +++ b/tests/integration/cqlengine/test_ttl.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/datatype_utils.py b/tests/integration/datatype_utils.py index 1f7fb50a05..a4c4cdb4d8 100644 --- a/tests/integration/datatype_utils.py +++ b/tests/integration/datatype_utils.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/long/__init__.py b/tests/integration/long/__init__.py index 19e7ed2c64..f369b97a81 100644 --- a/tests/integration/long/__init__.py +++ b/tests/integration/long/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/long/test_consistency.py b/tests/integration/long/test_consistency.py index 0b9ebab3ed..dfb30297f0 100644 --- a/tests/integration/long/test_consistency.py +++ b/tests/integration/long/test_consistency.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py index ea8897185a..c4751657e8 100644 --- a/tests/integration/long/test_failure_types.py +++ b/tests/integration/long/test_failure_types.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/long/test_ipv6.py b/tests/integration/long/test_ipv6.py index 4a741b70b3..e20f11cc9c 100644 --- a/tests/integration/long/test_ipv6.py +++ b/tests/integration/long/test_ipv6.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/long/test_large_data.py b/tests/integration/long/test_large_data.py index 59873204a4..8ff482271e 100644 --- a/tests/integration/long/test_large_data.py +++ b/tests/integration/long/test_large_data.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index 7848a21b1d..7cb173643c 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/long/test_policies.py b/tests/integration/long/test_policies.py index 680d0d7980..751c6131ec 100644 --- a/tests/integration/long/test_policies.py +++ b/tests/integration/long/test_policies.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/long/test_schema.py b/tests/integration/long/test_schema.py index f1cc80a17a..4e6784a967 100644 --- a/tests/integration/long/test_schema.py +++ b/tests/integration/long/test_schema.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index 0e39cb21ad..5d86063d3e 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/long/utils.py b/tests/integration/long/utils.py index 58c3241a42..cdbb177ec4 100644 --- a/tests/integration/long/utils.py +++ b/tests/integration/long/utils.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/simulacron/advanced/__init__.py b/tests/integration/simulacron/advanced/__init__.py index 2c9ca172f8..635f0d9e60 100644 --- a/tests/integration/simulacron/advanced/__init__.py +++ b/tests/integration/simulacron/advanced/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/simulacron/advanced/test_insights.py b/tests/integration/simulacron/advanced/test_insights.py index 5ddae4ec7c..07005a479b 100644 --- a/tests/integration/simulacron/advanced/test_insights.py +++ b/tests/integration/simulacron/advanced/test_insights.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/simulacron/test_backpressure.py b/tests/integration/simulacron/test_backpressure.py index 69c38da8fe..0418c05814 100644 --- a/tests/integration/simulacron/test_backpressure.py +++ b/tests/integration/simulacron/test_backpressure.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/simulacron/test_cluster.py b/tests/integration/simulacron/test_cluster.py index f859a5dd05..2dfbc1f786 100644 --- a/tests/integration/simulacron/test_cluster.py +++ b/tests/integration/simulacron/test_cluster.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index 6af180af27..de8060da2d 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/simulacron/test_empty_column.py b/tests/integration/simulacron/test_empty_column.py index 046aaacf79..38d4c0f2a9 100644 --- a/tests/integration/simulacron/test_empty_column.py +++ b/tests/integration/simulacron/test_empty_column.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/simulacron/test_endpoint.py b/tests/integration/simulacron/test_endpoint.py index 9e2d91b6d3..6ab190091d 100644 --- a/tests/integration/simulacron/test_endpoint.py +++ b/tests/integration/simulacron/test_endpoint.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/simulacron/test_policies.py b/tests/integration/simulacron/test_policies.py index 6d0d081889..a41fd54c59 100644 --- a/tests/integration/simulacron/test_policies.py +++ b/tests/integration/simulacron/test_policies.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/standard/__init__.py b/tests/integration/standard/__init__.py index 1f14bd6ec4..13d6eb6071 100644 --- a/tests/integration/standard/__init__.py +++ b/tests/integration/standard/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/standard/column_encryption/test_policies.py b/tests/integration/standard/column_encryption/test_policies.py index 325c19cf3a..0d692ac5c1 100644 --- a/tests/integration/standard/column_encryption/test_policies.py +++ b/tests/integration/standard/column_encryption/test_policies.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index 94f77a6916..2d47a93529 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/standard/test_authentication_misconfiguration.py b/tests/integration/standard/test_authentication_misconfiguration.py index 546141d801..a2e2c019a5 100644 --- a/tests/integration/standard/test_authentication_misconfiguration.py +++ b/tests/integration/standard/test_authentication_misconfiguration.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/standard/test_client_warnings.py b/tests/integration/standard/test_client_warnings.py index 8d6818c91a..d20251772a 100644 --- a/tests/integration/standard/test_client_warnings.py +++ b/tests/integration/standard/test_client_warnings.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 4c286426a5..c6fc2a717f 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/standard/test_concurrent.py b/tests/integration/standard/test_concurrent.py index c076d9f553..c935763bcb 100644 --- a/tests/integration/standard/test_concurrent.py +++ b/tests/integration/standard/test_concurrent.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index b109728921..e7177d8770 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/standard/test_custom_cluster.py b/tests/integration/standard/test_custom_cluster.py index d0f10d51db..bb3f716984 100644 --- a/tests/integration/standard/test_custom_cluster.py +++ b/tests/integration/standard/test_custom_cluster.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/standard/test_custom_payload.py b/tests/integration/standard/test_custom_payload.py index a97efeaa68..374bee9046 100644 --- a/tests/integration/standard/test_custom_payload.py +++ b/tests/integration/standard/test_custom_payload.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index 68ef240795..5c75684787 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/standard/test_dse.py b/tests/integration/standard/test_dse.py index 7b96094b3f..0a339b6b3d 100644 --- a/tests/integration/standard/test_dse.py +++ b/tests/integration/standard/test_dse.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 573fc99d56..8f7ba04883 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 12c13a76fa..c33ea26573 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index 696bc46856..bb69243212 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index c25f8b90d2..429aa0efc7 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 1759875857..3ede0ac326 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/standard/test_query_paging.py b/tests/integration/standard/test_query_paging.py index 26c1ca0da6..465ef8b601 100644 --- a/tests/integration/standard/test_query_paging.py +++ b/tests/integration/standard/test_query_paging.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/standard/test_routing.py b/tests/integration/standard/test_routing.py index 47697ee9c8..d41e06df6b 100644 --- a/tests/integration/standard/test_routing.py +++ b/tests/integration/standard/test_routing.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/standard/test_row_factories.py b/tests/integration/standard/test_row_factories.py index 6855e8a410..97f16ea106 100644 --- a/tests/integration/standard/test_row_factories.py +++ b/tests/integration/standard/test_row_factories.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/standard/test_single_interface.py b/tests/integration/standard/test_single_interface.py index 3a49541032..6ff331060a 100644 --- a/tests/integration/standard/test_single_interface.py +++ b/tests/integration/standard/test_single_interface.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index 55bf117ace..3a6de0d4b7 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index ae056d7773..9c3e560b76 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/upgrade/__init__.py b/tests/integration/upgrade/__init__.py index e307a3e3cc..5dfb4fecf8 100644 --- a/tests/integration/upgrade/__init__.py +++ b/tests/integration/upgrade/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/upgrade/test_upgrade.py b/tests/integration/upgrade/test_upgrade.py index 63e1a64b9d..837d8232cb 100644 --- a/tests/integration/upgrade/test_upgrade.py +++ b/tests/integration/upgrade/test_upgrade.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/integration/util.py b/tests/integration/util.py index 64c101d9da..efdf258b2b 100644 --- a/tests/integration/util.py +++ b/tests/integration/util.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/stress_tests/test_load.py b/tests/stress_tests/test_load.py index 3492ff2923..30c384f098 100644 --- a/tests/stress_tests/test_load.py +++ b/tests/stress_tests/test_load.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/stress_tests/test_multi_inserts.py b/tests/stress_tests/test_multi_inserts.py index 84dfc5e6f7..3e32e233f1 100644 --- a/tests/stress_tests/test_multi_inserts.py +++ b/tests/stress_tests/test_multi_inserts.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index 386372eb4a..588a655d98 100644 --- a/tests/unit/__init__.py +++ b/tests/unit/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/advanced/__init__.py b/tests/unit/advanced/__init__.py index 2c9ca172f8..635f0d9e60 100644 --- a/tests/unit/advanced/__init__.py +++ b/tests/unit/advanced/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/advanced/test_auth.py b/tests/unit/advanced/test_auth.py index 840073e9e1..6457810a6f 100644 --- a/tests/unit/advanced/test_auth.py +++ b/tests/unit/advanced/test_auth.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/advanced/test_execution_profile.py b/tests/unit/advanced/test_execution_profile.py index 478322f95b..143a391f72 100644 --- a/tests/unit/advanced/test_execution_profile.py +++ b/tests/unit/advanced/test_execution_profile.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/advanced/test_geometry.py b/tests/unit/advanced/test_geometry.py index d85f1bc293..0e5dc8f93f 100644 --- a/tests/unit/advanced/test_geometry.py +++ b/tests/unit/advanced/test_geometry.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/advanced/test_graph.py b/tests/unit/advanced/test_graph.py index 2870b9b1ee..456addb769 100644 --- a/tests/unit/advanced/test_graph.py +++ b/tests/unit/advanced/test_graph.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/advanced/test_insights.py b/tests/unit/advanced/test_insights.py index 4047fe12b8..e6be6fc3d1 100644 --- a/tests/unit/advanced/test_insights.py +++ b/tests/unit/advanced/test_insights.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/advanced/test_metadata.py b/tests/unit/advanced/test_metadata.py index cf730ebec5..052ad3f465 100644 --- a/tests/unit/advanced/test_metadata.py +++ b/tests/unit/advanced/test_metadata.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/advanced/test_policies.py b/tests/unit/advanced/test_policies.py index 553e7dba87..406263f42b 100644 --- a/tests/unit/advanced/test_policies.py +++ b/tests/unit/advanced/test_policies.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/column_encryption/test_policies.py b/tests/unit/column_encryption/test_policies.py index 38136c69d4..f78701aa2f 100644 --- a/tests/unit/column_encryption/test_policies.py +++ b/tests/unit/column_encryption/test_policies.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/cqlengine/__init__.py b/tests/unit/cqlengine/__init__.py index 386372eb4a..588a655d98 100644 --- a/tests/unit/cqlengine/__init__.py +++ b/tests/unit/cqlengine/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/cqlengine/test_columns.py b/tests/unit/cqlengine/test_columns.py index a7bf74ec23..4d264df07c 100644 --- a/tests/unit/cqlengine/test_columns.py +++ b/tests/unit/cqlengine/test_columns.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/cqlengine/test_connection.py b/tests/unit/cqlengine/test_connection.py index 76266cff23..dd7586aff0 100644 --- a/tests/unit/cqlengine/test_connection.py +++ b/tests/unit/cqlengine/test_connection.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/cqlengine/test_udt.py b/tests/unit/cqlengine/test_udt.py index 0a126513d5..de87bf3833 100644 --- a/tests/unit/cqlengine/test_udt.py +++ b/tests/unit/cqlengine/test_udt.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/cython/__init__.py b/tests/unit/cython/__init__.py index 386372eb4a..588a655d98 100644 --- a/tests/unit/cython/__init__.py +++ b/tests/unit/cython/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/cython/bytesio_testhelper.pyx b/tests/unit/cython/bytesio_testhelper.pyx index 7ba91bc4c0..dcb8c4a4de 100644 --- a/tests/unit/cython/bytesio_testhelper.pyx +++ b/tests/unit/cython/bytesio_testhelper.pyx @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/cython/test_bytesio.py b/tests/unit/cython/test_bytesio.py index cd4ea86f52..08ca284ff3 100644 --- a/tests/unit/cython/test_bytesio.py +++ b/tests/unit/cython/test_bytesio.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/cython/test_types.py b/tests/unit/cython/test_types.py index 545b82fc11..4ae18639f6 100644 --- a/tests/unit/cython/test_types.py +++ b/tests/unit/cython/test_types.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/cython/test_utils.py b/tests/unit/cython/test_utils.py index 0e79c235d8..e43ae343f4 100644 --- a/tests/unit/cython/test_utils.py +++ b/tests/unit/cython/test_utils.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/cython/types_testhelper.pyx b/tests/unit/cython/types_testhelper.pyx index 66d2516319..a9252df7ee 100644 --- a/tests/unit/cython/types_testhelper.pyx +++ b/tests/unit/cython/types_testhelper.pyx @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/cython/utils.py b/tests/unit/cython/utils.py index fc21597c7d..de348afffa 100644 --- a/tests/unit/cython/utils.py +++ b/tests/unit/cython/utils.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/cython/utils_testhelper.pyx b/tests/unit/cython/utils_testhelper.pyx index fe67691aa8..8a8294d9c7 100644 --- a/tests/unit/cython/utils_testhelper.pyx +++ b/tests/unit/cython/utils_testhelper.pyx @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/io/__init__.py b/tests/unit/io/__init__.py index 386372eb4a..588a655d98 100644 --- a/tests/unit/io/__init__.py +++ b/tests/unit/io/__init__.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/io/eventlet_utils.py b/tests/unit/io/eventlet_utils.py index 785856be20..ef3e633ac7 100644 --- a/tests/unit/io/eventlet_utils.py +++ b/tests/unit/io/eventlet_utils.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/io/gevent_utils.py b/tests/unit/io/gevent_utils.py index a341fd9385..b458d13170 100644 --- a/tests/unit/io/gevent_utils.py +++ b/tests/unit/io/gevent_utils.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index 06328a6f45..b37df83bf6 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/io/test_eventletreactor.py b/tests/unit/io/test_eventletreactor.py index 21837bb52d..8228884a4a 100644 --- a/tests/unit/io/test_eventletreactor.py +++ b/tests/unit/io/test_eventletreactor.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/io/test_geventreactor.py b/tests/unit/io/test_geventreactor.py index 58aa02869d..9bf0c7895f 100644 --- a/tests/unit/io/test_geventreactor.py +++ b/tests/unit/io/test_geventreactor.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index 6e6f3d8faa..a4050c79c1 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/io/test_twistedreactor.py b/tests/unit/io/test_twistedreactor.py index e545620033..67c4d8eaf3 100644 --- a/tests/unit/io/test_twistedreactor.py +++ b/tests/unit/io/test_twistedreactor.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index 765a561207..d4483d08c7 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/test_auth.py b/tests/unit/test_auth.py index 0a2427c7ff..49607d4e48 100644 --- a/tests/unit/test_auth.py +++ b/tests/unit/test_auth.py @@ -1,11 +1,13 @@ # -*- coding: utf-8 -*- -# # Copyright DataStax, Inc. +# # Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index bc6ae90142..69a65855a0 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/test_concurrent.py b/tests/unit/test_concurrent.py index 9840a22b92..18e8381185 100644 --- a/tests/unit/test_concurrent.py +++ b/tests/unit/test_concurrent.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index 51e6247313..3bca654c55 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index a8a64219e6..618bb42b1f 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/test_exception.py b/tests/unit/test_exception.py index b39b22239c..4758970d9c 100644 --- a/tests/unit/test_exception.py +++ b/tests/unit/test_exception.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/test_host_connection_pool.py b/tests/unit/test_host_connection_pool.py index ded5bd1a86..d8b5ca976e 100644 --- a/tests/unit/test_host_connection_pool.py +++ b/tests/unit/test_host_connection_pool.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index 1fdbfa6a4b..9b44bb5ac2 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 578c493e21..76e47a4331 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/test_orderedmap.py b/tests/unit/test_orderedmap.py index 5d99fc74a8..a26994dd7b 100644 --- a/tests/unit/test_orderedmap.py +++ b/tests/unit/test_orderedmap.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/test_parameter_binding.py b/tests/unit/test_parameter_binding.py index 78f3898e01..fd44728c25 100644 --- a/tests/unit/test_parameter_binding.py +++ b/tests/unit/test_parameter_binding.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 71710c050e..792268cd7f 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/test_protocol.py b/tests/unit/test_protocol.py index 03910bb08a..08516eba9e 100644 --- a/tests/unit/test_protocol.py +++ b/tests/unit/test_protocol.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/test_query.py b/tests/unit/test_query.py index 8a3f00fa9d..2e87da389b 100644 --- a/tests/unit/test_query.py +++ b/tests/unit/test_query.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index ba3bd5b140..f9d32780de 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/test_resultset.py b/tests/unit/test_resultset.py index e3a79c3c0a..340169d198 100644 --- a/tests/unit/test_resultset.py +++ b/tests/unit/test_resultset.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/test_row_factories.py b/tests/unit/test_row_factories.py index 70691ad8fd..0055497a54 100644 --- a/tests/unit/test_row_factories.py +++ b/tests/unit/test_row_factories.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/test_segment.py b/tests/unit/test_segment.py index a494e64414..e94bcf9809 100644 --- a/tests/unit/test_segment.py +++ b/tests/unit/test_segment.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/test_sortedset.py b/tests/unit/test_sortedset.py index 49c3658df8..875485f824 100644 --- a/tests/unit/test_sortedset.py +++ b/tests/unit/test_sortedset.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/test_time_util.py b/tests/unit/test_time_util.py index 2605992d1c..be5c984907 100644 --- a/tests/unit/test_time_util.py +++ b/tests/unit/test_time_util.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/test_timestamps.py b/tests/unit/test_timestamps.py index 151c004c90..676cb6442a 100644 --- a/tests/unit/test_timestamps.py +++ b/tests/unit/test_timestamps.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index 8554d5e356..ba01538b2a 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/test_util_types.py b/tests/unit/test_util_types.py index 7afec29372..779d416923 100644 --- a/tests/unit/test_util_types.py +++ b/tests/unit/test_util_types.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/unit/utils.py b/tests/unit/utils.py index ec9a674799..fc3ce4b481 100644 --- a/tests/unit/utils.py +++ b/tests/unit/utils.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/util.py b/tests/util.py index c28a94b1c7..d44d6c91c8 100644 --- a/tests/util.py +++ b/tests/util.py @@ -1,10 +1,12 @@ -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, From 7596cc98cd4454baa2fedefb294b15cbc0a9f4b9 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Thu, 4 Dec 2025 14:42:50 -0600 Subject: [PATCH 2001/2005] Removing interface to Travis CI (#1261) Patch by Bret McGuire; reviewed by Andy Tolbert and Bret McGuire reference: https://github.com/apache/cassandra-python-driver/pull/1261 --- .travis.yml | 32 -------------------------------- 1 file changed, 32 deletions(-) delete mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 859394584c..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,32 +0,0 @@ -dist: jammy -sudo: false - -language: python -python: - - "3.8" - - "3.9" - - "3.10" - - "3.11" - -env: - - CASS_DRIVER_NO_CYTHON=1 - -addons: - apt: - packages: - - build-essential - - python3-dev - - pypy-dev - - libc-ares-dev - - libev4 - - libev-dev - -install: - - pip install --upgrade setuptools importlib-metadata - - pip install tox-travis - - if [[ $TRAVIS_PYTHON_VERSION != pypy3.5 ]]; then pip install lz4; fi - -script: - - tox - - tox -e gevent_loop - - tox -e eventlet_loop From 6f229a71f38e94f8e1ace9c25d6f753cc987d000 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Fri, 5 Dec 2025 15:02:28 -0600 Subject: [PATCH 2002/2005] Updating a few docs after donation to the ASF. (#1262) patch by Bret McGuire; reviewed by Bret McGuire and Brad Schoening reference: https://github.com/apache/cassandra-python-driver/pull/1262 --- CONTRIBUTING.rst | 9 ++--- README-dev.rst | 96 +++--------------------------------------------- README.rst | 13 +++---- 3 files changed, 14 insertions(+), 104 deletions(-) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index e5da81d74f..f71ebabdbb 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -5,7 +5,8 @@ Contributions are welcome in the form of bug reports or pull requests. Bug Reports ----------- -Quality bug reports are welcome at the `DataStax Python Driver JIRA `_. +Quality bug reports are welcome at the `CASSPYTHON project `_ +of the ASF JIRA. There are plenty of `good resources `_ describing how to create good bug reports. They will not be repeated in detail here, but in general, the bug report include where appropriate: @@ -18,11 +19,7 @@ good bug reports. They will not be repeated in detail here, but in general, the Pull Requests ------------- If you're able to fix a bug yourself, you can `fork the repository `_ and submit a `Pull Request `_ with the fix. -Please include tests demonstrating the issue and fix. For examples of how to run the tests, consult the `dev README `_. - -Contribution License Agreement ------------------------------- -To protect the community, all contributors are required to `sign the DataStax Contribution License Agreement `_. The process is completely electronic and should only take a few minutes. +Please include tests demonstrating the issue and fix. For examples of how to run the tests, consult the `dev README `_. Design and Implementation Guidelines ------------------------------------ diff --git a/README-dev.rst b/README-dev.rst index c619b0beaf..939d3fa480 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -1,5 +1,7 @@ Releasing ========= +Note: the precise details of some of these steps have changed. Leaving this here as a guide only. + * Run the tests and ensure they all pass * Update CHANGELOG.rst * Check for any missing entries @@ -13,7 +15,8 @@ Releasing * Tag the release. For example: ``git tag -a 1.0.0 -m 'version 1.0.0'`` * Push the tag and new ``master``: ``git push origin 1.0.0 ; git push origin master`` * Update the `python-driver` submodule of `python-driver-wheels`, - commit then push. This will trigger TravisCI and the wheels building. + commit then push. +* Trigger the Github Actions necessary to build wheels for the various platforms * For a GA release, upload the package to pypi:: # Clean the working directory @@ -49,85 +52,12 @@ Releasing * this is typically a matter of merging or rebasing onto master * test and push updated branch to origin -* Update the JIRA versions: https://datastax-oss.atlassian.net/plugins/servlet/project-config/PYTHON/versions +* Update the JIRA releases: https://issues.apache.org/jira/projects/CASSPYTHON?selectedItem=com.atlassian.jira.jira-projects-plugin:release-page * add release dates and set version as "released" * Make an announcement on the mailing list -Building the Docs -================= -Sphinx is required to build the docs. You probably want to install through apt, -if possible:: - - sudo apt-get install python-sphinx - -pip may also work:: - - sudo pip install -U Sphinx - -To build the docs, run:: - - python setup.py doc - -Upload the Docs -================= - -This is deprecated. The docs is now only published on https://docs.datastax.com. - -To upload the docs, checkout the ``gh-pages`` branch and copy the entire -contents all of ``docs/_build/X.Y.Z/*`` into the root of the ``gh-pages`` branch -and then push that branch to github. - -For example:: - - git checkout 1.0.0 - python setup.py doc - git checkout gh-pages - cp -R docs/_build/1.0.0/* . - git add --update # add modified files - # Also make sure to add any new documentation files! - git commit -m 'Update docs (version 1.0.0)' - git push origin gh-pages - -If docs build includes errors, those errors may not show up in the next build unless -you have changed the files with errors. It's good to occassionally clear the build -directory and build from scratch:: - - rm -rf docs/_build/* - -Documentor -========== -We now also use another tool called Documentor with Sphinx source to build docs. -This gives us versioned docs with nice integrated search. This is a private tool -of DataStax. - -Dependencies ------------- -Sphinx -~~~~~~ -Installed as described above - -Documentor -~~~~~~~~~~ -Clone and setup Documentor as specified in `the project `_. -This tool assumes Ruby, bundler, and npm are present. - -Building --------- -The setup script expects documentor to be in the system path. You can either add it permanently or run with something -like this:: - - PATH=$PATH:/bin python setup.py doc - -The docs will not display properly just browsing the filesystem in a browser. To view the docs as they would be in most -web servers, use the SimpleHTTPServer module:: - - cd docs/_build/ - python -m SimpleHTTPServer - -Then, browse to `localhost:8000 `_. - Tests ===== @@ -166,7 +96,7 @@ it with the ``PROTOCOL_VERSION`` environment variable:: Testing Multiple Python Versions -------------------------------- -Use tox to test all of Python 3.9 through 3.13 and pypy (this is what TravisCI runs):: +Use tox to test all of Python 3.9 through 3.13 and pypy:: tox @@ -223,17 +153,3 @@ An EAP release is only uploaded on a private server and it is not published on p python setup.py doc * Upload the docs on the EAP download server. - -Adding a New Python Runtime Support -=================================== - -* Add the new python version to our jenkins image: - https://github.com/riptano/openstack-jenkins-drivers/ - -* Add the new python version in the Jenkinsfile and TravisCI configs as appropriate - -* Run the tests and ensure they all pass - * also test all event loops - -* Update the wheels building repo to support that version: - https://github.com/datastax/python-driver-wheels diff --git a/README.rst b/README.rst index 955d07260d..47b5593ee9 100644 --- a/README.rst +++ b/README.rst @@ -63,23 +63,20 @@ community) is now maintained as an integral part of this package. Refer to Contributing ------------ -See `CONTRIBUTING.md `_. - -Error Handling --------------- -While originally written for the Java driver, users may reference the `Cassandra error handling done right blog `_ for resolving error handling scenarios with Apache Cassandra. +See `CONTRIBUTING.rst `_. Reporting Problems ------------------ Please report any bugs and make any feature requests on the -`JIRA `_ issue tracker. +`CASSPYTHON project `_ +of the ASF JIRA. If you would like to contribute, please feel free to open a pull request. Getting Help ------------ -Your best options for getting help with the driver is the -`mailing list `_. +You can talk about the driver, ask questions and get help in the #cassandra-drivers channel on +`ASF Slack `_. License ------- From d0407ea0a9d51be1498a93d9d62ac3a9a0bacfa2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Hannes=20J=C3=A4rrendal?= Date: Tue, 11 Nov 2025 12:03:12 +0100 Subject: [PATCH 2003/2005] PYTHON-1354 do not set timeout to None when calling execute_async in execute_concurrent The default timeout=_NOT_SET will use the request_timeout specified in the execution_profile. This is not the same as setting the timeout to None. This will instead result in having no timeout which is not ideal. If this behaviour really is wanted, the request_timeout can be set to None in the execution_profile instead. patch by osttra-h-jarrendal; reviewed by Bret McGuire and Brad Schoening --- cassandra/concurrent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index 394f462fa4..012f52f954 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -83,7 +83,7 @@ def _execute_next(self): def _execute(self, idx, statement, params): self._exec_depth += 1 try: - future = self.session.execute_async(statement, params, timeout=None, execution_profile=self._execution_profile) + future = self.session.execute_async(statement, params, execution_profile=self._execution_profile) args = (future, idx) future.add_callbacks( callback=self._on_success, callback_args=args, From 8225b73640d4991a685b13451ac4a6525a59695a Mon Sep 17 00:00:00 2001 From: mck Date: Sat, 20 Dec 2025 13:57:41 +0100 Subject: [PATCH 2004/2005] =?UTF-8?q?ninja=20=E2=80=93=20add=20.asf.yaml?= =?UTF-8?q?=20(CASSPYTHON-2)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .asf.yaml | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 .asf.yaml diff --git a/.asf.yaml b/.asf.yaml new file mode 100644 index 0000000000..be276a2e87 --- /dev/null +++ b/.asf.yaml @@ -0,0 +1,36 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +notifications: + commits: commits@cassandra.apache.org + issues: commits@cassandra.apache.org + pullrequests: pr@cassandra.apache.org + jira_options: link worklog + +github: + description: "Python Driver for Apache Cassandra®" + homepage: https://cassandra.apache.org/ + enabled_merge_buttons: + squash: false + merge: false + rebase: true + features: + wiki: false + issues: false + projects: false + autolink_jira: + - CASSPYTHON From 56713fa560c3885c173bb53cf1f65047de132cbd Mon Sep 17 00:00:00 2001 From: mck Date: Sat, 20 Dec 2025 13:57:41 +0100 Subject: [PATCH 2005/2005] =?UTF-8?q?ninja=20=E2=80=93=20add=20.asf.yaml?= =?UTF-8?q?=20(CASSPYTHON-2)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .asf.yaml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.asf.yaml b/.asf.yaml index be276a2e87..0bacf232d1 100644 --- a/.asf.yaml +++ b/.asf.yaml @@ -23,7 +23,7 @@ notifications: github: description: "Python Driver for Apache Cassandra®" - homepage: https://cassandra.apache.org/ + homepage: https://docs.datastax.com/en/developer/python-driver/3.29/index.html enabled_merge_buttons: squash: false merge: false @@ -32,5 +32,10 @@ github: wiki: false issues: false projects: false + discussions: false autolink_jira: + - CASSANDRA - CASSPYTHON + protected_branches: + trunk: + required_linear_history: true